repo_name | path | copies | size | content | license
---|---|---|---|---|---|
yeasy/hyperledger-py | hyperledger/api/transaction.py | 2 | 1751 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TransactionApiMixin(object):
def transaction_get(self, tran_uuid):
""" GET /transactions/{UUID}
Use the /transactions/{UUID} endpoint to retrieve an individual
transaction matching the UUID from the blockchain. The returned
transaction message is defined inside fabric.proto.
```golang
message Transaction {
enum Type {
UNDEFINED = 0;
CHAINCODE_DEPLOY = 1;
CHAINCODE_INVOKE = 2;
CHAINCODE_QUERY = 3;
CHAINCODE_TERMINATE = 4;
}
Type type = 1;
bytes chaincodeID = 2;
bytes payload = 3;
string uuid = 4;
google.protobuf.Timestamp timestamp = 5;
ConfidentialityLevel confidentialityLevel = 6;
bytes nonce = 7;
bytes cert = 8;
bytes signature = 9;
}
```
:param tran_uuid: The uuid of the transaction to retrieve
:return: json body of the transaction info
"""
res = self._get(self._url("/transactions/{0}", tran_uuid))
return self._result(res, json=True)
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/IPython/core/display.py | 7 | 26478 | # -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import struct
from IPython.core.formatters import _safe_get_formatter_method
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
from .displaypub import publish_display_data
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
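# A quick illustration of the merge semantics above (values are illustrative
# assumptions):
#
#   d1 = {'image/png': {'width': 10}}
#   _merge(d1, {'image/png': {'height': 20}})
#   # d1 is now {'image/png': {'width': 10, 'height': 20}}; a plain
#   # dict.update would have replaced the 'image/png' sub-dict entirely.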
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
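# For example, display_png(data, raw=True) defined below reduces to
#   display({'image/png': data}, raw=True, include=['image/png'])
# where data is raw PNG bytes supplied by the caller.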
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
# If _ipython_display_ is defined, use that to display this object.
display_method = _safe_get_formatter_method(obj, '_ipython_display_')
if display_method is not None:
try:
display_method(**kwargs)
except NotImplementedError:
pass
else:
continue
if raw:
publish_display_data('display', obj, metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data('display', format_dict, md_dict)
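# A minimal usage sketch (payloads are illustrative assumptions):
#
#   display({'text/plain': 'hi', 'text/html': '<b>hi</b>'}, raw=True)
#   # publishes the pre-formatted data as-is, while
#   display(my_object, include=['text/plain'])
#   # formats my_object first and sends only its text/plain representation.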
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
subclasses used, so the Png subclass should be used for 'image/png'
data. If the data is a URL, the data will first be downloaded
        and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
class SVG(DisplayObject):
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(TextDisplayObject):
def _repr_json_(self):
return self.data
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. The output area starts hidden, so if
the js appends content to `element` that should be visible, then
it must call `container.show()` to unhide the area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
        css : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
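# For example (the file path is an illustrative assumption):
#   w, h = _pngxy(open('figure.png', 'rb').read())
# The IHDR chunk stores width and height as two consecutive big-endian
# 4-byte integers, hence the '>ii' unpack format.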
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=u'png', embed=None, width=None, height=None, retina=False):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL
            is given, the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
            Note that QtConsole is not able to display images if `embed` is set to `False`.
width : int
Width to which to constrain the image in html
height : int
Height to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if ext is not None:
format = ext.lower()
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
elif isinstance(data, bytes) and format == 'png':
# infer image type from image data header,
# only if format might not have been specified.
if data[:2] == _JPEG:
format = 'jpeg'
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
return u'<img src="%s"%s%s/>' % (self.url, width, height)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
from IPython.utils import io
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
from IPython.kernel.zmq.pylab.config import InlineBackend
# build kwargs, starting with InlineBackend config
kw = {}
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from IPython.kernel.zmq.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
| gpl-3.0 |
tobilg/dcos-commons | tools/github_update.py | 2 | 8749 | #!/usr/bin/python
# Env Vars
# GITHUB_TOKEN: github auth token
# GIT_COMMIT | ghprbActualCommit | sha1 (optional)
# GITHUB_DISABLE (optional): if non-empty, this script performs no action
# GIT_REPOSITORY_ROOT (optional): directory in which to look for .git (unused if GIT_COMMIT is set)
# GITHUB_REPO_PATH (optional): path to repo to update (e.g. mesosphere/spark)
import base64
import json
import logging
import os
import os.path
import pprint
import re
import sys
import subprocess
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
try:
from http.client import HTTPSConnection
except ImportError:
# Python 2
from httplib import HTTPSConnection
class GithubStatusUpdater(object):
def __init__(self, context_label):
self._context_label = context_label
def _get_dotgit_path(self):
'''returns the path to the .git directory for the repo'''
gitdir = '.git'
startdir = os.environ.get('GIT_REPOSITORY_ROOT', '')
if not startdir:
startdir = os.getcwd()
# starting at 'startdir' search up the tree for a directory named '.git':
checkdir = os.path.join(startdir, gitdir)
while not checkdir == '/' + gitdir:
if os.path.isdir(checkdir):
return checkdir
checkdir = os.path.join(os.path.dirname(os.path.dirname(checkdir)), gitdir)
raise Exception('Unable to find {} in any parent of {}. '.format(gitdir, startdir) +
'Run this command within the git repo, or provide GIT_REPOSITORY_ROOT')
def _get_commit_sha(self):
'''returns the sha1 of the commit being reported upon'''
# 1. try 'ghprbActualCommit', 'GIT_COMMIT', and 'sha1' envvars:
commit_sha = os.environ.get('ghprbActualCommit', '')
if not commit_sha:
commit_sha = os.environ.get('GIT_COMMIT', '')
if not commit_sha:
commit_sha = os.environ.get('sha1', '')
if not commit_sha and 'GIT_COMMIT_ENV_NAME' in os.environ:
# 2. grab the commit from the specified custom envvar
commit_sha = os.environ.get(os.environ['GIT_COMMIT_ENV_NAME'], '')
if not commit_sha:
raise Exception('Unable to retrieve git commit id from envvar named "{}". Env is: {}'.format(
os.environ['GIT_COMMIT_ENV_NAME'], os.environ))
if not commit_sha:
# 3. fall back to using current commit according to .git/ (note: not present in teamcity)
dotgit_path = self._get_dotgit_path()
ret = subprocess.Popen(['git', '--git-dir={}'.format(dotgit_path), 'rev-parse', 'HEAD'],
stdout=subprocess.PIPE)
commit_sha = ret.stdout.readline().decode('utf-8').strip()
if not commit_sha:
raise Exception('Failed to retrieve current revision from git: {}'.format(dotgit_path))
return commit_sha
def _get_repo_path(self):
'''returns the repository path, in the form "mesosphere/some-repo"'''
repo_path = os.environ.get('GITHUB_REPO_PATH', '')
if repo_path:
return repo_path
dotgit_path = self._get_dotgit_path()
ret = subprocess.Popen(['git', '--git-dir={}'.format(dotgit_path), 'config', 'remote.origin.url'],
stdout=subprocess.PIPE)
full_url = ret.stdout.readline().decode('utf-8').strip()
# expected url formats:
# 'https://github.com/mesosphere/foo'
# 'git@github.com:/mesosphere/foo.git'
# 'git@github.com:/mesosphere/foo'
# 'git@github.com/mesosphere/foo.git
# 'git@github.com/mesosphere/foo'
# all should result in 'mesosphere/foo'
re_match = re.search('([a-zA-Z0-9-]+/[a-zA-Z0-9-]+)(\\.git)?$', full_url)
if not re_match:
raise Exception('Failed to get remote url from git path {}: no match in {}'.format(
dotgit_path, full_url))
return re_match.group(1)
def _get_details_link_url(self, details_url):
'''returns the url to be included as the details link in the status'''
if not details_url:
details_url = os.environ.get('GITHUB_COMMIT_STATUS_URL', '') # custom URL via env
if not details_url:
details_url = os.environ.get('BUILD_URL', '') # provided by jenkins
if details_url:
details_url += 'console'
if not details_url:
raise Exception(
'Failed to determine URL for details link. ' +
'Provide either GITHUB_COMMIT_STATUS_URL or BUILD_URL in env.')
return details_url
def _get_auth_token(self):
github_token = os.environ.get('GITHUB_TOKEN_REPO_STATUS', '')
if not github_token:
github_token = os.environ.get('GITHUB_TOKEN', '')
if not github_token:
raise Exception(
'Failed to determine auth token to use with GitHub. ' +
'Provide either GITHUB_TOKEN or GITHUB_TOKEN_REPO_STATUS in env.')
encoded_tok = base64.encodestring(github_token.encode('utf-8'))
return encoded_tok.decode('utf-8').rstrip('\n')
def _build_request(self, state, message, details_url = ''):
'''returns everything needed for the HTTP request, except the auth token'''
return {
'method': 'POST',
'path': '/repos/{}/commits/{}/statuses'.format(
self._get_repo_path(),
self._get_commit_sha()),
'headers': {
'User-Agent': 'github_update.py',
'Content-Type': 'application/json',
'Authorization': 'Basic HIDDENTOKEN'}, # replaced within update_query
'payload': {
'context': self._context_label,
'state': state,
'description': message,
'target_url': self._get_details_link_url(details_url)
}
}
def _send_request(self, request, debug = False):
'''sends the provided request which was created by _build_request()'''
request_headers_with_auth = request['headers'].copy()
request_headers_with_auth['Authorization'] = 'Basic {}'.format(self._get_auth_token())
conn = HTTPSConnection('api.github.com')
if debug:
conn.set_debuglevel(999)
conn.request(
request['method'],
request['path'],
body = json.dumps(request['payload']).encode('utf-8'),
headers = request_headers_with_auth)
return conn.getresponse()
def update(self, state, message, details_url = ''):
'''sends an update to github.
returns True on success or False otherwise.
state should be one of 'pending', 'success', 'error', or 'failure'.'''
logger.info('[STATUS] {} {}: {}'.format(self._context_label, state, message))
if details_url:
logger.info('[STATUS] URL: {}'.format(details_url))
if not 'WORKSPACE' in os.environ:
# not running in CI. skip actually sending anything to GitHub
return True
if os.environ.get('GITHUB_DISABLE', ''):
# environment has notifications disabled. skip actually sending anything to GitHub
return True
if not (os.environ.get('GITHUB_COMMIT_STATUS_URL') or os.environ.get('BUILD_URL')):
# CI job did not come from GITHUB
return True
request = self._build_request(state, message, details_url)
response = self._send_request(request)
if response.status < 200 or response.status >= 300:
# log failure, but don't abort the build
logger.error('Got {} response to update request:'.format(response.status))
logger.error('Request:')
logger.error(pprint.pformat(request))
logger.error('Response:')
logger.error(pprint.pformat(response.read()))
            return False
        logger.info('Updated GitHub PR with status: {}'.format(request['path']))
        return True
def print_help(argv):
logger.info('Syntax: {} <state: pending|success|error|failure> <context_label> <status message>'.format(argv[0]))
def main(argv):
if len(argv) < 4:
print_help(argv)
return 1
state = argv[1]
if state != 'pending' \
and state != 'success' \
and state != 'error' \
and state != 'failure':
print_help(argv)
return 1
context_label = argv[2]
message = ' '.join(argv[3:])
GithubStatusUpdater(context_label).update(state, message)
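# Example invocation (context label and message are illustrative):
#   GITHUB_TOKEN=... ./github_update.py pending build "running tests"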
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 |
arvinsingla/CouchPotatoServer | libs/guessit/matcher.py | 94 | 7768 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import PY3, u, base_text_type
from guessit.matchtree import MatchTree
from guessit.textutils import normalize_unicode, clean_string
import logging
log = logging.getLogger(__name__)
class IterativeMatcher(object):
def __init__(self, filename, filetype='autodetect', opts=None, transfo_opts=None):
"""An iterative matcher tries to match different patterns that appear
in the filename.
The 'filetype' argument indicates which type of file you want to match.
If it is 'autodetect', the matcher will try to see whether it can guess
that the file corresponds to an episode, or otherwise will assume it is
a movie.
The recognized 'filetype' values are:
        [ autodetect, subtitle, info, video, movie, moviesubtitle, movieinfo,
          episode, episodesubtitle, episodeinfo ]
The IterativeMatcher works mainly in 2 steps:
First, it splits the filename into a match_tree, which is a tree of groups
which have a semantic meaning, such as episode number, movie title,
etc...
The match_tree created looks like the following:
0000000000000000000000000000000000000000000000000000000000000000000000000000000000 111
0000011111111111112222222222222233333333444444444444444455555555666777777778888888 000
0000000000000000000000000000000001111112011112222333333401123334000011233340000000 000
__________________(The.Prestige).______.[____.HP.______.{__-___}.St{__-___}.Chaps].___
xxxxxttttttttttttt ffffff vvvv xxxxxx ll lll xx xxx ccc
[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv
The first 3 lines indicates the group index in which a char in the
filename is located. So for instance, x264 is the group (0, 4, 1), and
        it corresponds to a video codec, denoted by the letter 'v' in the 4th line.
(for more info, see guess.matchtree.to_string)
Second, it tries to merge all this information into a single object
containing all the found properties, and does some (basic) conflict
resolution when they arise.
When you create the Matcher, you can pass it:
- a list 'opts' of option names, that act as global flags
- a dict 'transfo_opts' of { transfo_name: (transfo_args, transfo_kwargs) }
with which to call the transfo.process() function.
"""
valid_filetypes = ('autodetect', 'subtitle', 'info', 'video',
'movie', 'moviesubtitle', 'movieinfo',
'episode', 'episodesubtitle', 'episodeinfo')
if filetype not in valid_filetypes:
raise ValueError("filetype needs to be one of %s" % valid_filetypes)
if not PY3 and not isinstance(filename, unicode):
log.warning('Given filename to matcher is not unicode...')
filename = filename.decode('utf-8')
filename = normalize_unicode(filename)
if opts is None:
opts = []
if not isinstance(opts, list):
            raise ValueError('opts must be a list of option names! Received: type=%s val=%s'
                             % (type(opts), opts))
if transfo_opts is None:
transfo_opts = {}
if not isinstance(transfo_opts, dict):
            raise ValueError('transfo_opts must be a dict of { transfo_name: (args, kwargs) }. '
                             'Received: type=%s val=%s' % (type(transfo_opts), transfo_opts))
self.match_tree = MatchTree(filename)
# sanity check: make sure we don't process a (mostly) empty string
if clean_string(filename) == '':
return
mtree = self.match_tree
mtree.guess.set('type', filetype, confidence=1.0)
def apply_transfo(transfo_name, *args, **kwargs):
transfo = __import__('guessit.transfo.' + transfo_name,
globals=globals(), locals=locals(),
fromlist=['process'], level=0)
default_args, default_kwargs = transfo_opts.get(transfo_name, ((), {}))
all_args = args or default_args
all_kwargs = dict(default_kwargs)
all_kwargs.update(kwargs) # keep all kwargs merged together
transfo.process(mtree, *all_args, **all_kwargs)
# 1- first split our path into dirs + basename + ext
apply_transfo('split_path_components')
# 2- guess the file type now (will be useful later)
apply_transfo('guess_filetype', filetype)
if mtree.guess['type'] == 'unknown':
return
# 3- split each of those into explicit groups (separated by parentheses
# or square brackets)
apply_transfo('split_explicit_groups')
# 4- try to match information for specific patterns
# NOTE: order needs to comply to the following:
# - website before language (eg: tvu.org.ru vs russian)
# - language before episodes_rexps
# - properties before language (eg: he-aac vs hebrew)
# - release_group before properties (eg: XviD-?? vs xvid)
if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'):
strategy = [ 'guess_date', 'guess_website', 'guess_release_group',
'guess_properties', 'guess_language',
'guess_video_rexps',
'guess_episodes_rexps', 'guess_weak_episodes_rexps' ]
else:
strategy = [ 'guess_date', 'guess_website', 'guess_release_group',
'guess_properties', 'guess_language',
'guess_video_rexps' ]
if 'nolanguage' in opts:
strategy.remove('guess_language')
for name in strategy:
apply_transfo(name)
# more guessers for both movies and episodes
apply_transfo('guess_bonus_features')
apply_transfo('guess_year', skip_first_year=('skip_first_year' in opts))
if 'nocountry' not in opts:
apply_transfo('guess_country')
apply_transfo('guess_idnumber')
# split into '-' separated subgroups (with required separator chars
# around the dash)
apply_transfo('split_on_dash')
# 5- try to identify the remaining unknown groups by looking at their
# position relative to other known elements
if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'):
apply_transfo('guess_episode_info_from_position')
else:
apply_transfo('guess_movie_title_from_position')
# 6- perform some post-processing steps
apply_transfo('post_process')
log.debug('Found match tree:\n%s' % u(mtree))
def matched(self):
return self.match_tree.matched()
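# A minimal usage sketch (the filename is an illustrative assumption):
#
#   matcher = IterativeMatcher(u'The.Prestige.2006.DVDRip.x264.mkv')
#   guess = matcher.matched()
#   # guess is a dict-like Guess holding keys such as 'title', 'year'
#   # and 'videoCodec', each with an associated confidence.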
| gpl-3.0 |
minhphung171093/OpenERP_V7 | openerp/tools/misc.py | 3 | 38733 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: Utilities: tools.misc
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import zipfile
from collections import defaultdict
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
def find_in_path(name):
try:
return which(name)
except IOError:
return None
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
return None
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
args2 = (prog,) + args
return subprocess.call(args2)
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
>>> file_open('hr/report/timesheer.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or has been deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
#----------------------------------------------------------
# SMS
#----------------------------------------------------------
# text must be latin-1 encoded
def sms_send(user, password, api_id, text, to):
import urllib
url = "http://api.urlsms.com/SendSMS.aspx"
#url = "http://196.7.150.220/http/sendmsg"
params = urllib.urlencode({'UserID': user, 'Password': password, 'SenderID': api_id, 'MsgText': text, 'RecipientMobileNo':to})
urllib.urlopen(url+"?"+params)
# FIXME: Use the logger if there is an error
return True
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
ALL_LANGUAGES = {
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_AU': u'English (AU)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'ja_JP': u'Japanese / 日本語',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Flemish (BE) / Vlaams (BE)',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
    Output return: the same number completed with the recursive mod10 key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
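# A small worked example of the recursive modulo-10 key:
#   mod10r('1') == '11'   (report = codec[(1 + 0) % 10] = 9; key = (10 - 9) % 10 = 1)
# Non-digit characters are copied through without affecting the key.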
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
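# A minimal usage sketch (the decorated function is an illustrative
# assumption):
#
#   @logged
#   def compute(a, b=2):
#       return a + b
#
#   compute(1, b=3)  # logs args, kwargs, result and elapsed time at DEBUG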
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def extract_zip_file(zip_file, outdirectory):
zf = zipfile.ZipFile(zip_file, 'r')
out = outdirectory
for path in zf.namelist():
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
if not tgt.endswith(os.sep):
fp = open(tgt, 'wb')
fp.write(zf.read(path))
fp.close()
zf.close()
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
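# A minimal, hedged sketch of the client-side contract described above
# ('Asia/Bangkok' and 'Europe/Paris' are illustrative stand-ins; the real
# server zone comes from common/timezone_get()):
#
#   import pytz
#   from datetime import datetime
#   server_tz = pytz.timezone('Asia/Bangkok')        # from timezone_get()
#   naive = datetime.strptime('2013-01-01 12:00:00', '%Y-%m-%d %H:%M:%S')
#   aware = server_tz.localize(naive)                # attach the server zone
#   local = aware.astimezone(pytz.timezone('Europe/Paris'))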
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
        tz_value = False
        try:
            f = open("/etc/timezone")
            try:
                tz_value = f.read(128).strip()
            finally:
                f.close()
        except Exception:
            pass
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
    #       there is no tzinfo in a datetime value (e.g. when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
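# Hedged sketch of how such a map can be applied; convert_format() is a
# hypothetical helper, not part of this module, and the naive sequential
# replace is for illustration only:
#
#   def convert_format(fmt, mapping=DATETIME_FORMATS_MAP):
#       for pattern, replacement in mapping.items():
#           fmt = fmt.replace(pattern, replacement)
#       return fmt
#
#   convert_format('%D %R')  # -> '%m/%d/%Y %H:%M'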
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
    @param ignore_unparsable_time: if True, return src_tstamp_str unchanged when it
           cannot be parsed using src_format or formatted using dst_format;
           if False, return False in that case.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
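# Hedged usage sketch (the destination zone is illustrative; with the
# hardcoded get_server_timezone() above, the source zone is UTC and pytz
# is assumed to be installed):
#
#   server_to_local_timestamp('2013-01-01 12:00:00',
#                             DEFAULT_SERVER_DATETIME_FORMAT,
#                             DEFAULT_SERVER_DATETIME_FORMAT,
#                             'Europe/Paris')
#   # -> '2013-01-01 13:00:00' (CET is UTC+1 in winter)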
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
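# Doctest-style sketch (not part of the original module):
#
#   >>> list(split_every(2, range(5)))
#   [(0, 1), (2, 3), (4,)]
#   >>> list(split_every(3, 'abcdefg', piece_maker=list))
#   [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]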
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
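# Hedged sketch of the returned mapping (ids and values are illustrative;
# note that many2one values come back as (id, name) tuples, so the key
# used is the bare id):
#
#   get_and_group_by_field(cr, uid, obj, [1, 2, 3], 'company_id')
#   # -> {1: [1, 3], 2: [2]}  i.e. company id -> list of record ids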
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
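# Doctest-style sketch for the dotted attrgetter; the demo class is
# hypothetical and not part of this module:
#
#   >>> class NS(object):
#   ...     def __init__(self, **kw): self.__dict__.update(kw)
#   >>> rec = NS(partner=NS(name='Agrolait'), amount=100)
#   >>> attrgetter('partner.name')(rec)
#   'Agrolait'
#   >>> attrgetter('partner.name', 'amount')(rec)
#   ('Agrolait', 100)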
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
    or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
    Some examples (notice that there are never quotes surrounding
    the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
        do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
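# Doctest-style sketch (Python 2 iteration protocol, as used above):
#
#   >>> s = CountingStream(['a', 'b'])
#   >>> s.next(), s.index
#   ('a', 0)
#   >>> s.next(), s.index
#   ('b', 1)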
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
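# Hedged illustration (assumes -d/--database takes a value in the option
# parser, as in the default strip list above):
#
#   sys.argv = ['openerp-server', '-d', 'mydb', '--xmlrpc-port=8069']
#   stripped_sys_argv()  # -> ['openerp-server', '--xmlrpc-port=8069']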
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CopeX/odoo | openerp/addons/test_workflow/tests/test_workflow.py | 392 | 6232 | # -*- coding: utf-8 -*-
import openerp
from openerp import SUPERUSER_ID
from openerp.tests import common
class test_workflows(common.TransactionCase):
def check_activities(self, model_name, i, names):
""" Check that the record i has workitems in the given activity names.
"""
instance = self.registry('workflow.instance')
workitem = self.registry('workflow.workitem')
# Given the workflow instance associated to the record ...
instance_id = instance.search(
self.cr, SUPERUSER_ID,
[('res_type', '=', model_name), ('res_id', '=', i)])
self.assertTrue( instance_id, 'A workflow instance is expected.')
# ... get all its workitems ...
workitem_ids = workitem.search(
self.cr, SUPERUSER_ID,
[('inst_id', '=', instance_id[0])])
self.assertTrue(
workitem_ids,
'The workflow instance should have workitems.')
        # ... and check the activities they are in against the provided names.
workitem_records = workitem.browse(
self.cr, SUPERUSER_ID, workitem_ids)
self.assertEqual(
sorted([item.act_id.name for item in workitem_records]),
sorted(names))
def check_value(self, model_name, i, value):
""" Check that the record i has the given value.
"""
model = self.registry(model_name)
record = model.read(self.cr, SUPERUSER_ID, [i], ['value'])[0]
self.assertEqual(record['value'], value)
def test_workflow(self):
model = self.registry('test.workflow.model')
trigger = self.registry('test.workflow.trigger')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
# a -> b is just a signal.
model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b')
self.check_activities(model._name, i, ['b'])
# b -> c is a trigger (which is False),
# so we remain in the b activity.
model.trigger(self.cr, SUPERUSER_ID)
self.check_activities(model._name, i, ['b'])
# b -> c is a trigger (which is set to True).
# so we go in c when the trigger is called.
trigger.write(self.cr, SUPERUSER_ID, [1], {'value': True})
model.trigger(self.cr, SUPERUSER_ID)
self.check_activities(model._name, i, ['c'])
        # Placeholder assertion retained from the original test; the
        # meaningful check is the check_activities call just above.
        self.assertEqual(
            True,
            True)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_a(self):
model = self.registry('test.workflow.model.a')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 0)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_b(self):
model = self.registry('test.workflow.model.b')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_c(self):
model = self.registry('test.workflow.model.c')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 0)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_d(self):
model = self.registry('test.workflow.model.d')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_e(self):
model = self.registry('test.workflow.model.e')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_f(self):
model = self.registry('test.workflow.model.f')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b')
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_g(self):
model = self.registry('test.workflow.model.g')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_h(self):
model = self.registry('test.workflow.model.h')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b', 'c'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_i(self):
model = self.registry('test.workflow.model.i')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_j(self):
model = self.registry('test.workflow.model.j')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_k(self):
model = self.registry('test.workflow.model.k')
i = model.create(self.cr, SUPERUSER_ID, {})
        # Non-deterministic: can be b or c
# self.check_activities(model._name, i, ['b'])
# self.check_activities(model._name, i, ['c'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_l(self):
model = self.registry('test.workflow.model.l')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['c', 'c', 'd'])
self.check_value(model._name, i, 3)
model.unlink(self.cr, SUPERUSER_ID, [i])
| agpl-3.0 |
junmin-zhu/chromium-rivertrail | third_party/tlslite/tlslite/utils/dateFuncs.py | 407 | 2181 |
import os
#Functions for manipulating datetime objects
#CCYY-MM-DDThh:mm:ssZ
def parseDateClass(s):
year, month, day = s.split("-")
day, tail = day[:2], day[2:]
hour, minute, second = tail[1:].split(":")
second = second[:2]
year, month, day = int(year), int(month), int(day)
hour, minute, second = int(hour), int(minute), int(second)
return createDateClass(year, month, day, hour, minute, second)
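# Doctest-style sketch (uses the CPython branch defined below):
#
#   >>> printDateClass(parseDateClass("2014-03-01T10:20:30Z"))
#   '2014-03-01T10:20:30Z'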
if os.name != "java":
from datetime import datetime, timedelta
#Helper functions for working with a date/time class
def createDateClass(year, month, day, hour, minute, second):
return datetime(year, month, day, hour, minute, second)
def printDateClass(d):
#Split off fractional seconds, append 'Z'
return d.isoformat().split(".")[0]+"Z"
def getNow():
return datetime.utcnow()
def getHoursFromNow(hours):
return datetime.utcnow() + timedelta(hours=hours)
def getMinutesFromNow(minutes):
return datetime.utcnow() + timedelta(minutes=minutes)
def isDateClassExpired(d):
return d < datetime.utcnow()
def isDateClassBefore(d1, d2):
return d1 < d2
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
import java
import jarray
def createDateClass(year, month, day, hour, minute, second):
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.set(year, month-1, day, hour, minute, second)
return c
def printDateClass(d):
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \
(d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \
d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND))
def getNow():
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.get(c.HOUR) #force refresh?
return c
def getHoursFromNow(hours):
d = getNow()
d.add(d.HOUR, hours)
return d
def isDateClassExpired(d):
n = getNow()
return d.before(n)
def isDateClassBefore(d1, d2):
return d1.before(d2)
| bsd-3-clause |
ychen820/microblog | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/cloudsearch2/test_exceptions.py | 4 | 1399 | import mock
from boto.compat import json
from tests.unit import unittest
from .test_search import HOSTNAME, CloudSearchSearchBaseTest
from boto.cloudsearch2.search import SearchConnection, SearchServiceException
def fake_loads_value_error(content, *args, **kwargs):
"""Callable to generate a fake ValueError"""
raise ValueError("HAHAHA! Totally not simplejson & you gave me bad JSON.")
def fake_loads_json_error(content, *args, **kwargs):
"""Callable to generate a fake JSONDecodeError"""
raise json.JSONDecodeError('Using simplejson & you gave me bad JSON.',
'', 0)
class CloudSearchJSONExceptionTest(CloudSearchSearchBaseTest):
response = '{}'
def test_no_simplejson_value_error(self):
with mock.patch.object(json, 'loads', fake_loads_value_error):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaisesRegexp(SearchServiceException, 'non-json'):
search.search(q='test')
@unittest.skipUnless(hasattr(json, 'JSONDecodeError'),
'requires simplejson')
def test_simplejson_jsondecodeerror(self):
with mock.patch.object(json, 'loads', fake_loads_json_error):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaisesRegexp(SearchServiceException, 'non-json'):
search.search(q='test')
| bsd-3-clause |
yfried/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_image_facts.py | 35 | 4218 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_image_facts
short_description: Gather facts about DigitalOcean images
description:
- This module can be used to gather facts about DigitalOcean provided images.
- These images can be either of type C(distribution), C(application) and C(private).
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
image_type:
description:
      - Specifies the type of image facts to be retrieved.
      - If set to C(application), then facts are gathered related to all application images.
      - If set to C(distribution), then facts are gathered related to all distribution images.
      - If set to C(private), then facts are gathered related to all private images.
      - If not set to any of the above, then facts are gathered related to all images.
default: 'all'
choices: [ 'all', 'application', 'distribution', 'private' ]
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather facts about all images
digital_ocean_image_facts:
image_type: all
oauth_token: "{{ oauth_token }}"
- name: Gather facts about application images
digital_ocean_image_facts:
image_type: application
oauth_token: "{{ oauth_token }}"
- name: Gather facts about distribution images
digital_ocean_image_facts:
image_type: distribution
oauth_token: "{{ oauth_token }}"
- name: Get distribution about image with slug coreos-beta
digital_ocean_image_facts:
register: resp_out
- set_fact:
distribution_name: "{{ item.distribution }}"
with_items: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?slug=='coreos-beta']"
- debug: var=distribution_name
'''
RETURN = '''
data:
description: DigitalOcean image facts
returned: success
type: list
sample: [
{
"created_at": "2018-02-02T07:11:43Z",
"distribution": "CoreOS",
"id": 31434061,
"min_disk_size": 20,
"name": "1662.1.0 (beta)",
"public": true,
"regions": [
"nyc1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1",
"sfo2",
"blr1"
],
"size_gigabytes": 0.42,
"slug": "coreos-beta",
"type": "snapshot"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
image_type = module.params['image_type']
rest = DigitalOceanHelper(module)
base_url = 'images?'
if image_type == 'distribution':
base_url += "type=distribution&"
elif image_type == 'application':
base_url += "type=application&"
elif image_type == 'private':
base_url += "private=true&"
images = rest.get_paginated_data(base_url=base_url, data_key_name='images')
module.exit_json(changed=False, data=images)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
image_type=dict(type='str',
required=False,
choices=['all', 'application', 'distribution', 'private'],
default='all'
)
)
module = AnsibleModule(argument_spec=argument_spec)
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
itsjeyd/edx-platform | lms/djangoapps/courseware/features/problems_setup.py | 12 | 17529 | # pylint: disable=missing-docstring
# EVERY PROBLEM TYPE MUST HAVE THE FOLLOWING:
# -Section in Dictionary containing:
# -factory
# -kwargs
# -(optional metadata)
# -Correct, Incorrect and Unanswered CSS selectors
# -A way to answer the problem correctly and incorrectly
# -A way to check the problem was answered correctly, incorrectly and blank
from lettuce import world
import random
import textwrap
from common import section_location
from capa.tests.response_xml_factory import (
ChoiceResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CodeResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
StringResponseXMLFactory,
)
# Factories from capa.tests.response_xml_factory that we will use
# to generate the problem XML, with the keyword args used to configure
# the output.
# 'correct', 'incorrect', and 'unanswered' keys are lists of CSS selectors
# the presence of any in the list is sufficient
PROBLEM_DICT = {
'drop down': {
'factory': OptionResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Option 2',
'options': ['Option 1', 'Option 2', 'Option 3', 'Option 4'],
'correct_option': 'Option 2'},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'multiple choice': {
'factory': MultipleChoiceResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 3',
'choices': [False, False, True, False],
'choice_names': ['choice_0', 'choice_1', 'choice_2', 'choice_3']},
'correct': ['label.choicegroup_correct', 'span.correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered']},
'checkbox': {
'factory': ChoiceResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choices 1 and 3',
'choice_type': 'checkbox',
'choices': [True, False, True, False, False],
'choice_names': ['Choice 1', 'Choice 2', 'Choice 3', 'Choice 4']},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'radio': {
'factory': ChoiceResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 3',
'choice_type': 'radio',
'choices': [False, False, True, False],
'choice_names': ['Choice 1', 'Choice 2', 'Choice 3', 'Choice 4']},
'correct': ['label.choicegroup_correct', 'span.correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered']},
'string': {
'factory': StringResponseXMLFactory(),
'kwargs': {
'question_text': 'The answer is "correct string"',
'case_sensitive': False,
'answer': 'correct string'},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted']},
'numerical': {
'factory': NumericalResponseXMLFactory(),
'kwargs': {
'question_text': 'The answer is pi + 1',
'answer': '4.14159',
'tolerance': '0.00001',
'math_display': True},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted']},
'formula': {
'factory': FormulaResponseXMLFactory(),
'kwargs': {
'question_text': 'The solution is [mathjax]x^2+2x+y[/mathjax]',
'sample_dict': {'x': (-100, 100), 'y': (-100, 100)},
'num_samples': 10,
'tolerance': 0.00001,
'math_display': True,
'answer': 'x^2+2*x+y'},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted']},
'script': {
'factory': CustomResponseXMLFactory(),
'kwargs': {
'question_text': 'Enter two integers that sum to 10.',
'cfn': 'test_add_to_ten',
'expect': '10',
'num_inputs': 2,
'script': textwrap.dedent("""
def test_add_to_ten(expect,ans):
try:
a1=int(ans[0])
a2=int(ans[1])
except ValueError:
a1=0
a2=0
return (a1+a2)==int(expect)
""")},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted']},
'code': {
'factory': CodeResponseXMLFactory(),
'kwargs': {
'question_text': 'Submit code to an external grader',
'initial_display': 'print "Hello world!"',
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', },
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'radio_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'radiotextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['section.choicetextgroup_correct'],
'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered']},
'checkbox_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'checkboxtextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'image': {
'factory': ImageResponseXMLFactory(),
'kwargs': {
'src': '/static/images/placeholder-image.png',
'rectangle': '(50,50)-(100,100)'
},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']}
}
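# Hedged sketch of how a PROBLEM_DICT entry is consumed; this mirrors what
# add_problem_to_course() does further below:
#
#   entry = PROBLEM_DICT['string']
#   problem_xml = entry['factory'].build_xml(**entry['kwargs'])
#   # 'correct'/'incorrect'/'unanswered' then give the CSS selectors used
#   # by the assertion helpers.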
def answer_problem(course, problem_type, correctness):
# Make sure that the problem has been completely rendered before
# starting to input an answer.
world.wait_for_ajax_complete()
section_loc = section_location(course)
if problem_type == "drop down":
select_name = "input_{}_2_1".format(
section_loc.course_key.make_usage_key('problem', 'drop_down').html_id()
)
option_text = 'Option 2' if correctness == 'correct' else 'Option 3'
world.select_option(select_name, option_text)
elif problem_type == "multiple choice":
if correctness == 'correct':
world.css_check(inputfield(course, 'multiple choice', choice='choice_2'))
else:
world.css_check(inputfield(course, 'multiple choice', choice='choice_1'))
elif problem_type == "checkbox":
if correctness == 'correct':
world.css_check(inputfield(course, 'checkbox', choice='choice_0'))
world.css_check(inputfield(course, 'checkbox', choice='choice_2'))
else:
world.css_check(inputfield(course, 'checkbox', choice='choice_3'))
elif problem_type == 'radio':
if correctness == 'correct':
world.css_check(inputfield(course, 'radio', choice='choice_2'))
else:
world.css_check(inputfield(course, 'radio', choice='choice_1'))
elif problem_type == 'string':
textvalue = 'correct string' if correctness == 'correct' else 'incorrect'
world.css_fill(inputfield(course, 'string'), textvalue)
elif problem_type == 'numerical':
textvalue = "pi + 1" if correctness == 'correct' else str(random.randint(-2, 2))
world.css_fill(inputfield(course, 'numerical'), textvalue)
elif problem_type == 'formula':
textvalue = "x^2+2*x+y" if correctness == 'correct' else 'x^2'
world.css_fill(inputfield(course, 'formula'), textvalue)
elif problem_type == 'script':
# Correct answer is any two integers that sum to 10
first_addend = random.randint(-100, 100)
second_addend = 10 - first_addend
# If we want an incorrect answer, then change
# the second addend so they no longer sum to 10
if correctness == 'incorrect':
second_addend += random.randint(1, 10)
world.css_fill(inputfield(course, 'script', input_num=1), str(first_addend))
world.css_fill(inputfield(course, 'script', input_num=2), str(second_addend))
elif problem_type == 'code':
# The fake xqueue server is configured to respond
# correct / incorrect no matter what we submit.
# Furthermore, since the inline code response uses
# JavaScript to make the code display nicely, it's difficult
        # to programmatically input text
        # (there's no <textarea> we can just fill text into)
# For this reason, we submit the initial code in the response
# (configured in the problem XML above)
pass
elif problem_type == 'radio_text' or problem_type == 'checkbox_text':
input_value = "8" if correctness == 'correct' else "5"
choice = "choiceinput_0bc" if correctness == 'correct' else "choiceinput_1bc"
world.css_fill(
inputfield(
course,
problem_type,
choice="choiceinput_0_numtolerance_input_0"
),
input_value
)
world.css_check(inputfield(course, problem_type, choice=choice))
elif problem_type == 'image':
offset = 25 if correctness == "correct" else -25
def try_click():
problem_html_loc = section_loc.course_key.make_usage_key('problem', 'image').html_id()
image_selector = "#imageinput_{}_2_1".format(problem_html_loc)
input_selector = "#input_{}_2_1".format(problem_html_loc)
world.browser.execute_script('$("body").on("click", function(event) {console.log(event);})')
initial_input = world.css_value(input_selector)
world.wait_for_visible(image_selector)
image = world.css_find(image_selector).first
(image.action_chains
.move_to_element(image._element)
.move_by_offset(offset, offset)
.click()
.perform())
world.wait_for(lambda _: world.css_value(input_selector) != initial_input)
world.retry_on_exception(try_click)
def problem_has_answer(course, problem_type, answer_class):
if problem_type == "drop down":
if answer_class == 'blank':
assert world.is_css_not_present('option[selected="true"]')
else:
actual = world.css_value('option[selected="true"]')
expected = 'Option 2' if answer_class == 'correct' else 'Option 3'
assert actual == expected
elif problem_type == "multiple choice":
if answer_class == 'correct':
assert_submitted(course, 'multiple choice', ['choice_2'])
elif answer_class == 'incorrect':
assert_submitted(course, 'multiple choice', ['choice_1'])
else:
assert_submitted(course, 'multiple choice', [])
elif problem_type == "checkbox":
if answer_class == 'correct':
assert_submitted(course, 'checkbox', ['choice_0', 'choice_2'])
elif answer_class == 'incorrect':
assert_submitted(course, 'checkbox', ['choice_3'])
else:
assert_submitted(course, 'checkbox', [])
elif problem_type == "radio":
if answer_class == 'correct':
assert_submitted(course, 'radio', ['choice_2'])
elif answer_class == 'incorrect':
assert_submitted(course, 'radio', ['choice_1'])
else:
assert_submitted(course, 'radio', [])
elif problem_type == 'string':
if answer_class == 'blank':
expected = ''
else:
expected = 'correct string' if answer_class == 'correct' else 'incorrect'
assert_textfield(course, 'string', expected)
elif problem_type == 'formula':
if answer_class == 'blank':
expected = ''
else:
expected = "x^2+2*x+y" if answer_class == 'correct' else 'x^2'
assert_textfield(course, 'formula', expected)
elif problem_type in ("radio_text", "checkbox_text"):
if answer_class == 'blank':
expected = ('', '')
assert_choicetext_values(course, problem_type, (), expected)
elif answer_class == 'incorrect':
expected = ('5', '')
assert_choicetext_values(course, problem_type, ["choiceinput_1bc"], expected)
else:
expected = ('8', '')
assert_choicetext_values(course, problem_type, ["choiceinput_0bc"], expected)
else:
# The other response types use random data,
# which would be difficult to check
# We trade input value coverage in the other tests for
# input type coverage in this test.
pass
def add_problem_to_course(course, problem_type, extra_meta=None):
'''
Add a problem to the course we have created using factories.
'''
assert problem_type in PROBLEM_DICT
# Generate the problem XML using capa.tests.response_xml_factory
factory_dict = PROBLEM_DICT[problem_type]
problem_xml = factory_dict['factory'].build_xml(**factory_dict['kwargs'])
metadata = {'rerandomize': 'always'} if 'metadata' not in factory_dict else factory_dict['metadata']
if extra_meta:
metadata = dict(metadata, **extra_meta)
# Create a problem item using our generated XML
# We set rerandomize=always in the metadata so that the "Reset" button
# will appear.
category_name = "problem"
return world.ItemFactory.create(
parent_location=section_location(course),
category=category_name,
display_name=str(problem_type),
data=problem_xml,
metadata=metadata
)
def inputfield(course, problem_type, choice=None, input_num=1):
""" Return the css selector for `problem_type`.
For example, if problem_type is 'string', return
the text field for the string problem in the test course.
`choice` is the name of the checkbox input in a group
of checkboxes. """
section_loc = section_location(course)
ptype = problem_type.replace(" ", "_")
# this is necessary due to naming requirement for this problem type
if problem_type in ("radio_text", "checkbox_text"):
selector_template = "input#{}_2_{input}"
else:
selector_template = "input#input_{}_2_{input}"
sel = selector_template.format(
section_loc.course_key.make_usage_key('problem', ptype).html_id(),
input=input_num,
)
if choice is not None:
base = "_choice_" if problem_type == "multiple choice" else "_"
sel = sel + base + str(choice)
# If the input element doesn't exist, fail immediately
assert world.is_css_present(sel)
# Retrieve the input element
return sel
def assert_submitted(course, problem_type, choices):
'''
Assert that choice names given in *choices* are the only
ones submitted.
Works for both radio and checkbox problems
'''
all_choices = ['choice_0', 'choice_1', 'choice_2', 'choice_3']
for this_choice in all_choices:
def submit_problem():
element = world.css_find(inputfield(course, problem_type, choice=this_choice))
if this_choice in choices:
assert element.checked
else:
assert not element.checked
world.retry_on_exception(submit_problem)
def assert_textfield(course, problem_type, expected_text, input_num=1):
element_value = world.css_value(inputfield(course, problem_type, input_num=input_num))
assert element_value == expected_text
def assert_choicetext_values(course, problem_type, choices, expected_values):
"""
Asserts that only the given choices are checked, and given
text fields have a desired value
"""
# Names of the radio buttons or checkboxes
all_choices = ['choiceinput_0bc', 'choiceinput_1bc']
# Names of the numtolerance_inputs
all_inputs = [
"choiceinput_0_numtolerance_input_0",
"choiceinput_1_numtolerance_input_0"
]
for this_choice in all_choices:
element = world.css_find(inputfield(course, problem_type, choice=this_choice))
if this_choice in choices:
assert element.checked
else:
assert not element.checked
for (name, expected) in zip(all_inputs, expected_values):
element = world.css_find(inputfield(course, problem_type, name))
# Remove any trailing spaces that may have been added
assert element.value.strip() == expected
| agpl-3.0 |
mapr/hue | desktop/core/ext-py/Pygments-1.3.1/setup.py | 42 | 2796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image \
formats that PIL supports and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://dev.pocoo.org/hg/pygments-main/archive/tip.tar.gz#egg=Pygments-dev
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
try:
from setuptools import setup, find_packages
have_setuptools = True
except ImportError:
from distutils.core import setup
def find_packages():
return [
'pygments',
'pygments.lexers',
'pygments.formatters',
'pygments.styles',
'pygments.filters',
]
have_setuptools = False
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
if have_setuptools:
add_keywords = dict(
entry_points = {
'console_scripts': ['pygmentize = pygments.cmdline:main'],
},
)
else:
add_keywords = dict(
scripts = ['pygmentize'],
)
setup(
name = 'Pygments',
version = '1.3.1',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
author_email = 'georg@python.org',
description = 'Pygments is a syntax highlighting package written in Python.',
long_description = __doc__,
keywords = 'syntax highlighting',
packages = find_packages(),
platforms = 'any',
zip_safe = False,
include_package_data = True,
classifiers = [
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
],
cmdclass = {'build_py': build_py},
**add_keywords
)
| apache-2.0 |
xiandiancloud/edx-platform | lms/djangoapps/courseware/features/common.py | 16 | 7839 | # pylint: disable=C0111
# pylint: disable=W0621
from __future__ import absolute_import
import time
from lettuce import world, step
from lettuce.django import django_url
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.course_module import CourseDescriptor
from courseware.courses import get_course_by_id
from xmodule import seq_module, vertical_module
from logging import getLogger
logger = getLogger(__name__)
@step('I (.*) capturing of screenshots before and after each step$')
def configure_screenshots_for_all_steps(_step, action):
"""
A step to be used in *.feature files. Enables/disables
automatic saving of screenshots before and after each step in a
scenario.
"""
action = action.strip()
if action == 'enable':
world.auto_capture_screenshots = True
elif action == 'disable':
world.auto_capture_screenshots = False
else:
raise ValueError('Parameter `action` should be one of "enable" or "disable".')
@world.absorb
def capture_screenshot_before_after(func):
"""
A decorator that will take a screenshot before and after the applied
function is run. Use this if you do not want to capture screenshots
for each step in a scenario, but rather want to debug a single function.
"""
def inner(*args, **kwargs):
prefix = round(time.time() * 1000)
world.capture_screenshot("{}_{}_{}".format(
prefix, func.func_name, 'before'
))
ret_val = func(*args, **kwargs)
world.capture_screenshot("{}_{}_{}".format(
prefix, func.func_name, 'after'
))
return ret_val
return inner
@step(u'The course "([^"]*)" exists$')
def create_course(_step, course):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course'
)
# Add a chapter to the course to contain problems
world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
category='chapter',
display_name='Test Chapter',
)
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['CHAPTER'].location,
category='sequential',
display_name='Test Section',
)
@step(u'I am registered for the course "([^"]*)"$')
def i_am_registered_for_the_course(step, course):
# Create the course
create_course(step, course)
# Create the user
world.create_user('robot', 'test')
user = User.objects.get(username='robot')
# If the user is not already enrolled, enroll the user.
# TODO: change to factory
CourseEnrollment.enroll(user, course_id(course))
world.log_in(username='robot', password='test')
@step(u'The course "([^"]*)" has extra tab "([^"]*)"$')
def add_tab_to_course(_step, course, extra_tab_name):
world.ItemFactory.create(
parent_location=course_location(course),
category="static_tab",
display_name=str(extra_tab_name))
@step(u'I am in a course$')
def go_into_course(step):
step.given('I am registered for the course "6.002x"')
step.given('And I am logged in')
step.given('And I click on View Courseware')
def course_id(course_num):
return SlashSeparatedCourseKey(
world.scenario_dict['COURSE'].org,
course_num,
world.scenario_dict['COURSE'].url_name
)
def course_location(course_num):
return world.scenario_dict['COURSE'].location.replace(course=course_num)
def section_location(course_num):
return world.scenario_dict['SECTION'].location.replace(course=course_num)
def visit_scenario_item(item_key):
"""
Go to the courseware page containing the item stored in `world.scenario_dict`
under the key `item_key`
"""
url = django_url(reverse(
'jump_to',
kwargs={
'course_id': world.scenario_dict['COURSE'].id.to_deprecated_string(),
'location': world.scenario_dict[item_key].location.to_deprecated_string(),
}
))
world.browser.visit(url)
def get_courses():
'''
    Returns a dict of lists of courses available, keyed by course.org (i.e. university).
Courses are sorted by course.number.
'''
courses = [c for c in modulestore().get_courses()
if isinstance(c, CourseDescriptor)]
courses = sorted(courses, key=lambda course: course.number)
return courses
def get_courseware_with_tabs(course_id):
"""
Given a course_id (string), return a courseware array of dictionaries for the
    top three levels of navigation. Same as get_courseware() except that it
    includes the tabs on the right-hand main navigation page.
This hides the appropriate courseware as defined by the hide_from_toc field:
chapter.hide_from_toc
    Example (schematic; the implementation below emits a 'tabs' list of
    {'children_count', 'class'} dicts rather than the 'tab_classes' lists shown):
[{
'chapter_name': 'Overview',
'sections': [{
'clickable_tab_count': 0,
'section_name': 'Welcome',
'tab_classes': []
}, {
'clickable_tab_count': 1,
'section_name': 'System Usage Sequence',
'tab_classes': ['VerticalDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Lab0: Using the tools',
'tab_classes': ['HtmlDescriptor', 'HtmlDescriptor', 'CapaDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Circuit Sandbox',
'tab_classes': []
}]
}, {
'chapter_name': 'Week 1',
'sections': [{
'clickable_tab_count': 4,
'section_name': 'Administrivia and Circuit Elements',
'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Basic Circuit Analysis',
'tab_classes': ['CapaDescriptor', 'CapaDescriptor', 'CapaDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Resistor Divider',
'tab_classes': []
}, {
'clickable_tab_count': 0,
'section_name': 'Week 1 Tutorials',
'tab_classes': []
}]
}, {
'chapter_name': 'Midterm Exam',
'sections': [{
'clickable_tab_count': 2,
'section_name': 'Midterm Exam',
'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor']
}]
}]
"""
course = get_course_by_id(course_id)
chapters = [chapter for chapter in course.get_children() if not chapter.hide_from_toc]
courseware = [{'chapter_name': c.display_name_with_default,
'sections': [{'section_name': s.display_name_with_default,
'clickable_tab_count': len(s.get_children()) if (type(s) == seq_module.SequenceDescriptor) else 0,
'tabs': [{'children_count': len(t.get_children()) if (type(t) == vertical_module.VerticalDescriptor) else 0,
'class': t.__class__.__name__}
for t in s.get_children()]}
for s in c.get_children() if not s.hide_from_toc]}
for c in chapters]
return courseware
| agpl-3.0 |
anastue/netforce | netforce_general/netforce_general/models/theme.py | 4 | 4767 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.database import get_active_db
from netforce.static import export_module_file_all
from netforce import utils
from netforce import ipc
import netforce.template
import os
import zipfile
from netforce import module
import pkg_resources
class Theme(Model):
_name = "theme"
_string = "Theme"
_fields = {
"name": fields.Char("Name", required=True),
"description": fields.Text("Description"),
"file": fields.File("ZIP File"),
"templates": fields.One2Many("template","theme_id","Templates"),
}
_defaults = {
"state": "inactive",
}
def activate(self, ids, context={}):
obj = self.browse(ids)[0]
all_ids = self.search([])
self.write(all_ids, {"state": "inactive"})
obj.write({"state": "active"})
obj.update()
def update(self, ids, context={}):
obj = self.browse(ids)[0]
obj.export_static_files()
obj.load_templates()
def export_static_files(self, ids, context={}):
obj = self.browse(ids)[0]
theme = obj.name
dbname = get_active_db()
if obj.file:
zip_path = utils.get_file_path(obj.file)
zf = zipfile.ZipFile(zip_path)
for n in zf.namelist():
if not n.startswith("static/"):
continue
if n[-1] == "/":
continue
n2 = n[7:]
if n2.find("..") != -1:
continue
data = zf.read(n)
f_path = "static/db/" + dbname + "/themes/" + theme + "/" + n2
dir_path = os.path.dirname(f_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
print("export file", f_path)
open(f_path, "wb").write(data)
else:
export_module_file_all("themes/" + theme + "/static", "static/db/" + dbname + "/themes/" + theme)
def load_templates(self, ids, context={}):
obj = self.browse(ids[0])
if obj.file:
zip_path = utils.get_file_path(obj.file)
zf = zipfile.ZipFile(zip_path)
for n in zf.namelist():
if not n.startswith("templates/"):
continue
if not n.endswith(".hbs"):
continue
n2 = n[10:-4]
if n2.find("..") != -1:
continue
print("load template", n2)
data = zf.read(n)
vals = {
"name": n2,
"template": data.decode(),
"theme_id": obj.id,
}
get_model("template").merge(vals)
else:
theme = obj.name
loaded_modules = module.get_loaded_modules()
for m in reversed(loaded_modules):
if not pkg_resources.resource_isdir(m, "themes/" + theme + "/templates"):
continue
for f in pkg_resources.resource_listdir(m, "themes/" + theme + "/templates"):
if not f.endswith(".hbs"):
continue
f2 = f[:-4]
print("load template", f2)
data = pkg_resources.resource_string(m, "themes/" + theme + "/templates/" + f)
vals = {
"name": f2,
"template": data.decode(),
"theme_id": obj.id,
}
get_model("template").merge(vals)
Theme.register()
| mit |
nraynaud/three.js | utils/exporters/blender/addons/io_three/exporter/utilities.py | 225 | 1229 | import uuid
import hashlib
from .. import constants
ROUND = constants.DEFAULT_PRECISION
def bit_mask(flags):
"""Generate a bit mask.
:type flags: dict
:return: int
"""
bit = 0
    true = lambda x, y: (x | (1 << y))  # set bit y of x
    false = lambda x, y: (x & (~(1 << y)))  # clear bit y of x
for mask, position in constants.MASK.items():
func = true if flags.get(mask) else false
bit = func(bit, position)
return bit
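# Illustration of the two bit operations above, independent of constants.MASK:
#
#   >>> set_bit = lambda x, y: (x | (1 << y))
#   >>> clear_bit = lambda x, y: (x & (~(1 << y)))
#   >>> bin(set_bit(0b001, 2))
#   '0b101'
#   >>> bin(clear_bit(0b101, 0))
#   '0b100'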
def hash(value):
"""Generate a hash from a given value
:param value:
:rtype: str
"""
hash_ = hashlib.md5()
hash_.update(repr(value).encode('utf8'))
return hash_.hexdigest()
def id():
"""Generate a random UUID
:rtype: str
"""
return str(uuid.uuid4()).upper()
def id_from_name(name):
"""Generate a UUID using a name as the namespace
:type name: str
:rtype: str
"""
return str(uuid.uuid3(uuid.NAMESPACE_DNS, name)).upper()
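# Unlike id() above, this is deterministic for a given name:
#
#   >>> id_from_name('three.js') == id_from_name('three.js')
#   True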
def rgb2int(rgb):
"""Convert a given rgb value to an integer
:type rgb: list|tuple
:rtype: int
"""
is_tuple = isinstance(rgb, tuple)
rgb = list(rgb) if is_tuple else rgb
colour = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255)
return colour
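# Doctest-style sketch (components are floats in [0, 1]):
#
#   >>> rgb2int((1.0, 0.0, 0.0))
#   16711680
#   >>> hex(rgb2int((0.0, 1.0, 1.0)))
#   '0xffff'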
| mit |
mattip/numpy | pavement.py | 8 | 7028 | r"""
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically puts the checksums into README.rst and writes the Changelog.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
import os
import sys
import shutil
import hashlib
# The paver package needs to be installed to run tasks
import paver
from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Things to be changed for a release
#-----------------------------------
# Path to the release notes
RELEASE_NOTES = 'doc/source/release/1.18.0-notes.rst'
#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
# Where to put the release installers
options(installers=Bunch(releasedir="release",
installersdir=os.path.join("release", "installers")),)
#-----------------------------
# Generate the release version
#-----------------------------
sys.path.insert(0, os.path.dirname(__file__))
try:
setup_py = __import__("setup")
FULLVERSION = setup_py.VERSION
# This is duplicated from setup.py
if os.path.exists('.git'):
GIT_REVISION = setup_py.git_version()
elif os.path.exists('numpy/version.py'):
# must be a source distribution, use existing version file
from numpy.version import git_revision as GIT_REVISION
else:
GIT_REVISION = "Unknown"
if not setup_py.ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
sys.path.pop(0)
#--------------------------
# Source distribution stuff
#--------------------------
def tarball_name(ftype='gztar'):
"""Generate source distribution name
Parameters
----------
ftype : {'zip', 'gztar'}
Type of archive, default is 'gztar'.
"""
root = f'numpy-{FULLVERSION}'
if ftype == 'gztar':
return root + '.tar.gz'
elif ftype == 'zip':
return root + '.zip'
    raise ValueError(f"Unknown type {ftype}")
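# Hedged illustration: with FULLVERSION == '1.18.0' this yields
# 'numpy-1.18.0.tar.gz' by default and 'numpy-1.18.0.zip' for ftype='zip'.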
@task
def sdist(options):
"""Make source distributions.
Parameters
----------
options :
Set by ``task`` decorator.
"""
# First clean the repo and update submodules (for up-to-date doc html theme
# and Sphinx extensions)
sh('git clean -xdf')
sh('git submodule init')
sh('git submodule update')
# To be sure to bypass paver when building sdist... paver + numpy.distutils
# do not play well together.
# Cython is run over all Cython files in setup.py, so generated C files
# will be included.
sh('python3 setup.py sdist --formats=gztar,zip')
# Copy the superpack into installers dir
idirs = options.installers.installersdir
if not os.path.exists(idirs):
os.makedirs(idirs)
for ftype in ['gztar', 'zip']:
source = os.path.join('dist', tarball_name(ftype))
target = os.path.join(idirs, tarball_name(ftype))
shutil.copy(source, target)
#-------------
# README stuff
#-------------
def _compute_hash(idirs, hashfunc):
"""Hash files using given hashfunc.
Parameters
----------
idirs : directory path
Directory containing files to be hashed.
hashfunc : hash function
Function to be used to hash the files.
"""
released = paver.path.path(idirs).listdir()
checksums = []
for fpath in sorted(released):
with open(fpath, 'rb') as fin:
fhash = hashfunc(fin.read())
checksums.append(
'%s %s' % (fhash.hexdigest(), os.path.basename(fpath)))
return checksums
def compute_md5(idirs):
"""Compute md5 hash of files in idirs.
Parameters
----------
idirs : directory path
Directory containing files to be hashed.
"""
return _compute_hash(idirs, hashlib.md5)
def compute_sha256(idirs):
"""Compute sha256 hash of files in idirs.
Parameters
----------
idirs : directory path
Directory containing files to be hashed.
"""
# better checksum so gpg signed README.rst containing the sums can be used
# to verify the binaries instead of signing all binaries
return _compute_hash(idirs, hashlib.sha256)
def write_release_task(options, filename='README'):
"""Append hashes of release files to release notes.
    This appends file hashes to the release notes and creates
four README files of the result in various formats:
- README.rst
- README.rst.gpg
- README.md
- README.md.gpg
    The md files are created using `pandoc` so that the links are
properly updated. The gpg files are kept separate, so that
the unsigned files may be edited before signing if needed.
Parameters
----------
options :
Set by ``task`` decorator.
filename : string
Filename of the modified notes. The file is written
in the release directory.
"""
idirs = options.installers.installersdir
notes = paver.path.path(RELEASE_NOTES)
rst_readme = paver.path.path(filename + '.rst')
md_readme = paver.path.path(filename + '.md')
# append hashes
with open(rst_readme, 'w') as freadme:
with open(notes) as fnotes:
freadme.write(fnotes.read())
freadme.writelines("""
Checksums
=========
MD5
---
::
""")
freadme.writelines([f' {c}\n' for c in compute_md5(idirs)])
freadme.writelines("""
SHA256
------
::
""")
freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)])
# generate md file using pandoc before signing
sh(f"pandoc -s -o {md_readme} {rst_readme}")
# Sign files
if hasattr(options, 'gpg_key'):
        cmd = f'gpg --clearsign --armor --default-key {options.gpg_key}'
else:
cmd = 'gpg --clearsign --armor'
sh(cmd + f' --output {rst_readme}.gpg {rst_readme}')
sh(cmd + f' --output {md_readme}.gpg {md_readme}')
@task
def write_release(options):
"""Write the README files.
Two README files are generated from the release notes, one in ``rst``
markup for the general release, the other in ``md`` markup for the github
release notes.
Parameters
----------
options :
Set by ``task`` decorator.
"""
rdir = options.installers.releasedir
write_release_task(options, os.path.join(rdir, 'README'))
| bsd-3-clause |
albertomurillo/ansible | test/units/module_utils/xenserver/test_set_vm_power_state.py | 15 | 17604 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from .FakeAnsibleModule import FakeAnsibleModule, ExitJsonException, FailJsonException
from .common import fake_xenapi_ref, testcase_bad_xenapi_refs
testcase_set_vm_power_state_bad_transitions = {
"params": [
('restarted', 'Halted', "Cannot restart VM in state 'poweredoff'!"),
('restarted', 'Suspended', "Cannot restart VM in state 'suspended'!"),
('suspended', 'Halted', "Cannot suspend VM in state 'poweredoff'!"),
('suspended', 'Paused', "Cannot suspend VM in state 'paused'!"),
('shutdownguest', 'Halted', "Cannot shutdown guest when VM is in state 'poweredoff'!"),
('shutdownguest', 'Suspended', "Cannot shutdown guest when VM is in state 'suspended'!"),
('shutdownguest', 'Paused', "Cannot shutdown guest when VM is in state 'paused'!"),
('rebootguest', 'Halted', "Cannot reboot guest when VM is in state 'poweredoff'!"),
('rebootguest', 'Suspended', "Cannot reboot guest when VM is in state 'suspended'!"),
('rebootguest', 'Paused', "Cannot reboot guest when VM is in state 'paused'!"),
],
"ids": [
"poweredoff->restarted",
"suspended->restarted",
"poweredoff->suspended",
"paused->suspended",
"poweredoff->shutdownguest",
"suspended->shutdownguest",
"paused->shutdownguest",
"poweredoff->rebootguest",
"suspended->rebootguest",
"paused->rebootguest",
],
}
testcase_set_vm_power_state_task_timeout = {
"params": [
('shutdownguest', "Guest shutdown task failed: 'timeout'!"),
('rebootguest', "Guest reboot task failed: 'timeout'!"),
],
"ids": [
"shutdownguest-timeout",
"rebootguest-timeout",
],
}
testcase_set_vm_power_state_no_transitions = {
"params": [
('poweredon', "Running"),
('Poweredon', "Running"),
('powered-on', "Running"),
('Powered_on', "Running"),
('poweredoff', "Halted"),
('Poweredoff', "Halted"),
('powered-off', "Halted"),
('powered_off', "Halted"),
('suspended', "Suspended"),
('Suspended', "Suspended"),
],
"ids": [
"poweredon",
"poweredon-cap",
"poweredon-dash",
"poweredon-under",
"poweredoff",
"poweredoff-cap",
"poweredoff-dash",
"poweredoff-under",
"suspended",
"suspended-cap",
],
}
testcase_set_vm_power_state_transitions = {
"params": [
('poweredon', 'Halted', 'running', 'VM.start'),
('Poweredon', 'Halted', 'running', 'VM.start'),
('powered-on', 'Halted', 'running', 'VM.start'),
('Powered_on', 'Halted', 'running', 'VM.start'),
('poweredon', 'Suspended', 'running', 'VM.resume'),
('Poweredon', 'Suspended', 'running', 'VM.resume'),
('powered-on', 'Suspended', 'running', 'VM.resume'),
('Powered_on', 'Suspended', 'running', 'VM.resume'),
('poweredon', 'Paused', 'running', 'VM.unpause'),
('Poweredon', 'Paused', 'running', 'VM.unpause'),
('powered-on', 'Paused', 'running', 'VM.unpause'),
('Powered_on', 'Paused', 'running', 'VM.unpause'),
('poweredoff', 'Running', 'halted', 'VM.hard_shutdown'),
('Poweredoff', 'Running', 'halted', 'VM.hard_shutdown'),
('powered-off', 'Running', 'halted', 'VM.hard_shutdown'),
('powered_off', 'Running', 'halted', 'VM.hard_shutdown'),
('poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'),
('Poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'),
('powered-off', 'Suspended', 'halted', 'VM.hard_shutdown'),
('powered_off', 'Suspended', 'halted', 'VM.hard_shutdown'),
('poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'),
('Poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'),
('powered-off', 'Paused', 'halted', 'VM.hard_shutdown'),
('powered_off', 'Paused', 'halted', 'VM.hard_shutdown'),
('restarted', 'Running', 'running', 'VM.hard_reboot'),
('Restarted', 'Running', 'running', 'VM.hard_reboot'),
('restarted', 'Paused', 'running', 'VM.hard_reboot'),
('Restarted', 'Paused', 'running', 'VM.hard_reboot'),
('suspended', 'Running', 'suspended', 'VM.suspend'),
('Suspended', 'Running', 'suspended', 'VM.suspend'),
('shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'),
('Shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'),
('shutdown-guest', 'Running', 'halted', 'VM.clean_shutdown'),
('shutdown_guest', 'Running', 'halted', 'VM.clean_shutdown'),
('rebootguest', 'Running', 'running', 'VM.clean_reboot'),
        ('Rebootguest', 'Running', 'running', 'VM.clean_reboot'),
('reboot-guest', 'Running', 'running', 'VM.clean_reboot'),
('reboot_guest', 'Running', 'running', 'VM.clean_reboot'),
],
"ids": [
"poweredoff->poweredon",
"poweredoff->poweredon-cap",
"poweredoff->poweredon-dash",
"poweredoff->poweredon-under",
"suspended->poweredon",
"suspended->poweredon-cap",
"suspended->poweredon-dash",
"suspended->poweredon-under",
"paused->poweredon",
"paused->poweredon-cap",
"paused->poweredon-dash",
"paused->poweredon-under",
"poweredon->poweredoff",
"poweredon->poweredoff-cap",
"poweredon->poweredoff-dash",
"poweredon->poweredoff-under",
"suspended->poweredoff",
"suspended->poweredoff-cap",
"suspended->poweredoff-dash",
"suspended->poweredoff-under",
"paused->poweredoff",
"paused->poweredoff-cap",
"paused->poweredoff-dash",
"paused->poweredoff-under",
"poweredon->restarted",
"poweredon->restarted-cap",
"paused->restarted",
"paused->restarted-cap",
"poweredon->suspended",
"poweredon->suspended-cap",
"poweredon->shutdownguest",
"poweredon->shutdownguest-cap",
"poweredon->shutdownguest-dash",
"poweredon->shutdownguest-under",
"poweredon->rebootguest",
"poweredon->rebootguest-cap",
"poweredon->rebootguest-dash",
"poweredon->rebootguest-under",
],
}
testcase_set_vm_power_state_transitions_async = {
"params": [
('shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
('Shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
('shutdown-guest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
('shutdown_guest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
('rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'),
        ('Rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'),
('reboot-guest', 'Running', 'running', 'Async.VM.clean_reboot'),
('reboot_guest', 'Running', 'running', 'Async.VM.clean_reboot'),
],
"ids": [
"poweredon->shutdownguest",
"poweredon->shutdownguest-cap",
"poweredon->shutdownguest-dash",
"poweredon->shutdownguest-under",
"poweredon->rebootguest",
"poweredon->rebootguest-cap",
"poweredon->rebootguest-dash",
"poweredon->rebootguest-under",
],
}
@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
def test_set_vm_power_state_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
"""Tests failure on bad vm_ref."""
with pytest.raises(FailJsonException) as exc_info:
xenserver.set_vm_power_state(fake_ansible_module, vm_ref, None)
assert exc_info.value.kwargs['msg'] == "Cannot set VM power state. Invalid VM reference supplied!"
def test_set_vm_power_state_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
"""Tests catching of XenAPI failures."""
with pytest.raises(FailJsonException) as exc_info:
xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "poweredon")
assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
def test_set_vm_power_state_bad_power_state(mocker, fake_ansible_module, XenAPI, xenserver):
"""Tests failure on unsupported power state."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"VM.get_power_state.return_value": "Running",
}
mocked_xenapi.configure_mock(**mocked_returns)
with pytest.raises(FailJsonException) as exc_info:
xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "bad")
# Beside VM.get_power_state() no other method should have been
# called additionally.
assert len(mocked_xenapi.method_calls) == 1
assert exc_info.value.kwargs['msg'] == "Requested VM power state 'bad' is unsupported!"
@pytest.mark.parametrize('power_state_desired, power_state_current, error_msg',
testcase_set_vm_power_state_bad_transitions['params'],
ids=testcase_set_vm_power_state_bad_transitions['ids'])
def test_set_vm_power_state_bad_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current, error_msg):
"""Tests failure on bad power state transition."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"VM.get_power_state.return_value": power_state_current,
}
mocked_xenapi.configure_mock(**mocked_returns)
with pytest.raises(FailJsonException) as exc_info:
xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired)
# Beside VM.get_power_state() no other method should have been
# called additionally.
assert len(mocked_xenapi.method_calls) == 1
assert exc_info.value.kwargs['msg'] == error_msg
@pytest.mark.parametrize('power_state, error_msg',
testcase_set_vm_power_state_task_timeout['params'],
ids=testcase_set_vm_power_state_task_timeout['ids'])
def test_set_vm_power_state_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver, power_state, error_msg):
"""Tests failure on async task timeout."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"VM.get_power_state.return_value": "Running",
"Async.VM.clean_shutdown.return_value": fake_xenapi_ref('task'),
"Async.VM.clean_reboot.return_value": fake_xenapi_ref('task'),
}
mocked_xenapi.configure_mock(**mocked_returns)
mocker.patch('ansible.module_utils.xenserver.wait_for_task', return_value="timeout")
with pytest.raises(FailJsonException) as exc_info:
xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state, timeout=1)
# Beside VM.get_power_state() only one of Async.VM.clean_shutdown or
# Async.VM.clean_reboot should have been called additionally.
assert len(mocked_xenapi.method_calls) == 2
assert exc_info.value.kwargs['msg'] == error_msg
@pytest.mark.parametrize('power_state_desired, power_state_current',
testcase_set_vm_power_state_no_transitions['params'],
ids=testcase_set_vm_power_state_no_transitions['ids'])
def test_set_vm_power_state_no_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current):
"""Tests regular invocation without power state transition."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"VM.get_power_state.return_value": power_state_current,
}
mocked_xenapi.configure_mock(**mocked_returns)
result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired)
# Beside VM.get_power_state() no other method should have been
# called additionally.
assert len(mocked_xenapi.method_calls) == 1
assert result[0] is False
assert result[1] == power_state_current.lower()
@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
testcase_set_vm_power_state_transitions['params'],
ids=testcase_set_vm_power_state_transitions['ids'])
def test_set_vm_power_state_transition(mocker,
fake_ansible_module,
XenAPI,
xenserver,
power_state_desired,
power_state_current,
power_state_resulting,
activated_xenapi_method):
"""Tests regular invocation with power state transition."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"VM.get_power_state.return_value": power_state_current,
}
mocked_xenapi.configure_mock(**mocked_returns)
result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0)
mocked_xenapi_method = mocked_xenapi
for activated_xenapi_class in activated_xenapi_method.split('.'):
mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
mocked_xenapi_method.assert_called_once()
# Beside VM.get_power_state() only activated_xenapi_method should have
# been called additionally.
assert len(mocked_xenapi.method_calls) == 2
assert result[0] is True
assert result[1] == power_state_resulting
@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
testcase_set_vm_power_state_transitions_async['params'],
ids=testcase_set_vm_power_state_transitions_async['ids'])
def test_set_vm_power_state_transition_async(mocker,
fake_ansible_module,
XenAPI,
xenserver,
power_state_desired,
power_state_current,
power_state_resulting,
activated_xenapi_method):
"""
Tests regular invocation with async power state transition
(shutdownguest and rebootguest only).
"""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"VM.get_power_state.return_value": power_state_current,
"%s.return_value" % activated_xenapi_method: fake_xenapi_ref('task'),
}
mocked_xenapi.configure_mock(**mocked_returns)
mocker.patch('ansible.module_utils.xenserver.wait_for_task', return_value="")
result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=1)
mocked_xenapi_method = mocked_xenapi
for activated_xenapi_class in activated_xenapi_method.split('.'):
mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
mocked_xenapi_method.assert_called_once()
# Beside VM.get_power_state() only activated_xenapi_method should have
# been called additionally.
assert len(mocked_xenapi.method_calls) == 2
assert result[0] is True
assert result[1] == power_state_resulting
@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
testcase_set_vm_power_state_transitions['params'],
ids=testcase_set_vm_power_state_transitions['ids'])
def test_set_vm_power_state_transition_check_mode(mocker,
fake_ansible_module,
XenAPI,
xenserver,
power_state_desired,
power_state_current,
power_state_resulting,
activated_xenapi_method):
"""Tests regular invocation with power state transition in check mode."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"VM.get_power_state.return_value": power_state_current,
}
mocked_xenapi.configure_mock(**mocked_returns)
fake_ansible_module.check_mode = True
result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0)
mocked_xenapi_method = mocked_xenapi
for activated_xenapi_class in activated_xenapi_method.split('.'):
mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
mocked_xenapi_method.assert_not_called()
# Beside VM.get_power_state() no other method should have been
# called additionally.
assert len(mocked_xenapi.method_calls) == 1
assert result[0] is True
assert result[1] == power_state_resulting
| gpl-3.0 |
ikasumiwt/zulip | zilencer/error_notify.py | 120 | 3370 | from collections import defaultdict
import logging
from django.conf import settings
from django.core.mail import mail_admins
from zerver.lib.actions import internal_send_message
def format_subject(subject):
"""
Escape CR and LF characters.
"""
return subject.replace('\n', '\\n').replace('\r', '\\r')
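# Illustrative behaviour (a sketch): format_subject('a\nb\rc') returns
# 'a\\nb\\rc', so multi-line subjects cannot break the message formatting.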
def user_info_str(report):
if report['user_full_name'] and report['user_email']:
user_info = "%(user_full_name)s (%(user_email)s)" % (report)
else:
user_info = "Anonymous user (not logged in)"
user_info += " on %s deployment" % (report['deployment'],)
return user_info
def notify_browser_error(report):
report = defaultdict(lambda: None, report)
if settings.ERROR_BOT:
zulip_browser_error(report)
email_browser_error(report)
def email_browser_error(report):
subject = "Browser error for %s" % (user_info_str(report))
body = ("User: %(user_full_name)s <%(user_email)s> on %(deployment)s\n\n"
"Message:\n%(message)s\n\nStacktrace:\n%(stacktrace)s\n\n"
"User agent: %(user_agent)s\n"
"href: %(href)s\n"
"Server path: %(server_path)s\n"
"Deployed version: %(version)s\n"
% report)
more_info = report['more_info']
if more_info is not None:
body += "\nAdditional information:"
for (key, value) in more_info.iteritems():
body += "\n %s: %s" % (key, value)
body += "\n\nLog:\n%s" % (report['log'],)
mail_admins(subject, body)
def zulip_browser_error(report):
subject = "JS error: %s" % (report['user_email'],)
user_info = user_info_str(report)
body = "User: %s\n" % (user_info,)
body += ("Message: %(message)s\n"
             % report)
internal_send_message(settings.ERROR_BOT,
"stream", "errors", format_subject(subject), body)
def notify_server_error(report):
report = defaultdict(lambda: None, report)
email_server_error(report)
if settings.ERROR_BOT:
zulip_server_error(report)
def zulip_server_error(report):
subject = '%(node)s: %(message)s' % report
stack_trace = report['stack_trace'] or "No stack trace available"
user_info = user_info_str(report)
request_repr = (
"Request info:\n~~~~\n"
"- path: %(path)s\n"
"- %(method)s: %(data)s\n") % (report)
for field in ["REMOTE_ADDR", "QUERY_STRING", "SERVER_NAME"]:
request_repr += "- %s: \"%s\"\n" % (field, report.get(field.lower()))
request_repr += "~~~~"
internal_send_message(settings.ERROR_BOT,
"stream", "errors", format_subject(subject),
"Error generated by %s\n\n~~~~ pytb\n%s\n\n~~~~\n%s" % (
user_info, stack_trace, request_repr))
def email_server_error(report):
subject = '%(node)s: %(message)s' % (report)
user_info = user_info_str(report)
request_repr = (
"Request info:\n"
"- path: %(path)s\n"
"- %(method)s: %(data)s\n") % (report)
for field in ["REMOTE_ADDR", "QUERY_STRING", "SERVER_NAME"]:
request_repr += "- %s: \"%s\"\n" % (field, report.get(field.lower()))
message = "Error generated by %s\n\n%s\n\n%s" % (user_info, report['stack_trace'],
request_repr)
mail_admins(format_subject(subject), message, fail_silently=True)
| apache-2.0 |
DeepThoughtTeam/tensorflow | tensorflow/python/framework/tensor_shape.py | 1 | 25203 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from tensorflow.core.framework import tensor_shape_pb2
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
else:
self._value = int(value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
other = as_dimension(other)
return (self._value is None
or other.value is None
or self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible"
% (self, other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
Dimension(n) .merge_with(Dimension(n)) == Dimension(n)
Dimension(n) .merge_with(Dimension(None)) == Dimension(n)
Dimension(None).merge_with(Dimension(n)) == Dimension(n)
Dimension(None).merge_with(Dimension(None)) == Dimension(None)
Dimension(n) .merge_with(Dimension(m)) raises ValueError for n != m
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
other = as_dimension(other)
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
Dimension(m) + Dimension(n) == Dimension(m + n)
Dimension(m) + Dimension(None) == Dimension(None)
Dimension(None) + Dimension(n) == Dimension(None)
Dimension(None) + Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
Dimension(m) - Dimension(n) == Dimension(m - n)
Dimension(m) - Dimension(None) == Dimension(None)
Dimension(None) - Dimension(n) == Dimension(None)
Dimension(None) - Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
      A Dimension whose value is the subtraction of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
    Dimensions are multiplied as follows:
Dimension(m) * Dimension(n) == Dimension(m * n)
Dimension(m) * Dimension(None) == Dimension(None)
Dimension(None) * Dimension(n) == Dimension(None)
Dimension(None) * Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
      A Dimension whose value is the product of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
Dimension(m) // Dimension(n) == Dimension(m // n)
Dimension(m) // Dimension(None) == Dimension(None)
Dimension(None) // Dimension(n) == Dimension(None)
Dimension(None) // Dimension(None) == Dimension(None)
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
This function exists only for backwards compatibility purposes; new code
should use `__floordiv__` via the syntax `x // y`. Using `x // y`
communicates clearly that the result rounds down, and is forward compatible
to Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __mod__(self, other):
"""Returns `self` modulo `other.
Dimension moduli are computed as follows:
Dimension(m) % Dimension(n) == Dimension(m % n)
Dimension(m) % Dimension(None) == Dimension(None)
Dimension(None) % Dimension(n) == Dimension(None)
Dimension(None) % Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
Dimension(m) < Dimension(n) == m < n
Dimension(m) < Dimension(None) == None
Dimension(None) < Dimension(n) == None
Dimension(None) < Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) <= Dimension(n) == m <= n
Dimension(m) <= Dimension(None) == None
Dimension(None) <= Dimension(n) == None
Dimension(None) <= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
Dimension(m) > Dimension(n) == m > n
Dimension(m) > Dimension(None) == None
Dimension(None) > Dimension(n) == None
Dimension(None) > Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) >= Dimension(n) == m >= n
Dimension(m) >= Dimension(None) == None
Dimension(None) >= Dimension(n) == None
Dimension(None) >= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def as_dimension(value):
"""Converts the given value to a Dimension.
  A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
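# Illustrative semantics (a sketch; unknown dimensions propagate as None):
#   (Dimension(2) + Dimension(3)).value      -> 5
#   (Dimension(7) // Dimension(2)).value     -> 3
#   (Dimension(None) * Dimension(4)).value   -> None
#   as_dimension(5).is_compatible_with(None) -> True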
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension.
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimension.
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions.
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See [`tf.RegisterShape()`](../../api_docs/python/framework.md#RegisterShape)
for details of shape
functions and how to register them. Alternatively, the shape may be set
explicitly using [`Tensor.set_shape()`](../../api_docs/python/framework.md#Tensor.set_shape).
@@merge_with
@@concatenate
@@ndims
@@dims
@@as_list
@@is_compatible_with
@@is_fully_defined
@@with_rank
@@with_rank_at_least
@@with_rank_at_most
@@assert_has_rank
@@assert_same_rank
@@assert_is_compatible_with
@@assert_is_fully_defined
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
DEPRECATED: A single integer is treated as a singleton list.
"""
# TODO(irving): Eliminate the single integer special case.
if dims is None:
self._dims = None
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim]
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
def __repr__(self):
return "TensorShape(%s)" % self._dims
def __str__(self):
if self.ndims is None:
return "<unknown>"
elif self.ndims == 1:
length = self._dims[0].value
return "(%s,)" % (str(length) if length is not None else "?")
else:
return "(%s)" % ", ".join(str(d.value) if d.value is not None else "?"
for d in self._dims)
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is unspecified."""
return self._dims
@property
def ndims(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is None:
return None
else:
return len(self._dims)
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if unspecified."""
if self._dims is None:
raise ValueError("Cannot take the length of Shape with unknown rank.")
return len(self._dims)
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose
dimensions are those selected by the slice from `self`.
Returns:
A dimension if `key` is an integer, or a `TensorShape` if `key` is a
slice.
Raises:
ValueError: If `key` is a slice, and any of its elements are negative, or
if `self` is completely unknown and the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is compatible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(ndims=stop-start)
else:
return Dimension(None)
def num_elements(self):
"""Returns the total number of elements, or none for incomplete shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError(
"Shapes %s and %s must have the same rank" % (self, other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.ndims not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(ndims=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.ndims is not None and self.ndims < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.ndims is not None and self.ndims > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
def is_compatible_with(self, other):
"""Returns True iff `self` is compatible with `other`.
Two possibly-partially-defined shapes are compatible if there
exists a fully-defined shape that both shapes can represent. Thus,
compatibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is compatible with all shapes.
* TensorShape([None, None]) is compatible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not compatible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is compatible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not compatible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is compatible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not compatible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The compatibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is compatible with
TensorShape(None), and TensorShape(None) is compatible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is compatible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.ndims != other.ndims:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_compatible_with(y_dim):
return False
return True
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None
and all(dim.value is not None for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_dimension_list(self):
"""DEPRECATED: use `as_list()`."""
if self.dims is None:
raise ValueError("Shape %s does not have a rank" % self)
return self.as_list(to_proto=True)
def as_list(self, to_proto=False):
"""Returns a list of integers or None for each dimension.
If `to_proto` is True, returns -1 instead of None for unknown dimensions.
Args:
to_proto: boolean. Determines how unknown dimensions are treated.
Returns:
A list of integers or None for each dimension.
"""
return [dim.value if not (to_proto and dim.value is None) else -1
for dim in self._dims]
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
other = as_shape(other)
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`."""
other = as_shape(other)
if self.ndims is None or other.ndims is None:
raise ValueError("The inequality of unknown TensorShapes is undefined.")
if self.ndims != other.ndims:
return True
return self._dims != other.dims
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(ndims=None):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
ndims: (Optional) If specified, the number of dimensions in the shape.
Returns:
An unknown TensorShape.
"""
if ndims is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * ndims)
def scalar():
"""Returns a shape representing a scalar."""
return TensorShape([])
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
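# Illustrative usage (a sketch):
#   str(matrix(2, 3)) -> '(2, 3)'
#   str(vector(None)) -> '(?,)'
#   unknown_shape(ndims=2).is_compatible_with(matrix(32, 784)) -> True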
| apache-2.0 |
Ant-OS/android_packages_apps_OTAUpdates | jni/boost_1_57_0/tools/build/test/preprocessor.py | 58 | 1070 | #!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test the C/C++ preprocessor.
import BoostBuild
t = BoostBuild.Tester()
t.write("jamroot.jam", """
project ;
preprocessed hello : hello.cpp ;
preprocessed a : a.c ;
exe hello.exe : hello a : <define>FAIL ;
""")
t.write("hello.cpp", """
#ifndef __cplusplus
#error "This file must be compiled as C++"
#endif
#ifdef FAIL
#error "Not preprocessed?"
#endif
extern "C" int foo();
int main() { return foo(); }
""")
t.write("a.c", """
/* This will not compile unless in C mode. */
#ifdef __cplusplus
#error "This file must be compiled as C"
#endif
#ifdef FAIL
#error "Not preprocessed?"
#endif
int foo()
{
int new = 0;
new = (new+1)*7;
return new;
}
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/hello.ii")
t.expect_addition("bin/$toolset/debug/a.i")
t.expect_addition("bin/$toolset/debug/hello.exe")
t.cleanup()
| apache-2.0 |
ioanpocol/superdesk-core | superdesk/factory/sentry.py | 3 | 1067 |
import logging
from raven.contrib.flask import Sentry
from raven.contrib.celery import register_signal, register_logger_signal
SENTRY_DSN = 'SENTRY_DSN'
class SuperdeskSentry():
"""Sentry proxy that will do nothing in case sentry is not configured."""
def __init__(self, app):
if app.config.get(SENTRY_DSN):
if 'verify_ssl' not in app.config[SENTRY_DSN]:
app.config[SENTRY_DSN] += '?verify_ssl=0'
app.config.setdefault('SENTRY_NAME', app.config.get('SERVER_DOMAIN'))
self.sentry = Sentry(app, register_signal=False, wrap_wsgi=False, logging=True, level=logging.WARNING)
register_logger_signal(self.sentry.client)
register_signal(self.sentry.client)
else:
self.sentry = None
def captureException(self, exc_info=None, **kwargs):
if self.sentry:
self.sentry.captureException(exc_info, **kwargs)
def captureMessage(self, message, **kwargs):
if self.sentry:
self.sentry.captureMessage(message, **kwargs)
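# Illustrative usage (a sketch; `app` is assumed to be a configured Flask app):
#   sentry = SuperdeskSentry(app)
#   sentry.captureMessage('boot ok')  # silently a no-op when SENTRY_DSN is unset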
| agpl-3.0 |
sbbic/core | wizards/com/sun/star/wizards/ui/event/RadioDataAware.py | 13 | 1747 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from .CommonListener import ItemListenerProcAdapter
from .DataAware import DataAware
class RadioDataAware(DataAware):
def __init__(self, data, value, radioButtons):
super(RadioDataAware,self).__init__(data, value)
self.radioButtons = radioButtons
def setToUI(self, value):
selected = int(value)
if selected == -1:
for i in self.radioButtons:
i.State = False
else:
self.radioButtons[selected].State = True
def getFromUI(self):
for index, workwith in enumerate(self.radioButtons):
if workwith.State:
return index
return -1
@classmethod
    def attachRadioButtons(cls, data, prop, buttons, field):
        da = cls(data, prop, buttons)
method = getattr(da,"updateData")
for i in da.radioButtons:
i.addItemListener(ItemListenerProcAdapter(method))
return da
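# Illustrative usage (a sketch; `data` and `buttons` are assumptions standing
# in for a wizard's data object and its UNO radio button controls):
#   da = RadioDataAware.attachRadioButtons(data, "Layout", buttons, None)
#   da.setToUI(1)   # checks the second radio button
#   da.getFromUI()  # -> index of the checked button, or -1 if none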
| gpl-3.0 |
Lektorium-LLC/edx-platform | cms/djangoapps/contentstore/features/course-updates.py | 11 | 2701 | # pylint: disable=missing-docstring
from lettuce import step, world
from nose.tools import assert_in
from cms.djangoapps.contentstore.features.common import get_codemirror_value, type_in_codemirror
@step(u'I go to the course updates page')
def go_to_updates(_step):
menu_css = 'li.nav-course-courseware'
updates_css = 'li.nav-course-courseware-updates a'
world.css_click(menu_css)
world.css_click(updates_css)
world.wait_for_visible('#course-handouts-view')
@step(u'I change the handout from "([^"]*)" to "([^"]*)"$')
def change_existing_handout(_step, before, after):
verify_text_in_editor_and_update('div.course-handouts .edit-button', before, after)
@step(u'I modify the handout to "([^"]*)"$')
def edit_handouts(_step, text):
edit_css = 'div.course-handouts > .edit-button'
world.css_click(edit_css)
change_text(text)
@step(u'I see the handout "([^"]*)"$')
def check_handout(_step, handout):
handout_css = 'div.handouts-content'
assert_in(handout, world.css_html(handout_css))
@step(u'I see the handout image link "([^"]*)"$')
def check_handout_image_link(_step, image_file):
handout_css = 'div.handouts-content'
handout_html = world.css_html(handout_css)
asset_key = world.scenario_dict['COURSE'].id.make_asset_key(asset_type='asset', path=image_file)
assert_in(unicode(asset_key), handout_html)
@step(u'I see the handout error text')
def check_handout_error(_step):
handout_error_css = 'div#handout_error'
assert world.css_has_class(handout_error_css, 'is-shown')
@step(u'I see handout save button disabled')
def check_handout_error(_step):
handout_save_button = 'form.edit-handouts-form .save-button'
assert world.css_has_class(handout_save_button, 'is-disabled')
@step(u'I edit the handout to "([^"]*)"$')
def edit_handouts(_step, text):
type_in_codemirror(0, text)
@step(u'I see handout save button re-enabled')
def check_handout_error(_step):
handout_save_button = 'form.edit-handouts-form .save-button'
assert not world.css_has_class(handout_save_button, 'is-disabled')
@step(u'I save handout edit')
def check_handout_error(_step):
save_css = '.save-button'
world.css_click(save_css)
def change_text(text):
type_in_codemirror(0, text)
save_css = '.save-button'
world.css_click(save_css)
def verify_text_in_editor_and_update(button_css, before, after):
world.css_click(button_css)
text = get_codemirror_value()
assert_in(before, text)
change_text(after)
@step('I see a "(saving|deleting)" notification')
def i_see_a_mini_notification(_step, _type):
saving_css = '.wrapper-notification-mini'
assert world.is_css_present(saving_css)
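# Illustrative feature scenario exercising the steps above (a sketch):
#   When I go to the course updates page
#   And I modify the handout to "<ol>Test</ol>"
#   Then I see the handout "<ol>Test</ol>"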
| agpl-3.0 |
ryfeus/lambda-packs | Selenium_PhantomJS/source/concurrent/futures/thread.py | 34 | 4658 | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
import atexit
from concurrent.futures import _base
import Queue as queue
import threading
import weakref
import sys
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items()) if _threads_queues else ()
for t, q in items:
q.put(None)
for t, q in items:
t.join(sys.maxint)
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e, tb = sys.exc_info()[1:]
self.future.set_exception_info(e, tb)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join(sys.maxint)
shutdown.__doc__ = _base.Executor.shutdown.__doc__
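# Illustrative usage (a sketch):
#   with ThreadPoolExecutor(max_workers=4) as executor:
#       future = executor.submit(pow, 2, 10)
#       assert future.result() == 1024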
| mit |
bryceliu/ansible-modules-core | cloud/openstack/os_security_group_rule.py | 72 | 10710 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_security_group_rule
short_description: Add/Delete rule from an existing security group
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove rule from an existing security group
options:
security_group:
description:
- Name of the security group
required: true
protocol:
description:
- IP protocol
choices: ['tcp', 'udp', 'icmp', None]
default: None
port_range_min:
description:
- Starting port
required: false
default: None
port_range_max:
description:
- Ending port
required: false
default: None
remote_ip_prefix:
description:
- Source IP address(es) in CIDR notation (exclusive with remote_group)
required: false
remote_group:
description:
- ID of Security group to link (exclusive with remote_ip_prefix)
required: false
ethertype:
description:
- Must be IPv4 or IPv6, and addresses represented in CIDR must
match the ingress or egress rules. Not all providers support IPv6.
choices: ['IPv4', 'IPv6']
default: IPv4
direction:
description:
- The direction in which the security group rule is applied. Not
all providers support egress.
choices: ['egress', 'ingress']
default: ingress
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a security group rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 80
port_range_max: 80
remote_ip_prefix: 0.0.0.0/0
# Create a security group rule for ping
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
# Another way to create the ping rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
port_range_min: -1
port_range_max: -1
remote_ip_prefix: 0.0.0.0/0
# Create a TCP rule covering all ports
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 1
port_range_max: 65535
remote_ip_prefix: 0.0.0.0/0
# Another way to create the TCP rule above (defaults to all ports)
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
'''
RETURN = '''
id:
description: Unique rule UUID.
type: string
direction:
description: The direction in which the security group rule is applied.
type: string
sample: 'egress'
ethertype:
description: One of IPv4 or IPv6.
type: string
sample: 'IPv4'
port_range_min:
description: The minimum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
port_range_max:
description: The maximum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
protocol:
description: The protocol that is matched by the security group rule.
type: string
sample: 'tcp'
remote_ip_prefix:
description: The remote IP prefix to be associated with this security group rule.
type: string
sample: '0.0.0.0/0'
security_group_id:
description: The security group ID to associate with this security group rule.
type: string
'''
def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
"""
Capture the complex port matching logic.
The port values coming in for the module might be -1 (for ICMP),
which will work only for Nova, but this is handled by shade. Likewise,
they might be None, which works for Neutron, but not Nova. This too is
handled by shade. Since shade will consistently return these port
values as None, we need to convert any -1 values input to the module
to None here for comparison.
For TCP and UDP protocols, None values for both min and max are
represented as the range 1-65535 for Nova, but remain None for
Neutron. Shade returns the full range when Nova is the backend (since
that is how Nova stores them), and None values for Neutron. If None
values are input to the module for both values, then we need to adjust
for comparison.
"""
# Check if the user is supplying -1 for ICMP.
if protocol == 'icmp':
if module_min and int(module_min) == -1:
module_min = None
if module_max and int(module_max) == -1:
module_max = None
# Check if user is supplying None values for full TCP/UDP port range.
if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:
if (rule_min and int(rule_min) == 1
and rule_max and int(rule_max) == 65535):
# (None, None) == (1, 65535)
return True
# Sanity check to make sure we don't have type comparison issues.
if module_min:
module_min = int(module_min)
if module_max:
module_max = int(module_max)
if rule_min:
rule_min = int(rule_min)
if rule_max:
rule_max = int(rule_max)
return module_min == rule_min and module_max == rule_max
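# Illustrative sketch (not part of the original module): how the
# normalization in _ports_match() plays out for representative inputs.
#
#     _ports_match('icmp', -1, -1, None, None)   # True: -1 is coerced to None
#     _ports_match('tcp', None, None, 1, 65535)  # True: (None, None) == full range
#     _ports_match('tcp', 80, 80, 80, 80)        # True: plain integer comparison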
def _find_matching_rule(module, secgroup):
"""
Find a rule in the group that matches the module parameters.
:returns: The matching rule dict, or None if no matches.
"""
protocol = module.params['protocol']
remote_ip_prefix = module.params['remote_ip_prefix']
ethertype = module.params['ethertype']
direction = module.params['direction']
remote_group_id = module.params['remote_group']
for rule in secgroup['security_group_rules']:
if (protocol == rule['protocol']
and remote_ip_prefix == rule['remote_ip_prefix']
and ethertype == rule['ethertype']
and direction == rule['direction']
and remote_group_id == rule['remote_group_id']
and _ports_match(protocol,
module.params['port_range_min'],
module.params['port_range_max'],
rule['port_range_min'],
rule['port_range_max'])):
return rule
return None
def _system_state_change(module, secgroup):
state = module.params['state']
if secgroup:
rule_exists = _find_matching_rule(module, secgroup)
else:
return False
if state == 'present' and not rule_exists:
return True
if state == 'absent' and rule_exists:
return True
return False
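# _system_state_change() backs Ansible's check mode: it predicts whether a
# real run would add or remove a rule, without making any API calls itself.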
def main():
argument_spec = openstack_full_argument_spec(
security_group = dict(required=True),
# NOTE(Shrews): None is an acceptable protocol value for
# Neutron, but Nova will balk at this.
protocol = dict(default=None,
choices=[None, 'tcp', 'udp', 'icmp']),
port_range_min = dict(required=False, type='int'),
port_range_max = dict(required=False, type='int'),
remote_ip_prefix = dict(required=False, default=None),
# TODO(mordred): Make remote_group handle name and id
remote_group = dict(required=False, default=None),
ethertype = dict(default='IPv4',
choices=['IPv4', 'IPv6']),
direction = dict(default='ingress',
choices=['egress', 'ingress']),
state = dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['remote_ip_prefix', 'remote_group'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
security_group = module.params['security_group']
changed = False
try:
cloud = shade.openstack_cloud(**module.params)
secgroup = cloud.get_security_group(security_group)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup))
if state == 'present':
if not secgroup:
module.fail_json(msg='Could not find security group %s' %
security_group)
rule = _find_matching_rule(module, secgroup)
if not rule:
rule = cloud.create_security_group_rule(
secgroup['id'],
port_range_min=module.params['port_range_min'],
port_range_max=module.params['port_range_max'],
protocol=module.params['protocol'],
remote_ip_prefix=module.params['remote_ip_prefix'],
remote_group_id=module.params['remote_group'],
direction=module.params['direction'],
ethertype=module.params['ethertype']
)
changed = True
module.exit_json(changed=changed, rule=rule, id=rule['id'])
if state == 'absent' and secgroup:
rule = _find_matching_rule(module, secgroup)
if rule:
cloud.delete_security_group_rule(rule['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jylaxp/django | tests/auth_tests/test_mixins.py | 274 | 8335 | from django.contrib.auth import models
from django.contrib.auth.mixins import (
LoginRequiredMixin, PermissionRequiredMixin, UserPassesTestMixin,
)
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from django.views.generic import View
class AlwaysTrueMixin(UserPassesTestMixin):
def test_func(self):
return True
class AlwaysFalseMixin(UserPassesTestMixin):
def test_func(self):
return False
class EmptyResponseView(View):
def get(self, request, *args, **kwargs):
return HttpResponse()
class AlwaysTrueView(AlwaysTrueMixin, EmptyResponseView):
pass
class AlwaysFalseView(AlwaysFalseMixin, EmptyResponseView):
pass
class StackedMixinsView1(LoginRequiredMixin, PermissionRequiredMixin, EmptyResponseView):
permission_required = ['auth.add_customuser', 'auth.change_customuser']
raise_exception = True
class StackedMixinsView2(PermissionRequiredMixin, LoginRequiredMixin, EmptyResponseView):
permission_required = ['auth.add_customuser', 'auth.change_customuser']
raise_exception = True
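# The two stacked views above combine the same mixins in opposite order; the
# tests below expect identical behavior regardless of stacking order.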
class AccessMixinTests(TestCase):
factory = RequestFactory()
def test_stacked_mixins_success(self):
user = models.User.objects.create(username='joe', password='qwerty')
perms = models.Permission.objects.filter(codename__in=('add_customuser', 'change_customuser'))
user.user_permissions.add(*perms)
request = self.factory.get('/rand')
request.user = user
view = StackedMixinsView1.as_view()
response = view(request)
self.assertEqual(response.status_code, 200)
view = StackedMixinsView2.as_view()
response = view(request)
self.assertEqual(response.status_code, 200)
def test_stacked_mixins_missing_permission(self):
user = models.User.objects.create(username='joe', password='qwerty')
perms = models.Permission.objects.filter(codename__in=('add_customuser',))
user.user_permissions.add(*perms)
request = self.factory.get('/rand')
request.user = user
view = StackedMixinsView1.as_view()
with self.assertRaises(PermissionDenied):
view(request)
view = StackedMixinsView2.as_view()
with self.assertRaises(PermissionDenied):
view(request)
def test_stacked_mixins_not_logged_in(self):
user = models.User.objects.create(username='joe', password='qwerty')
user.is_authenticated = lambda: False
perms = models.Permission.objects.filter(codename__in=('add_customuser', 'change_customuser'))
user.user_permissions.add(*perms)
request = self.factory.get('/rand')
request.user = user
view = StackedMixinsView1.as_view()
with self.assertRaises(PermissionDenied):
view(request)
view = StackedMixinsView2.as_view()
with self.assertRaises(PermissionDenied):
view(request)
class UserPassesTestTests(TestCase):
factory = RequestFactory()
def _test_redirect(self, view=None, url='/accounts/login/?next=/rand'):
if not view:
view = AlwaysFalseView.as_view()
request = self.factory.get('/rand')
request.user = AnonymousUser()
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, url)
def test_default(self):
self._test_redirect()
def test_custom_redirect_url(self):
class AView(AlwaysFalseView):
login_url = '/login/'
self._test_redirect(AView.as_view(), '/login/?next=/rand')
def test_custom_redirect_parameter(self):
class AView(AlwaysFalseView):
redirect_field_name = 'goto'
self._test_redirect(AView.as_view(), '/accounts/login/?goto=/rand')
def test_no_redirect_parameter(self):
class AView(AlwaysFalseView):
redirect_field_name = None
self._test_redirect(AView.as_view(), '/accounts/login/')
def test_raise_exception(self):
class AView(AlwaysFalseView):
raise_exception = True
request = self.factory.get('/rand')
request.user = AnonymousUser()
self.assertRaises(PermissionDenied, AView.as_view(), request)
def test_raise_exception_custom_message(self):
msg = "You don't have access here"
class AView(AlwaysFalseView):
raise_exception = True
permission_denied_message = msg
request = self.factory.get('/rand')
request.user = AnonymousUser()
view = AView.as_view()
with self.assertRaises(PermissionDenied) as cm:
view(request)
self.assertEqual(cm.exception.args[0], msg)
def test_raise_exception_custom_message_function(self):
msg = "You don't have access here"
class AView(AlwaysFalseView):
raise_exception = True
def get_permission_denied_message(self):
return msg
request = self.factory.get('/rand')
request.user = AnonymousUser()
view = AView.as_view()
with self.assertRaises(PermissionDenied) as cm:
view(request)
self.assertEqual(cm.exception.args[0], msg)
def test_user_passes(self):
view = AlwaysTrueView.as_view()
request = self.factory.get('/rand')
request.user = AnonymousUser()
response = view(request)
self.assertEqual(response.status_code, 200)
class LoginRequiredMixinTests(TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
cls.user = models.User.objects.create(username='joe', password='qwerty')
def test_login_required(self):
"""
Check that login_required works on a simple view wrapped in a
login_required decorator.
"""
class AView(LoginRequiredMixin, EmptyResponseView):
pass
view = AView.as_view()
request = self.factory.get('/rand')
request.user = AnonymousUser()
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertEqual('/accounts/login/?next=/rand', response.url)
request = self.factory.get('/rand')
request.user = self.user
response = view(request)
self.assertEqual(response.status_code, 200)
class PermissionsRequiredMixinTests(TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
cls.user = models.User.objects.create(username='joe', password='qwerty')
perms = models.Permission.objects.filter(codename__in=('add_customuser', 'change_customuser'))
cls.user.user_permissions.add(*perms)
def test_many_permissions_pass(self):
class AView(PermissionRequiredMixin, EmptyResponseView):
permission_required = ['auth.add_customuser', 'auth.change_customuser']
request = self.factory.get('/rand')
request.user = self.user
resp = AView.as_view()(request)
self.assertEqual(resp.status_code, 200)
def test_single_permission_pass(self):
class AView(PermissionRequiredMixin, EmptyResponseView):
permission_required = 'auth.add_customuser'
request = self.factory.get('/rand')
request.user = self.user
resp = AView.as_view()(request)
self.assertEqual(resp.status_code, 200)
def test_permissioned_denied_redirect(self):
class AView(PermissionRequiredMixin, EmptyResponseView):
permission_required = ['auth.add_customuser', 'auth.change_customuser', 'non-existent-permission']
request = self.factory.get('/rand')
request.user = self.user
resp = AView.as_view()(request)
self.assertEqual(resp.status_code, 302)
def test_permissioned_denied_exception_raised(self):
class AView(PermissionRequiredMixin, EmptyResponseView):
permission_required = ['auth.add_customuser', 'auth.change_customuser', 'non-existent-permission']
raise_exception = True
request = self.factory.get('/rand')
request.user = self.user
self.assertRaises(PermissionDenied, AView.as_view(), request)
| bsd-3-clause |
khazhyk/discord.py | discord/shard.py | 1 | 18068 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import itertools
import logging
import aiohttp
from .state import AutoShardedConnectionState
from .client import Client
from .backoff import ExponentialBackoff
from .gateway import *
from .errors import ClientException, InvalidArgument, HTTPException, GatewayNotFound, ConnectionClosed
from . import utils
from .enums import Status
log = logging.getLogger(__name__)
class EventType:
close = 0
reconnect = 1
resume = 2
identify = 3
terminate = 4
clean_close = 5
class EventItem:
__slots__ = ('type', 'shard', 'error')
def __init__(self, etype, shard, error):
self.type = etype
self.shard = shard
self.error = error
def __lt__(self, other):
if not isinstance(other, EventItem):
return NotImplemented
return self.type < other.type
def __eq__(self, other):
if not isinstance(other, EventItem):
return NotImplemented
return self.type == other.type
def __hash__(self):
return hash(self.type)
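# Because EventItem orders by EventType value, the priority queue used by
# AutoShardedClient.connect() services lower-numbered events (e.g. close)
# before higher-numbered ones (e.g. clean_close).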
class Shard:
def __init__(self, ws, client, queue_put):
self.ws = ws
self._client = client
self._dispatch = client.dispatch
self._queue_put = queue_put
self.loop = self._client.loop
self._disconnect = False
self._reconnect = client._reconnect
self._backoff = ExponentialBackoff()
self._task = None
self._handled_exceptions = (
OSError,
HTTPException,
GatewayNotFound,
ConnectionClosed,
aiohttp.ClientError,
asyncio.TimeoutError,
)
@property
def id(self):
return self.ws.shard_id
def launch(self):
self._task = self.loop.create_task(self.worker())
def _cancel_task(self):
if self._task is not None and not self._task.done():
self._task.cancel()
async def close(self):
self._cancel_task()
await self.ws.close(code=1000)
async def disconnect(self):
await self.close()
self._dispatch('shard_disconnect', self.id)
async def _handle_disconnect(self, e):
self._dispatch('disconnect')
self._dispatch('shard_disconnect', self.id)
if not self._reconnect:
self._queue_put(EventItem(EventType.close, self, e))
return
if self._client.is_closed():
return
if isinstance(e, OSError) and e.errno in (54, 10054):
# If we get Connection reset by peer then always try to RESUME the connection.
exc = ReconnectWebSocket(self.id, resume=True)
self._queue_put(EventItem(EventType.resume, self, exc))
return
if isinstance(e, ConnectionClosed):
if e.code != 1000:
self._queue_put(EventItem(EventType.close, self, e))
return
retry = self._backoff.delay()
log.error('Attempting a reconnect for shard ID %s in %.2fs', self.id, retry, exc_info=e)
await asyncio.sleep(retry)
self._queue_put(EventItem(EventType.reconnect, self, e))
async def worker(self):
while not self._client.is_closed():
try:
await self.ws.poll_event()
except ReconnectWebSocket as e:
etype = EventType.resume if e.resume else EventType.identify
self._queue_put(EventItem(etype, self, e))
break
except self._handled_exceptions as e:
await self._handle_disconnect(e)
break
except asyncio.CancelledError:
break
except Exception as e:
self._queue_put(EventItem(EventType.terminate, self, e))
break
async def reidentify(self, exc):
self._cancel_task()
self._dispatch('disconnect')
self._dispatch('shard_disconnect', self.id)
log.info('Got a request to %s the websocket at Shard ID %s.', exc.op, self.id)
try:
coro = DiscordWebSocket.from_client(self._client, resume=exc.resume, shard_id=self.id,
session=self.ws.session_id, sequence=self.ws.sequence)
self.ws = await asyncio.wait_for(coro, timeout=60.0)
except self._handled_exceptions as e:
await self._handle_disconnect(e)
except asyncio.CancelledError:
return
except Exception as e:
self._queue_put(EventItem(EventType.terminate, self, e))
else:
self.launch()
async def reconnect(self):
self._cancel_task()
try:
coro = DiscordWebSocket.from_client(self._client, shard_id=self.id)
self.ws = await asyncio.wait_for(coro, timeout=60.0)
except self._handled_exceptions as e:
await self._handle_disconnect(e)
except asyncio.CancelledError:
return
except Exception as e:
self._queue_put(EventItem(EventType.terminate, self, e))
else:
self.launch()
class ShardInfo:
"""A class that gives information and control over a specific shard.
You can retrieve this object via :meth:`AutoShardedClient.get_shard`
or :attr:`AutoShardedClient.shards`.
.. versionadded:: 1.4
Attributes
------------
id: :class:`int`
The shard ID for this shard.
shard_count: Optional[:class:`int`]
The shard count for this cluster. If this is ``None`` then the bot has not started yet.
"""
__slots__ = ('_parent', 'id', 'shard_count')
def __init__(self, parent, shard_count):
self._parent = parent
self.id = parent.id
self.shard_count = shard_count
def is_closed(self):
""":class:`bool`: Whether the shard connection is currently closed."""
return not self._parent.ws.open
async def disconnect(self):
"""|coro|
Disconnects a shard. When this is called, the shard connection will no
longer be open.
If the shard is already disconnected this does nothing.
"""
if self.is_closed():
return
await self._parent.disconnect()
async def reconnect(self):
"""|coro|
Disconnects and then connects the shard again.
"""
if not self.is_closed():
await self._parent.disconnect()
await self._parent.reconnect()
async def connect(self):
"""|coro|
Connects a shard. If the shard is already connected this does nothing.
"""
if not self.is_closed():
return
await self._parent.reconnect()
@property
def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds for this shard."""
return self._parent.ws.latency
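# Usage sketch (assumes a running AutoShardedClient named ``client``, inside
# a coroutine):
#
#     info = client.get_shard(0)
#     if info is not None and info.is_closed():
#         await info.connect()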
class AutoShardedClient(Client):
"""A client similar to :class:`Client` except it handles the complications
of sharding for the user into a more manageable and transparent single
process bot.
    When using this client, you will be able to use it as if it were a regular
    :class:`Client` with a single shard, while internally it is split up into
    multiple shards. This means you do not have to deal with
IPC or other complicated infrastructure.
It is recommended to use this client only if you have surpassed at least
1000 guilds.
If no :attr:`.shard_count` is provided, then the library will use the
Bot Gateway endpoint call to figure out how many shards to use.
If a ``shard_ids`` parameter is given, then those shard IDs will be used
to launch the internal shards. Note that :attr:`.shard_count` must be provided
if this is used. By default, when omitted, the client will launch shards from
0 to ``shard_count - 1``.
Attributes
------------
shard_ids: Optional[List[:class:`int`]]
An optional list of shard_ids to launch the shards with.
"""
def __init__(self, *args, loop=None, **kwargs):
kwargs.pop('shard_id', None)
self.shard_ids = kwargs.pop('shard_ids', None)
super().__init__(*args, loop=loop, **kwargs)
if self.shard_ids is not None:
if self.shard_count is None:
raise ClientException('When passing manual shard_ids, you must provide a shard_count.')
elif not isinstance(self.shard_ids, (list, tuple)):
raise ClientException('shard_ids parameter must be a list or a tuple.')
self._connection = AutoShardedConnectionState(dispatch=self.dispatch,
handlers=self._handlers, syncer=self._syncer,
hooks=self._hooks, http=self.http, loop=self.loop, **kwargs)
# instead of a single websocket, we have multiple
# the key is the shard_id
self.__shards = {}
self._connection._get_websocket = self._get_websocket
self.__queue = asyncio.PriorityQueue()
def _get_websocket(self, guild_id=None, *, shard_id=None):
if shard_id is None:
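            # Discord's documented routing formula: a guild lives on shard
            # (guild_id >> 22) % shard_count.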
shard_id = (guild_id >> 22) % self.shard_count
return self.__shards[shard_id].ws
@property
def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
        This operates similarly to :meth:`Client.latency` except it averages
        the latency of every shard. To get a list of per-shard latencies, check the
:attr:`latencies` property. Returns ``nan`` if there are no shards ready.
"""
if not self.__shards:
return float('nan')
return sum(latency for _, latency in self.latencies) / len(self.__shards)
@property
def latencies(self):
"""List[Tuple[:class:`int`, :class:`float`]]: A list of latencies between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This returns a list of tuples with elements ``(shard_id, latency)``.
"""
return [(shard_id, shard.ws.latency) for shard_id, shard in self.__shards.items()]
def get_shard(self, shard_id):
"""Optional[:class:`ShardInfo`]: Gets the shard information at a given shard ID or ``None`` if not found."""
try:
parent = self.__shards[shard_id]
except KeyError:
return None
else:
return ShardInfo(parent, self.shard_count)
@utils.cached_property
def shards(self):
"""Mapping[int, :class:`ShardInfo`]: Returns a mapping of shard IDs to their respective info object."""
return { shard_id: ShardInfo(parent, self.shard_count) for shard_id, parent in self.__shards.items() }
async def request_offline_members(self, *guilds):
r"""|coro|
Requests previously offline members from the guild to be filled up
into the :attr:`Guild.members` cache. This function is usually not
called. It should only be used if you have the ``fetch_offline_members``
parameter set to ``False``.
When the client logs on and connects to the websocket, Discord does
not provide the library with offline members if the number of members
        in the guild is larger than 250. A guild is considered large when
        :attr:`Guild.large` is ``True``.
Parameters
-----------
\*guilds: :class:`Guild`
An argument list of guilds to request offline members for.
Raises
-------
InvalidArgument
If any guild is unavailable or not large in the collection.
"""
if any(not g.large or g.unavailable for g in guilds):
raise InvalidArgument('An unavailable or non-large guild was passed.')
_guilds = sorted(guilds, key=lambda g: g.shard_id)
for shard_id, sub_guilds in itertools.groupby(_guilds, key=lambda g: g.shard_id):
sub_guilds = list(sub_guilds)
await self._connection.request_offline_members(sub_guilds, shard_id=shard_id)
async def launch_shard(self, gateway, shard_id, *, initial=False):
try:
coro = DiscordWebSocket.from_client(self, initial=initial, gateway=gateway, shard_id=shard_id)
ws = await asyncio.wait_for(coro, timeout=180.0)
except Exception:
log.exception('Failed to connect for shard_id: %s. Retrying...', shard_id)
await asyncio.sleep(5.0)
return await self.launch_shard(gateway, shard_id)
# keep reading the shard while others connect
self.__shards[shard_id] = ret = Shard(ws, self, self.__queue.put_nowait)
ret.launch()
async def launch_shards(self):
if self.shard_count is None:
self.shard_count, gateway = await self.http.get_bot_gateway()
else:
gateway = await self.http.get_gateway()
self._connection.shard_count = self.shard_count
shard_ids = self.shard_ids if self.shard_ids else range(self.shard_count)
self._connection.shard_ids = shard_ids
for shard_id in shard_ids:
initial = shard_id == shard_ids[0]
await self.launch_shard(gateway, shard_id, initial=initial)
self._connection.shards_launched.set()
async def connect(self, *, reconnect=True):
self._reconnect = reconnect
await self.launch_shards()
while not self.is_closed():
item = await self.__queue.get()
if item.type == EventType.close:
await self.close()
if isinstance(item.error, ConnectionClosed) and item.error.code != 1000:
raise item.error
return
elif item.type in (EventType.identify, EventType.resume):
await item.shard.reidentify(item.error)
elif item.type == EventType.reconnect:
await item.shard.reconnect()
elif item.type == EventType.terminate:
await self.close()
raise item.error
elif item.type == EventType.clean_close:
return
async def close(self):
"""|coro|
Closes the connection to Discord.
"""
if self.is_closed():
return
self._closed = True
for vc in self.voice_clients:
try:
await vc.disconnect()
except Exception:
pass
to_close = [asyncio.ensure_future(shard.close(), loop=self.loop) for shard in self.__shards.values()]
if to_close:
await asyncio.wait(to_close)
await self.http.close()
self.__queue.put_nowait(EventItem(EventType.clean_close, None, None))
async def change_presence(self, *, activity=None, status=None, afk=False, shard_id=None):
"""|coro|
Changes the client's presence.
Example: ::
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
Parameters
----------
activity: Optional[:class:`BaseActivity`]
            The activity being done. ``None`` if no activity is currently being done.
status: Optional[:class:`Status`]
Indicates what status to change to. If ``None``, then
:attr:`Status.online` is used.
afk: :class:`bool`
Indicates if you are going AFK. This allows the discord
client to know how to handle push notifications better
for you in case you are actually idle and not lying.
shard_id: Optional[:class:`int`]
The shard_id to change the presence to. If not specified
or ``None``, then it will change the presence of every
shard the bot can see.
Raises
------
InvalidArgument
If the ``activity`` parameter is not of proper type.
"""
if status is None:
status = 'online'
status_enum = Status.online
elif status is Status.offline:
status = 'invisible'
status_enum = Status.offline
else:
status_enum = status
status = str(status)
if shard_id is None:
for shard in self.__shards.values():
await shard.ws.change_presence(activity=activity, status=status, afk=afk)
guilds = self._connection.guilds
else:
shard = self.__shards[shard_id]
await shard.ws.change_presence(activity=activity, status=status, afk=afk)
guilds = [g for g in self._connection.guilds if g.shard_id == shard_id]
activities = () if activity is None else (activity,)
for guild in guilds:
me = guild.me
if me is None:
continue
me.activities = activities
me.status = status_enum
| mit |
m-labs/linux-milkymist | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
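# autodict (from Core) is an auto-vivifying dict: first access creates a
# nested autodict, so the first "+= 1" in trace_unhandled() raises TypeError,
# which is caught to seed the count at 1.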
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
gogoair/lavatory | src/lavatory/commands/stats.py | 1 | 1409 | """Statistics of the repo."""
import logging
import click
from ..utils.get_artifactory_info import get_repos, get_storage
LOG = logging.getLogger(__name__)
@click.command()
@click.pass_context
@click.option(
'--repo',
default=None,
multiple=True,
required=False,
help='Name of specific repository to run against. Can use --repo multiple times. If not provided, uses all repos.')
def stats(ctx, repo):
"""Get statistics of repos."""
LOG.debug('Passed args: %s, %s.', ctx, repo)
storage = get_storage(repo_names=repo, repo_type='any')
if not storage:
LOG.info('User does not have "Admin Privileges" to generate statistics.')
return
repositories = get_repos(repo_names=repo, repo_type='any')
for repository in repositories:
repo = storage.get(repository)
if repo is None:
LOG.error('Repo name %s does not exist.', repository)
continue
LOG.info('Repo Name: %s.', repo.get('repoKey'))
LOG.info('Repo Type: %s - %s.', repo.get('repoType'), repo.get('packageType'))
LOG.info('Repo Used Space: %s - %s of total used space.', repo.get('usedSpace'), repo.get('percentage'))
LOG.info('Repo Folders %s, Files %s. Total items count: %s.',
repo.get('foldersCount'), repo.get('filesCount'), repo.get('itemsCount'))
LOG.info('-' * 25)
click.echo('Done.')
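# CLI sketch (assumed invocation via the lavatory entry point; repo names are
# illustrative):
#
#     lavatory stats --repo my-local-repo --repo my-remote-repo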
| apache-2.0 |
PaulKinlan/cli-caniuse | site/app/scripts/bower_components/jsrepl-build/extern/python/closured/lib/python2.7/getpass.py | 233 | 5563 | """Utilities to get a password and/or the current user name.
getpass(prompt[, stream]) - Prompt for a password, with echo turned off.
getuser() - Get the user name from the environment or password database.
GetPassWarning - This UserWarning is issued when getpass() cannot prevent
echoing of the password contents while reading.
On Windows, the msvcrt module will be used.
On the Mac EasyDialogs.AskPassword is used, if available.
"""
# Authors: Piers Lauder (original)
# Guido van Rossum (Windows support and cleanup)
# Gregory P. Smith (tty support & GetPassWarning)
import os, sys, warnings
__all__ = ["getpass","getuser","GetPassWarning"]
class GetPassWarning(UserWarning): pass
def unix_getpass(prompt='Password: ', stream=None):
"""Prompt for a password, with echo turned off.
Args:
prompt: Written on stream to ask for the input. Default: 'Password: '
stream: A writable file object to display the prompt. Defaults to
the tty. If no tty is available defaults to sys.stderr.
Returns:
The seKr3t input.
Raises:
EOFError: If our input tty or stdin was closed.
GetPassWarning: When we were unable to turn echo off on the input.
Always restores terminal settings before returning.
"""
fd = None
tty = None
try:
# Always try reading and writing directly on the tty first.
fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
tty = os.fdopen(fd, 'w+', 1)
input = tty
if not stream:
stream = tty
except EnvironmentError, e:
# If that fails, see if stdin can be controlled.
try:
fd = sys.stdin.fileno()
except (AttributeError, ValueError):
passwd = fallback_getpass(prompt, stream)
input = sys.stdin
if not stream:
stream = sys.stderr
if fd is not None:
passwd = None
try:
old = termios.tcgetattr(fd) # a copy to save
new = old[:]
new[3] &= ~termios.ECHO # 3 == 'lflags'
tcsetattr_flags = termios.TCSAFLUSH
if hasattr(termios, 'TCSASOFT'):
tcsetattr_flags |= termios.TCSASOFT
try:
termios.tcsetattr(fd, tcsetattr_flags, new)
passwd = _raw_input(prompt, stream, input=input)
finally:
termios.tcsetattr(fd, tcsetattr_flags, old)
stream.flush() # issue7208
except termios.error, e:
if passwd is not None:
# _raw_input succeeded. The final tcsetattr failed. Reraise
# instead of leaving the terminal in an unknown state.
raise
# We can't control the tty or stdin. Give up and use normal IO.
# fallback_getpass() raises an appropriate warning.
del input, tty # clean up unused file objects before blocking
passwd = fallback_getpass(prompt, stream)
stream.write('\n')
return passwd
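# Usage sketch: the module-level ``getpass`` name (bound at the bottom of
# this file) selects the right implementation for the platform.
#
#     import getpass
#     secret = getpass.getpass('Passphrase: ')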
def win_getpass(prompt='Password: ', stream=None):
"""Prompt for password with echo off, using Windows getch()."""
if sys.stdin is not sys.__stdin__:
return fallback_getpass(prompt, stream)
import msvcrt
for c in prompt:
msvcrt.putch(c)
pw = ""
while 1:
c = msvcrt.getch()
if c == '\r' or c == '\n':
break
if c == '\003':
raise KeyboardInterrupt
if c == '\b':
pw = pw[:-1]
else:
pw = pw + c
msvcrt.putch('\r')
msvcrt.putch('\n')
return pw
def fallback_getpass(prompt='Password: ', stream=None):
warnings.warn("Can not control echo on the terminal.", GetPassWarning,
stacklevel=2)
if not stream:
stream = sys.stderr
print >>stream, "Warning: Password input may be echoed."
return _raw_input(prompt, stream)
def _raw_input(prompt="", stream=None, input=None):
# A raw_input() replacement that doesn't save the string in the
# GNU readline history.
if not stream:
stream = sys.stderr
if not input:
input = sys.stdin
prompt = str(prompt)
if prompt:
stream.write(prompt)
stream.flush()
# NOTE: The Python C API calls flockfile() (and unlock) during readline.
line = input.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getuser():
"""Get the username from the environment or password database.
First try various environment variables, then the password
database. This works on Windows as long as USERNAME is set.
"""
import os
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.environ.get(name)
if user:
return user
# If this fails, the exception will "explain" why
import pwd
return pwd.getpwuid(os.getuid())[0]
# Bind the name getpass to the appropriate function
try:
import termios
# it's possible there is an incompatible termios from the
# McMillan Installer, make sure we have a UNIX-compatible termios
termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
try:
import msvcrt
except ImportError:
try:
from EasyDialogs import AskPassword
except ImportError:
getpass = fallback_getpass
else:
getpass = AskPassword
else:
getpass = win_getpass
else:
getpass = unix_getpass
| apache-2.0 |
ehfeng/pipet | tests/test_stripe.py | 2 | 23250 | from inspect import isclass
import json
import logging
import os
import pytest
from urllib.parse import urlparse
from unittest import TestCase
from flask import Flask
import responses
import requests
from pipet import create_app
from pipet.models import db, Organization
from pipet.sources.stripe import StripeAccount
from pipet.sources.stripe.models import (
Base,
CLASS_REGISTRY,
)
from pipet.sources.stripe.tasks import sync
ENDPOINT_RESPONSES = {'/v1/balance/history': """
{
"object": "list",
"url": "/v1/balance/history",
"has_more": false,
"data": [
{
"id": "txn_15X3o4IIEDaCRvUQzlz1y9TZ",
"object": "balance_transaction",
"amount": 999,
"available_on": 1424736000,
"created": 1424150520,
"currency": "usd",
"description": "pipet charge",
"exchange_rate": null,
"fee": 59,
"fee_details": [
{
"amount": 59,
"application": null,
"currency": "usd",
"description": "Stripe processing fees",
"type": "stripe_fee"
}
],
"net": 940,
"source": "ch_15X3o4IIEDaCRvUQXSQN3gtj",
"status": "available",
"type": "charge"
}
]
}
""",
"/v1/charges": """
{
"object": "list",
"url": "/v1/charges",
"has_more": false,
"data": [
{
"id": "ch_1CegICIIEDaCRvUQxRcom3bR",
"object": "charge",
"amount": 2345,
"amount_refunded": 0,
"application": null,
"application_fee": null,
"balance_transaction": "txn_15X3o4IIEDaCRvUQzlz1y9TZ",
"captured": true,
"created": 1529401108,
"currency": "usd",
"customer": null,
"description": "final sync test",
"destination": null,
"dispute": null,
"failure_code": null,
"failure_message": null,
"fraud_details": {
},
"invoice": null,
"livemode": false,
"metadata": {
},
"on_behalf_of": null,
"order": null,
"outcome": {
"network_status": "approved_by_network",
"reason": null,
"risk_level": "normal",
"seller_message": "Payment complete.",
"type": "authorized"
},
"paid": true,
"receipt_email": null,
"receipt_number": null,
"refunded": false,
"refunds": {
"object": "list",
"data": [
],
"has_more": false,
"total_count": 0,
"url": "/v1/charges/ch_1CegICIIEDaCRvUQxRcom3bR/refunds"
},
"review": null,
"shipping": null,
"source": {
"id": "card_1CegICIIEDaCRvUQUx2VSFuF",
"object": "card",
"address_city": null,
"address_country": null,
"address_line1": null,
"address_line1_check": null,
"address_line2": null,
"address_state": null,
"address_zip": null,
"address_zip_check": null,
"brand": "Visa",
"country": "US",
"customer": null,
"cvc_check": "pass",
"dynamic_last4": null,
"exp_month": 4,
"exp_year": 2024,
"fingerprint": "9OIudfTmj2f9oK6Y",
"funding": "credit",
"last4": "4242",
"metadata": {
},
"name": null,
"tokenization_method": null
},
"source_transfer": null,
"statement_descriptor": "pipet",
"status": "succeeded",
"transfer_group": null
}
]
}
""",
"/v1/customers": """
{
"object": "list",
"url": "/v1/customers",
"has_more": false,
"data": [
{
"id": "cus_D86vSD4f276b7D",
"object": "customer",
"account_balance": 0,
"created": 1530156120,
"currency": "usd",
"default_source": null,
"delinquent": false,
"description": null,
"discount": null,
"email": null,
"invoice_prefix": "187F3A1",
"livemode": false,
"metadata": {
},
"shipping": null,
"sources": {
"object": "list",
"data": [
],
"has_more": false,
"total_count": 0,
"url": "/v1/customers/cus_D86vSD4f276b7D/sources"
},
"subscriptions": {
"object": "list",
"data": [
],
"has_more": false,
"total_count": 0,
"url": "/v1/customers/cus_D86vSD4f276b7D/subscriptions"
}
}
]
}
""",
"/v1/disputes": """
{
"object": "list",
"url": "/v1/disputes",
"has_more": false,
"data": [
{
"id": "dp_1ChqhoIIEDaCRvUQLTd8DLpC",
"object": "dispute",
"amount": 1000,
"balance_transaction": "txn_15X3o4IIEDaCRvUQzlz1y9TZ",
"balance_transactions": [
],
"charge": "ch_1CegICIIEDaCRvUQxRcom3bR",
"created": 1530156120,
"currency": "usd",
"evidence": {
"access_activity_log": null,
"billing_address": null,
"cancellation_policy": null,
"cancellation_policy_disclosure": null,
"cancellation_rebuttal": null,
"customer_communication": null,
"customer_email_address": null,
"customer_name": null,
"customer_purchase_ip": null,
"customer_signature": null,
"duplicate_charge_documentation": null,
"duplicate_charge_explanation": null,
"duplicate_charge_id": null,
"product_description": null,
"receipt": null,
"refund_policy": null,
"refund_policy_disclosure": null,
"refund_refusal_explanation": null,
"service_date": null,
"service_documentation": null,
"shipping_address": null,
"shipping_carrier": null,
"shipping_date": null,
"shipping_documentation": null,
"shipping_tracking_number": null,
"uncategorized_file": null,
"uncategorized_text": null
},
"evidence_details": {
"due_by": 1531871999,
"has_evidence": false,
"past_due": false,
"submission_count": 0
},
"is_charge_refundable": false,
"livemode": false,
"metadata": {
},
"reason": "general",
"status": "needs_response"
}
]
}
""",
"/v1/payouts": """
{
"object": "list",
"url": "/v1/payouts",
"has_more": false,
"data": [
{
"id": "tr_15ZckvIIEDaCRvUQXroWiYUv",
"object": "payout",
"amount": 1880,
"arrival_date": 1424822400,
"automatic": true,
"balance_transaction": "txn_15X3o4IIEDaCRvUQzlz1y9TZ",
"created": 1424761521,
"currency": "usd",
"description": "STRIPE TRANSFER",
"destination": "ba_15X43PIIEDaCRvUQHoJzY91c",
"failure_balance_transaction": null,
"failure_code": null,
"failure_message": null,
"livemode": false,
"metadata": {
},
"method": "standard",
"source_type": "card",
"statement_descriptor": null,
"status": "paid",
"type": "bank_account"
}
]
}
""",
"/v1/refunds": """
{
"object": "list",
"url": "/v1/refunds",
"has_more": false,
"data": [
{
"id": "re_1ChqhmIIEDaCRvUQQz7NgdLh",
"object": "refund",
"amount": 100,
"balance_transaction": null,
"charge": "ch_1CegICIIEDaCRvUQxRcom3bR",
"created": 1530156118,
"currency": "usd",
"metadata": {
},
"reason": null,
"receipt_number": null,
"status": "succeeded"
}
]
}
""",
"/v1/coupons": """
{
"object": "list",
"url": "/v1/coupons",
"has_more": false,
"data": [
{
"id": "25OFF",
"object": "coupon",
"amount_off": null,
"created": 1530156116,
"currency": null,
"duration": "repeating",
"duration_in_months": 3,
"livemode": false,
"max_redemptions": null,
"metadata": {
},
"name": "25% off",
"percent_off": 25,
"redeem_by": null,
"times_redeemed": 0,
"valid": true
}
]
}
""",
"/v1/invoices": """
{
"object": "list",
"url": "/v1/invoices",
"has_more": false,
"data": [
{
"id": "in_1ChqhjIIEDaCRvUQ7NprLEPv",
"object": "invoice",
"amount_due": 0,
"amount_paid": 0,
"amount_remaining": 0,
"application_fee": null,
"attempt_count": 0,
"attempted": false,
"billing": "charge_automatically",
"billing_reason": "manual",
"charge": null,
"closed": false,
"currency": "usd",
"customer": "cus_D86vo2HPNmacy6",
"date": 1530156115,
"description": null,
"discount": null,
"due_date": null,
"ending_balance": null,
"forgiven": false,
"hosted_invoice_url": null,
"invoice_pdf": null,
"lines": {
"data": [
{
"id": "sli_58ae714e046dda",
"object": "line_item",
"amount": 999,
"currency": "usd",
"description": "1 × Ivory Enhanced (at $9.99 / month)",
"discountable": true,
"livemode": false,
"metadata": {
},
"period": {
"end": 1532748115,
"start": 1530156115
},
"plan": {
"id": "ivory-enhanced-527",
"object": "plan",
"active": true,
"aggregate_usage": null,
"amount": 999,
"billing_scheme": "per_unit",
"created": 1506389749,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": false,
"metadata": {
},
"nickname": null,
"product": "prod_BT475rWOlNGTN4",
"tiers": null,
"tiers_mode": null,
"transform_usage": null,
"trial_period_days": null,
"usage_type": "licensed"
},
"proration": false,
"quantity": 1,
"subscription": "sub_D86vF3dcAhTjac",
"subscription_item": "si_D86vvJ9hzZdiIx",
"type": "subscription"
}
],
"has_more": false,
"object": "list",
"url": "/v1/invoices/in_1ChqhjIIEDaCRvUQ7NprLEPv/lines"
},
"livemode": false,
"metadata": {
},
"next_payment_attempt": 1530159715,
"number": "D5D4F3B-0001",
"paid": false,
"period_end": 1530156115,
"period_start": 1530156115,
"receipt_number": null,
"starting_balance": 0,
"statement_descriptor": null,
"subscription": null,
"subtotal": 0,
"tax": null,
"tax_percent": null,
"total": 0,
"webhooks_delivered_at": null
}
]
}
""",
"/v1/invoiceitems": """
{
"object": "list",
"url": "/v1/invoiceitems",
"has_more": false,
"data": [
{
"id": "ii_1ChqhkIIEDaCRvUQbBI0Cza6",
"object": "invoiceitem",
"amount": 0,
"currency": "usd",
"customer": "cus_D86vimVlXeP8xe",
"date": 1530156116,
"description": "My First Invoice Item (created for API docs)",
"discountable": true,
"invoice": null,
"livemode": false,
"metadata": {
},
"period": {
"start": 1530156116,
"end": 1530156116
},
"plan": null,
"proration": false,
"quantity": 1,
"subscription": null,
"unit_amount": 0
}
]
}
""",
"/v1/products": """
{
"object": "list",
"url": "/v1/products",
"has_more": false,
"data": [
{
"id": "prod_D86vOcNX2jT2GK",
"object": "product",
"active": true,
"attributes": [
"size",
"gender"
],
"caption": null,
"created": 1530156118,
"deactivate_on": [
],
"description": "Comfortable gray cotton t-shirts",
"images": [
],
"livemode": false,
"metadata": {
},
"name": "T-shirt",
"package_dimensions": null,
"shippable": true,
"type": "good",
"updated": 1530156118,
"url": null
}
]
}
""",
"/v1/plans": """
{
"object": "list",
"url": "/v1/plans",
"has_more": false,
"data": [
{
"id": "gold",
"object": "plan",
"active": true,
"aggregate_usage": null,
"amount": 2000,
"billing_scheme": "per_unit",
"created": 1530156116,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": false,
"metadata": {
},
"nickname": null,
"product": "prod_D84GOPf5bcpOch",
"tiers": null,
"tiers_mode": null,
"transform_usage": null,
"trial_period_days": null,
"usage_type": "licensed"
}
]
}
""",
"/v1/subscriptions": """
{
"object": "list",
"url": "/v1/subscriptions",
"has_more": false,
"data": [
{
"id": "sub_D86viNMlAOOOuZ",
"object": "subscription",
"application_fee_percent": null,
"billing": "charge_automatically",
"billing_cycle_anchor": 1530156117,
"cancel_at_period_end": false,
"canceled_at": null,
"created": 1530156117,
"current_period_end": 1532748117,
"current_period_start": 1530156117,
"customer": "cus_D86aeQQwRIjv7N",
"days_until_due": null,
"discount": null,
"ended_at": null,
"items": {
"object": "list",
"data": [
{
"id": "si_D86vr1pUNsUV7m",
"object": "subscription_item",
"created": 1530156117,
"metadata": {
},
"plan": {
"id": "ivory-enhanced-527",
"object": "plan",
"active": true,
"aggregate_usage": null,
"amount": 999,
"billing_scheme": "per_unit",
"created": 1506389749,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": false,
"metadata": {
},
"nickname": null,
"product": "prod_BT475rWOlNGTN4",
"tiers": null,
"tiers_mode": null,
"transform_usage": null,
"trial_period_days": null,
"usage_type": "licensed"
},
"quantity": 1,
"subscription": "sub_D86viNMlAOOOuZ"
}
],
"has_more": false,
"total_count": 1,
"url": "/v1/subscription_items?subscription=sub_D86viNMlAOOOuZ"
},
"livemode": false,
"metadata": {
},
"plan": {
"id": "ivory-enhanced-527",
"object": "plan",
"active": true,
"aggregate_usage": null,
"amount": 999,
"billing_scheme": "per_unit",
"created": 1506389749,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": false,
"metadata": {
},
"nickname": null,
"product": "prod_BT475rWOlNGTN4",
"tiers": null,
"tiers_mode": null,
"transform_usage": null,
"trial_period_days": null,
"usage_type": "licensed"
},
"quantity": 1,
"start": 1530156117,
"status": "active",
"tax_percent": null,
"trial_end": null,
"trial_start": null
}
]
}
""",
"/v1/subscription_items": """
{
"object": "list",
"url": "/v1/subscription_items",
"has_more": false,
"data": [
{
"id": "si_D86vBOzoKARN6L",
"object": "subscription_item",
"created": 1530156118,
"metadata": {
},
"plan": {
"id": "ivory-enhanced-527",
"object": "plan",
"active": true,
"aggregate_usage": null,
"amount": 999,
"billing_scheme": "per_unit",
"created": 1506389749,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": false,
"metadata": {
},
"nickname": null,
"product": "prod_BT475rWOlNGTN4",
"tiers": null,
"tiers_mode": null,
"transform_usage": null,
"trial_period_days": null,
"usage_type": "licensed"
},
"quantity": 1,
"subscription": "sub_D86vGzBN7EnPIw"
}
]
}
""",
"/v1/transfers": """
{
"object": "list",
"url": "/v1/transfers",
"has_more": false,
"data": [
{
"id": "tr_1ChqhoIIEDaCRvUQbMALwca9",
"object": "transfer",
"amount": 1100,
"amount_reversed": 0,
"balance_transaction": "txn_15X3o4IIEDaCRvUQzlz1y9TZ",
"created": 1530156120,
"currency": "usd",
"description": null,
"destination": "acct_15WLWRIIEDaCRvUQ",
"destination_payment": "py_D86vADQ6Vk99PX",
"livemode": false,
"metadata": {
},
"reversals": {
"object": "list",
"data": [
],
"has_more": false,
"total_count": 0,
"url": "/v1/transfers/tr_1ChqhoIIEDaCRvUQbMALwca9/reversals"
},
"reversed": false,
"source_transaction": null,
"source_type": "card",
"transfer_group": null
}
]
}
""",
"/v1/orders": """
{
"object": "list",
"url": "/v1/orders",
"has_more": false,
"data": [
{
"id": "or_1ChqhpIIEDaCRvUQoZp1buc9",
"object": "order",
"amount": 1500,
"amount_returned": null,
"application": null,
"application_fee": null,
"charge": null,
"created": 1530156121,
"currency": "usd",
"customer": null,
"email": null,
"items": [
{
"object": "order_item",
"amount": 1500,
"currency": "usd",
"description": "T-shirt",
"parent": "sk_178G7qIIEDaCRvUQO8OBONJn",
"quantity": null,
"type": "sku"
}
],
"livemode": false,
"metadata": {
},
"returns": {
"object": "list",
"data": [
],
"has_more": false,
"total_count": 0,
"url": "/v1/order_returns?order=or_1ChqhpIIEDaCRvUQoZp1buc9"
},
"selected_shipping_method": null,
"shipping": {
"address": {
"city": "Anytown",
"country": "US",
"line1": "1234 Main street",
"line2": null,
"postal_code": "123456",
"state": null
},
"carrier": null,
"name": "Jenny Rosen",
"phone": null,
"tracking_number": null
},
"shipping_methods": null,
"status": "created",
"status_transitions": {
"canceled": null,
"fulfiled": null,
"paid": null,
"returned": null
},
"updated": 1530156121
}
]
}
""",
"/v1/order_returns": """
{
"object": "list",
"url": "/v1/order_returns",
"has_more": false,
"data": [
{
"id": "orret_1ChqhpIIEDaCRvUQWYxCcEVA",
"object": "sku",
"active": true,
"attributes": {
"size": "Medium",
"gender": "Unisex"
},
"created": 1530156121,
"currency": "usd",
"image": null,
"inventory": {
"quantity": 50,
"type": "finite",
"value": null
},
"livemode": false,
"metadata": {
},
"package_dimensions": null,
"price": 1500,
"product": "prod_D86vUwm4EoTSpm",
"updated": 1530156121
}
]
}
""",
"/v1/skus": """
{
"object": "list",
"url": "/v1/skus",
"has_more": false,
"data": [
{
"id": "sk_1ChqhpIIEDaCRvUQrlX7yVOF",
"object": "sku",
"active": true,
"attributes": {
"size": "Medium",
"gender": "Unisex"
},
"created": 1530156121,
"currency": "usd",
"image": null,
"inventory": {
"quantity": 50,
"type": "finite",
"value": null
},
"livemode": false,
"metadata": {
},
"package_dimensions": null,
"price": 1500,
"product": "prod_D86vOpWvTsLGlv",
"updated": 1530156121
}
]
}
""",
"/v1/events": """
{
"object": "list",
"url": "/v1/events",
"has_more": false,
"data": [
{
"id": "evt_15ZvAHIIEDaCRvUQVPYnhLxV",
"object": "event",
"api_version": "2015-02-16",
"created": 1424832285,
"data": {
"object": {
"pending": [
{
"amount": 0,
"currency": "usd"
}
],
"available": [
{
"amount": 0,
"currency": "usd"
}
],
"livemode": false,
"object": "balance"
}
},
"livemode": false,
"pending_webhooks": 0,
"request": {
"id": null,
"idempotency_key": null
},
"type": "balance.available"
}
]
}
"""
}
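# Each value above is a canned Stripe "list" response (object/url/has_more/
# data) keyed by endpoint path; StripeTest.setUp() registers them with the
# ``responses`` mock so the sync tests never hit the real API.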
class StripeTest(TestCase):
def setUp(self):
self.app = create_app()
self.db = db
self.app.config['TESTING'] = True
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://eric:@localhost/test'
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.session.close()
db.drop_all()
db.create_all()
self.client = self.app.test_client()
self.organization = Organization(
name="foo", database_credentials='postgresql+psycopg2://eric:@localhost/test')
self.account = StripeAccount(
organization=self.organization, api_key='asdf')
db.session.add(self.organization)
db.session.add(self.account)
db.session.commit()
session = self.organization.create_session()
self.account.drop_all(session)
self.account.create_all(session)
for cls in [m for n, m in CLASS_REGISTRY.items() if isclass(m) and issubclass(m, Base) and m.endpoint]:
responses.add(responses.GET, 'https://api.stripe.com' + cls.endpoint,
body=ENDPOINT_RESPONSES[cls.endpoint], content_type='application/json',)
responses.add(responses.GET, 'https://api.stripe.com/v1/events',
body=ENDPOINT_RESPONSES['/v1/events'], content_type='application/json',)
def tearDown(self):
db.session.remove()
db.get_engine(self.app).dispose()
self.app_ctx.pop()
@responses.activate
def test_endpoints(self):
for cls in [m for n, m in CLASS_REGISTRY.items() if isclass(m) and issubclass(m, Base) and m.endpoint]:
statements, cursor, has_more = cls.sync(self.account, None)
if urlparse(responses.calls[-1].request.url).path == '/v1/subscription_items':
continue
assert len(statements) > 0, (cls, responses.calls[-1].request.url)
assert has_more == False
@responses.activate
def test_sync(self):
r = requests.get('https://api.stripe.com/v1/events')
# account.backfill()
sync(self.account.id)
# TODO add new event response
# account.update()
sync(self.account.id)
| mit |
renaelectronics/linuxcnc | src/emc/usr_intf/touchy/listing.py | 35 | 3481 | # Touchy is Copyright (c) 2009 Chris Radek <chris@timeguy.com>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
class listing:
def __init__(self, gtk, emc, labels, eventboxes):
self.labels = labels
self.eventboxes = eventboxes
self.numlabels = len(labels)
self.gtk = gtk
self.emc = emc
self.lineoffset = 0
self.selected = -1
self.start_line = -1
self.filename = ""
self.program = []
self.lines = 0
self.populate()
def populate(self):
program = self.program[self.lineoffset:self.lineoffset + self.numlabels]
for i in range(self.numlabels):
l = self.labels[i]
e = self.eventboxes[i]
if i < len(program):
l.set_text(program[i].rstrip())
else:
l.set_text('')
if self.start_line == self.lineoffset + i:
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse('#66f'))
elif self.selected == self.lineoffset + i:
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse('#fff'))
else:
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse('#ccc'))
def show_line(self, n):
if len(self.program) <= self.numlabels:
self.lineoffset = 0
else:
            # Center line n in the window, clamped to the start and end of the file.
            self.lineoffset = min(max(0, n - self.numlabels/2), self.lines - self.numlabels)
self.populate()
def highlight_line(self, n):
n -= 1 # program[] is zero-based, emc line numbers are one-based
if self.selected == n: return
self.selected = n
self.show_line(n)
def up(self, b):
self.lineoffset -= self.numlabels
if self.lineoffset < 0:
self.lineoffset = 0
self.populate()
def down(self, b):
self.lineoffset += self.numlabels
self.populate()
def readfile(self, fn):
self.filename = fn
f = file(fn, 'r')
self.program = f.readlines()
f.close()
self.lines = len(self.program)
self.lineoffset = 0
self.selected = -1
self.populate()
def reload(self, b):
pass
def previous(self, b,count=1):
for i in range(count):
while True:
if self.start_line <= 0:
break
self.start_line -= 1
if (self.program[self.start_line][0] == 'N' or
self.program[self.start_line][0] == 'n' ):
break
self.show_line(self.start_line)
def next(self,b,count=1):
if count < 0: return self.previous(b, -count)
for i in range(count):
while True:
if self.start_line >= len(self.program)-1:
break
self.start_line += 1
if (self.program[self.start_line][0] == 'N' or
self.program[self.start_line][0] == 'n' ):
break
self.show_line(self.start_line)
def clear_startline(self):
self.start_line = -1
self.populate()
def get_startline(self):
return self.start_line + 1
| gpl-2.0 |
Anonymous-X6/django | tests/utils_tests/test_feedgenerator.py | 163 | 4306 | from __future__ import unicode_literals
import datetime
import unittest
from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone
class FeedgeneratorTest(unittest.TestCase):
"""
Tests for the low-level syndication feed framework.
"""
def test_get_tag_uri(self):
"""
Test get_tag_uri() correctly generates TagURIs.
"""
self.assertEqual(
feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)),
'tag:example.org,2004-10-25:/foo/bar/headline')
def test_get_tag_uri_with_port(self):
"""
Test that get_tag_uri() correctly generates TagURIs from URLs with port
numbers.
"""
self.assertEqual(
feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0)),
'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')
def test_rfc2822_date(self):
"""
Test rfc2822_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"Fri, 14 Nov 2008 13:37:00 -0000"
)
def test_rfc2822_date_with_timezone(self):
"""
Test rfc2822_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(60))),
"Fri, 14 Nov 2008 13:37:00 +0100"
)
def test_rfc2822_date_without_time(self):
"""
Test rfc2822_date() correctly formats date objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.date(2008, 11, 14)),
"Fri, 14 Nov 2008 00:00:00 -0000"
)
def test_rfc3339_date(self):
"""
Test rfc3339_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"2008-11-14T13:37:00Z"
)
def test_rfc3339_date_with_timezone(self):
"""
Test rfc3339_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(120))),
"2008-11-14T13:37:00+02:00"
)
def test_rfc3339_date_without_time(self):
"""
Test rfc3339_date() correctly formats date objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.date(2008, 11, 14)),
"2008-11-14T00:00:00Z"
)
def test_atom1_mime_type(self):
"""
Test to make sure Atom MIME type has UTF8 Charset parameter set
"""
atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
self.assertEqual(
atom_feed.content_type, "application/atom+xml; charset=utf-8"
)
def test_rss_mime_type(self):
"""
Test to make sure RSS MIME type has UTF8 Charset parameter set
"""
rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
self.assertEqual(
rss_feed.content_type, "application/rss+xml; charset=utf-8"
)
# Two regression tests for #14202
def test_feed_without_feed_url_gets_rendered_without_atom_link(self):
feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr')
self.assertEqual(feed.feed['feed_url'], None)
feed_content = feed.writeString('utf-8')
self.assertNotIn('<atom:link', feed_content)
self.assertNotIn('href="/feed/"', feed_content)
self.assertNotIn('rel="self"', feed_content)
def test_feed_with_feed_url_gets_rendered_with_atom_link(self):
feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr', feed_url='/feed/')
self.assertEqual(feed.feed['feed_url'], '/feed/')
feed_content = feed.writeString('utf-8')
self.assertIn('<atom:link', feed_content)
self.assertIn('href="/feed/"', feed_content)
self.assertIn('rel="self"', feed_content)
| bsd-3-clause |
adam111316/SickGear | lib/imdb/utils.py | 14 | 62168 | """
utils module (imdb package).
This module provides basic utilities for the imdb package.
Copyright 2004-2013 Davide Alberani <da@erlug.linux.it>
2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import generators
import re
import string
import logging
from copy import copy, deepcopy
from time import strptime, strftime
from imdb import VERSION
from imdb import linguistics
from imdb._exceptions import IMDbParserError
# Logger for imdb.utils module.
_utils_logger = logging.getLogger('imdbpy.utils')
# The regular expression for the "long" year format of IMDb, like
# "(1998)" and "(1986/II)", where the optional roman number (that I call
# "imdbIndex" after the slash is used for movies with the same title
# and year of release.
# XXX: probably L, C, D and M are far too much! ;-)
re_year_index = re.compile(r'\(([0-9\?]{4}(/[IVXLCDM]+)?)\)')
re_extended_year_index = re.compile(r'\((TV episode|TV Series|TV mini-series|TV|Video|Video Game)? ?((?:[0-9\?]{4})(?:-[0-9\?]{4})?)(?:/([IVXLCDM]+)?)?\)')
re_remove_kind = re.compile(r'\((TV episode|TV Series|TV mini-series|TV|Video|Video Game)? ?')
# Match only the imdbIndex (for name strings).
re_index = re.compile(r'^\(([IVXLCDM]+)\)$')
# Match things inside parentheses.
re_parentheses = re.compile(r'(\(.*\))')
# Match the number of episodes.
re_episodes = re.compile(r'\s?\((\d+) episodes\)', re.I)
re_episode_info = re.compile(r'{\s*(.+?)?\s?(\([0-9\?]{4}-[0-9\?]{1,2}-[0-9\?]{1,2}\))?\s?(\(#[0-9]+\.[0-9]+\))?}')
# Common suffixes in surnames.
_sname_suffixes = ('de', 'la', 'der', 'den', 'del', 'y', 'da', 'van',
'e', 'von', 'the', 'di', 'du', 'el', 'al')
def canonicalName(name):
"""Return the given name in canonical "Surname, Name" format.
It assumes that name is in the 'Name Surname' format."""
# XXX: some statistics (as of 17 Apr 2008, over 2288622 names):
# - just a surname: 69476
# - single surname, single name: 2209656
# - composed surname, composed name: 9490
# - composed surname, single name: 67606
# (2: 59764, 3: 6862, 4: 728)
# - single surname, composed name: 242310
# (2: 229467, 3: 9901, 4: 2041, 5: 630)
# - Jr.: 8025
# Don't convert names already in the canonical format.
if name.find(', ') != -1: return name
if isinstance(name, unicode):
joiner = u'%s, %s'
sur_joiner = u'%s %s'
sur_space = u' %s'
space = u' '
else:
joiner = '%s, %s'
sur_joiner = '%s %s'
sur_space = ' %s'
space = ' '
sname = name.split(' ')
snl = len(sname)
if snl == 2:
# Just a name and a surname: how boring...
name = joiner % (sname[1], sname[0])
elif snl > 2:
lsname = [x.lower() for x in sname]
if snl == 3: _indexes = (0, snl-2)
else: _indexes = (0, snl-2, snl-3)
# Check for common surname prefixes at the beginning and near the end.
for index in _indexes:
if lsname[index] not in _sname_suffixes: continue
try:
# Build the surname.
surn = sur_joiner % (sname[index], sname[index+1])
del sname[index]
del sname[index]
try:
# Handle the "Jr." after the name.
if lsname[index+2].startswith('jr'):
surn += sur_space % sname[index]
del sname[index]
except (IndexError, ValueError):
pass
name = joiner % (surn, space.join(sname))
break
except ValueError:
continue
else:
name = joiner % (sname[-1], space.join(sname[:-1]))
return name
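# A few illustrative examples (behavior sketched from the code above):
#   canonicalName('Mel Gibson')            -> 'Gibson, Mel'
#   canonicalName('Jean Claude Van Damme') -> 'Van Damme, Jean Claude'
#   canonicalName('Gibson, Mel')           -> 'Gibson, Mel'  (left untouched)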
def normalizeName(name):
"""Return a name in the normal "Name Surname" format."""
if isinstance(name, unicode):
joiner = u'%s %s'
else:
joiner = '%s %s'
sname = name.split(', ')
if len(sname) == 2:
name = joiner % (sname[1], sname[0])
return name
def analyze_name(name, canonical=None):
"""Return a dictionary with the name and the optional imdbIndex
keys, from the given string.
If canonical is None (default), the name is stored in its own style.
If canonical is True, the name is converted to canonical style.
If canonical is False, the name is converted to normal format.
raise an IMDbParserError exception if the name is not valid.
"""
original_n = name
name = name.strip()
res = {}
imdbIndex = ''
opi = name.rfind('(')
cpi = name.rfind(')')
# Strip notes (but not if the name starts with a parenthesis).
if opi not in (-1, 0) and cpi > opi:
if re_index.match(name[opi:cpi+1]):
imdbIndex = name[opi+1:cpi]
name = name[:opi].rstrip()
else:
# XXX: for the birth and death dates case like " (1926-2004)"
name = re_parentheses.sub('', name).strip()
if not name:
raise IMDbParserError('invalid name: "%s"' % original_n)
if canonical is not None:
if canonical:
name = canonicalName(name)
else:
name = normalizeName(name)
res['name'] = name
if imdbIndex:
res['imdbIndex'] = imdbIndex
return res
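# Illustrative examples (sketched from the code above):
#   analyze_name('Sean Connery (I)')           -> {'name': 'Sean Connery',
#                                                  'imdbIndex': 'I'}
#   analyze_name('Connery, Sean', canonical=0) -> {'name': 'Sean Connery'}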
def build_name(name_dict, canonical=None):
"""Given a dictionary that represents a "long" IMDb name,
return a string.
If canonical is None (default), the name is returned in the stored style.
If canonical is True, the name is converted to canonical style.
If canonical is False, the name is converted to normal format.
"""
name = name_dict.get('canonical name') or name_dict.get('name', '')
if not name: return ''
if canonical is not None:
if canonical:
name = canonicalName(name)
else:
name = normalizeName(name)
imdbIndex = name_dict.get('imdbIndex')
if imdbIndex:
name += ' (%s)' % imdbIndex
return name
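# Illustrative example (sketched from the code above); build_name is the
# inverse of analyze_name:
#   build_name({'name': 'Sean Connery', 'imdbIndex': 'I'}) -> 'Sean Connery (I)'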
# XXX: here only for backward compatibility. Find and remove any dependency.
_articles = linguistics.GENERIC_ARTICLES
_unicodeArticles = linguistics.toUnicode(_articles)
articlesDicts = linguistics.articlesDictsForLang(None)
spArticles = linguistics.spArticlesForLang(None)
def canonicalTitle(title, lang=None, imdbIndex=None):
"""Return the title in the canonic format 'Movie Title, The';
beware that it doesn't handle long imdb titles.
The 'lang' argument can be used to specify the language of the title.
"""
isUnicode = isinstance(title, unicode)
articlesDicts = linguistics.articlesDictsForLang(lang)
try:
if title.split(', ')[-1].lower() in articlesDicts[isUnicode]:
return title
except IndexError:
pass
if isUnicode:
_format = u'%s%s, %s'
else:
_format = '%s%s, %s'
ltitle = title.lower()
if imdbIndex:
imdbIndex = ' (%s)' % imdbIndex
else:
imdbIndex = ''
spArticles = linguistics.spArticlesForLang(lang)
for article in spArticles[isUnicode]:
if ltitle.startswith(article):
lart = len(article)
title = _format % (title[lart:], imdbIndex, title[:lart])
if article[-1] == ' ':
title = title[:-1]
break
## XXX: an attempt using a dictionary lookup.
##for artSeparator in (' ', "'", '-'):
## article = _articlesDict.get(ltitle.split(artSeparator)[0])
## if article is not None:
## lart = len(article)
## # check titles like "una", "I'm Mad" and "L'abbacchio".
## if title[lart:] == '' or (artSeparator != ' ' and
## title[lart:][1] != artSeparator): continue
## title = '%s, %s' % (title[lart:], title[:lart])
## if artSeparator == ' ': title = title[1:]
## break
return title
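# Illustrative examples (sketched from the code above):
#   canonicalTitle('The Silence of the Lambs')  -> 'Silence of the Lambs, The'
#   canonicalTitle('Silence of the Lambs, The') -> unchanged, already canonical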
def normalizeTitle(title, lang=None):
"""Return the title in the normal "The Title" format;
beware that it doesn't handle long imdb titles, but only the
title portion, without year[/imdbIndex] or special markup.
The 'lang' argument can be used to specify the language of the title.
"""
isUnicode = isinstance(title, unicode)
stitle = title.split(', ')
articlesDicts = linguistics.articlesDictsForLang(lang)
if len(stitle) > 1 and stitle[-1].lower() in articlesDicts[isUnicode]:
sep = ' '
if stitle[-1][-1] in ("'", '-'):
sep = ''
if isUnicode:
_format = u'%s%s%s'
_joiner = u', '
else:
_format = '%s%s%s'
_joiner = ', '
title = _format % (stitle[-1], sep, _joiner.join(stitle[:-1]))
return title
def _split_series_episode(title):
"""Return the series and the episode titles; if this is not a
series' episode, the returned series title is empty.
    This function recognizes two different styles:
"The Series" An Episode (2005)
"The Series" (2004) {An Episode (2005) (#season.episode)}"""
series_title = ''
episode_or_year = ''
if title[-1:] == '}':
# Title of the episode, as in the plain text data files.
begin_eps = title.rfind('{')
if begin_eps == -1: return '', ''
series_title = title[:begin_eps].rstrip()
# episode_or_year is returned with the {...}
episode_or_year = title[begin_eps:].strip()
if episode_or_year[:12] == '{SUSPENDED}}': return '', ''
# XXX: works only with tv series; it's still unclear whether
# IMDb will support episodes for tv mini series and tv movies...
elif title[0:1] == '"':
second_quot = title[1:].find('"') + 2
if second_quot != 1: # a second " was found.
episode_or_year = title[second_quot:].lstrip()
first_char = episode_or_year[0:1]
if not first_char: return '', ''
if first_char != '(':
# There is not a (year) but the title of the episode;
# that means this is an episode title, as returned by
# the web server.
series_title = title[:second_quot]
##elif episode_or_year[-1:] == '}':
## # Title of the episode, as in the plain text data files.
## begin_eps = episode_or_year.find('{')
## if begin_eps == -1: return series_title, episode_or_year
## series_title = title[:second_quot+begin_eps].rstrip()
## # episode_or_year is returned with the {...}
## episode_or_year = episode_or_year[begin_eps:]
return series_title, episode_or_year
def is_series_episode(title):
"""Return True if 'title' is an series episode."""
title = title.strip()
if _split_series_episode(title)[0]: return 1
return 0
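# The two styles recognized by _split_series_episode (illustrative examples,
# sketched from the code above):
#   _split_series_episode('"The Series" An Episode (2005)')
#       -> ('"The Series"', 'An Episode (2005)')
#   _split_series_episode('"The Series" (2004) {An Episode (#1.2)}')
#       -> ('"The Series" (2004)', '{An Episode (#1.2)}')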
def analyze_title(title, canonical=None, canonicalSeries=None,
canonicalEpisode=None, _emptyString=u''):
"""Analyze the given title and return a dictionary with the
"stripped" title, the kind of the show ("movie", "tv series", etc.),
the year of production and the optional imdbIndex (a roman number
used to distinguish between movies with the same title and year).
If canonical is None (default), the title is stored in its own style.
If canonical is True, the title is converted to canonical style.
If canonical is False, the title is converted to normal format.
raise an IMDbParserError exception if the title is not valid.
"""
# XXX: introduce the 'lang' argument?
if canonical is not None:
canonicalSeries = canonicalEpisode = canonical
original_t = title
result = {}
title = title.strip()
year = _emptyString
kind = _emptyString
imdbIndex = _emptyString
series_title, episode_or_year = _split_series_episode(title)
if series_title:
# It's an episode of a series.
series_d = analyze_title(series_title, canonical=canonicalSeries)
oad = sen = ep_year = _emptyString
# Plain text data files format.
if episode_or_year[0:1] == '{' and episode_or_year[-1:] == '}':
match = re_episode_info.findall(episode_or_year)
if match:
# Episode title, original air date and #season.episode
episode_or_year, oad, sen = match[0]
episode_or_year = episode_or_year.strip()
if not oad:
# No year, but the title is something like (2005-04-12)
if episode_or_year and episode_or_year[0] == '(' and \
episode_or_year[-1:] == ')' and \
episode_or_year[1:2] != '#':
oad = episode_or_year
if oad[1:5] and oad[5:6] == '-':
try:
ep_year = int(oad[1:5])
except (TypeError, ValueError):
pass
if not oad and not sen and episode_or_year.startswith('(#'):
sen = episode_or_year
elif episode_or_year.startswith('Episode dated'):
oad = episode_or_year[14:]
if oad[-4:].isdigit():
try:
ep_year = int(oad[-4:])
except (TypeError, ValueError):
pass
episode_d = analyze_title(episode_or_year, canonical=canonicalEpisode)
episode_d['kind'] = u'episode'
episode_d['episode of'] = series_d
if oad:
episode_d['original air date'] = oad[1:-1]
if ep_year and episode_d.get('year') is None:
episode_d['year'] = ep_year
if sen and sen[2:-1].find('.') != -1:
seas, epn = sen[2:-1].split('.')
if seas:
# Set season and episode.
                try: seas = int(seas)
                except (TypeError, ValueError): pass
                try: epn = int(epn)
                except (TypeError, ValueError): pass
episode_d['season'] = seas
if epn:
episode_d['episode'] = epn
return episode_d
# First of all, search for the kind of show.
# XXX: Number of entries at 17 Apr 2008:
# movie: 379,871
# episode: 483,832
# tv movie: 61,119
# tv series: 44,795
# video movie: 57,915
# tv mini series: 5,497
# video game: 5,490
# More up-to-date statistics: http://us.imdb.com/database_statistics
if title.endswith('(TV)'):
kind = u'tv movie'
title = title[:-4].rstrip()
elif title.endswith('(TV Movie)'):
kind = u'tv movie'
title = title[:-10].rstrip()
elif title.endswith('(V)'):
kind = u'video movie'
title = title[:-3].rstrip()
elif title.lower().endswith('(video)'):
kind = u'video movie'
title = title[:-7].rstrip()
elif title.endswith('(TV Short)'):
kind = u'tv short'
title = title[:-10].rstrip()
elif title.endswith('(TV Mini-Series)'):
kind = u'tv mini series'
title = title[:-16].rstrip()
elif title.endswith('(mini)'):
kind = u'tv mini series'
title = title[:-6].rstrip()
elif title.endswith('(VG)'):
kind = u'video game'
title = title[:-4].rstrip()
elif title.endswith('(Video Game)'):
kind = u'video game'
title = title[:-12].rstrip()
elif title.endswith('(TV Series)'):
epindex = title.find('(TV Episode) - ')
if epindex >= 0:
# It's an episode of a series.
kind = u'episode'
series_info = analyze_title(title[epindex + 15:])
result['episode of'] = series_info.get('title')
result['series year'] = series_info.get('year')
title = title[:epindex]
else:
kind = u'tv series'
title = title[:-11].rstrip()
# Search for the year and the optional imdbIndex (a roman number).
yi = re_year_index.findall(title)
if not yi:
yi = re_extended_year_index.findall(title)
if yi:
yk, yiy, yii = yi[-1]
yi = [(yiy, yii)]
if yk == 'TV episode':
kind = u'episode'
elif yk == 'TV':
kind = u'tv movie'
elif yk == 'TV Series':
kind = u'tv series'
elif yk == 'Video':
kind = u'video movie'
elif yk == 'TV mini-series':
kind = u'tv mini series'
elif yk == 'Video Game':
kind = u'video game'
title = re_remove_kind.sub('(', title)
if yi:
last_yi = yi[-1]
year = last_yi[0]
if last_yi[1]:
imdbIndex = last_yi[1][1:]
year = year[:-len(imdbIndex)-1]
i = title.rfind('(%s)' % last_yi[0])
if i != -1:
title = title[:i-1].rstrip()
# This is a tv (mini) series: strip the '"' at the begin and at the end.
# XXX: strip('"') is not used for compatibility with Python 2.0.
if title and title[0] == title[-1] == '"':
if not kind:
kind = u'tv series'
title = title[1:-1].strip()
if not title:
raise IMDbParserError('invalid title: "%s"' % original_t)
if canonical is not None:
if canonical:
title = canonicalTitle(title)
else:
title = normalizeTitle(title)
# 'kind' is one in ('movie', 'episode', 'tv series', 'tv mini series',
# 'tv movie', 'video movie', 'video game')
result['title'] = title
result['kind'] = kind or u'movie'
if year and year != '????':
if '-' in year:
result['series years'] = year
year = year[:4]
try:
result['year'] = int(year)
except (TypeError, ValueError):
pass
if imdbIndex:
result['imdbIndex'] = imdbIndex
if isinstance(_emptyString, str):
result['kind'] = str(kind or 'movie')
return result
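# Illustrative examples (sketched from the code above):
#   analyze_title('The Untouchables (1987)')
#       -> {'title': 'The Untouchables', 'kind': 'movie', 'year': 1987}
#   analyze_title('"The X-Files" (1993)')
#       -> {'title': 'The X-Files', 'kind': 'tv series', 'year': 1993}
#   analyze_title('"The X-Files" (1993) {Squeeze (#1.3)}') returns an
#   'episode' dictionary, with the series dictionary under 'episode of'
#   and 'season'/'episode' set to 1 and 3.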
_web_format = '%d %B %Y'
_ptdf_format = '(%Y-%m-%d)'
def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''):
"""Convert a time expressed in the pain text data files, to
the 'Episode dated ...' format used on the web site; if
fromPTDFtoWEB is false, the inverted conversion is applied."""
try:
if fromPTDFtoWEB:
from_format = _ptdf_format
to_format = _web_format
else:
from_format = u'Episode dated %s' % _web_format
to_format = _ptdf_format
t = strptime(title, from_format)
title = strftime(to_format, t)
if fromPTDFtoWEB:
if title[0] == '0': title = title[1:]
title = u'Episode dated %s' % title
except ValueError:
pass
if isinstance(_emptyString, str):
try:
title = str(title)
except UnicodeDecodeError:
pass
return title
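# Illustrative examples (sketched from the code above):
#   _convertTime('(2005-04-12)')                 -> 'Episode dated 12 April 2005'
#   _convertTime('Episode dated 12 April 2005',
#                fromPTDFtoWEB=0)                -> '(2005-04-12)'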
def build_title(title_dict, canonical=None, canonicalSeries=None,
canonicalEpisode=None, ptdf=0, lang=None, _doYear=1,
_emptyString=u'', appendKind=True):
"""Given a dictionary that represents a "long" IMDb title,
return a string.
If canonical is None (default), the title is returned in the stored style.
If canonical is True, the title is converted to canonical style.
If canonical is False, the title is converted to normal format.
lang can be used to specify the language of the title.
If ptdf is true, the plain text data files format is used.
"""
if canonical is not None:
canonicalSeries = canonical
pre_title = _emptyString
kind = title_dict.get('kind')
episode_of = title_dict.get('episode of')
if kind == 'episode' and episode_of is not None:
# Works with both Movie instances and plain dictionaries.
doYear = 0
if ptdf:
doYear = 1
# XXX: for results coming from the new search page.
if not isinstance(episode_of, (dict, _Container)):
episode_of = {'title': episode_of, 'kind': 'tv series'}
if 'series year' in title_dict:
episode_of['year'] = title_dict['series year']
pre_title = build_title(episode_of, canonical=canonicalSeries,
ptdf=0, _doYear=doYear,
_emptyString=_emptyString)
ep_dict = {'title': title_dict.get('title', ''),
'imdbIndex': title_dict.get('imdbIndex')}
ep_title = ep_dict['title']
if not ptdf:
doYear = 1
ep_dict['year'] = title_dict.get('year', '????')
if ep_title[0:1] == '(' and ep_title[-1:] == ')' and \
ep_title[1:5].isdigit():
ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=1,
_emptyString=_emptyString)
else:
doYear = 0
if ep_title.startswith('Episode dated'):
ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=0,
_emptyString=_emptyString)
episode_title = build_title(ep_dict,
canonical=canonicalEpisode, ptdf=ptdf,
_doYear=doYear, _emptyString=_emptyString)
if ptdf:
oad = title_dict.get('original air date', _emptyString)
if len(oad) == 10 and oad[4] == '-' and oad[7] == '-' and \
episode_title.find(oad) == -1:
episode_title += ' (%s)' % oad
seas = title_dict.get('season')
if seas is not None:
episode_title += ' (#%s' % seas
episode = title_dict.get('episode')
if episode is not None:
episode_title += '.%s' % episode
episode_title += ')'
episode_title = '{%s}' % episode_title
return _emptyString + '%s %s' % (_emptyString + pre_title,
_emptyString + episode_title)
title = title_dict.get('title', '')
imdbIndex = title_dict.get('imdbIndex', '')
if not title: return _emptyString
if canonical is not None:
if canonical:
title = canonicalTitle(title, lang=lang, imdbIndex=imdbIndex)
else:
title = normalizeTitle(title, lang=lang)
if pre_title:
title = '%s %s' % (pre_title, title)
if kind in (u'tv series', u'tv mini series'):
title = '"%s"' % title
if _doYear:
year = title_dict.get('year') or '????'
if isinstance(_emptyString, str):
year = str(year)
imdbIndex = title_dict.get('imdbIndex')
if not ptdf:
if imdbIndex and (canonical is None or canonical):
title += ' (%s)' % imdbIndex
title += ' (%s)' % year
else:
title += ' (%s' % year
if imdbIndex and (canonical is None or canonical):
title += '/%s' % imdbIndex
title += ')'
if appendKind and kind:
if kind == 'tv movie':
title += ' (TV)'
elif kind == 'video movie':
title += ' (V)'
elif kind == 'tv mini series':
title += ' (mini)'
elif kind == 'video game':
title += ' (VG)'
return title
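# Illustrative examples (sketched from the code above); build_title is the
# inverse of analyze_title:
#   build_title({'title': 'The Untouchables', 'kind': 'movie', 'year': 1987})
#       -> 'The Untouchables (1987)'
#   build_title({'title': 'The X-Files', 'kind': 'tv series', 'year': 1993})
#       -> '"The X-Files" (1993)'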
def split_company_name_notes(name):
"""Return two strings, the first representing the company name,
and the other representing the (optional) notes."""
name = name.strip()
notes = u''
if name.endswith(')'):
fpidx = name.find('(')
if fpidx != -1:
notes = name[fpidx:]
name = name[:fpidx].rstrip()
return name, notes
def analyze_company_name(name, stripNotes=False):
"""Return a dictionary with the name and the optional 'country'
keys, from the given string.
If stripNotes is true, tries to not consider optional notes.
raise an IMDbParserError exception if the name is not valid.
"""
if stripNotes:
name = split_company_name_notes(name)[0]
o_name = name
name = name.strip()
country = None
if name.endswith(']'):
idx = name.rfind('[')
if idx != -1:
country = name[idx:]
name = name[:idx].rstrip()
if not name:
raise IMDbParserError('invalid name: "%s"' % o_name)
result = {'name': name}
if country:
result['country'] = country
return result
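# Illustrative examples (sketched from the code above):
#   split_company_name_notes('Warner Bros. (presents)')
#       -> ('Warner Bros.', '(presents)')
#   analyze_company_name('Columbia Pictures [us]')
#       -> {'name': 'Columbia Pictures', 'country': '[us]'}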
def build_company_name(name_dict, _emptyString=u''):
"""Given a dictionary that represents a "long" IMDb company name,
return a string.
"""
name = name_dict.get('name')
if not name:
return _emptyString
country = name_dict.get('country')
if country is not None:
name += ' %s' % country
return name
class _LastC:
"""Size matters."""
def __cmp__(self, other):
if isinstance(other, self.__class__): return 0
return 1
_last = _LastC()
def cmpMovies(m1, m2):
"""Compare two movies by year, in reverse order; the imdbIndex is checked
for movies with the same year of production and title."""
# Sort tv series' episodes.
m1e = m1.get('episode of')
m2e = m2.get('episode of')
if m1e is not None and m2e is not None:
cmp_series = cmpMovies(m1e, m2e)
if cmp_series != 0:
return cmp_series
m1s = m1.get('season')
m2s = m2.get('season')
if m1s is not None and m2s is not None:
if m1s < m2s:
return 1
elif m1s > m2s:
return -1
m1p = m1.get('episode')
m2p = m2.get('episode')
if m1p < m2p:
return 1
elif m1p > m2p:
return -1
try:
if m1e is None: m1y = int(m1.get('year', 0))
else: m1y = int(m1e.get('year', 0))
except ValueError:
m1y = 0
try:
if m2e is None: m2y = int(m2.get('year', 0))
else: m2y = int(m2e.get('year', 0))
except ValueError:
m2y = 0
if m1y > m2y: return -1
if m1y < m2y: return 1
# Ok, these movies have the same production year...
#m1t = m1.get('canonical title', _last)
#m2t = m2.get('canonical title', _last)
    # It should also work with normal dictionaries (returned from searches).
#if m1t is _last and m2t is _last:
m1t = m1.get('title', _last)
m2t = m2.get('title', _last)
if m1t < m2t: return -1
if m1t > m2t: return 1
# Ok, these movies have the same title...
m1i = m1.get('imdbIndex', _last)
m2i = m2.get('imdbIndex', _last)
if m1i > m2i: return -1
if m1i < m2i: return 1
m1id = getattr(m1, 'movieID', None)
    # Introduce this check even for other comparison functions?
    # XXX: is it safe to check without knowing the data access system?
# probably not a great idea. Check for 'kind', instead?
if m1id is not None:
m2id = getattr(m2, 'movieID', None)
if m1id > m2id: return -1
elif m1id < m2id: return 1
return 0
def cmpPeople(p1, p2):
"""Compare two people by billingPos, name and imdbIndex."""
p1b = getattr(p1, 'billingPos', None) or _last
p2b = getattr(p2, 'billingPos', None) or _last
if p1b > p2b: return 1
if p1b < p2b: return -1
p1n = p1.get('canonical name', _last)
p2n = p2.get('canonical name', _last)
if p1n is _last and p2n is _last:
p1n = p1.get('name', _last)
p2n = p2.get('name', _last)
if p1n > p2n: return 1
if p1n < p2n: return -1
p1i = p1.get('imdbIndex', _last)
p2i = p2.get('imdbIndex', _last)
if p1i > p2i: return 1
if p1i < p2i: return -1
return 0
def cmpCompanies(p1, p2):
"""Compare two companies."""
p1n = p1.get('long imdb name', _last)
p2n = p2.get('long imdb name', _last)
if p1n is _last and p2n is _last:
p1n = p1.get('name', _last)
p2n = p2.get('name', _last)
if p1n > p2n: return 1
if p1n < p2n: return -1
p1i = p1.get('country', _last)
p2i = p2.get('country', _last)
if p1i > p2i: return 1
if p1i < p2i: return -1
return 0
# References to titles, names and characters.
# XXX: find better regexp!
re_titleRef = re.compile(r'_(.+?(?: \([0-9\?]{4}(?:/[IVXLCDM]+)?\))?(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)_ \(qv\)')
# FIXME: doesn't match persons with ' in the name.
re_nameRef = re.compile(r"'([^']+?)' \(qv\)")
# XXX: good choice? Are there characters with # in the name?
re_characterRef = re.compile(r"#([^']+?)# \(qv\)")
# Functions used to filter the text strings.
def modNull(s, titlesRefs, namesRefs, charactersRefs):
"""Do nothing."""
return s
def modClearTitleRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove titles references."""
return re_titleRef.sub(r'\1', s)
def modClearNameRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove names references."""
return re_nameRef.sub(r'\1', s)
def modClearCharacterRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove characters references"""
return re_characterRef.sub(r'\1', s)
def modClearRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove titles, names and characters references."""
s = modClearTitleRefs(s, {}, {}, {})
s = modClearCharacterRefs(s, {}, {}, {})
return modClearNameRefs(s, {}, {}, {})
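# Illustrative example (sketched from the regexps above):
#   modClearRefs("_The Matrix (1999)_ (qv) stars 'Keanu Reeves' (qv)",
#                {}, {}, {})
#       -> "The Matrix (1999) stars Keanu Reeves"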
def modifyStrings(o, modFunct, titlesRefs, namesRefs, charactersRefs):
"""Modify a string (or string values in a dictionary or strings
in a list), using the provided modFunct function and titlesRefs
namesRefs and charactersRefs references dictionaries."""
# Notice that it doesn't go any deeper than the first two levels in a list.
if isinstance(o, (unicode, str)):
return modFunct(o, titlesRefs, namesRefs, charactersRefs)
elif isinstance(o, (list, tuple, dict)):
_stillorig = 1
if isinstance(o, (list, tuple)): keys = xrange(len(o))
else: keys = o.keys()
for i in keys:
v = o[i]
if isinstance(v, (unicode, str)):
if _stillorig:
o = copy(o)
_stillorig = 0
o[i] = modFunct(v, titlesRefs, namesRefs, charactersRefs)
elif isinstance(v, (list, tuple)):
modifyStrings(o[i], modFunct, titlesRefs, namesRefs,
charactersRefs)
return o
def date_and_notes(s):
"""Parse (birth|death) date and notes; returns a tuple in the
form (date, notes)."""
s = s.strip()
if not s: return (u'', u'')
notes = u''
if s[0].isdigit() or s.split()[0].lower() in ('c.', 'january', 'february',
'march', 'april', 'may', 'june',
'july', 'august', 'september',
'october', 'november',
'december', 'ca.', 'circa',
'????,'):
i = s.find(',')
if i != -1:
notes = s[i+1:].strip()
s = s[:i]
else:
notes = s
s = u''
if s == '????': s = u''
return s, notes
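# Illustrative examples (sketched from the code above):
#   date_and_notes('3 January 1997, London, England, UK')
#       -> ('3 January 1997', 'London, England, UK')
#   date_and_notes('????')  -> ('', '')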
class RolesList(list):
"""A list of Person or Character instances, used for the currentRole
property."""
def __unicode__(self):
return u' / '.join([unicode(x) for x in self])
def __str__(self):
        # FIXME: does it make sense at all? Returning unicode doesn't
        # seem right in __str__.
return u' / '.join([unicode(x).encode('utf8') for x in self])
# Replace & with &, but only if it's not already part of a charref.
#_re_amp = re.compile(r'(&)(?!\w+;)', re.I)
#_re_amp = re.compile(r'(?<=\W)&(?=[^a-zA-Z0-9_#])')
_re_amp = re.compile(r'&(?![^a-zA-Z0-9_#]{1,5};)')
def escape4xml(value):
"""Escape some chars that can't be present in a XML value."""
if isinstance(value, int):
value = str(value)
value = _re_amp.sub('&', value)
value = value.replace('"', '"').replace("'", ''')
value = value.replace('<', '<').replace('>', '>')
if isinstance(value, unicode):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs):
"""Return three lists - for movie titles, persons and characters names -
with two items tuples: the first item is the reference once escaped
by the user-provided modFunct function, the second is the same
reference un-escaped."""
mRefs = []
for refRe, refTemplate in [(re_titleRef, u'_%s_ (qv)'),
(re_nameRef, u"'%s' (qv)"),
(re_characterRef, u'#%s# (qv)')]:
theseRefs = []
for theRef in refRe.findall(value):
# refTemplate % theRef values don't change for a single
# _Container instance, so this is a good candidate for a
# cache or something - even if it's so rarely used that...
# Moreover, it can grow - ia.update(...) - and change if
# modFunct is modified.
goodValue = modFunct(refTemplate % theRef, titlesRefs, namesRefs,
charactersRefs)
# Prevents problems with crap in plain text data files.
            # We should probably exclude invalid chars and strings that
# are too long in the re_*Ref expressions.
if '_' in goodValue or len(goodValue) > 128:
continue
toReplace = escape4xml(goodValue)
# Only the 'value' portion is replaced.
replaceWith = goodValue.replace(theRef, escape4xml(theRef))
theseRefs.append((toReplace, replaceWith))
mRefs.append(theseRefs)
return mRefs
def _handleTextNotes(s):
"""Split text::notes strings."""
ssplit = s.split('::', 1)
if len(ssplit) == 1:
return s
return u'%s<notes>%s</notes>' % (ssplit[0], ssplit[1])
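# Illustrative example (sketched from the code above):
#   _handleTextNotes(u'Full cast::(uncredited)')
#       -> u'Full cast<notes>(uncredited)</notes>'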
def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
namesRefs=None, charactersRefs=None):
"""Replace some chars that can't be present in a XML text."""
# XXX: use s.encode(encoding, 'xmlcharrefreplace') ? Probably not
# a great idea: after all, returning a unicode is safe.
if isinstance(value, (unicode, str)):
if not withRefs:
value = _handleTextNotes(escape4xml(value))
else:
# Replace references that were accidentally escaped.
replaceLists = _refsToReplace(value, modFunct, titlesRefs,
namesRefs, charactersRefs)
value = modFunct(value, titlesRefs or {}, namesRefs or {},
charactersRefs or {})
value = _handleTextNotes(escape4xml(value))
for replaceList in replaceLists:
for toReplace, replaceWith in replaceList:
value = value.replace(toReplace, replaceWith)
else:
value = unicode(value)
return value
def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
"""Build a tag for the given _Container instance;
both open and close tags are returned."""
tag = ton.__class__.__name__.lower()
what = 'name'
if tag == 'movie':
value = ton.get('long imdb title') or ton.get('title', '')
what = 'title'
else:
value = ton.get('long imdb name') or ton.get('name', '')
value = _normalizeValue(value)
extras = u''
crl = ton.currentRole
if crl:
if not isinstance(crl, list):
crl = [crl]
for cr in crl:
crTag = cr.__class__.__name__.lower()
crValue = cr['long imdb name']
crValue = _normalizeValue(crValue)
crID = cr.getID()
if crID is not None:
extras += u'<current-role><%s id="%s">' \
u'<name>%s</name></%s>' % (crTag, crID,
crValue, crTag)
else:
extras += u'<current-role><%s><name>%s</name></%s>' % \
(crTag, crValue, crTag)
if cr.notes:
extras += u'<notes>%s</notes>' % _normalizeValue(cr.notes)
extras += u'</current-role>'
theID = ton.getID()
if theID is not None:
beginTag = u'<%s id="%s"' % (tag, theID)
if addAccessSystem and ton.accessSystem:
beginTag += ' access-system="%s"' % ton.accessSystem
if not _containerOnly:
beginTag += u'><%s>%s</%s>' % (what, value, what)
else:
beginTag += u'>'
else:
if not _containerOnly:
beginTag = u'<%s><%s>%s</%s>' % (tag, what, value, what)
else:
beginTag = u'<%s>' % tag
beginTag += extras
if ton.notes:
beginTag += u'<notes>%s</notes>' % _normalizeValue(ton.notes)
return (beginTag, u'</%s>' % tag)
TAGS_TO_MODIFY = {
'movie.parents-guide': ('item', True),
'movie.number-of-votes': ('item', True),
'movie.soundtrack.item': ('item', True),
'movie.quotes': ('quote', False),
'movie.quotes.quote': ('line', False),
'movie.demographic': ('item', True),
'movie.episodes': ('season', True),
'movie.episodes.season': ('episode', True),
'person.merchandising-links': ('item', True),
'person.genres': ('item', True),
'person.quotes': ('quote', False),
'person.keywords': ('item', True),
'character.quotes': ('item', True),
'character.quotes.item': ('quote', False),
'character.quotes.item.quote': ('line', False)
}
_allchars = string.maketrans('', '')
_keepchars = _allchars.translate(_allchars, string.ascii_lowercase + '-' +
string.digits)
def _tagAttr(key, fullpath):
"""Return a tuple with a tag name and a (possibly empty) attribute,
applying the conversions specified in TAGS_TO_MODIFY and checking
that the tag is safe for a XML document."""
attrs = {}
_escapedKey = escape4xml(key)
if fullpath in TAGS_TO_MODIFY:
tagName, useTitle = TAGS_TO_MODIFY[fullpath]
if useTitle:
attrs['key'] = _escapedKey
elif not isinstance(key, unicode):
if isinstance(key, str):
tagName = unicode(key, 'ascii', 'ignore')
else:
strType = str(type(key)).replace("<type '", "").replace("'>", "")
attrs['keytype'] = strType
tagName = unicode(key)
else:
tagName = key
if isinstance(key, int):
attrs['keytype'] = 'int'
origTagName = tagName
tagName = tagName.lower().replace(' ', '-')
tagName = str(tagName).translate(_allchars, _keepchars)
if origTagName != tagName:
if 'key' not in attrs:
attrs['key'] = _escapedKey
if (not tagName) or tagName[0].isdigit() or tagName[0] == '-':
# This is a fail-safe: we should never be here, since unpredictable
# keys must be listed in TAGS_TO_MODIFY.
        # This will probably break the DTD/schema, but at least it will
        # produce valid XML.
tagName = 'item'
_utils_logger.error('invalid tag: %s [%s]' % (_escapedKey, fullpath))
attrs['key'] = _escapedKey
return tagName, u' '.join([u'%s="%s"' % i for i in attrs.items()])
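# Illustrative example (sketched from the code above): a key with spaces is
# turned into a safe tag name, keeping the original key as an attribute:
#   _tagAttr('number of votes', 'movie')
#       -> ('number-of-votes', 'key="number of votes"')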
def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
titlesRefs=None, namesRefs=None, charactersRefs=None,
_topLevel=True, key2infoset=None, fullpath=''):
"""Convert a sequence or a dictionary to a list of XML
unicode strings."""
if _l is None:
_l = []
if isinstance(seq, dict):
for key in seq:
value = seq[key]
if isinstance(key, _Container):
# Here we're assuming that a _Container is never a top-level
# key (otherwise we should handle key2infoset).
openTag, closeTag = _tag4TON(key)
# So that fullpath will contains something meaningful.
tagName = key.__class__.__name__.lower()
else:
tagName, attrs = _tagAttr(key, fullpath)
openTag = u'<%s' % tagName
if attrs:
openTag += ' %s' % attrs
if _topLevel and key2infoset and key in key2infoset:
openTag += u' infoset="%s"' % key2infoset[key]
if isinstance(value, int):
openTag += ' type="int"'
elif isinstance(value, float):
openTag += ' type="float"'
openTag += u'>'
closeTag = u'</%s>' % tagName
_l.append(openTag)
_seq2xml(value, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath, tagName))
_l.append(closeTag)
elif isinstance(seq, (list, tuple)):
tagName, attrs = _tagAttr('item', fullpath)
beginTag = u'<%s' % tagName
if attrs:
beginTag += u' %s' % attrs
#beginTag += u'>'
closeTag = u'</%s>' % tagName
for item in seq:
if isinstance(item, _Container):
_seq2xml(item, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath,
item.__class__.__name__.lower()))
else:
openTag = beginTag
if isinstance(item, int):
openTag += ' type="int"'
elif isinstance(item, float):
openTag += ' type="float"'
openTag += u'>'
_l.append(openTag)
_seq2xml(item, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath, tagName))
_l.append(closeTag)
else:
if isinstance(seq, _Container):
_l.extend(_tag4TON(seq))
else:
# Text, ints, floats and the like.
_l.append(_normalizeValue(seq, withRefs=withRefs,
modFunct=modFunct,
titlesRefs=titlesRefs,
namesRefs=namesRefs,
charactersRefs=charactersRefs))
return _l
_xmlHead = u"""<?xml version="1.0"?>
<!DOCTYPE %s SYSTEM "http://imdbpy.sf.net/dtd/imdbpy{VERSION}.dtd">
"""
_xmlHead = _xmlHead.replace('{VERSION}',
VERSION.replace('.', '').split('dev')[0][:2])
class _Container(object):
"""Base class for Movie, Person, Character and Company classes."""
# The default sets of information retrieved.
default_info = ()
# Aliases for some not-so-intuitive keys.
keys_alias = {}
# List of keys to modify.
keys_tomodify_list = ()
# Function used to compare two instances of this class.
cmpFunct = None
# Regular expression used to build the 'full-size (headshot|cover url)'.
_re_fullsizeURL = re.compile(r'\._V1\._SX(\d+)_SY(\d+)_')
def __init__(self, myID=None, data=None, notes=u'',
currentRole=u'', roleID=None, roleIsPerson=False,
accessSystem=None, titlesRefs=None, namesRefs=None,
charactersRefs=None, modFunct=None, *args, **kwds):
"""Initialize a Movie, Person, Character or Company object.
*myID* -- your personal identifier for this object.
*data* -- a dictionary used to initialize the object.
*notes* -- notes for the person referred in the currentRole
attribute; e.g.: '(voice)' or the alias used in the
movie credits.
*accessSystem* -- a string representing the data access system used.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically build.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function applied to text fields before they are returned.
"""
self.reset()
self.accessSystem = accessSystem
self.myID = myID
if data is None: data = {}
self.set_data(data, override=1)
self.notes = notes
if titlesRefs is None: titlesRefs = {}
self.update_titlesRefs(titlesRefs)
if namesRefs is None: namesRefs = {}
self.update_namesRefs(namesRefs)
if charactersRefs is None: charactersRefs = {}
self.update_charactersRefs(charactersRefs)
self.set_mod_funct(modFunct)
self.keys_tomodify = {}
for item in self.keys_tomodify_list:
self.keys_tomodify[item] = None
self._roleIsPerson = roleIsPerson
if not roleIsPerson:
from imdb.Character import Character
self._roleClass = Character
else:
from imdb.Person import Person
self._roleClass = Person
self.currentRole = currentRole
if roleID:
self.roleID = roleID
self._init(*args, **kwds)
def _get_roleID(self):
"""Return the characterID or personID of the currentRole object."""
if not self.__role:
return None
if isinstance(self.__role, list):
return [x.getID() for x in self.__role]
return self.currentRole.getID()
def _set_roleID(self, roleID):
"""Set the characterID or personID of the currentRole object."""
if not self.__role:
# XXX: needed? Just ignore it? It's probably safer to
# ignore it, to prevent some bugs in the parsers.
#raise IMDbError,"Can't set ID of an empty Character/Person object."
pass
if not self._roleIsPerson:
if not isinstance(roleID, (list, tuple)):
self.currentRole.characterID = roleID
else:
for index, item in enumerate(roleID):
self.__role[index].characterID = item
else:
if not isinstance(roleID, (list, tuple)):
self.currentRole.personID = roleID
else:
for index, item in enumerate(roleID):
self.__role[index].personID = item
roleID = property(_get_roleID, _set_roleID,
doc="the characterID or personID of the currentRole object.")
def _get_currentRole(self):
"""Return a Character or Person instance."""
if self.__role:
return self.__role
return self._roleClass(name=u'', accessSystem=self.accessSystem,
modFunct=self.modFunct)
def _set_currentRole(self, role):
"""Set self.currentRole to a Character or Person instance."""
if isinstance(role, (unicode, str)):
if not role:
self.__role = None
else:
self.__role = self._roleClass(name=role, modFunct=self.modFunct,
accessSystem=self.accessSystem)
elif isinstance(role, (list, tuple)):
self.__role = RolesList()
for item in role:
if isinstance(item, (unicode, str)):
self.__role.append(self._roleClass(name=item,
accessSystem=self.accessSystem,
modFunct=self.modFunct))
else:
self.__role.append(item)
if not self.__role:
self.__role = None
else:
self.__role = role
currentRole = property(_get_currentRole, _set_currentRole,
doc="The role of a Person in a Movie" + \
" or the interpreter of a Character in a Movie.")
def _init(self, **kwds): pass
def reset(self):
"""Reset the object."""
self.data = {}
self.myID = None
self.notes = u''
self.titlesRefs = {}
self.namesRefs = {}
self.charactersRefs = {}
self.modFunct = modClearRefs
self.current_info = []
self.infoset2keys = {}
self.key2infoset = {}
self.__role = None
self._reset()
def _reset(self): pass
def clear(self):
"""Reset the dictionary."""
self.data.clear()
self.notes = u''
self.titlesRefs = {}
self.namesRefs = {}
self.charactersRefs = {}
self.current_info = []
self.infoset2keys = {}
self.key2infoset = {}
self.__role = None
self._clear()
def _clear(self): pass
def get_current_info(self):
"""Return the current set of information retrieved."""
return self.current_info
def update_infoset_map(self, infoset, keys, mainInfoset):
"""Update the mappings between infoset and keys."""
if keys is None:
keys = []
if mainInfoset is not None:
theIS = mainInfoset
else:
theIS = infoset
self.infoset2keys[theIS] = keys
for key in keys:
self.key2infoset[key] = theIS
def set_current_info(self, ci):
"""Set the current set of information retrieved."""
        # XXX: Remove? It's never used and there's no way to update infoset2keys.
self.current_info = ci
def add_to_current_info(self, val, keys=None, mainInfoset=None):
"""Add a set of information to the current list."""
if val not in self.current_info:
self.current_info.append(val)
self.update_infoset_map(val, keys, mainInfoset)
def has_current_info(self, val):
"""Return true if the given set of information is in the list."""
return val in self.current_info
def set_mod_funct(self, modFunct):
"""Set the fuction used to modify the strings."""
if modFunct is None: modFunct = modClearRefs
self.modFunct = modFunct
def update_titlesRefs(self, titlesRefs):
"""Update the dictionary with the references to movies."""
self.titlesRefs.update(titlesRefs)
def get_titlesRefs(self):
"""Return the dictionary with the references to movies."""
return self.titlesRefs
def update_namesRefs(self, namesRefs):
"""Update the dictionary with the references to names."""
self.namesRefs.update(namesRefs)
def get_namesRefs(self):
"""Return the dictionary with the references to names."""
return self.namesRefs
def update_charactersRefs(self, charactersRefs):
"""Update the dictionary with the references to characters."""
self.charactersRefs.update(charactersRefs)
def get_charactersRefs(self):
"""Return the dictionary with the references to characters."""
return self.charactersRefs
def set_data(self, data, override=0):
"""Set the movie data to the given dictionary; if 'override' is
        set, the previous data is removed, otherwise the two dictionaries
are merged.
"""
if not override:
self.data.update(data)
else:
self.data = data
def getID(self):
"""Return movieID, personID, characterID or companyID."""
raise NotImplementedError('override this method')
def __cmp__(self, other):
"""Compare two Movie, Person, Character or Company objects."""
# XXX: raise an exception?
if self.cmpFunct is None: return -1
if not isinstance(other, self.__class__): return -1
return self.cmpFunct(other)
def __hash__(self):
"""Hash for this object."""
# XXX: does it always work correctly?
theID = self.getID()
if theID is not None and self.accessSystem not in ('UNKNOWN', None):
# Handle 'http' and 'mobile' as they are the same access system.
acs = self.accessSystem
if acs in ('mobile', 'httpThin'):
acs = 'http'
# There must be some indication of the kind of the object, too.
s4h = '%s:%s[%s]' % (self.__class__.__name__, theID, acs)
else:
s4h = repr(self)
return hash(s4h)
def isSame(self, other):
"""Return True if the two represent the same object."""
if not isinstance(other, self.__class__): return 0
if hash(self) == hash(other): return 1
return 0
def __len__(self):
"""Number of items in the data dictionary."""
return len(self.data)
def getAsXML(self, key, _with_add_keys=True):
"""Return a XML representation of the specified key, or None
if empty. If _with_add_keys is False, dinamically generated
keys are excluded."""
# Prevent modifyStrings in __getitem__ to be called; if needed,
# it will be called by the _normalizeValue function.
origModFunct = self.modFunct
self.modFunct = modNull
# XXX: not totally sure it's a good idea, but could prevent
# problems (i.e.: the returned string always contains
# a DTD valid tag, and not something that can be only in
# the keys_alias map).
key = self.keys_alias.get(key, key)
if (not _with_add_keys) and (key in self._additional_keys()):
self.modFunct = origModFunct
return None
try:
withRefs = False
if key in self.keys_tomodify and \
origModFunct not in (None, modNull):
withRefs = True
value = self.get(key)
if value is None:
return None
tag = self.__class__.__name__.lower()
return u''.join(_seq2xml({key: value}, withRefs=withRefs,
modFunct=origModFunct,
titlesRefs=self.titlesRefs,
namesRefs=self.namesRefs,
charactersRefs=self.charactersRefs,
key2infoset=self.key2infoset,
fullpath=tag))
finally:
self.modFunct = origModFunct
def asXML(self, _with_add_keys=True):
"""Return a XML representation of the whole object.
If _with_add_keys is False, dinamically generated keys are excluded."""
beginTag, endTag = _tag4TON(self, addAccessSystem=True,
_containerOnly=True)
resList = [beginTag]
for key in self.keys():
value = self.getAsXML(key, _with_add_keys=_with_add_keys)
if not value:
continue
resList.append(value)
resList.append(endTag)
head = _xmlHead % self.__class__.__name__.lower()
return head + u''.join(resList)
def _getitem(self, key):
"""Handle special keys."""
return None
def __getitem__(self, key):
"""Return the value for a given key, checking key aliases;
a KeyError exception is raised if the key is not found.
"""
value = self._getitem(key)
if value is not None: return value
# Handle key aliases.
key = self.keys_alias.get(key, key)
rawData = self.data[key]
if key in self.keys_tomodify and \
self.modFunct not in (None, modNull):
try:
return modifyStrings(rawData, self.modFunct, self.titlesRefs,
self.namesRefs, self.charactersRefs)
except RuntimeError, e:
# Symbian/python 2.2 has a poor regexp implementation.
import warnings
warnings.warn('RuntimeError in '
"imdb.utils._Container.__getitem__; if it's not "
"a recursion limit exceeded and we're not running "
"in a Symbian environment, it's a bug:\n%s" % e)
return rawData
def __setitem__(self, key, item):
"""Directly store the item with the given key."""
self.data[key] = item
def __delitem__(self, key):
"""Remove the given section or key."""
# XXX: how to remove an item of a section?
del self.data[key]
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
return []
def keys(self):
"""Return a list of valid keys."""
return self.data.keys() + self._additional_keys()
def items(self):
"""Return the items in the dictionary."""
return [(k, self.get(k)) for k in self.keys()]
# XXX: is this enough?
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self):
"""Return the values in the dictionary."""
return [self.get(k) for k in self.keys()]
def has_key(self, key):
"""Return true if a given section is defined."""
try:
self.__getitem__(key)
except KeyError:
return 0
return 1
# XXX: really useful???
# consider also that this will confuse people who meant to
# call ia.update(movieObject, 'data set') instead.
def update(self, dict):
self.data.update(dict)
def get(self, key, failobj=None):
"""Return the given section, or default if it's not found."""
try:
return self.__getitem__(key)
except KeyError:
return failobj
def setdefault(self, key, failobj=None):
if not self.has_key(key):
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __repr__(self):
"""String representation of an object."""
raise NotImplementedError('override this method')
def __str__(self):
"""Movie title or person name."""
raise NotImplementedError('override this method')
def __contains__(self, key):
raise NotImplementedError('override this method')
def append_item(self, key, item):
"""The item is appended to the list identified by the given key."""
self.data.setdefault(key, []).append(item)
def set_item(self, key, item):
"""Directly store the item with the given key."""
self.data[key] = item
def __nonzero__(self):
"""Return true if self.data contains something."""
if self.data: return 1
return 0
def __deepcopy__(self, memo):
raise NotImplementedError('override this method')
def copy(self):
"""Return a deep copy of the object itself."""
return deepcopy(self)
def flatten(seq, toDescend=(list, dict, tuple), yieldDictKeys=0,
onlyKeysType=(_Container,), scalar=None):
"""Iterate over nested lists and dictionaries; toDescend is a list
    or a tuple of types to be considered non-scalar; if yieldDictKeys is
    true, dictionary keys are yielded as well; if scalar is not None, only
items of the given type(s) are yielded."""
if scalar is None or isinstance(seq, scalar):
yield seq
if isinstance(seq, toDescend):
if isinstance(seq, (dict, _Container)):
if yieldDictKeys:
# Yield also the keys of the dictionary.
for key in seq.iterkeys():
for k in flatten(key, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
if onlyKeysType and isinstance(k, onlyKeysType):
yield k
for value in seq.itervalues():
for v in flatten(value, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
yield v
elif not isinstance(seq, (str, unicode, int, float)):
for item in seq:
for i in flatten(item, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
yield i
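# Typical use (illustrative, assuming 'movie' is a Movie instance): collect
# every _Container object reachable from a result, at any depth:
#   [x for x in flatten(movie, scalar=_Container)]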
| gpl-3.0 |
MarnuLombard/namebench | nb_third_party/dns/renderer.py | 248 | 11865 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Help for building DNS wire format messages"""
import cStringIO
import struct
import random
import time
import dns.exception
import dns.tsig
QUESTION = 0
ANSWER = 1
AUTHORITY = 2
ADDITIONAL = 3
class Renderer(object):
"""Helper class for building DNS wire-format messages.
Most applications can use the higher-level L{dns.message.Message}
class and its to_wire() method to generate wire-format messages.
This class is for those applications which need finer control
over the generation of messages.
Typical use::
r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
r.add_question(qname, qtype, qclass)
r.add_rrset(dns.renderer.ANSWER, rrset_1)
r.add_rrset(dns.renderer.ANSWER, rrset_2)
r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
r.add_edns(0, 0, 4096)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2)
r.write_header()
r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
wire = r.get_wire()
@ivar output: where rendering is written
@type output: cStringIO.StringIO object
@ivar id: the message id
@type id: int
@ivar flags: the message flags
@type flags: int
@ivar max_size: the maximum size of the message
@type max_size: int
@ivar origin: the origin to use when rendering relative names
@type origin: dns.name.Name object
@ivar compress: the compression table
@type compress: dict
@ivar section: the section currently being rendered
@type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER,
dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL)
@ivar counts: list of the number of RRs in each section
@type counts: int list of length 4
@ivar mac: the MAC of the rendered message (if TSIG was used)
@type mac: string
"""
def __init__(self, id=None, flags=0, max_size=65535, origin=None):
"""Initialize a new renderer.
@param id: the message id
@type id: int
@param flags: the DNS message flags
@type flags: int
@param max_size: the maximum message size; the default is 65535.
If rendering results in a message greater than I{max_size},
then L{dns.exception.TooBig} will be raised.
@type max_size: int
@param origin: the origin to use when rendering relative names
        @type origin: dns.name.Name or None.
"""
self.output = cStringIO.StringIO()
if id is None:
self.id = random.randint(0, 65535)
else:
self.id = id
self.flags = flags
self.max_size = max_size
self.origin = origin
self.compress = {}
self.section = QUESTION
self.counts = [0, 0, 0, 0]
self.output.write('\x00' * 12)
self.mac = ''
def _rollback(self, where):
"""Truncate the output buffer at offset I{where}, and remove any
compression table entries that pointed beyond the truncation
point.
@param where: the offset
@type where: int
"""
self.output.seek(where)
self.output.truncate()
keys_to_delete = []
for k, v in self.compress.iteritems():
if v >= where:
keys_to_delete.append(k)
for k in keys_to_delete:
del self.compress[k]
def _set_section(self, section):
"""Set the renderer's current section.
        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
ADDITIONAL. Sections may be empty.
@param section: the section
@type section: int
@raises dns.exception.FormError: an attempt was made to set
a section value less than the current section.
"""
if self.section != section:
if self.section > section:
raise dns.exception.FormError
self.section = section
def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
"""Add a question to the message.
@param qname: the question name
@type qname: dns.name.Name
@param rdtype: the question rdata type
@type rdtype: int
@param rdclass: the question rdata class
@type rdclass: int
"""
self._set_section(QUESTION)
before = self.output.tell()
qname.to_wire(self.output, self.compress, self.origin)
self.output.write(struct.pack("!HH", rdtype, rdclass))
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns.exception.TooBig
self.counts[QUESTION] += 1
def add_rrset(self, section, rrset, **kw):
"""Add the rrset to the specified section.
Any keyword arguments are passed on to the rdataset's to_wire()
routine.
@param section: the section
@type section: int
@param rrset: the rrset
@type rrset: dns.rrset.RRset object
"""
self._set_section(section)
before = self.output.tell()
n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns.exception.TooBig
self.counts[section] += n
def add_rdataset(self, section, name, rdataset, **kw):
"""Add the rdataset to the specified section, using the specified
name as the owner name.
Any keyword arguments are passed on to the rdataset's to_wire()
routine.
@param section: the section
@type section: int
@param name: the owner name
@type name: dns.name.Name object
@param rdataset: the rdataset
@type rdataset: dns.rdataset.Rdataset object
"""
self._set_section(section)
before = self.output.tell()
n = rdataset.to_wire(name, self.output, self.compress, self.origin,
**kw)
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns.exception.TooBig
self.counts[section] += n
def add_edns(self, edns, ednsflags, payload, options=None):
"""Add an EDNS OPT record to the message.
@param edns: The EDNS level to use.
@type edns: int
@param ednsflags: EDNS flag values.
@type ednsflags: int
@param payload: The EDNS sender's payload field, which is the maximum
        size of a UDP datagram the sender can handle.
@type payload: int
@param options: The EDNS options list
@type options: list of dns.edns.Option instances
@see: RFC 2671
"""
# make sure the EDNS version in ednsflags agrees with edns
ednsflags &= 0xFF00FFFFL
ednsflags |= (edns << 16)
self._set_section(ADDITIONAL)
before = self.output.tell()
self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload,
ednsflags, 0))
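        # The trailing 0 above is an RDLEN placeholder; when options are
        # present, the per-option and total lengths are patched in below by
        # seeking back over the placeholders once the option data is written.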
        if options is not None:
lstart = self.output.tell()
for opt in options:
stuff = struct.pack("!HH", opt.otype, 0)
self.output.write(stuff)
start = self.output.tell()
opt.to_wire(self.output)
end = self.output.tell()
assert end - start < 65536
self.output.seek(start - 2)
stuff = struct.pack("!H", end - start)
self.output.write(stuff)
self.output.seek(0, 2)
lend = self.output.tell()
assert lend - lstart < 65536
self.output.seek(lstart - 2)
stuff = struct.pack("!H", lend - lstart)
self.output.write(stuff)
self.output.seek(0, 2)
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns.exception.TooBig
self.counts[ADDITIONAL] += 1
def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
request_mac, algorithm=dns.tsig.default_algorithm):
"""Add a TSIG signature to the message.
@param keyname: the TSIG key name
@type keyname: dns.name.Name object
@param secret: the secret to use
@type secret: string
@param fudge: TSIG time fudge
@type fudge: int
@param id: the message id to encode in the tsig signature
@type id: int
        @param tsig_error: the TSIG error code
@type tsig_error: int
@param other_data: TSIG other data.
@type other_data: string
        @param request_mac: This message is a response to the request which
        had the specified MAC.
        @type request_mac: string
        @param algorithm: the TSIG algorithm to use
        @type algorithm: dns.name.Name object or string
"""
self._set_section(ADDITIONAL)
before = self.output.tell()
s = self.output.getvalue()
(tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
keyname,
secret,
int(time.time()),
fudge,
id,
tsig_error,
other_data,
request_mac,
algorithm=algorithm)
keyname.to_wire(self.output, self.compress, self.origin)
self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG,
dns.rdataclass.ANY, 0, 0))
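        # The two zeros above are the TTL and an RDLEN placeholder; RDLEN is
        # patched below once the length of the TSIG rdata is known.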
rdata_start = self.output.tell()
self.output.write(tsig_rdata)
after = self.output.tell()
assert after - rdata_start < 65536
if after >= self.max_size:
self._rollback(before)
raise dns.exception.TooBig
self.output.seek(rdata_start - 2)
self.output.write(struct.pack('!H', after - rdata_start))
self.counts[ADDITIONAL] += 1
self.output.seek(10)
self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
self.output.seek(0, 2)
def write_header(self):
"""Write the DNS message header.
        Writing the DNS message header is done after all sections
have been rendered, but before the optional TSIG signature
is added.
"""
self.output.seek(0)
self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
self.counts[0], self.counts[1],
self.counts[2], self.counts[3]))
self.output.seek(0, 2)
def get_wire(self):
"""Return the wire format message.
@rtype: string
"""
return self.output.getvalue()
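# Illustrative usage sketch of the renderer class above (untested; assumes
# dns.name and dns.rdatatype, imported at the top of this module):
#
#     r = Renderer(id=0x1234, flags=0x0100)
#     r.add_question(dns.name.from_text('example.com.'), dns.rdatatype.A)
#     r.write_header()
#     wire = r.get_wire()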
| apache-2.0 |
quantifiedcode-bot/invenio-utils | invenio_utils/w3c_validator.py | 3 | 7518 | # This file is part of Invenio.
# Copyright (C) 2007, 2008, 2010, 2011, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Exports the function w3c_validate, which validates a text against the W3C validator.
"""
__revision__ = "$Id$"
import httplib
import mimetypes
import re
import time
from xml.sax.saxutils import unescape
from invenio.config import CFG_CERN_SITE
if CFG_CERN_SITE:
# A host mirroring W3C validator
CFG_W3C_VALIDATOR_HOST = 'pcuds12.cern.ch'
# The selector for checking the page
CFG_W3C_VALIDATOR_SELECTOR = '/w3c-markup-validator/check'
    # Whether to sleep for 1s to be kind to the server
CFG_W3C_VALIDATOR_SLEEP_P = False
else:
CFG_W3C_VALIDATOR_HOST = 'validator.w3.org'
CFG_W3C_VALIDATOR_SELECTOR = '/check'
CFG_W3C_VALIDATOR_SLEEP_P = True
# Whether the regression tests automatically validate pages.
CFG_TESTS_REQUIRE_HTML_VALIDATION = False
def w3c_validate(text, host=CFG_W3C_VALIDATOR_HOST,
selector=CFG_W3C_VALIDATOR_SELECTOR,
sleep_p=CFG_W3C_VALIDATOR_SLEEP_P):
""" Validate the text against W3C validator like host, with a given selector
and eventually sleeping for a second.
Return a triple, with True if the document validate as the first element.
If False, then the second and third elements contain respectively a list of
errors and of warnings of the form: (line number, column, error, row involved).
"""
if sleep_p:
time.sleep(1)
h = _post_multipart(
host, selector, [
('output', 'soap12')], [
('uploaded_file', 'foobar.html', text)])
errcode, errmsg, headers = h.getreply()
if 'X-W3C-Validator-Status' in headers:
if headers['X-W3C-Validator-Status'] == 'Valid':
return (True, [], [])
else:
errors, warnings = _parse_validator_soap(
h.file.read(), text.split('\n'))
return (False, errors, warnings)
else:
return (False, [], [])
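# Illustrative usage sketch (untested; requires a reachable validator host):
#
#     valid, errors, warnings = w3c_validate(html_text)
#     if not valid:
#         print w3c_errors_to_str(errors, warnings)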
def w3c_errors_to_str(errors, warnings):
""" Pretty print errors and warnings coming from w3c_validate """
ret = ''
if errors:
ret += '%s errors:\n' % len(errors)
for line, col, msg, text in errors:
ret += '%s (%s:%s):\n' % (unescape(msg,
{'"': "'"}), line, col)
ret += text + '\n'
ret += ' ' * (int(col) - 1) + '^\n'
ret += '---\n'
if warnings:
ret += '%s warnings:\n' % len(warnings)
for line, col, msg, text in warnings:
ret += '%s (%s:%s):\n' % (unescape(msg,
{'"': "'"}), line, col)
ret += text + '\n'
ret += ' ' * (int(col) - 1) + '^\n'
ret += '---\n'
return ret
def w3c_validate_p(text, host=CFG_W3C_VALIDATOR_HOST,
selector=CFG_W3C_VALIDATOR_SELECTOR,
sleep_p=CFG_W3C_VALIDATOR_SLEEP_P):
""" Validate the text against W3C validator like host, with a given selector
and eventually sleeping for a second.
Return a True if the document validate.
"""
if sleep_p:
time.sleep(1)
h = _post_multipart(
host, selector, [
('output', 'soap12')], [
('uploaded_file', 'foobar.html', text)])
errcode, errmsg, headers = h.getreply()
if 'X-W3C-Validator-Status' in headers:
return headers['X-W3C-Validator-Status'] == 'Valid'
return False
_errors_re = re.compile(
r'<m:errors>.*<m:errorcount>(?P<errorcount>[\d]+)\</m:errorcount>.*<m:errorlist>(?P<errors>.*)</m:errorlist>.*</m:errors>',
re.M | re.S)
_warnings_re = re.compile(
r'<m:warnings>.*<m:warningcount>(?P<warningcount>[\d]+)</m:warningcount>.*<m:warninglist>(?P<warnings>.*)</m:warninglist>.*</m:warnings>',
re.M | re.S)
_error_re = re.compile(
r'<m:error>.*<m:line>(?P<line>[\d]+)</m:line>.*<m:col>(?P<col>[\d]+)</m:col>.*<m:message>(?P<message>.+)</m:message>.*</m:error>',
re.M | re.S)
_warning_re = re.compile(
r'<m:warning>.*<m:line>(?P<line>[\d]+)</m:line>.*<m:col>(?P<col>[\d]+)</m:col>.*<m:message>(?P<message>.+)</m:message>.*</m:warning>',
re.M | re.S)
def _parse_validator_soap(soap_output, rows):
""" Given the soap output provided by W3C validator it returns a tuple
containing the list of errors in the form (line, col, error_msg) and
the list of warnings in the same form.
"""
errors = _errors_re.search(soap_output)
warnings = _warnings_re.search(soap_output)
if errors:
errors = _error_re.findall(errors.group('errors'))
errors = map(lambda error: (error[0], error[1], error[
2], rows[int(error[0]) - 1]), errors)
else:
errors = []
if warnings:
warnings = _warning_re.findall(warnings.group('warnings'))
warnings = map(lambda warning: (warning[0], warning[1], warning[
2], rows[int(warning[0]) - 1]), warnings)
else:
warnings = []
return (errors, warnings)
def _post_multipart(host, selector, fields, files):
"""
Post fields and files to an http host as multipart/form-data.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return the HTTP connection object, from which the response can be read.
"""
content_type, body = _encode_multipart_formdata(fields, files)
h = httplib.HTTP(host)
h.putrequest('POST', selector)
h.putheader('content-type', content_type)
h.putheader('content-length', str(len(body)))
h.endheaders()
h.send(body)
return h
def _encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append(
'Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
L.append('Content-Type: %s' % _get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
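# For example, fields=[('output', 'soap12')] and
# files=[('uploaded_file', 'foo.html', text)] produce a body roughly like
# (CRLF line endings):
#
#     --<BOUNDARY>
#     Content-Disposition: form-data; name="output"
#
#     soap12
#     --<BOUNDARY>
#     Content-Disposition: form-data; name="uploaded_file"; filename="foo.html"
#     Content-Type: text/html
#
#     <contents of text>
#     --<BOUNDARY>--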
def _get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
| gpl-2.0 |
msnorm/forums | dotfiles/.vim/.ycm_extra_conf.py | 2 | 2114 | ##########################################################################
# Simple ycm_extra_conf.py example #
# Copyright (C) <2013> Onur Aslan <onur@onur.im> #
# #
# This file is loaded by default. Place your own .ycm_extra_conf.py to #
# project root to override this. #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
##########################################################################
# some default flags
# for more information install clang-3.2-doc package and
# check UsersManual.html
flags = [
'-Wall',
'-Werror',
# std is required
# clang won't know which language to use compiling headers
'-std=c++11',
# '-x' and 'c++' also required
# use 'c' (and a C '-std', e.g. '-std=c11') for C projects
'-x',
'c++',
# include third party libraries
#'-isystem',
#'/usr/include/python2.7',
]
# youcompleteme is calling this function to get flags
# You can also set database for flags. Check: JSONCompilationDatabase.html in
# clang-3.2-doc package
def FlagsForFile( filename ):
return {
'flags': flags,
'do_cache': True
}
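# Note: YouCompleteMe invokes FlagsForFile(filename) for each file it
# completes; 'do_cache': True lets it cache and reuse this result.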
| mit |
Southpaw-TACTIC/TACTIC | src/context/client/tactic-api-python-4.0.api04/Lib/HTMLParser.py | 4 | 13966 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
"""Exception raised for all parse errors."""
def __init__(self, msg, position=(None, None)):
assert msg
self.msg = msg
self.lineno = position[0]
self.offset = position[1]
def __str__(self):
result = self.msg
if self.lineno is not None:
result = result + ", at line %d" % self.lineno
if self.offset is not None:
result = result + ", column %d" % (self.offset + 1)
return result
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
markupbase.ParserBase.reset(self)
def feed(self, data):
"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self):
self.interesting = interesting_cdata
def clear_cdata_mode(self):
self.interesting = interesting_normal
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if end:
self.error("EOF in middle of construct")
break
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: #bail by consuming &#
self.handle_data(rawdata[0:2])
i = self.updatepos(i, 2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode()
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
self.updatepos(i, j)
self.error("malformed start tag")
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
j = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
self.error("bad end tag: %r" % (rawdata[i:j],))
tag = match.group(1)
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
entitydefs = None
def unescape(self, s):
if '&' not in s:
return s
def replaceEntities(s):
s = s.groups()[0]
if s[0] == "#":
s = s[1:]
if s[0] in ['x','X']:
c = int(s[1:], 16)
else:
c = int(s)
return unichr(c)
else:
# Cannot use name2codepoint directly, because HTMLParser supports apos,
# which is not part of HTML 4
import htmlentitydefs
if HTMLParser.entitydefs is None:
entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
for k, v in htmlentitydefs.name2codepoint.iteritems():
entitydefs[k] = unichr(v)
try:
return self.entitydefs[s]
except KeyError:
return '&'+s+';'
return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
| epl-1.0 |
yaolinz/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/utils.py | 146 | 11435 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.utils
~~~~~~~~~~~~~~~~~~~~~~~~
General utilities.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from datetime import datetime
from functools import partial
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import utils
from werkzeug.datastructures import Headers
from werkzeug.http import parse_date, http_date
from werkzeug.wrappers import BaseResponse
from werkzeug.test import Client, run_wsgi_app
from werkzeug._compat import text_type, implements_iterator
class GeneralUtilityTestCase(WerkzeugTestCase):
def test_redirect(self):
resp = utils.redirect(u'/füübär')
self.assert_in(b'/f%C3%BC%C3%BCb%C3%A4r', resp.get_data())
self.assert_equal(resp.headers['Location'], '/f%C3%BC%C3%BCb%C3%A4r')
self.assert_equal(resp.status_code, 302)
resp = utils.redirect(u'http://☃.net/', 307)
self.assert_in(b'http://xn--n3h.net/', resp.get_data())
self.assert_equal(resp.headers['Location'], 'http://xn--n3h.net/')
self.assert_equal(resp.status_code, 307)
resp = utils.redirect('http://example.com/', 305)
self.assert_equal(resp.headers['Location'], 'http://example.com/')
self.assert_equal(resp.status_code, 305)
def test_redirect_no_unicode_header_keys(self):
# Make sure all headers are native keys. This was a bug at one point
# due to an incorrect conversion.
resp = utils.redirect('http://example.com/', 305)
for key, value in resp.headers.items():
self.assert_equal(type(key), str)
self.assert_equal(type(value), text_type)
self.assert_equal(resp.headers['Location'], 'http://example.com/')
self.assert_equal(resp.status_code, 305)
def test_redirect_xss(self):
location = 'http://example.com/?xss="><script>alert(1)</script>'
resp = utils.redirect(location)
self.assert_not_in(b'<script>alert(1)</script>', resp.get_data())
location = 'http://example.com/?xss="onmouseover="alert(1)'
resp = utils.redirect(location)
self.assert_not_in(b'href="http://example.com/?xss="onmouseover="alert(1)"', resp.get_data())
def test_cached_property(self):
foo = []
class A(object):
def prop(self):
foo.append(42)
return 42
prop = utils.cached_property(prop)
a = A()
p = a.prop
q = a.prop
self.assert_true(p == q == 42)
self.assert_equal(foo, [42])
foo = []
class A(object):
def _prop(self):
foo.append(42)
return 42
prop = utils.cached_property(_prop, name='prop')
del _prop
a = A()
p = a.prop
q = a.prop
self.assert_true(p == q == 42)
self.assert_equal(foo, [42])
def test_environ_property(self):
class A(object):
environ = {'string': 'abc', 'number': '42'}
string = utils.environ_property('string')
missing = utils.environ_property('missing', 'spam')
read_only = utils.environ_property('number')
number = utils.environ_property('number', load_func=int)
broken_number = utils.environ_property('broken_number', load_func=int)
date = utils.environ_property('date', None, parse_date, http_date,
read_only=False)
foo = utils.environ_property('foo')
a = A()
self.assert_equal(a.string, 'abc')
self.assert_equal(a.missing, 'spam')
def test_assign():
a.read_only = 'something'
self.assert_raises(AttributeError, test_assign)
self.assert_equal(a.number, 42)
self.assert_equal(a.broken_number, None)
self.assert_is_none(a.date)
a.date = datetime(2008, 1, 22, 10, 0, 0, 0)
self.assert_equal(a.environ['date'], 'Tue, 22 Jan 2008 10:00:00 GMT')
def test_escape(self):
class Foo(str):
def __html__(self):
return text_type(self)
self.assert_equal(utils.escape(None), '')
self.assert_equal(utils.escape(42), '42')
self.assert_equal(utils.escape('<>'), '<>')
self.assert_equal(utils.escape('"foo"'), '"foo"')
self.assert_equal(utils.escape(Foo('<foo>')), '<foo>')
def test_unescape(self):
self.assert_equal(utils.unescape('<ä>'), u'<ä>')
def test_run_wsgi_app(self):
def foo(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
yield '1'
yield '2'
yield '3'
app_iter, status, headers = run_wsgi_app(foo, {})
self.assert_equal(status, '200 OK')
self.assert_equal(list(headers), [('Content-Type', 'text/plain')])
self.assert_equal(next(app_iter), '1')
self.assert_equal(next(app_iter), '2')
self.assert_equal(next(app_iter), '3')
self.assert_raises(StopIteration, partial(next, app_iter))
got_close = []
@implements_iterator
class CloseIter(object):
def __init__(self):
self.iterated = False
def __iter__(self):
return self
def close(self):
got_close.append(None)
def __next__(self):
if self.iterated:
raise StopIteration()
self.iterated = True
return 'bar'
def bar(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return CloseIter()
app_iter, status, headers = run_wsgi_app(bar, {})
self.assert_equal(status, '200 OK')
self.assert_equal(list(headers), [('Content-Type', 'text/plain')])
self.assert_equal(next(app_iter), 'bar')
self.assert_raises(StopIteration, partial(next, app_iter))
app_iter.close()
self.assert_equal(run_wsgi_app(bar, {}, True)[0], ['bar'])
self.assert_equal(len(got_close), 2)
def test_import_string(self):
import cgi
from werkzeug.debug import DebuggedApplication
self.assert_is(utils.import_string('cgi.escape'), cgi.escape)
self.assert_is(utils.import_string(u'cgi.escape'), cgi.escape)
self.assert_is(utils.import_string('cgi:escape'), cgi.escape)
self.assert_is_none(utils.import_string('XXXXXXXXXXXX', True))
self.assert_is_none(utils.import_string('cgi.XXXXXXXXXXXX', True))
self.assert_is(utils.import_string(u'cgi.escape'), cgi.escape)
self.assert_is(utils.import_string(u'werkzeug.debug.DebuggedApplication'), DebuggedApplication)
self.assert_raises(ImportError, utils.import_string, 'XXXXXXXXXXXXXXXX')
self.assert_raises(ImportError, utils.import_string, 'cgi.XXXXXXXXXX')
def test_find_modules(self):
self.assert_equal(list(utils.find_modules('werkzeug.debug')), \
['werkzeug.debug.console', 'werkzeug.debug.repr',
'werkzeug.debug.tbtools'])
def test_html_builder(self):
html = utils.html
xhtml = utils.xhtml
self.assert_equal(html.p('Hello World'), '<p>Hello World</p>')
self.assert_equal(html.a('Test', href='#'), '<a href="#">Test</a>')
self.assert_equal(html.br(), '<br>')
self.assert_equal(xhtml.br(), '<br />')
self.assert_equal(html.img(src='foo'), '<img src="foo">')
self.assert_equal(xhtml.img(src='foo'), '<img src="foo" />')
self.assert_equal(html.html(
html.head(
html.title('foo'),
html.script(type='text/javascript')
)
), '<html><head><title>foo</title><script type="text/javascript">'
'</script></head></html>')
self.assert_equal(html('<foo>'), '<foo>')
self.assert_equal(html.input(disabled=True), '<input disabled>')
self.assert_equal(xhtml.input(disabled=True), '<input disabled="disabled" />')
self.assert_equal(html.input(disabled=''), '<input>')
self.assert_equal(xhtml.input(disabled=''), '<input />')
self.assert_equal(html.input(disabled=None), '<input>')
self.assert_equal(xhtml.input(disabled=None), '<input />')
self.assert_equal(html.script('alert("Hello World");'), '<script>' \
'alert("Hello World");</script>')
self.assert_equal(xhtml.script('alert("Hello World");'), '<script>' \
'/*<![CDATA[*/alert("Hello World");/*]]>*/</script>')
def test_validate_arguments(self):
take_none = lambda: None
take_two = lambda a, b: None
take_two_one_default = lambda a, b=0: None
self.assert_equal(utils.validate_arguments(take_two, (1, 2,), {}), ((1, 2), {}))
self.assert_equal(utils.validate_arguments(take_two, (1,), {'b': 2}), ((1, 2), {}))
self.assert_equal(utils.validate_arguments(take_two_one_default, (1,), {}), ((1, 0), {}))
self.assert_equal(utils.validate_arguments(take_two_one_default, (1, 2), {}), ((1, 2), {}))
self.assert_raises(utils.ArgumentValidationError,
utils.validate_arguments, take_two, (), {})
self.assert_equal(utils.validate_arguments(take_none, (1, 2,), {'c': 3}), ((), {}))
self.assert_raises(utils.ArgumentValidationError,
utils.validate_arguments, take_none, (1,), {}, drop_extra=False)
self.assert_raises(utils.ArgumentValidationError,
utils.validate_arguments, take_none, (), {'a': 1}, drop_extra=False)
def test_header_set_duplication_bug(self):
headers = Headers([
('Content-Type', 'text/html'),
('Foo', 'bar'),
('Blub', 'blah')
])
headers['blub'] = 'hehe'
headers['blafasel'] = 'humm'
self.assert_equal(headers, Headers([
('Content-Type', 'text/html'),
('Foo', 'bar'),
('blub', 'hehe'),
('blafasel', 'humm')
]))
def test_append_slash_redirect(self):
def app(env, sr):
return utils.append_slash_redirect(env)(env, sr)
client = Client(app, BaseResponse)
response = client.get('foo', base_url='http://example.org/app')
self.assert_equal(response.status_code, 301)
self.assert_equal(response.headers['Location'], 'http://example.org/app/foo/')
def test_cached_property_doc(self):
@utils.cached_property
def foo():
"""testing"""
return 42
self.assert_equal(foo.__doc__, 'testing')
self.assert_equal(foo.__name__, 'foo')
self.assert_equal(foo.__module__, __name__)
def test_secure_filename(self):
self.assert_equal(utils.secure_filename('My cool movie.mov'),
'My_cool_movie.mov')
self.assert_equal(utils.secure_filename('../../../etc/passwd'),
'etc_passwd')
self.assert_equal(utils.secure_filename(u'i contain cool \xfcml\xe4uts.txt'),
'i_contain_cool_umlauts.txt')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GeneralUtilityTestCase))
return suite
| agpl-3.0 |
ANR-kamoulox/Telemeta | telemeta/management/commands/telemeta-export-fields.py | 2 | 1633 | from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import *
from telemeta.util.unaccent import unaccent
import logging
import codecs
from xlwt import Workbook
class Command(BaseCommand):
help = "Export media fields to a XLS file (see an example in example/data/"
args = "path"
first_row = 1
admin_email = 'webmaster@parisson.com'
language_codes = ['en_US', 'fr_FR', 'de_DE']
models = [MediaFonds, MediaCorpus, MediaCollection, MediaItem]
width = 256
def handle(self, *args, **options):
self.file = args[0]
self.book = Workbook()
for model in self.models:
self.sheet = self.book.add_sheet(model.element_type)
self.sheet.write(0, 0, 'Field')
self.sheet.col(0).width = self.width*32
k = 1
for language_code in self.language_codes:
self.sheet.write(0, k, language_code)
self.sheet.col(k).width = self.width*32
k += 1
i = 1
for field in model._meta.fields:
self.sheet.write(i, 0, field.attname)
j = 1
for language_code in self.language_codes:
translation.activate(language_code)
self.sheet.write(i, j, unicode(field.verbose_name.lower()))
j += 1
i += 1
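        # At this point each sheet holds the field names in column 0 and,
        # per language code, a column of translated verbose_name labels.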
self.book.save(self.file)
| agpl-3.0 |
freezmeinster/avagata-site | django/contrib/auth/tests/__init__.py | 231 | 1092 | from django.contrib.auth.tests.auth_backends import (BackendTest,
RowlevelBackendTest, AnonymousUserBackendTest, NoAnonymousUserBackendTest,
NoBackendsTest, InActiveUserBackendTest, NoInActiveUserBackendTest)
from django.contrib.auth.tests.basic import BasicTestCase
from django.contrib.auth.tests.decorators import LoginRequiredTestCase
from django.contrib.auth.tests.forms import (UserCreationFormTest,
AuthenticationFormTest, SetPasswordFormTest, PasswordChangeFormTest,
UserChangeFormTest, PasswordResetFormTest)
from django.contrib.auth.tests.remote_user import (RemoteUserTest,
RemoteUserNoCreateTest, RemoteUserCustomTest)
from django.contrib.auth.tests.models import ProfileTestCase
from django.contrib.auth.tests.signals import SignalTestCase
from django.contrib.auth.tests.tokens import TokenGeneratorTest
from django.contrib.auth.tests.views import (PasswordResetTest,
ChangePasswordTest, LoginTest, LogoutTest, LoginURLSettings)
from django.contrib.auth.tests.permissions import TestAuthPermissions
# The password for the fixture data users is 'password'
| bsd-3-clause |
awanke/bokeh | bokeh/charts/builder/tests/test_heatmap_builder.py | 33 | 4145 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import HeatMap
from bokeh.models import FactorRange
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHeatMap(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['apples'] = [4,5,8]
xyvalues['bananas'] = [1,2,4]
xyvalues['pears'] = [6,5,4]
xyvaluesdf = pd.DataFrame(xyvalues, index=['2009', '2010', '2011'])
        # prepare some data to check test results...
heights = widths = [0.95] * 9
colors = ['#e2e2e2', '#75968f', '#cc7878', '#ddb7b1', '#a5bab7', '#ddb7b1',
'#550b1d', '#e2e2e2', '#e2e2e2']
catx = ['apples', 'bananas', 'pears', 'apples', 'bananas', 'pears',
'apples', 'bananas', 'pears']
rates = [4, 1, 6, 5, 2, 5, 8, 4, 4]
caty = ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(HeatMap, _xy, palette=colors)
builder = hm._builders[0]
# TODO: Fix bug
#self.assertEqual(sorted(hm.groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['height'], heights)
assert_array_equal(builder._data['width'], widths)
assert_array_equal(builder._data['catx'], catx)
assert_array_equal(builder._data['rate'], rates)
assert_array_equal(builder._source._data, builder._data)
assert_array_equal(hm.x_range.factors, builder._catsx)
assert_array_equal(hm.y_range.factors, builder._catsy)
self.assertIsInstance(hm.x_range, FactorRange)
self.assertIsInstance(hm.y_range, FactorRange)
# TODO: (bev) not sure what correct behaviour is
#assert_array_equal(builder._data['color'], colors)
if i == 0: # if DataFrame
assert_array_equal(builder._data['caty'], caty)
else:
_caty = ['2009']*3 + ['2010']*3 + ['2011']*3
assert_array_equal(builder._data['caty'], _caty)
catx = ['0', '1', '2', '0', '1', '2', '0', '1', '2']
lvalues = [[4,5,8], [1,2,4], [6,5,4]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(HeatMap, _xy, palette=colors)
builder = hm._builders[0]
# TODO: FIX bug
#self.assertEqual(sorted(hm.groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['height'], heights)
assert_array_equal(builder._data['width'], widths)
assert_array_equal(builder._data['catx'], catx)
assert_array_equal(builder._data['rate'], rates)
assert_array_equal(builder._source._data, builder._data)
assert_array_equal(hm.x_range.factors, builder._catsx)
assert_array_equal(hm.y_range.factors, builder._catsy)
self.assertIsInstance(hm.x_range, FactorRange)
self.assertIsInstance(hm.y_range, FactorRange)
assert_array_equal(builder._data['caty'], caty)
# TODO: (bev) not sure what correct behaviour is
# assert_array_equal(builder._data['color'], colors)
| bsd-3-clause |
oscarlab/betrfs | ftfs/userspace-testing/default-post.py | 1 | 1387 | #!/usr/bin/python
import subprocess
import sys
import getopt
def usage() :
print "optional args:"
print "\t--test=<name of test>: specify test name"
print "\th, --help: show this dialogue"
if __name__ == "__main__":
test = ""
try :
opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "test="])
except getopt.GetoptError:
usage();
sys.exit(2)
for opt, arg in opts :
if opt in ("h", "--help") :
usage()
elif opt == "--test" :
test = arg
if test != "" :
print "\npost-test {0}.".format(test)
command = "rmmod ftfs.ko"
ret = subprocess.call(command, shell=True)
if ret != 0 :
print "ERROR!"
print "command \"{0}\" returning: {1}. exiting...".format(command, ret)
exit(ret)
# Check for umount errors from ftfs
command = "dmesg | grep \"ftfs_private_umount ret: -\""
ret = subprocess.call(command, shell=True)
if ret != 1 :
print "ERROR!"
print "command \"{0}\" returning: {1}. exiting...".format(command, ret)
exit(-1)
print "removed module, printing /proc/meminfo"
command = "cat /proc/meminfo"
ret = subprocess.call(command, shell=True)
if ret != 0 :
print "ERROR!"
print "cat /proc/meminfo returning: {0}. exiting...".format(ret)
exit(ret)
exit(ret)
| gpl-2.0 |
vasiliykochergin/euca2ools | euca2ools/commands/autoscaling/resumeprocesses.py | 6 | 2010 | # Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.argtypes import delimited_list
from euca2ools.commands.autoscaling import AutoScalingRequest
from requestbuilder import Arg
class ResumeProcesses(AutoScalingRequest):
DESCRIPTION = "Resume an auto-scaling group's auto-scaling processes"
ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
help='name of the auto-scaling group to update (required)'),
Arg('--processes', dest='ScalingProcesses.member',
metavar='PROCESS1,PROCESS2,...', type=delimited_list(','),
help='''comma-separated list of auto-scaling processes to
resume (default: all processes)''')]
| bsd-2-clause |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/sunburst/marker/_colorbar.py | 1 | 73487 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sunburst.marker"
_path_str = "sunburst.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
        ticks to one day, set `dtick` to 86400000.0. "date" also has
        special values; "M<n>" gives ticks spaced by a number of months.
        `n` must be a positive integer. To set ticks on the 15th of
        every third month, set `tick0` to "2000-01-15" and `dtick` to
        "M3". To set ticks every 4 years, set `dtick` to "M48".
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.sunburst.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.sunburst.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.sunburst.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
        When used in a template (as layout.template.data.sunburst
        .marker.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
sunburst.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.sunburst.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn.
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for ticktext.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
        Sets the source reference on Chart Studio Cloud for tickvals.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.sunburst.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use sunburst.marker.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use sunburst.marker.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.sunburst.marker
.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
            When used in a template (as layout.template.data
            .sunburst.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
sunburst.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
            ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
            tickvals.
tickwidth
Sets the tick width (in px).
title
            :class:`plotly.graph_objects.sunburst.marker.colorbar
            .Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
sunburst.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
sunburst.marker.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sunburst.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.sunburst.marker
.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
            When used in a template (as layout.template.data
            .sunburst.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
sunburst.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
            ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
            tickvals.
tickwidth
Sets the tick width (in px).
title
            :class:`plotly.graph_objects.sunburst.marker.colorbar
            .Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
sunburst.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
sunburst.marker.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sunburst.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.marker.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
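# Minimal usage sketch (illustrative; assumes plotly is importable and the
# labels/values are made up):
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Sunburst(
#         labels=["root", "A", "B"], parents=["", "root", "root"],
#         values=[10, 6, 4],
#         marker=dict(colors=[10, 6, 4], colorscale="Viridis",
#                     colorbar=dict(title=dict(text="value"),
#                                   thickness=15, ticks="outside")),
#     ))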
| mit |
moreati/django-allauth | allauth/socialaccount/providers/dropbox_oauth2/views.py | 66 | 1187 | from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
import requests
from .provider import DropboxOAuth2Provider
class DropboxOAuth2Adapter(OAuth2Adapter):
provider_id = DropboxOAuth2Provider.id
access_token_url = 'https://api.dropbox.com/1/oauth2/token'
authorize_url = 'https://www.dropbox.com/1/oauth2/authorize'
profile_url = 'https://api.dropbox.com/1/account/info'
redirect_uri_protocol = 'https'
def complete_login(self, request, app, token, **kwargs):
extra_data = requests.get(self.profile_url, params={
'access_token': token.token
})
        # This is only here because of a weird response from the test suite
if isinstance(extra_data, list):
extra_data = extra_data[0]
return self.get_provider().sociallogin_from_response(
request,
extra_data.json()
)
oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
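# Wiring note (illustrative; follows allauth's standard provider conventions
# rather than anything specific to this file): add
# 'allauth.socialaccount.providers.dropbox_oauth2' to INSTALLED_APPS and
# register a SocialApp with the Dropbox app key/secret; allauth then routes
# the provider's login and callback URLs to the two views above.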
| mit |
ashcrow/flagon | test/test_backend_db_django.py | 2 | 3643 | # The MIT License (MIT)
#
# Copyright (c) 2014 Steve Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
if not os.environ.get('DJANGO_SETTINGS_MODULE', None):
from unittest import SkipTest
raise SkipTest('Django environment not set up.')
from django.test import TestCase
from flagon.backends.db_django import DjangoORMBackend
from flagon.backends.db_django.models import FlagonFeature
class TestBackendDBDjango(TestCase):
"""
Test the DjangoORMBackend class.
"""
def setUp(self):
"""
Set up some items we can reuse.
"""
self.backend = DjangoORMBackend()
def test_exists(self):
"""
Verify Backend.exists returns proper info.
"""
assert self.backend.exists('exists') is False
# Create it and verify it now exists
FlagonFeature(name='exists', active=True).save()
assert self.backend.exists('exists')
def test_is_active(self):
"""
Verify Backend.is_active returns if the features is active.
"""
FlagonFeature(name='active', active=True).save()
FlagonFeature(name='notactive', active=False).save()
assert self.backend.is_active('active')
assert self.backend.is_active('notactive') is False
def test_turn_on(self):
"""
Verify Backend.turn_on turns a feature on.
"""
FlagonFeature(name='wasoff', active=False).save()
assert self.backend.is_active('wasoff') is False
self.backend.turn_on('wasoff')
assert self.backend.is_active('wasoff')
def test_turn_off(self):
"""
Verify Backend.turn_off turns a feature off.
"""
FlagonFeature(name='wason', active=True).save()
assert self.backend.is_active('wason')
self.backend.turn_off('wason')
assert self.backend.is_active('wason') is False
def test_toggle(self):
"""
Verify Backend.toggle flips the feature to it's reverse status.
"""
FlagonFeature(name='toggle', active=True).save()
assert self.backend.is_active('toggle')
self.backend.toggle('toggle')
assert self.backend.is_active('toggle') is False
self.backend.toggle('toggle')
assert self.backend.is_active('toggle')
def test_is_off(self):
"""
Verify Backend.is_off returns if the feature is off.
"""
FlagonFeature(name='isnotoff', active=True).save()
FlagonFeature(name='isoff', active=False).save()
assert self.backend.is_off('isnotoff') is False
assert self.backend.is_off('isoff')
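    # Condensed sketch of the backend API exercised by these tests
    # (illustrative; the feature name is made up):
    #
    #     backend = DjangoORMBackend()
    #     FlagonFeature(name='beta_ui', active=False).save()
    #     if backend.is_off('beta_ui'):
    #         backend.turn_on('beta_ui')
    #     assert backend.is_active('beta_ui')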
| mit |
sambrightman/emacs-for-python | python-libs/rope/refactor/method_object.py | 91 | 3868 | import warnings
from rope.base import pyobjects, exceptions, change, evaluate, codeanalyze
from rope.refactor import sourceutils, occurrences, rename
class MethodObject(object):
def __init__(self, project, resource, offset):
self.pycore = project.pycore
this_pymodule = self.pycore.resource_to_pyobject(resource)
pyname = evaluate.eval_location(this_pymodule, offset)
if pyname is None or not isinstance(pyname.get_object(),
pyobjects.PyFunction):
raise exceptions.RefactoringError(
'Replace method with method object refactoring should be '
'performed on a function.')
self.pyfunction = pyname.get_object()
self.pymodule = self.pyfunction.get_module()
self.resource = self.pymodule.get_resource()
def get_new_class(self, name):
body = sourceutils.fix_indentation(
self._get_body(), sourceutils.get_indent(self.pycore) * 2)
return 'class %s(object):\n\n%s%sdef __call__(self):\n%s' % \
(name, self._get_init(),
' ' * sourceutils.get_indent(self.pycore), body)
def get_changes(self, classname=None, new_class_name=None):
if new_class_name is not None:
warnings.warn(
'new_class_name parameter is deprecated; use classname',
DeprecationWarning, stacklevel=2)
classname = new_class_name
collector = codeanalyze.ChangeCollector(self.pymodule.source_code)
start, end = sourceutils.get_body_region(self.pyfunction)
indents = sourceutils.get_indents(
self.pymodule.lines, self.pyfunction.get_scope().get_start()) + \
sourceutils.get_indent(self.pycore)
new_contents = ' ' * indents + 'return %s(%s)()\n' % \
(classname, ', '.join(self._get_parameter_names()))
collector.add_change(start, end, new_contents)
insertion = self._get_class_insertion_point()
collector.add_change(insertion, insertion,
'\n\n' + self.get_new_class(classname))
changes = change.ChangeSet('Replace method with method object refactoring')
changes.add_change(change.ChangeContents(self.resource,
collector.get_changed()))
return changes
def _get_class_insertion_point(self):
current = self.pyfunction
while current.parent != self.pymodule:
current = current.parent
end = self.pymodule.lines.get_line_end(current.get_scope().get_end())
return min(end + 1, len(self.pymodule.source_code))
def _get_body(self):
body = sourceutils.get_body(self.pyfunction)
for param in self._get_parameter_names():
body = param + ' = None\n' + body
pymod = self.pycore.get_string_module(body, self.resource)
pyname = pymod[param]
finder = occurrences.create_finder(self.pycore, param, pyname)
result = rename.rename_in_module(finder, 'self.' + param,
pymodule=pymod)
body = result[result.index('\n') + 1:]
return body
def _get_init(self):
params = self._get_parameter_names()
indents = ' ' * sourceutils.get_indent(self.pycore)
if not params:
return ''
header = indents + 'def __init__(self'
body = ''
for arg in params:
new_name = arg
if arg == 'self':
new_name = 'host'
header += ', %s' % new_name
body += indents * 2 + 'self.%s = %s\n' % (arg, new_name)
header += '):'
return '%s\n%s\n' % (header, body)
def _get_parameter_names(self):
return self.pyfunction.get_param_names()
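# Illustrative before/after (a sketch inferred from get_changes and
# get_new_class above; the function and class names are made up).
# Applying the refactoring with classname='_Add' to
#
#     def add(a, b):
#         return a + b
#
# rewrites the function body and appends a method object class:
#
#     def add(a, b):
#         return _Add(a, b)()
#
#     class _Add(object):
#
#         def __init__(self, a, b):
#             self.a = a
#             self.b = b
#
#         def __call__(self):
#             return self.a + self.b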
| gpl-3.0 |
cikelengfeng/HTTPIDL | Sources/Compiler/SwiftTypeTransfer.py | 1 | 1558 | idl_to_swift_type = {'UINT32': 'UInt32', 'UINT64': 'UInt64', 'INT32': 'Int32', 'INT64': 'Int64', 'BOOL': 'Bool', 'DOUBLE': 'Double', 'STRING': 'String', 'FILE': 'HTTPFile', 'BLOB': 'HTTPData'}
def swift_base_type_name_from_idl_base_type(type_name):
if type_name in idl_to_swift_type:
builtin_type_name = idl_to_swift_type[type_name]
return builtin_type_name
return type_name
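# For example: 'UINT32' maps to 'UInt32' and 'STRING' to 'String', while a
# struct name not in the lookup table (say, an IDL struct 'User') is
# returned unchanged.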
def swift_type_name(idl_param_type_context):
base_type = idl_param_type_context.baseType()
if base_type is not None:
return swift_base_type_name(base_type)
else:
generic_type = idl_param_type_context.genericType()
dict_type = generic_type.dictGenericParam()
if dict_type is not None:
return swift_dict_type_name(dict_type)
else:
array_type = generic_type.arrayGenericParam()
return swift_array_type_name(array_type)
def swift_base_type_name(base_type_context):
struct_name = base_type_context.structName()
if struct_name is not None:
return struct_name.getText()
else:
return idl_to_swift_type[base_type_context.getText()]
def swift_dict_type_name(dict_param_context):
key_type = swift_base_type_name_from_idl_base_type(dict_param_context.baseType().getText())
value_type = swift_type_name(dict_param_context.paramType())
return '[' + key_type + ': ' + value_type + ']'
def swift_array_type_name(array_param_context):
element_type = swift_type_name(array_param_context.paramType())
return '[' + element_type + ']' | mit |
davenovak/mtasa-blue | vendor/google-breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/message_test.py | 253 | 15707 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import test_util
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
def IsPosInf(val):
return isinf(val) and (val > 0)
def IsNegInf(val):
return isinf(val) and (val < 0)
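# Quick sanity check of the helpers above (illustrative):
#
#     assert isnan(float('nan')) and not isnan(1.0)
#     assert IsPosInf(float('inf')) and IsNegInf(float('-inf'))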
class MessageTest(unittest.TestCase):
def testGoldenMessage(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertTrue(golden_message.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testGoldenExtensions(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertTrue(golden_message.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testGoldenPackedMessage(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEquals(all_set, golden_message)
self.assertTrue(all_set.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testGoldenPackedExtensions(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertTrue(all_set.SerializeToString() == golden_data)
golden_copy = copy.deepcopy(golden_message)
self.assertTrue(golden_copy.SerializeToString() == golden_data)
def testPositiveInfinity(self):
golden_data = ('\x5D\x00\x00\x80\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
'\xCD\x02\x00\x00\x80\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.optional_float))
self.assertTrue(IsPosInf(golden_message.optional_double))
self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNegativeInfinity(self):
golden_data = ('\x5D\x00\x00\x80\xFF'
'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
'\xCD\x02\x00\x00\x80\xFF'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.optional_float))
self.assertTrue(IsNegInf(golden_message.optional_double))
self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNotANumber(self):
golden_data = ('\x5D\x00\x00\xC0\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
'\xCD\x02\x00\x00\xC0\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.optional_float))
self.assertTrue(isnan(golden_message.optional_double))
self.assertTrue(isnan(golden_message.repeated_float[0]))
self.assertTrue(isnan(golden_message.repeated_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testPositiveInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.packed_float[0]))
self.assertTrue(IsPosInf(golden_message.packed_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNegativeInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testNotANumberPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
self.assertTrue(golden_message.SerializeToString() == golden_data)
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
  def testExtremeDoubleValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testSortingRepeatedScalarFieldsDefaultComparator(self):
"""Check some different types with the default comparator."""
message = unittest_pb2.TestAllTypes()
    # TODO(mattp): would testing more scalar types strengthen the test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
message.repeated_bytes.append('a')
message.repeated_bytes.append('c')
message.repeated_bytes.append('b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], 'a')
self.assertEqual(message.repeated_bytes[1], 'b')
self.assertEqual(message.repeated_bytes[2], 'c')
def testSortingRepeatedScalarFieldsCustomComparator(self):
"""Check some different types with custom comparator."""
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y)))
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(lambda x,y: cmp(len(x), len(y)))
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self):
"""Check passing a custom comparator to sort a repeated composite field."""
message = unittest_pb2.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
wohnsinn2/Radicale | radicale/storage/filesystem.py | 4 | 4250 | # -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2012-2015 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Filesystem storage backend.
"""
import codecs
import os
import posixpath
import json
import time
import sys
from contextlib import contextmanager
from .. import config, ical
FOLDER = os.path.expanduser(config.get("storage", "filesystem_folder"))
FILESYSTEM_ENCODING = sys.getfilesystemencoding()
try:
from dulwich.repo import Repo
GIT_REPOSITORY = Repo(FOLDER)
except Exception:
GIT_REPOSITORY = None
# This function overrides the builtin ``open`` function for this module
# pylint: disable=W0622
@contextmanager
def open(path, mode="r"):
"""Open a file at ``path`` with encoding set in the configuration."""
# On enter
abs_path = os.path.join(FOLDER, path.replace("/", os.sep))
with codecs.open(abs_path, mode, config.get("encoding", "stock")) as fd:
yield fd
# On exit
if GIT_REPOSITORY and mode == "w":
path = os.path.relpath(abs_path, FOLDER)
GIT_REPOSITORY.stage([path])
committer = config.get("git", "committer")
GIT_REPOSITORY.do_commit(path, committer=committer)
# pylint: enable=W0622
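# Example (a minimal sketch, assuming a configured FOLDER and an existing git
# repository): writes through this helper are staged and committed
# automatically on exit. The path and content below are illustrative only.
#
#     with open("bob/calendar.ics", "w") as fd:
#         fd.write(ical_text)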
class Collection(ical.Collection):
"""Collection stored in a flat ical file."""
@property
def _path(self):
"""Absolute path of the file at local ``path``."""
return os.path.join(FOLDER, self.path.replace("/", os.sep))
@property
def _props_path(self):
"""Absolute path of the file storing the collection properties."""
return self._path + ".props"
def _create_dirs(self):
"""Create folder storing the collection if absent."""
if not os.path.exists(os.path.dirname(self._path)):
os.makedirs(os.path.dirname(self._path))
def save(self, text):
self._create_dirs()
with open(self._path, "w") as fd:
fd.write(text)
def delete(self):
os.remove(self._path)
os.remove(self._props_path)
@property
def text(self):
try:
with open(self._path) as fd:
return fd.read()
except IOError:
return ""
@classmethod
def children(cls, path):
abs_path = os.path.join(FOLDER, path.replace("/", os.sep))
_, directories, files = next(os.walk(abs_path))
for filename in directories + files:
rel_filename = posixpath.join(path, filename)
if cls.is_node(rel_filename) or cls.is_leaf(rel_filename):
yield cls(rel_filename)
@classmethod
def is_node(cls, path):
abs_path = os.path.join(FOLDER, path.replace("/", os.sep))
return os.path.isdir(abs_path)
@classmethod
def is_leaf(cls, path):
abs_path = os.path.join(FOLDER, path.replace("/", os.sep))
return os.path.isfile(abs_path) and not abs_path.endswith(".props")
@property
def last_modified(self):
modification_time = time.gmtime(os.path.getmtime(self._path))
return time.strftime("%a, %d %b %Y %H:%M:%S +0000", modification_time)
@property
@contextmanager
def props(self):
# On enter
properties = {}
if os.path.exists(self._props_path):
with open(self._props_path) as prop_file:
properties.update(json.load(prop_file))
old_properties = properties.copy()
yield properties
# On exit
self._create_dirs()
if old_properties != properties:
with open(self._props_path, "w") as prop_file:
json.dump(properties, prop_file)
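# Example (sketch, hypothetical collection path): mutations made inside the
# ``props`` context are written back to the ".props" JSON file on exit.
#
#     collection = Collection("bob/calendar.ics")
#     with collection.props as props:
#         props["tag"] = "VCALENDAR"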
| gpl-3.0 |
huanpc/IoT-1 | gui/controller/.venv/lib/python3.5/site-packages/pip/_vendor/lockfile/__init__.py | 536 | 9371 | # -*- coding: utf-8 -*-
"""
lockfile.py - Platform-independent advisory file locks.
Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.
Usage:
>>> lock = LockFile('somefile')
>>> try:
... lock.acquire()
... except AlreadyLocked:
... print 'somefile', 'is locked already.'
... except LockFailed:
... print 'somefile', 'can\\'t be locked.'
... else:
... print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()
>>> lock = LockFile('somefile')
>>> print lock.is_locked()
False
>>> with lock:
... print lock.is_locked()
True
>>> print lock.is_locked()
False
>>> lock = LockFile('somefile')
>>> # It is okay to lock twice from the same thread...
>>> with lock:
... lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False
Exceptions:
Error - base class for other exceptions
LockError - base class for all locking exceptions
AlreadyLocked - Another thread or process already holds the lock
LockFailed - Lock failed for some other reason
UnlockError - base class for all unlocking exceptions
AlreadyUnlocked - File was not locked.
NotMyLock - File was locked but not by the current thread/process
"""
from __future__ import absolute_import
import functools
import os
import socket
import threading
import warnings
# Work with PEP8 and non-PEP8 versions of threading module.
if not hasattr(threading, "current_thread"):
threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, "get_name"):
threading.Thread.get_name = threading.Thread.getName
__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock',
'LockBase', 'locked']
class Error(Exception):
"""
Base class for other exceptions.
>>> try:
... raise Error
... except Exception:
... pass
"""
pass
class LockError(Error):
"""
Base class for error arising from attempts to acquire the lock.
>>> try:
... raise LockError
... except Error:
... pass
"""
pass
class LockTimeout(LockError):
"""Raised when lock creation fails within a user-defined period of time.
>>> try:
... raise LockTimeout
... except LockError:
... pass
"""
pass
class AlreadyLocked(LockError):
"""Some other thread/process is locking the file.
>>> try:
... raise AlreadyLocked
... except LockError:
... pass
"""
pass
class LockFailed(LockError):
"""Lock file creation failed for some other reason.
>>> try:
... raise LockFailed
... except LockError:
... pass
"""
pass
class UnlockError(Error):
"""
Base class for errors arising from attempts to release the lock.
>>> try:
... raise UnlockError
... except Error:
... pass
"""
pass
class NotLocked(UnlockError):
"""Raised when an attempt is made to unlock an unlocked file.
>>> try:
... raise NotLocked
... except UnlockError:
... pass
"""
pass
class NotMyLock(UnlockError):
"""Raised when an attempt is made to unlock a file someone else locked.
>>> try:
... raise NotMyLock
... except UnlockError:
... pass
"""
pass
class _SharedBase(object):
def __init__(self, path):
self.path = path
def acquire(self, timeout=None):
"""
Acquire the lock.
* If timeout is omitted (or None), wait forever trying to lock the
file.
* If timeout > 0, try to acquire the lock for that many seconds. If
the lock period expires and the file is still locked, raise
LockTimeout.
* If timeout <= 0, raise AlreadyLocked immediately if the file is
already locked.
"""
raise NotImplemented("implement in subclass")
def release(self):
"""
Release the lock.
If the file is not locked, raise NotLocked.
"""
raise NotImplemented("implement in subclass")
def __enter__(self):
"""
Context manager support.
"""
self.acquire()
return self
def __exit__(self, *_exc):
"""
Context manager support.
"""
self.release()
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.path)
class LockBase(_SharedBase):
"""Base class for platform-specific lock classes."""
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = LockBase('somefile')
>>> lock = LockBase('somefile', threaded=False)
"""
super(LockBase, self).__init__(path)
self.lock_file = os.path.abspath(path) + ".lock"
self.hostname = socket.gethostname()
self.pid = os.getpid()
if threaded:
t = threading.current_thread()
# Thread objects in Python 2.4 and earlier do not have ident
# attrs. Worm around that.
ident = getattr(t, "ident", hash(t))
self.tname = "-%x" % (ident & 0xffffffff)
else:
self.tname = ""
dirname = os.path.dirname(self.lock_file)
# unique name is mostly about the current process, but must
# also contain the path -- otherwise, two adjacent locked
# files conflict (one file gets locked, creating lock-file and
# unique file, the other one gets locked, creating lock-file
# and overwriting the already existing lock-file, then one
# gets unlocked, deleting both lock-file and unique file,
        # finally the last lock errors out upon releasing).
self.unique_name = os.path.join(dirname,
"%s%s.%s%s" % (self.hostname,
self.tname,
self.pid,
hash(self.path)))
self.timeout = timeout
def is_locked(self):
"""
Tell whether or not the file is locked.
"""
raise NotImplemented("implement in subclass")
def i_am_locking(self):
"""
Return True if this object is locking the file.
"""
raise NotImplemented("implement in subclass")
def break_lock(self):
"""
Remove a lock. Useful if a locking thread failed to unlock.
"""
raise NotImplemented("implement in subclass")
def __repr__(self):
return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
self.path)
def _fl_helper(cls, mod, *args, **kwds):
warnings.warn("Import from %s module instead of lockfile package" % mod,
DeprecationWarning, stacklevel=2)
    # This is a bit funky, but it's only for a while. The way the unit tests
# are constructed this function winds up as an unbound method, so it
# actually takes three args, not two. We want to toss out self.
if not isinstance(args[0], str):
# We are testing, avoid the first arg
args = args[1:]
if len(args) == 1 and not kwds:
kwds["threaded"] = True
return cls(*args, **kwds)
def LinkFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import LinkLockFile from the
lockfile.linklockfile module.
"""
from . import linklockfile
return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
*args, **kwds)
def MkdirFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import MkdirLockFile from the
lockfile.mkdirlockfile module.
"""
from . import mkdirlockfile
return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
*args, **kwds)
def SQLiteFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import SQLiteLockFile from the
lockfile.mkdirlockfile module.
"""
from . import sqlitelockfile
return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
*args, **kwds)
def locked(path, timeout=None):
"""Decorator which enables locks for decorated function.
Arguments:
- path: path for lockfile.
- timeout (optional): Timeout for acquiring lock.
Usage:
@locked('/var/run/myname', timeout=0)
def myname(...):
...
"""
def decor(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
lock = FileLock(path, timeout=timeout)
lock.acquire()
try:
return func(*args, **kwargs)
finally:
lock.release()
return wrapper
return decor
if hasattr(os, "link"):
from . import linklockfile as _llf
LockFile = _llf.LinkLockFile
else:
from . import mkdirlockfile as _mlf
LockFile = _mlf.MkdirLockFile
FileLock = LockFile
| mit |
starakaj/wikisonnet | src/precomputer.py | 1 | 4434 | import db.dbconnect as dbconnect
import db.dbreader as dbreader
import wikibard.wikibard as wikibard
import mysql.connector
import argparse
import random
import copy
from multiprocessing import Process
parser = argparse.ArgumentParser(description="Precompute a shitload of wikipoems")
parser.add_argument('dbconfig', type=str, help="name of the database configuration in dbconfig.yml")
parser.add_argument('--remote', type=str, help="name of the remote database configuration")
parser.add_argument('--processes', action='store', type=int, default=1, help="Number of separate processes to run")
args = parser.parse_args()
dbconfig_name = args.dbconfig
remote_dbconfig_name = args.dbconfig
if args.remote:
remote_dbconfig_name = args.remote
is_setup = False
top_dog_count = 100000
top_dogs = []
dbconfig = dbconnect.MySQLDatabaseConnection.dbconfigForName(dbconfig_name)
remoteconfig = dbconnect.MySQLDatabaseConnection.dbconfigForName(remote_dbconfig_name)
def setup():
global top_dogs
global is_setup
conn = mysql.connector.connect(user=dbconfig['user'],
password=dbconfig['password'],
host=dbconfig['host'],
database=dbconfig['database'])
cursor = conn.cursor()
query = (
"""SELECT view_counts.id FROM view_counts INNER JOIN page_categories"""
""" ON page_categories.id = view_counts.id WHERE view_counts.count < 202917"""
""" ORDER BY view_counts.count DESC LIMIT %s;"""
)
values = (top_dog_count, )
cursor.execute(query, values)
res = cursor.fetchall()
top_dogs = [r[0] for r in res]
is_setup = True
conn.close()
def composeSlave(dbconfig, top_pages, remoteconfig):
while True:
random.shuffle(top_pages)
for page_id in top_pages:
writeNewPoemForArticle(dbconfig, remoteconfig, page_id)
def writePoem(dbconfig, page_id, poem_id, remoteconfig):
## Write the poem
poem = wikibard.poemForPageID(page_id, 'elizabethan', dbconfig)
if None in poem:
print "Error printing poem"
return
print(poem)
line_ids = [line['id'] for line in poem]
## Store the poem
conn = mysql.connector.connect(user=remoteconfig['user'],
password=remoteconfig['password'],
host=remoteconfig['host'],
database=remoteconfig['database'])
cursor = conn.cursor()
query = (
"""UPDATE cached_poems SET"""
""" line_0=%s, line_1=%s, line_2=%s, line_3=%s,"""
""" line_4=%s, line_5=%s, line_6=%s, line_7=%s,"""
""" line_8=%s, line_9=%s, line_10=%s, line_11=%s,"""
""" line_12=%s, line_13=%s, complete=1"""
""" WHERE id=%s;"""
)
values = tuple(line_ids + [poem_id])
cursor.execute(query, values)
cursor.execute("""COMMIT;""")
conn.close()
def writeNewPoemForArticle(dbconfig, remoteconfig, pageID=21):
    ## Create the row for the cached poem
write_conn = mysql.connector.connect(user=remoteconfig['user'],
password=remoteconfig['password'],
host=remoteconfig['host'],
database=remoteconfig['database'])
cursor = write_conn.cursor()
query = """INSERT INTO cached_poems (page_id) VALUES (%s);"""
values = (pageID,)
cursor.execute(query, values)
cursor.execute("""COMMIT;""");
query = """SELECT LAST_INSERT_ID();"""
cursor.execute(query)
res = cursor.fetchall()
poem_id = res[0][0]
write_conn.close()
## Create the return dictionary
d = {}
d['complete'] = 0
d['starting_page'] = pageID
d['id'] = poem_id
## Write the poem
writePoem(dbconfig, pageID, poem_id, remoteconfig)
return d
if __name__ == '__main__':
setup()
pool = []
if args.processes>1:
for i in range(args.processes):
p = Process(target=composeSlave, args=(dbconfig, copy.deepcopy(top_dogs), remoteconfig))
pool.append(p)
p.start()
try:
for p in pool:
p.join()
except Exception as e:
print e
for p in pool:
p.terminate()
else:
composeSlave(dbconfig, copy.deepcopy(top_dogs), remoteconfig)
| mit |
o5k/openerp-oemedical-v0.1 | openerp/addons/report_webkit/ir_report.py | 49 | 5990 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
from openerp import netsvc
from webkit_report import WebKitParser
from openerp.report.report_sxw import rml_parse
def register_report(name, model, tmpl_path, parser=rml_parse):
"""Register the report into the services"""
name = 'report.%s' % name
if netsvc.Service._services.get(name, False):
service = netsvc.Service._services[name]
if isinstance(service, WebKitParser):
            # already instantiated properly, skip it
return
if hasattr(service, 'parser'):
parser = service.parser
del netsvc.Service._services[name]
WebKitParser(name, model, tmpl_path, parser=parser)
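# Example (a sketch with hypothetical values): registering a webkit report
# service directly, mirroring what ReportXML.register_all() below does for
# each stored ``ir.actions.report.xml`` record.
#
#     register_report('sale.order.webkit', 'sale.order',
#                     'addons/my_module/report/sale_order.mako')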
class ReportXML(osv.osv):
def __init__(self, pool, cr):
super(ReportXML, self).__init__(pool, cr)
def register_all(self,cursor):
value = super(ReportXML, self).register_all(cursor)
cursor.execute("SELECT * FROM ir_act_report_xml WHERE report_type = 'webkit'")
records = cursor.dictfetchall()
for record in records:
register_report(record['report_name'], record['model'], record['report_rml'])
return value
def unlink(self, cursor, user, ids, context=None):
"""Delete report and unregister it"""
trans_obj = self.pool.get('ir.translation')
trans_ids = trans_obj.search(
cursor,
user,
[('type', '=', 'report'), ('res_id', 'in', ids)]
)
trans_obj.unlink(cursor, user, trans_ids)
# Warning: we cannot unregister the services at the moment
# because they are shared across databases. Calling a deleted
# report will fail so it's ok.
res = super(ReportXML, self).unlink(
cursor,
user,
ids,
context
)
return res
def create(self, cursor, user, vals, context=None):
"Create report and register it"
res = super(ReportXML, self).create(cursor, user, vals, context)
if vals.get('report_type','') == 'webkit':
# I really look forward to virtual functions :S
register_report(
vals['report_name'],
vals['model'],
vals.get('report_rml', False)
)
return res
def write(self, cr, uid, ids, vals, context=None):
"Edit report and manage it registration"
if isinstance(ids, (int, long)):
ids = [ids,]
for rep in self.browse(cr, uid, ids, context=context):
if rep.report_type != 'webkit':
continue
if vals.get('report_name', False) and \
vals['report_name'] != rep.report_name:
report_name = vals['report_name']
else:
report_name = rep.report_name
register_report(
report_name,
vals.get('model', rep.model),
vals.get('report_rml', rep.report_rml)
)
res = super(ReportXML, self).write(cr, uid, ids, vals, context)
return res
_name = 'ir.actions.report.xml'
_inherit = 'ir.actions.report.xml'
_columns = {
'webkit_header': fields.property(
'ir.header_webkit',
type='many2one',
relation='ir.header_webkit',
string='Webkit Header',
help="The header linked to the report",
view_load=True,
required=True
),
'webkit_debug' : fields.boolean('Webkit debug', help="Enable the webkit engine debugger"),
'report_webkit_data': fields.text('Webkit Template', help="This template will be used if the main report file is not found"),
        'precise_mode': fields.boolean('Precise Mode', help='This mode allows more precise '
                                       'element positioning, as each object is printed on a '
                                       'separate HTML page, but memory and disk usage are higher.')
}
ReportXML()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
spvkgn/youtube-dl | youtube_dl/extractor/normalboots.py | 67 | 2181 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .jwplatform import JWPlatformIE
from ..utils import (
unified_strdate,
)
class NormalbootsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?normalboots\.com/video/(?P<id>[0-9a-z-]*)/?$'
_TEST = {
'url': 'http://normalboots.com/video/home-alone-games-jontron/',
'info_dict': {
'id': 'home-alone-games-jontron',
'ext': 'mp4',
'title': 'Home Alone Games - JonTron - NormalBoots',
'description': 'Jon is late for Christmas. Typical. Thanks to: Paul Ritchey for Co-Writing/Filming: http://www.youtube.com/user/ContinueShow Michael Azzi for Christmas Intro Animation: http://michafrar.tumblr.com/ Jerrod Waters for Christmas Intro Music: http://www.youtube.com/user/xXJerryTerryXx Casey Ormond for ‘Tense Battle Theme’:\xa0http://www.youtube.com/Kiamet/',
'uploader': 'JonTron',
'upload_date': '20140125',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['JWPlatform'],
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_uploader = self._html_search_regex(
r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>',
webpage, 'uploader', fatal=False)
video_upload_date = unified_strdate(self._html_search_regex(
r'<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>',
webpage, 'date', fatal=False))
jwplatform_url = JWPlatformIE._extract_url(webpage)
return {
'_type': 'url_transparent',
'id': video_id,
'url': jwplatform_url,
'ie_key': JWPlatformIE.ie_key(),
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'uploader': video_uploader,
'upload_date': video_upload_date,
}
| unlicense |
fengyuanjs/catawampus | tr/vendor/tornado/demos/blog/blog.py | 8 | 6931 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import markdown
import os.path
import re
import tornado.auth
import tornado.database
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import unicodedata
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
define("mysql_host", default="127.0.0.1:3306", help="blog database host")
define("mysql_database", default="blog", help="blog database name")
define("mysql_user", default="blog", help="blog database user")
define("mysql_password", default="blog", help="blog database password")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", HomeHandler),
(r"/archive", ArchiveHandler),
(r"/feed", FeedHandler),
(r"/entry/([^/]+)", EntryHandler),
(r"/compose", ComposeHandler),
(r"/auth/login", AuthLoginHandler),
(r"/auth/logout", AuthLogoutHandler),
]
settings = dict(
blog_title=u"Tornado Blog",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
ui_modules={"Entry": EntryModule},
xsrf_cookies=True,
cookie_secret="11oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
login_url="/auth/login",
autoescape=None,
)
tornado.web.Application.__init__(self, handlers, **settings)
# Have one global connection to the blog DB across all handlers
self.db = tornado.database.Connection(
host=options.mysql_host, database=options.mysql_database,
user=options.mysql_user, password=options.mysql_password)
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get_current_user(self):
user_id = self.get_secure_cookie("user")
if not user_id: return None
return self.db.get("SELECT * FROM authors WHERE id = %s", int(user_id))
class HomeHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC LIMIT 5")
if not entries:
self.redirect("/compose")
return
self.render("home.html", entries=entries)
class EntryHandler(BaseHandler):
def get(self, slug):
entry = self.db.get("SELECT * FROM entries WHERE slug = %s", slug)
if not entry: raise tornado.web.HTTPError(404)
self.render("entry.html", entry=entry)
class ArchiveHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC")
self.render("archive.html", entries=entries)
class FeedHandler(BaseHandler):
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY published "
"DESC LIMIT 10")
self.set_header("Content-Type", "application/atom+xml")
self.render("feed.xml", entries=entries)
class ComposeHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
id = self.get_argument("id", None)
entry = None
if id:
entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id))
self.render("compose.html", entry=entry)
@tornado.web.authenticated
def post(self):
id = self.get_argument("id", None)
title = self.get_argument("title")
text = self.get_argument("markdown")
html = markdown.markdown(text)
if id:
entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id))
if not entry: raise tornado.web.HTTPError(404)
slug = entry.slug
self.db.execute(
"UPDATE entries SET title = %s, markdown = %s, html = %s "
"WHERE id = %s", title, text, html, int(id))
else:
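            # Derive a URL slug: strip accents, drop non-word characters,
            # lowercase and hyphenate, then append "-2" until it is unique.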
slug = unicodedata.normalize("NFKD", title).encode(
"ascii", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug: slug = "entry"
while True:
e = self.db.get("SELECT * FROM entries WHERE slug = %s", slug)
if not e: break
slug += "-2"
self.db.execute(
"INSERT INTO entries (author_id,title,slug,markdown,html,"
"published) VALUES (%s,%s,%s,%s,%s,UTC_TIMESTAMP())",
self.current_user.id, title, slug, text, html)
self.redirect("/entry/" + slug)
class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
author = self.db.get("SELECT * FROM authors WHERE email = %s",
user["email"])
if not author:
# Auto-create first author
any_author = self.db.get("SELECT * FROM authors LIMIT 1")
if not any_author:
author_id = self.db.execute(
"INSERT INTO authors (email,name) VALUES (%s,%s)",
user["email"], user["name"])
else:
self.redirect("/")
return
else:
author_id = author["id"]
self.set_secure_cookie("user", str(author_id))
self.redirect(self.get_argument("next", "/"))
class AuthLogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("user")
self.redirect(self.get_argument("next", "/"))
class EntryModule(tornado.web.UIModule):
def render(self, entry):
return self.render_string("modules/entry.html", entry=entry)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| apache-2.0 |
valentin-krasontovitsch/ansible | test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py | 51 | 6830 | import pytest
import unittest
import ansible.modules.cloud.amazon.ec2_vpc_nat_gateway as ng
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")
aws_region = 'us-west-2'
class AnsibleEc2VpcNatGatewayFunctions(unittest.TestCase):
def test_get_nat_gateways(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, stream = (
ng.get_nat_gateways(client, 'subnet-123456789', check_mode=True)
)
should_return = ng.DRY_RUN_GATEWAYS
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_get_nat_gateways_no_gateways_found(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, stream = (
ng.get_nat_gateways(client, 'subnet-1234567', check_mode=True)
)
self.assertTrue(success)
self.assertEqual(stream, [])
def test_wait_for_status(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, gws = (
ng.wait_for_status(
client, 5, 'nat-123456789', 'available', check_mode=True
)
)
should_return = ng.DRY_RUN_GATEWAYS[0]
self.assertTrue(success)
self.assertEqual(gws, should_return)
def test_wait_for_status_to_timeout(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, gws = (
ng.wait_for_status(
client, 2, 'nat-12345678', 'available', check_mode=True
)
)
self.assertFalse(success)
self.assertEqual(gws, {})
def test_gateway_in_subnet_exists_with_allocation_id(self):
client = boto3.client('ec2', region_name=aws_region)
gws, err_msg = (
ng.gateway_in_subnet_exists(
client, 'subnet-123456789', 'eipalloc-1234567', check_mode=True
)
)
should_return = ng.DRY_RUN_GATEWAYS
self.assertEqual(gws, should_return)
def test_gateway_in_subnet_exists_with_allocation_id_does_not_exist(self):
client = boto3.client('ec2', region_name=aws_region)
gws, err_msg = (
ng.gateway_in_subnet_exists(
client, 'subnet-123456789', 'eipalloc-123', check_mode=True
)
)
should_return = list()
self.assertEqual(gws, should_return)
def test_gateway_in_subnet_exists_without_allocation_id(self):
client = boto3.client('ec2', region_name=aws_region)
gws, err_msg = (
ng.gateway_in_subnet_exists(
client, 'subnet-123456789', check_mode=True
)
)
should_return = ng.DRY_RUN_GATEWAYS
self.assertEqual(gws, should_return)
def test_get_eip_allocation_id_by_address(self):
client = boto3.client('ec2', region_name=aws_region)
allocation_id, _ = (
ng.get_eip_allocation_id_by_address(
client, '55.55.55.55', check_mode=True
)
)
should_return = 'eipalloc-1234567'
self.assertEqual(allocation_id, should_return)
def test_get_eip_allocation_id_by_address_does_not_exist(self):
client = boto3.client('ec2', region_name=aws_region)
allocation_id, err_msg = (
ng.get_eip_allocation_id_by_address(
client, '52.52.52.52', check_mode=True
)
)
self.assertEqual(err_msg, 'EIP 52.52.52.52 does not exist')
self.assertTrue(allocation_id is None)
def test_allocate_eip_address(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, eip_id = (
ng.allocate_eip_address(
client, check_mode=True
)
)
self.assertTrue(success)
def test_release_address(self):
client = boto3.client('ec2', region_name=aws_region)
success, _ = (
ng.release_address(
client, 'eipalloc-1234567', check_mode=True
)
)
self.assertTrue(success)
def test_create(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.create(
client, 'subnet-123456', 'eipalloc-1234567', check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
def test_pre_create(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456', check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
    def test_pre_create_idempotent_with_allocation_id(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456789', allocation_id='eipalloc-1234567', check_mode=True
)
)
self.assertTrue(success)
self.assertFalse(changed)
    def test_pre_create_idempotent_with_eip_address(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456789', eip_address='55.55.55.55', check_mode=True
)
)
self.assertTrue(success)
self.assertFalse(changed)
    def test_pre_create_idempotent_if_exist_do_not_create(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456789', if_exist_do_not_create=True, check_mode=True
)
)
self.assertTrue(success)
self.assertFalse(changed)
def test_delete(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, _ = (
ng.remove(
client, 'nat-123456789', check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
def test_delete_and_release_ip(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, _ = (
ng.remove(
client, 'nat-123456789', release_eip=True, check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
def test_delete_if_does_not_exist(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, _ = (
ng.remove(
client, 'nat-12345', check_mode=True
)
)
self.assertFalse(success)
self.assertFalse(changed)
| gpl-3.0 |
nkgilley/home-assistant | homeassistant/components/ios/notify.py | 6 | 3314 | """Support for iOS push notifications."""
import logging
import requests
from homeassistant.components import ios
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
PUSH_URL = "https://ios-push.home-assistant.io/push"
# pylint: disable=invalid-name
def log_rate_limits(hass, target, resp, level=20):
"""Output rate limit log line at given level."""
rate_limits = resp["rateLimits"]
resetsAt = dt_util.parse_datetime(rate_limits["resetsAt"])
resetsAtTime = resetsAt - dt_util.utcnow()
rate_limit_msg = (
"iOS push notification rate limits for %s: "
"%d sent, %d allowed, %d errors, "
"resets in %s"
)
_LOGGER.log(
level,
rate_limit_msg,
ios.device_name_for_push_id(hass, target),
rate_limits["successful"],
rate_limits["maximum"],
rate_limits["errors"],
str(resetsAtTime).split(".")[0],
)
def get_service(hass, config, discovery_info=None):
"""Get the iOS notification service."""
if "notify.ios" not in hass.config.components:
# Need this to enable requirements checking in the app.
hass.config.components.add("notify.ios")
if not ios.devices_with_push(hass):
return None
return iOSNotificationService()
class iOSNotificationService(BaseNotificationService):
"""Implement the notification service for iOS."""
def __init__(self):
"""Initialize the service."""
@property
def targets(self):
"""Return a dictionary of registered targets."""
return ios.devices_with_push(self.hass)
def send_message(self, message="", **kwargs):
"""Send a message to the Lambda APNS gateway."""
data = {ATTR_MESSAGE: message}
if kwargs.get(ATTR_TITLE) is not None:
# Remove default title from notifications.
if kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT:
data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = ios.enabled_push_ids(self.hass)
if kwargs.get(ATTR_DATA) is not None:
data[ATTR_DATA] = kwargs.get(ATTR_DATA)
for target in targets:
if target not in ios.enabled_push_ids(self.hass):
_LOGGER.error("The target (%s) does not exist in .ios.conf", targets)
return
data[ATTR_TARGET] = target
req = requests.post(PUSH_URL, json=data, timeout=10)
if req.status_code != 201:
fallback_error = req.json().get("errorMessage", "Unknown error")
fallback_message = (
f"Internal server error, please try again later: {fallback_error}"
)
message = req.json().get("message", fallback_message)
if req.status_code == 429:
_LOGGER.warning(message)
log_rate_limits(self.hass, target, req.json(), 30)
else:
_LOGGER.error(message)
else:
log_rate_limits(self.hass, target, req.json())
| apache-2.0 |
joker946/nova | nova/tests/unit/virt/hyperv/db_fakes.py | 48 | 5464 | # Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import uuid
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
from nova.virt.hyperv import constants
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
'id': 1,
'uuid': str(uuid.uuid4()),
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'flavor':
{'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 1024,
'flavorid': 1,
'rxtx_factor': 1}
}
def get_fake_image_data(project_id, user_id):
return {'name': 'image1',
'id': 1,
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'flavor': 'm1.tiny',
'properties': {
constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_1}
}
def get_fake_volume_info_data(target_portal, volume_id):
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': volume_id,
'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
'target_portal': target_portal,
'target_lun': 1,
'auth_method': 'CHAP',
'auth_username': 'fake_username',
'auth_password': 'fake_password',
'target_discovered': False,
},
'mount_device': 'vda',
'delete_on_termination': False
}
def get_fake_block_device_info(target_portal, volume_id):
connection_info = get_fake_volume_info_data(target_portal, volume_id)
return {'block_device_mapping': [{'connection_info': connection_info}],
'root_device_name': 'fake_root_device_name',
'ephemerals': [],
'swap': None
}
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
FLAVORS = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def get(self, key, default=None):
if key in self.values:
return self.values[key]
else:
return default
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.values[key] = value
def __str__(self):
return str(self.values)
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
if 'flavor' not in values:
return
flavor = values['flavor']
base_options = {
'name': values['name'],
'id': values['id'],
'uuid': str(uuid.uuid4()),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'vm_state': vm_states.BUILDING,
'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'flavor': flavor,
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': flavor['root_gb'],
'system_metadata': {'image_shutdown_timeout': 0},
}
return FakeModel(base_options)
def fake_flavor_get_all(context, inactive=0, filters=None):
return FLAVORS.values()
def fake_flavor_get_by_name(context, name):
return FLAVORS[name]
def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
return {}
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'flavor_get_all', fake_flavor_get_all)
stubs.Set(db, 'flavor_get_by_name', fake_flavor_get_by_name)
stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
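# Example (sketch, hypothetical test body): install the fakes, then create an
# instance through the stubbed db API.
#
#     stub_out_db_instance_api(self.stubs)
#     instance = db.instance_create(
#         context, get_fake_instance_data('vm1', 'project1', 'user1'))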
| apache-2.0 |
Scarygami/gae-gcs-push2deploy-secrets | lib/apiclient/http.py | 102 | 52847 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import logging
import mimeparse
import mimetypes
import os
import random
import sys
import time
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import InvalidChunkSizeError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client import util
from oauth2client.anyjson import simplejson
DEFAULT_CHUNK_SIZE = 512*1024
MAX_URI_LENGTH = 2048
class MediaUploadProgress(object):
"""Status of a resumable upload."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes sent so far.
total_size: int, total bytes in complete upload, or None if the total
upload size isn't known ahead of time.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
def progress(self):
"""Percent of upload completed, as a float.
Returns:
the percentage complete as a float, returning 0.0 if the total size of
the upload is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
class MediaDownloadProgress(object):
"""Status of a resumable download."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes received so far.
total_size: int, total bytes in complete download.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
def progress(self):
"""Percent of download completed, as a float.
Returns:
the percentage complete as a float, returning 0.0 if the total size of
the download is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
class MediaUpload(object):
"""Describes a media object to upload.
Base class that defines the interface of MediaUpload subclasses.
Note that subclasses of MediaUpload may allow you to control the chunksize
when uploading a media object. It is important to keep the size of the chunk
as large as possible to keep the upload efficient. Other factors may influence
the size of the chunk you use, particularly if you are working in an
environment where individual HTTP requests may have a hardcoded time limit,
such as under certain classes of requests under Google App Engine.
Streams are io.Base compatible objects that support seek(). Some MediaUpload
subclasses support using streams directly to upload data. Support for
streaming may be indicated by a MediaUpload sub-class and if appropriate for a
platform that stream will be used for uploading the media object. The support
for streaming is indicated by has_stream() returning True. The stream() method
should return an io.Base object that supports seek(). On platforms where the
underlying httplib module supports streaming, for example Python 2.6 and
later, the stream will be passed into the http library which will result in
less memory being used and possibly faster uploads.
If you need to upload media that can't be uploaded using any of the existing
MediaUpload sub-class then you can sub-class MediaUpload for your particular
needs.
"""
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
raise NotImplementedError()
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return 'application/octet-stream'
def size(self):
"""Size of upload.
Returns:
      Size of the body, or None if the size is unknown.
"""
return None
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return False
  def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
A string of bytes read. May be shorter than length if EOF was reached
first.
"""
raise NotImplementedError()
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.Base
subclass.
"""
return False
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
raise NotImplementedError()
@util.positional(1)
def _to_json(self, strip=None):
"""Utility function for creating a JSON representation of a MediaUpload.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
if strip is not None:
for member in strip:
del d[member]
d['_class'] = t.__name__
d['_module'] = t.__module__
return simplejson.dumps(d)
def to_json(self):
"""Create a JSON representation of an instance of MediaUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json()
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a MediaUpload subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of MediaUpload that was serialized with
to_json().
"""
data = simplejson.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
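# Example (a minimal sketch): the interface a custom MediaUpload subclass is
# expected to provide, per the class docstring above; names are illustrative.
#
#     class BytesUpload(MediaUpload):
#         def __init__(self, data, mimetype):
#             self._data = data
#             self._mimetype = mimetype
#         def chunksize(self):
#             return DEFAULT_CHUNK_SIZE
#         def mimetype(self):
#             return self._mimetype
#         def size(self):
#             return len(self._data)
#         def getbytes(self, begin, length):
#             return self._data[begin:begin + length]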
class MediaIoBaseUpload(MediaUpload):
"""A MediaUpload for a io.Base objects.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
fh = io.BytesIO('...Some data to upload...')
media = MediaIoBaseUpload(fh, mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(3)
def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
fd: io.Base or file object, The source of the bytes to upload. MUST be
opened in blocking mode, do not use streams opened in non-blocking mode.
The given stream must be seekable, that is, it must be able to call
seek() on fd.
mimetype: string, Mime-type of the file.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded as a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
super(MediaIoBaseUpload, self).__init__()
self._fd = fd
self._mimetype = mimetype
if not (chunksize == -1 or chunksize > 0):
raise InvalidChunkSizeError()
self._chunksize = chunksize
self._resumable = resumable
self._fd.seek(0, os.SEEK_END)
self._size = self._fd.tell()
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
return self._chunksize
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return self._mimetype
def size(self):
"""Size of upload.
Returns:
      Size of the body, or None if the size is unknown.
"""
return self._size
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return self._resumable
def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
      A string of bytes read. May be shorter than length if EOF was reached
first.
"""
self._fd.seek(begin)
return self._fd.read(length)
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.Base
subclass.
"""
return True
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
return self._fd
def to_json(self):
"""This upload type is not serializable."""
raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaFileUpload(MediaIoBaseUpload):
"""A MediaUpload for a file.
Construct a MediaFileUpload and pass as the media_body parameter of the
method. For example, if we had a service that allowed uploading images:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(2)
def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
filename: string, Name of the file.
mimetype: string, Mime-type of the file. If None then a mime-type will be
guessed from the file extension.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded in a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
self._filename = filename
fd = open(self._filename, 'rb')
if mimetype is None:
(mimetype, encoding) = mimetypes.guess_type(filename)
super(MediaFileUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
def to_json(self):
"""Creating a JSON representation of an instance of MediaFileUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(strip=['_fd'])
@staticmethod
def from_json(s):
d = simplejson.loads(s)
return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
chunksize=d['_chunksize'], resumable=d['_resumable'])
class MediaInMemoryUpload(MediaIoBaseUpload):
"""MediaUpload for a chunk of bytes.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
"""
@util.positional(2)
def __init__(self, body, mimetype='application/octet-stream',
chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
"""Create a new MediaInMemoryUpload.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
Args:
body: string, Bytes of body content.
mimetype: string, Mime-type of the file or default of
'application/octet-stream'.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
fd = StringIO.StringIO(body)
super(MediaInMemoryUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
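# Migration sketch (illustrative): the deprecated MediaInMemoryUpload above is
# equivalent to wrapping the bytes in a StringIO and handing that stream to
# MediaIoBaseUpload directly.
def _example_in_memory_replacement(body):
  fd = StringIO.StringIO(body)
  return MediaIoBaseUpload(fd, mimetype='application/octet-stream',
                           chunksize=DEFAULT_CHUNK_SIZE, resumable=False)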
class MediaIoBaseDownload(object):
""""Download media resources.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
Example:
request = farms.animals().get_media(id='cow')
fh = io.FileIO('cow.png', mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print "Download %d%%." % int(status.progress() * 100)
print "Download Complete!"
"""
@util.positional(3)
def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
"""Constructor.
Args:
fd: io.Base or file object, The stream in which to write the downloaded
bytes.
request: apiclient.http.HttpRequest, the media request to perform in
chunks.
chunksize: int, File will be downloaded in chunks of this many bytes.
"""
self._fd = fd
self._request = request
self._uri = request.uri
self._chunksize = chunksize
self._progress = 0
self._total_size = None
self._done = False
# Stubs for testing.
self._sleep = time.sleep
self._rand = random.random
@util.positional(1)
def next_chunk(self, num_retries=0):
"""Get the next chunk of the download.
Args:
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
headers = {
'range': 'bytes=%d-%d' % (
self._progress, self._progress + self._chunksize)
}
http = self._request.http
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for media download: GET %s, following status: %d'
% (retry_num, self._uri, resp.status))
resp, content = http.request(self._uri, headers=headers)
if resp.status < 500:
break
if resp.status in [200, 206]:
if 'content-location' in resp and resp['content-location'] != self._uri:
self._uri = resp['content-location']
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, uri=self._uri)
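# Usage sketch (illustrative): driving MediaIoBaseDownload with retries. The
# request argument is an HttpRequest built by a service method; the output
# filename is a placeholder.
def _example_chunked_download(request):
  import io
  fh = io.FileIO('download.bin', mode='wb')
  downloader = MediaIoBaseDownload(fh, request, chunksize=512 * 1024)
  done = False
  while not done:
    # Each chunk request is retried up to 3 times on 5xx responses.
    status, done = downloader.next_chunk(num_retries=3)
  return status.progress()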
class _StreamSlice(object):
"""Truncated stream.
Takes a stream and presents a stream that is a slice of the original stream.
This is used when uploading media in chunks. In later versions of Python a
stream can be passed to httplib in place of the string of data to send. The
problem is that httplib just blindly reads to the end of the stream. This
wrapper presents a virtual stream that only reads to the end of the chunk.
"""
def __init__(self, stream, begin, chunksize):
"""Constructor.
Args:
stream: (io.Base, file object), the stream to wrap.
begin: int, the seek position the chunk begins at.
chunksize: int, the size of the chunk.
"""
self._stream = stream
self._begin = begin
self._chunksize = chunksize
self._stream.seek(begin)
def read(self, n=-1):
"""Read n bytes.
Args:
n, int, the number of bytes to read.
Returns:
A string of length 'n', or less if EOF is reached.
"""
# The data left available to read sits in [cur, end)
cur = self._stream.tell()
end = self._begin + self._chunksize
if n == -1 or cur + n > end:
n = end - cur
return self._stream.read(n)
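# Illustrative sketch: _StreamSlice exposes a bounded window over a larger
# stream, so httplib can read a single chunk without running to EOF.
def _example_stream_slice():
  import io
  stream = io.BytesIO(b'abcdefghij')
  chunk = _StreamSlice(stream, 2, 4)
  return chunk.read()  # b'cdef': read(-1) stops at the chunk boundary.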
class HttpRequest(object):
"""Encapsulates a single HTTP request."""
@util.positional(4)
def __init__(self, http, postproc, uri,
method='GET',
body=None,
headers=None,
methodId=None,
resumable=None):
"""Constructor for an HttpRequest.
Args:
http: httplib2.Http, the transport object to use to make a request
postproc: callable, called on the HTTP response and content to transform
it into a data object before returning, or raising an exception
on an error.
uri: string, the absolute URI to send the request to
method: string, the HTTP method to use
body: string, the request body of the HTTP request,
headers: dict, the HTTP request headers
methodId: string, a unique identifier for the API method being called.
resumable: MediaUpload, None if this is not a resumable request.
"""
self.uri = uri
self.method = method
self.body = body
self.headers = headers or {}
self.methodId = methodId
self.http = http
self.postproc = postproc
self.resumable = resumable
self.response_callbacks = []
self._in_error_state = False
# Pull the multipart boundary out of the content-type header.
major, minor, params = mimeparse.parse_mime_type(
headers.get('content-type', 'application/json'))
# The size of the non-media part of the request.
self.body_size = len(self.body or '')
# The resumable URI to send chunks to.
self.resumable_uri = None
# The bytes that have been uploaded.
self.resumable_progress = 0
# Stubs for testing.
self._rand = random.random
self._sleep = time.sleep
@util.positional(1)
def execute(self, http=None, num_retries=0):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable:
body = None
while body is None:
_, body = self.next_chunk(http=http, num_retries=num_retries)
return body
# Non-resumable case.
if 'content-length' not in self.headers:
self.headers['content-length'] = str(self.body_size)
# If the request URI is too long then turn it into a POST request.
if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
self.method = 'POST'
self.headers['x-http-method-override'] = 'GET'
self.headers['content-type'] = 'application/x-www-form-urlencoded'
parsed = urlparse.urlparse(self.uri)
self.uri = urlparse.urlunparse(
(parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
None)
)
self.body = parsed.query
self.headers['content-length'] = str(len(self.body))
# Handle retries for server-side errors.
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning('Retry #%d for request: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
resp, content = http.request(str(self.uri), method=str(self.method),
body=self.body, headers=self.headers)
if resp.status < 500:
break
for callback in self.response_callbacks:
callback(resp)
if resp.status >= 300:
raise HttpError(resp, content, uri=self.uri)
return self.postproc(resp, content)
@util.positional(2)
def add_response_callback(self, cb):
"""add_response_headers_callback
Args:
cb: Callback to be called on receiving the response headers, of signature:
def cb(resp):
# Where resp is an instance of httplib2.Response
"""
self.response_callbacks.append(cb)
@util.positional(1)
def next_chunk(self, http=None, num_retries=0):
"""Execute the next step of a resumable upload.
Can only be used if the method being executed supports media uploads and
the MediaUpload object passed in was flagged as using resumable upload.
Example:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1000, resumable=True)
request = farm.animals().insert(
id='cow',
name='cow.png',
media_body=media)
response = None
while response is None:
status, response = request.next_chunk()
if status:
print "Upload %d%% complete." % int(status.progress() * 100)
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable.size() is None:
size = '*'
else:
size = str(self.resumable.size())
if self.resumable_uri is None:
start_headers = copy.copy(self.headers)
start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
if size != '*':
start_headers['X-Upload-Content-Length'] = size
start_headers['content-length'] = str(self.body_size)
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for resumable URI request: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
resp, content = http.request(self.uri, method=self.method,
body=self.body,
headers=start_headers)
if resp.status < 500:
break
if resp.status == 200 and 'location' in resp:
self.resumable_uri = resp['location']
else:
raise ResumableUploadError(resp, content)
elif self._in_error_state:
# If we are in an error state then query the server for current state of
# the upload by sending an empty PUT and reading the 'range' header in
# the response.
headers = {
'Content-Range': 'bytes */%s' % size,
'content-length': '0'
}
resp, content = http.request(self.resumable_uri, 'PUT',
headers=headers)
status, body = self._process_response(resp, content)
if body:
# The upload was complete.
return (status, body)
# The httplib.request method can take streams for the body parameter, but
# only in Python 2.6 or later. If a stream is available under those
# conditions then use it as the body argument.
if self.resumable.has_stream() and sys.version_info[1] >= 6:
data = self.resumable.stream()
if self.resumable.chunksize() == -1:
data.seek(self.resumable_progress)
chunk_end = self.resumable.size() - self.resumable_progress - 1
else:
# Doing chunking with a stream, so wrap a slice of the stream.
data = _StreamSlice(data, self.resumable_progress,
self.resumable.chunksize())
chunk_end = min(
self.resumable_progress + self.resumable.chunksize() - 1,
self.resumable.size() - 1)
else:
data = self.resumable.getbytes(
self.resumable_progress, self.resumable.chunksize())
# A short read implies that we are at EOF, so finish the upload.
if len(data) < self.resumable.chunksize():
size = str(self.resumable_progress + len(data))
chunk_end = self.resumable_progress + len(data) - 1
headers = {
'Content-Range': 'bytes %d-%d/%s' % (
self.resumable_progress, chunk_end, size),
# Must set the content-length header here because httplib can't
# calculate the size when working with _StreamSlice.
'Content-Length': str(chunk_end - self.resumable_progress + 1)
}
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for media upload: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
try:
resp, content = http.request(self.resumable_uri, method='PUT',
body=data,
headers=headers)
except:
self._in_error_state = True
raise
if resp.status < 500:
break
return self._process_response(resp, content)
def _process_response(self, resp, content):
"""Process the response from a single chunk upload.
Args:
resp: httplib2.Response, the response object.
content: string, the content of the response.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx or a 308.
"""
if resp.status in [200, 201]:
self._in_error_state = False
return None, self.postproc(resp, content)
elif resp.status == 308:
self._in_error_state = False
# A "308 Resume Incomplete" indicates we are not done.
self.resumable_progress = int(resp['range'].split('-')[1]) + 1
if 'location' in resp:
self.resumable_uri = resp['location']
else:
self._in_error_state = True
raise HttpError(resp, content, uri=self.uri)
return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
None)
def to_json(self):
"""Returns a JSON representation of the HttpRequest."""
d = copy.copy(self.__dict__)
if d['resumable'] is not None:
d['resumable'] = self.resumable.to_json()
del d['http']
del d['postproc']
del d['_sleep']
del d['_rand']
return simplejson.dumps(d)
@staticmethod
def from_json(s, http, postproc):
"""Returns an HttpRequest populated with info from a JSON object."""
d = simplejson.loads(s)
if d['resumable'] is not None:
d['resumable'] = MediaUpload.new_from_json(d['resumable'])
return HttpRequest(
http,
postproc,
uri=d['uri'],
method=d['method'],
body=d['body'],
headers=d['headers'],
methodId=d['methodId'],
resumable=d['resumable'])
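# Serialization sketch (illustrative): to_json() drops the transport and
# postprocessing callables, so from_json() needs them supplied again. Both
# arguments below are placeholders for real httplib2.Http and model objects.
def _example_request_roundtrip(request, http, postproc):
  s = request.to_json()
  return HttpRequest.from_json(s, http, postproc)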
class BatchHttpRequest(object):
"""Batches multiple HttpRequest objects into a single HTTP request.
Example:
from apiclient.http import BatchHttpRequest
def list_animals(request_id, response, exception):
\"\"\"Do something with the animals list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
def list_farmers(request_id, response, exception):
\"\"\"Do something with the farmers list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
service = build('farm', 'v2')
batch = BatchHttpRequest()
batch.add(service.animals().list(), list_animals)
batch.add(service.farmers().list(), list_farmers)
batch.execute(http=http)
"""
@util.positional(1)
def __init__(self, callback=None, batch_uri=None):
"""Constructor for a BatchHttpRequest.
Args:
callback: callable, A callback to be called for each response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no error occurred.
batch_uri: string, URI to send batch requests to.
"""
if batch_uri is None:
batch_uri = 'https://www.googleapis.com/batch'
self._batch_uri = batch_uri
# Global callback to be called for each individual response in the batch.
self._callback = callback
# A map from id to request.
self._requests = {}
# A map from id to callback.
self._callbacks = {}
# List of request ids, in the order in which they were added.
self._order = []
# The last auto generated id.
self._last_auto_id = 0
# Unique ID on which to base the Content-ID headers.
self._base_id = None
# A map from request id to (httplib2.Response, content) response pairs
self._responses = {}
# A map of id(Credentials) that have been refreshed.
self._refreshed_credentials = {}
def _refresh_and_apply_credentials(self, request, http):
"""Refresh the credentials and apply to the request.
Args:
request: HttpRequest, the request.
http: httplib2.Http, the global http object for the batch.
"""
# Refresh the credentials, but only once per refresh_token.
# If the request has no http object, then refresh the http passed in
# via execute().
creds = None
if request.http is not None and hasattr(request.http.request,
'credentials'):
creds = request.http.request.credentials
elif http is not None and hasattr(http.request, 'credentials'):
creds = http.request.credentials
if creds is not None:
if id(creds) not in self._refreshed_credentials:
creds.refresh(http)
self._refreshed_credentials[id(creds)] = 1
# Only apply the credentials if we are using the http object passed in,
# otherwise apply() will get called during _serialize_request().
if request.http is None or not hasattr(request.http.request,
'credentials'):
creds.apply(request.headers)
def _id_to_header(self, id_):
"""Convert an id to a Content-ID header value.
Args:
id_: string, identifier of individual request.
Returns:
A Content-ID header with the id_ encoded into it. A UUID is prepended to
the value because Content-ID headers are supposed to be universally
unique.
"""
if self._base_id is None:
self._base_id = uuid.uuid4()
return '<%s+%s>' % (self._base_id, urllib.quote(id_))
def _header_to_id(self, header):
"""Convert a Content-ID header value to an id.
Presumes the Content-ID header conforms to the format that _id_to_header()
returns.
Args:
header: string, Content-ID header value.
Returns:
The extracted id value.
Raises:
BatchError if the header is not in the expected format.
"""
if header[0] != '<' or header[-1] != '>':
raise BatchError("Invalid value for Content-ID: %s" % header)
if '+' not in header:
raise BatchError("Invalid value for Content-ID: %s" % header)
base, id_ = header[1:-1].rsplit('+', 1)
return urllib.unquote(id_)
def _serialize_request(self, request):
"""Convert an HttpRequest object into a string.
Args:
request: HttpRequest, the request to serialize.
Returns:
The request as a string in application/http format.
"""
# Construct status line
parsed = urlparse.urlparse(request.uri)
request_line = urlparse.urlunparse(
(None, None, parsed.path, parsed.params, parsed.query, None)
)
status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
major, minor = request.headers.get('content-type', 'application/json').split('/')
msg = MIMENonMultipart(major, minor)
headers = request.headers.copy()
if request.http is not None and hasattr(request.http.request,
'credentials'):
request.http.request.credentials.apply(headers)
# MIMENonMultipart adds its own Content-Type header.
if 'content-type' in headers:
del headers['content-type']
for key, value in headers.iteritems():
msg[key] = value
msg['Host'] = parsed.netloc
msg.set_unixfrom(None)
if request.body is not None:
msg.set_payload(request.body)
msg['content-length'] = str(len(request.body))
# Serialize the mime message.
fp = StringIO.StringIO()
# maxheaderlen=0 means don't line wrap headers.
g = Generator(fp, maxheaderlen=0)
g.flatten(msg, unixfrom=False)
body = fp.getvalue()
# Strip off the \n\n that the MIME lib tacks onto the end of the payload.
if request.body is None:
body = body[:-2]
return status_line.encode('utf-8') + body
def _deserialize_response(self, payload):
"""Convert string into httplib2 response and content.
Args:
payload: string, headers and body as a string.
Returns:
A pair (resp, content), such as would be returned from httplib2.request.
"""
# Strip off the status line
status_line, payload = payload.split('\n', 1)
protocol, status, reason = status_line.split(' ', 2)
# Parse the rest of the response
parser = FeedParser()
parser.feed(payload)
msg = parser.close()
msg['status'] = status
# Create httplib2.Response from the parsed headers.
resp = httplib2.Response(msg)
resp.reason = reason
resp.version = int(protocol.split('/', 1)[1].replace('.', ''))
content = payload.split('\r\n\r\n', 1)[1]
return resp, content
def _new_id(self):
"""Create a new id.
Auto incrementing number that avoids conflicts with ids already used.
Returns:
string, a new unique id.
"""
self._last_auto_id += 1
while str(self._last_auto_id) in self._requests:
self._last_auto_id += 1
return str(self._last_auto_id)
@util.positional(2)
def add(self, request, callback=None, request_id=None):
"""Add a new request.
Every callback added will be paired with a unique id, the request_id. That
unique id will be passed back to the callback when the response comes back
from the server. The default behavior is to have the library generate its
own unique id. If the caller passes in a request_id then they must ensure
uniqueness for each request_id, and if they do not, an exception is
raised. Callers should either supply all request_ids or never supply a
request id, to avoid such an error.
Args:
request: HttpRequest, Request to add to the batch.
callback: callable, A callback to be called for this response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no errors occurred.
request_id: string, A unique id for the request. The id will be passed to
the callback with the response.
Returns:
None
Raises:
BatchError if a media request is added to a batch.
KeyError if the request_id is not unique.
"""
if request_id is None:
request_id = self._new_id()
if request.resumable is not None:
raise BatchError("Media requests cannot be used in a batch request.")
if request_id in self._requests:
raise KeyError("A request with this ID already exists: %s" % request_id)
self._requests[request_id] = request
self._callbacks[request_id] = callback
self._order.append(request_id)
def _execute(self, http, order, requests):
"""Serialize batch request, send to server, process response.
Args:
http: httplib2.Http, an http object to be used to make the request with.
order: list, list of request ids in the order they were added to the
batch.
requests: list, list of request objects to send.
Raises:
httplib2.HttpLib2Error if a transport error has occured.
apiclient.errors.BatchError if the response is the wrong format.
"""
message = MIMEMultipart('mixed')
# Message should not write out its own headers.
setattr(message, '_write_headers', lambda self: None)
# Add all the individual requests.
for request_id in order:
request = requests[request_id]
msg = MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._id_to_header(request_id)
body = self._serialize_request(request)
msg.set_payload(body)
message.attach(msg)
body = message.as_string()
headers = {}
headers['content-type'] = ('multipart/mixed; '
'boundary="%s"') % message.get_boundary()
resp, content = http.request(self._batch_uri, method='POST', body=body,
headers=headers)
if resp.status >= 300:
raise HttpError(resp, content, uri=self._batch_uri)
# Now break out the individual responses and store each one.
boundary, _ = content.split(None, 1)
# Prepend with a content-type header so FeedParser can handle it.
header = 'content-type: %s\r\n\r\n' % resp['content-type']
for_parser = header + content
parser = FeedParser()
parser.feed(for_parser)
mime_response = parser.close()
if not mime_response.is_multipart():
raise BatchError("Response not in multipart/mixed format.", resp=resp,
content=content)
for part in mime_response.get_payload():
request_id = self._header_to_id(part['Content-ID'])
response, content = self._deserialize_response(part.get_payload())
self._responses[request_id] = (response, content)
@util.positional(1)
def execute(self, http=None):
"""Execute all the requests as a single batched HTTP request.
Args:
http: httplib2.Http, an http object to be used in place of the one the
HttpRequest request object was constructed with. If one isn't supplied
then use a http object from the requests in this batch.
Returns:
None
Raises:
httplib2.HttpLib2Error if a transport error has occured.
apiclient.errors.BatchError if the response is the wrong format.
"""
# If http is not supplied use the first valid one given in the requests.
if http is None:
for request_id in self._order:
request = self._requests[request_id]
if request is not None:
http = request.http
break
if http is None:
raise ValueError("Missing a valid http object.")
self._execute(http, self._order, self._requests)
# Loop over all the requests and check for 401s. For each 401 request the
# credentials should be refreshed and then sent again in a separate batch.
redo_requests = {}
redo_order = []
for request_id in self._order:
resp, content = self._responses[request_id]
if resp['status'] == '401':
redo_order.append(request_id)
request = self._requests[request_id]
self._refresh_and_apply_credentials(request, http)
redo_requests[request_id] = request
if redo_requests:
self._execute(http, redo_order, redo_requests)
# Now process all callbacks that are erroring, and raise an exception for
# ones that return a non-2xx response? Or add extra parameter to callback
# that contains an HttpError?
for request_id in self._order:
resp, content = self._responses[request_id]
request = self._requests[request_id]
callback = self._callbacks[request_id]
response = None
exception = None
try:
if resp.status >= 300:
raise HttpError(resp, content, uri=request.uri)
response = request.postproc(resp, content)
except HttpError, e:
exception = e
if callback is not None:
callback(request_id, response, exception)
if self._callback is not None:
self._callback(request_id, response, exception)
class HttpRequestMock(object):
"""Mock of HttpRequest.
Do not construct directly, instead use RequestMockBuilder.
"""
def __init__(self, resp, content, postproc):
"""Constructor for HttpRequestMock
Args:
resp: httplib2.Response, the response to emulate coming from the request
content: string, the response body
postproc: callable, the post processing function usually supplied by
the model class. See model.JsonModel.response() as an example.
"""
self.resp = resp
self.content = content
self.postproc = postproc
if resp is None:
self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
if 'reason' in self.resp:
self.resp.reason = self.resp['reason']
def execute(self, http=None):
"""Execute the request.
Same behavior as HttpRequest.execute(), but the response is
mocked and not really from an HTTP request/response.
"""
return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
"""A simple mock of HttpRequest
Pass in a dictionary to the constructor that maps request methodIds to
tuples of (httplib2.Response, content, opt_expected_body) that should be
returned when that method is called. None may also be passed in for the
httplib2.Response, in which case a 200 OK response will be generated.
If an opt_expected_body (str or dict) is provided, it will be compared to
the body and UnexpectedBodyError will be raised on inequality.
Example:
response = '{"data": {"id": "tag:google.c...'
requestBuilder = RequestMockBuilder(
{
'plus.activities.get': (None, response),
}
)
apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)
Methods that you do not supply a response for will return a
200 OK with an empty string as the response content or raise an exception
if check_unexpected is set to True. The methodId is taken from the rpcName
in the discovery document.
For more details see the project wiki.
"""
def __init__(self, responses, check_unexpected=False):
"""Constructor for RequestMockBuilder
The constructed object should be a callable object
that can replace the class HttpRequest.
responses - A dictionary that maps methodIds into tuples
of (httplib2.Response, content). The methodId
comes from the 'rpcName' field in the discovery
document.
check_unexpected - A boolean setting whether or not UnexpectedMethodError
should be raised on unsupplied method.
"""
self.responses = responses
self.check_unexpected = check_unexpected
def __call__(self, http, postproc, uri, method='GET', body=None,
headers=None, methodId=None, resumable=None):
"""Implements the callable interface that discovery.build() expects
of requestBuilder, which is to build an object compatible with
HttpRequest.execute(). See that method for the description of the
parameters and the expected response.
"""
if methodId in self.responses:
response = self.responses[methodId]
resp, content = response[:2]
if len(response) > 2:
# Test the body against the supplied expected_body.
expected_body = response[2]
if bool(expected_body) != bool(body):
# Not expecting a body and provided one
# or expecting a body and not provided one.
raise UnexpectedBodyError(expected_body, body)
if isinstance(expected_body, str):
expected_body = simplejson.loads(expected_body)
body = simplejson.loads(body)
if body != expected_body:
raise UnexpectedBodyError(expected_body, body)
return HttpRequestMock(resp, content, postproc)
elif self.check_unexpected:
raise UnexpectedMethodError(methodId=methodId)
else:
model = JsonModel(False)
return HttpRequestMock(None, '{}', model.response)
class HttpMock(object):
"""Mock of httplib2.Http"""
def __init__(self, filename=None, headers=None):
"""
Args:
filename: string, absolute filename to read response from
headers: dict, header to return with response
"""
if headers is None:
headers = {'status': '200 OK'}
if filename:
f = file(filename, 'r')
self.data = f.read()
f.close()
else:
self.data = None
self.response_headers = headers
self.headers = None
self.uri = None
self.method = None
self.body = None
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
self.uri = uri
self.method = method
self.body = body
self.headers = headers
return httplib2.Response(self.response_headers), self.data
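# Test sketch (illustrative): HttpMock replays one canned body, e.g. a saved
# discovery document, for any request. The filename below is a placeholder.
def _example_http_mock():
  http = HttpMock('tests/data/discovery.json', {'status': '200'})
  resp, content = http.request('http://example.com/discovery')
  return resp.status, content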
class HttpMockSequence(object):
"""Mock of httplib2.Http
Mocks a sequence of calls to request returning different responses for each
call. Create an instance initialized with the desired response headers
and content and then use as if an httplib2.Http instance.
http = HttpMockSequence([
({'status': '401'}, ''),
({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
({'status': '200'}, 'echo_request_headers'),
])
resp, content = http.request("http://examples.com")
There are special values you can pass in for content to trigger
behaviours that are helpful in testing.
'echo_request_headers' means return the request headers in the response body
'echo_request_headers_as_json' means return the request headers in
the response body
'echo_request_body' means return the request body in the response body
'echo_request_uri' means return the request uri in the response body
"""
def __init__(self, iterable):
"""
Args:
iterable: iterable, a sequence of pairs of (headers, body)
"""
self._iterable = iterable
self.follow_redirects = True
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
resp, content = self._iterable.pop(0)
if content == 'echo_request_headers':
content = headers
elif content == 'echo_request_headers_as_json':
content = simplejson.dumps(headers)
elif content == 'echo_request_body':
if hasattr(body, 'read'):
content = body.read()
else:
content = body
elif content == 'echo_request_uri':
content = uri
return httplib2.Response(resp), content
def set_user_agent(http, user_agent):
"""Set the user-agent on every request.
Args:
http - An instance of httplib2.Http
or something that acts like it.
user_agent: string, the value for the user-agent header.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = set_user_agent(h, "my-app-name/6.0")
Most of the time the user-agent will be set doing auth, this is for the rare
cases where you are accessing an unauthenticated endpoint.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if 'user-agent' in headers:
headers['user-agent'] = user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
def tunnel_patch(http):
"""Tunnel PATCH requests over POST.
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = tunnel_patch(h)
Useful if you are running on a platform that doesn't support PATCH.
Apply this last if you are using OAuth 1.0, as changing the method
will result in a different signature.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if method == 'PATCH':
if 'oauth_token' in headers.get('authorization', ''):
logging.warning(
'OAuth 1.0 request made with Credentials after tunnel_patch.')
headers['x-http-method-override'] = "PATCH"
method = 'POST'
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
| apache-2.0 |
Lyleo/OmniMarkupPreviewer | OmniMarkupLib/Renderers/libs/pygments/lexers/hdl.py | 72 | 18250 | # -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware description languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
.. versionadded:: 1.4
"""
name = 'verilog'
aliases = ['verilog', 'v']
filenames = ['*.v']
mimetypes = ['text/x-verilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[01]+', Number.Bin),
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
'import'),
(words((
'always', 'always_comb', 'always_ff', 'always_latch', 'and',
'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1',
'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign',
'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase',
'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive',
'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for',
'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0',
'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large',
'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge',
'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed',
'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1',
'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return',
'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed',
'small', 'specify', 'specparam', 'strength', 'string', 'strong0',
'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1',
'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait',
'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
(words((
'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype',
'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected',
'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate',
'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames',
'nounconnected_drive', 'protect', 'protected', 'remove_gatenames',
'remove_netnames', 'resetall', 'timescale', 'unconnected_drive',
'undef'), prefix=r'`', suffix=r'\b'),
Comment.Preproc),
(words((
'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose',
'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite',
'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log',
'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale',
'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset',
'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope',
'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb',
'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'),
prefix=r'\$', suffix=r'\b'),
Name.Builtin),
(words((
'byte', 'shortint', 'int', 'longint', 'integer', 'time',
'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
('[a-zA-Z_]\w*:(?!:)', Name.Label),
('[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
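# Usage sketch (illustrative, not part of the original module): tokenizing a
# small Verilog snippet with the lexer above.
def _example_verilog_tokens():
    code = "module t; wire w; assign w = 1'b0; endmodule\n"
    return list(VerilogLexer().get_tokens(code))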
class SystemVerilogLexer(RegexLexer):
"""
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
.. versionadded:: 1.5
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
filenames = ['*.sv', '*.svh']
mimetypes = ['text/x-systemverilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[01]+', Number.Bin),
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(words((
'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch',
'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins',
'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez',
'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', 'const', 'constraint',
'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign',
'default', 'defparam', 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase',
'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate',
'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive',
'endprogram', 'endproperty', 'endsequence', 'endspecify', 'endtable',
'endtask', 'enum', 'event', 'eventually', 'expect', 'export', 'extends', 'extern',
'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin',
'function', 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone',
'ignore_bins', 'illegal_bins', 'implies', 'import', 'incdir', 'include',
'initial', 'inout', 'input', 'inside', 'instance', 'int', 'integer', 'interface',
'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library',
'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', 'medium',
'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled',
'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter',
'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected',
'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent',
'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', 'realtime',
'ref', 'reg', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime',
's_until', 's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal',
'showcancelled', 'signed', 'small', 'solve', 'specify', 'specparam', 'static',
'string', 'strong', 'strong0', 'strong1', 'struct', 'super', 'supply0', 'supply1',
'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', 'tri0',
'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', 'unique', 'unique0',
'unsigned', 'until', 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored',
'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while',
'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
(words((
'`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype',
'`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif',
'`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma',
'`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'),
suffix=r'\b'),
Comment.Preproc),
(words((
'$display', '$displayb', '$displayh', '$displayo', '$dumpall', '$dumpfile',
'$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports',
'$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff',
'$dumpportson', '$dumpvars', '$fclose', '$fdisplay', '$fdisplayb',
'$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc',
'$fgets', '$finish', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro',
'$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh',
'$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo',
'$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff',
'$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', '$rewind',
'$sformat', '$sformatf', '$sscanf', '$strobe', '$strobeb', '$strobeh', '$strobeo',
'$swrite', '$swriteb', '$swriteh', '$swriteo', '$test', '$ungetc',
'$value$plusargs', '$write', '$writeb', '$writeh', '$writememb',
'$writememh', '$writeo'), suffix=r'\b'),
Name.Builtin),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(words((
'byte', 'shortint', 'int', 'longint', 'integer', 'time',
'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
('[a-zA-Z_]\w*:(?!:)', Name.Label),
('[a-zA-Z_]\w*', Name),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
class VhdlLexer(RegexLexer):
"""
For VHDL source code.
.. versionadded:: 1.5
"""
name = 'vhdl'
aliases = ['vhdl']
filenames = ['*.vhdl', '*.vhd']
mimetypes = ['text/x-vhdl']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'--.*?$', Comment.Single),
(r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r"'[a-z_]\w*", Name.Attribute),
(r'[()\[\],.;\']', Punctuation),
(r'"[^\n\\]*"', String),
(r'(library)(\s+)([a-z_]\w*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(entity|component)(\s+)([a-z_]\w*)',
bygroups(Keyword, Text, Name.Class)),
(r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)'
r'(of)(\s+)([a-z_]\w*)(\s+)(is)',
bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
Name.Class, Text, Keyword)),
(r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),
include('types'),
include('keywords'),
include('numbers'),
(r'[a-z_]\w*', Name),
],
'endblock': [
include('keywords'),
(r'[a-z_]\w*', Name.Class),
(r'(\s+)', Text),
(r';', Punctuation, '#pop'),
],
'types': [
(words((
'boolean', 'bit', 'character', 'severity_level', 'integer', 'time',
'delay_length', 'natural', 'positive', 'string', 'bit_vector',
'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector',
'std_logic', 'std_logic_vector'), suffix=r'\b'),
Keyword.Type),
],
'keywords': [
(words((
'abs', 'access', 'after', 'alias', 'all', 'and',
'architecture', 'array', 'assert', 'attribute', 'begin', 'block',
'body', 'buffer', 'bus', 'case', 'component', 'configuration',
'constant', 'disconnect', 'downto', 'else', 'elsif', 'end',
'entity', 'exit', 'file', 'for', 'function', 'generate',
'generic', 'group', 'guarded', 'if', 'impure', 'in',
'inertial', 'inout', 'is', 'label', 'library', 'linkage',
'literal', 'loop', 'map', 'mod', 'nand', 'new',
'next', 'nor', 'not', 'null', 'of', 'on',
'open', 'or', 'others', 'out', 'package', 'port',
'postponed', 'procedure', 'process', 'pure', 'range', 'record',
'register', 'reject', 'return', 'rol', 'ror', 'select',
'severity', 'signal', 'shared', 'sla', 'sli', 'sra',
'srl', 'subtype', 'then', 'to', 'transport', 'type',
'units', 'until', 'use', 'variable', 'wait', 'when',
'while', 'with', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
],
'numbers': [
(r'\d{1,2}#[0-9a-f_]+#?', Number.Integer),
(r'\d+', Number.Integer),
(r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float),
(r'X"[0-9a-f_]+"', Number.Hex),
(r'O"[0-7_]+"', Number.Oct),
(r'B"[01_]+"', Number.Bin),
],
}
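# Usage sketch (illustrative): rendering a VHDL snippet to HTML with the lexer
# above; highlight() and HtmlFormatter are standard Pygments API.
def _example_vhdl_highlight():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    code = "entity e is\nend entity e;\n"
    return highlight(code, VhdlLexer(), HtmlFormatter())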
| mit |
ddico/odoo | addons/mrp_subcontracting/tests/common.py | 12 | 2455 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import Form, SavepointCase
class TestMrpSubcontractingCommon(SavepointCase):
@classmethod
def setUpClass(cls):
super(TestMrpSubcontractingCommon, cls).setUpClass()
# 1. Create a subcontracting partner
main_partner = cls.env['res.partner'].create({'name': 'main_partner'})
cls.subcontractor_partner1 = cls.env['res.partner'].create({
'name': 'subcontractor_partner',
'parent_id': main_partner.id,
'company_id': cls.env.ref('base.main_company').id,
})
# 2. Create a BOM of subcontracting type
cls.comp1 = cls.env['product.product'].create({
'name': 'Component1',
'type': 'product',
'categ_id': cls.env.ref('product.product_category_all').id,
})
cls.comp2 = cls.env['product.product'].create({
'name': 'Component2',
'type': 'product',
'categ_id': cls.env.ref('product.product_category_all').id,
})
cls.finished = cls.env['product.product'].create({
'name': 'finished',
'type': 'product',
'categ_id': cls.env.ref('product.product_category_all').id,
})
bom_form = Form(cls.env['mrp.bom'])
bom_form.type = 'subcontract'
bom_form.product_tmpl_id = cls.finished.product_tmpl_id
bom_form.subcontractor_ids.add(cls.subcontractor_partner1)
with bom_form.bom_line_ids.new() as bom_line:
bom_line.product_id = cls.comp1
bom_line.product_qty = 1
with bom_form.bom_line_ids.new() as bom_line:
bom_line.product_id = cls.comp2
bom_line.product_qty = 1
cls.bom = bom_form.save()
# Create a BoM for cls.comp2
cls.comp2comp = cls.env['product.product'].create({
'name': 'component for Component2',
'type': 'product',
'categ_id': cls.env.ref('product.product_category_all').id,
})
bom_form = Form(cls.env['mrp.bom'])
bom_form.product_tmpl_id = cls.comp2.product_tmpl_id
with bom_form.bom_line_ids.new() as bom_line:
bom_line.product_id = cls.comp2comp
bom_line.product_qty = 1
cls.comp2_bom = bom_form.save()
cls.warehouse = cls.env['stock.warehouse'].search([], limit=1)
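    def _example_subcontract_receipt(self):
        # Illustrative sketch (not an original helper): tests built on this
        # class typically create a receipt from the subcontractor for the
        # finished product and validate it. The incoming picking type below
        # is an assumption about the standard warehouse configuration.
        picking_form = Form(self.env['stock.picking'])
        picking_form.picking_type_id = self.warehouse.in_type_id
        picking_form.partner_id = self.subcontractor_partner1
        with picking_form.move_ids_without_package.new() as move:
            move.product_id = self.finished
            move.product_uom_qty = 1
        return picking_form.save()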
| agpl-3.0 |
YangSongzhou/django | tests/foreign_object/models/article.py | 87 | 3084 | from django.db import models
from django.db.models.fields.related import ForwardManyToOneDescriptor
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import get_language
class ArticleTranslationDescriptor(ForwardManyToOneDescriptor):
"""
Assigning an ArticleTranslation via this descriptor should not set any local fields.
"""
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.field.name)
setattr(instance, self.cache_name, value)
if value is not None and not self.field.remote_field.multiple:
setattr(value, self.field.related.get_cache_name(), instance)
class ColConstraint(object):
# Anything with as_sql() method works in get_extra_restriction().
def __init__(self, alias, col, value):
self.alias, self.col, self.value = alias, col, value
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return '%s.%s = %%s' % (qn(self.alias), qn(self.col)), [self.value]
class ActiveTranslationField(models.ForeignObject):
"""
This field will allow querying and fetching the currently active translation
for Article from ArticleTranslation.
"""
requires_unique_target = False
def get_extra_restriction(self, where_class, alias, related_alias):
return ColConstraint(alias, 'lang', get_language())
def get_extra_descriptor_filter(self, instance):
return {'lang': get_language()}
def contribute_to_class(self, cls, name):
super(ActiveTranslationField, self).contribute_to_class(cls, name)
setattr(cls, self.name, ArticleTranslationDescriptor(self))
@python_2_unicode_compatible
class Article(models.Model):
active_translation = ActiveTranslationField(
'ArticleTranslation',
from_fields=['id'],
to_fields=['article'],
related_name='+',
on_delete=models.CASCADE,
null=True,
)
pub_date = models.DateField()
def __str__(self):
try:
return self.active_translation.title
except ArticleTranslation.DoesNotExist:
return '[No translation found]'
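# Usage sketch (illustrative, not part of the original test models): the
# active-translation join lets lookups reach into the translation for the
# currently active language.
def _example_active_translation_lookup():
    from django.utils import translation
    with translation.override('en'):
        return list(Article.objects.filter(
            active_translation__title__icontains='django'))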
class NewsArticle(Article):
pass
class ArticleTranslation(models.Model):
article = models.ForeignKey(Article, models.CASCADE)
lang = models.CharField(max_length=2)
title = models.CharField(max_length=100)
body = models.TextField()
abstract = models.CharField(max_length=400, null=True)
class Meta:
unique_together = ('article', 'lang')
ordering = ('active_translation__title',)
class ArticleTag(models.Model):
article = models.ForeignKey(
Article,
models.CASCADE,
related_name='tags',
related_query_name='tag',
)
name = models.CharField(max_length=255)
class ArticleIdea(models.Model):
articles = models.ManyToManyField(
Article,
related_name='ideas',
related_query_name='idea_things',
)
name = models.CharField(max_length=255)
| bsd-3-clause |
ivano666/tensorflow | tensorflow/python/framework/load_library.py | 9 | 4787 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function for loading TensorFlow plugins."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import imp
import sys
import threading
from six.moves.builtins import bytes # pylint: disable=redefined-builtin
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow as py_tf
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
# Thread safe dict to memoize the library filename to module mapping
_OP_LIBRARY_MAP = {}
_OP_LIBRARY_MAP_LOCK = threading.Lock()
def load_op_library(library_filename):
"""Loads a TensorFlow plugin, containing custom ops and kernels.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
Args:
library_filename: Path to the plugin.
Relative or absolute filesystem path to a dynamic library file.
Returns:
A python module containing the Python wrappers for Ops defined in
the plugin.
Raises:
RuntimeError: when unable to load the library or get the python wrappers.
"""
status = py_tf.TF_NewStatus()
lib_handle = py_tf.TF_LoadLibrary(library_filename, status)
try:
error_code = py_tf.TF_GetCode(status)
if error_code != 0:
error_msg = compat.as_text(py_tf.TF_Message(status))
with _OP_LIBRARY_MAP_LOCK:
if (error_code == error_codes_pb2.ALREADY_EXISTS and
'has already been loaded' in error_msg and
library_filename in _OP_LIBRARY_MAP):
return _OP_LIBRARY_MAP[library_filename]
# pylint: disable=protected-access
raise errors._make_specific_exception(None, None, error_msg, error_code)
# pylint: enable=protected-access
finally:
py_tf.TF_DeleteStatus(status)
op_list_str = py_tf.TF_GetOpList(lib_handle)
op_list = op_def_pb2.OpList()
op_list.ParseFromString(compat.as_bytes(op_list_str))
wrappers = py_tf.GetPythonWrappers(op_list_str)
# Get a unique name for the module.
module_name = hashlib.md5(wrappers).hexdigest()
module = imp.new_module(module_name)
# pylint: disable=exec-used
exec(wrappers, module.__dict__)
# Stash away the library handle for making calls into the dynamic library.
module.LIB_HANDLE = lib_handle
# OpDefs of the list of ops defined in the library.
module.OP_LIST = op_list
sys.modules[module_name] = module
# Memoize the filename to module mapping.
with _OP_LIBRARY_MAP_LOCK:
_OP_LIBRARY_MAP[library_filename] = module
return module
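# A minimal usage sketch (hedged: the .so path and the op it exports are
# assumptions for illustration, not part of this module). A plugin built
# against TensorFlow's op framework could be loaded and inspected like this:
def _example_load_op_library():
    module = load_op_library('/tmp/zero_out.so')  # hypothetical path
    print(module.OP_LIST)  # OpDefs registered by the plugin
    return module  # generated Python wrappers live on this module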
_FILE_SYSTEM_LIBRARY_MAP = {}
_FILE_SYSTEM_LIBRARY_MAP_LOCK = threading.Lock()
def load_file_system_library(library_filename):
"""Loads a TensorFlow plugin, containing file system implementation.
Pass `library_filename` to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
Args:
library_filename: Path to the plugin.
Relative or absolute filesystem path to a dynamic library file.
Returns:
None.
Raises:
RuntimeError: when unable to load the library.
"""
status = py_tf.TF_NewStatus()
lib_handle = py_tf.TF_LoadLibrary(library_filename, status)
try:
error_code = py_tf.TF_GetCode(status)
if error_code != 0:
error_msg = compat.as_text(py_tf.TF_Message(status))
with _FILE_SYSTEM_LIBRARY_MAP_LOCK:
if (error_code == error_codes_pb2.ALREADY_EXISTS and
'has already been loaded' in error_msg and
library_filename in _FILE_SYSTEM_LIBRARY_MAP):
return
# pylint: disable=protected-access
raise errors._make_specific_exception(None, None, error_msg, error_code)
# pylint: enable=protected-access
finally:
py_tf.TF_DeleteStatus(status)
with _FILE_SYSTEM_LIBRARY_MAP_LOCK:
_FILE_SYSTEM_LIBRARY_MAP[library_filename] = lib_handle
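# Usage sketch (hedged: the library path is an assumption). Unlike
# load_op_library this returns nothing; the plugin registers its file system
# scheme (for example a hypothetical "myfs://") purely as a side effect:
def _example_load_file_system_library():
    load_file_system_library('/tmp/libmyfs.so')  # hypothetical path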
| apache-2.0 |
ytsapras/robonet_site | scripts/tests/test_lco_api_tools.py | 1 | 4347 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 16:23:33 2018
@author: rstreet
"""
import sys
import os
cwd = os.getcwd()
sys.path.append(os.path.join(cwd,'..'))
import lco_api_tools
from datetime import datetime, timedelta
import pytz
import observation_classes
def run_tests():
"""Function to run the suite of tests. Since access to LCO's API
requires a user token, these tests require this token as input and so
don't conform to the usual pytest format.
"""
if len(sys.argv) == 3:
token = sys.argv[1]
user_id = sys.argv[2]
else:
token = raw_input('Please enter your LCO API token: ')
user_id = raw_input('Please enter your LCO user ID: ')
#test_get_subrequests_status(token,user_id)
#test_get_status_active_obs_subrequests(token,user_id)
test_lco_userrequest_query(token,user_id)
def test_get_subrequests_status(token,user_id):
track_id = 624354
test_sr = observation_classes.SubObsRequest()
subrequests = lco_api_tools.get_subrequests_status(token,user_id)
assert type(subrequests) == type([])
for item in subrequests:
assert type(item) == type(test_sr)
def test_get_status_active_obs_subrequests(token,user_id):
"""Function to test the return of active observations between a given date
range, with the current status of those requests"""
test_sr = observation_classes.SubObsRequest()
start_date = datetime.now() - timedelta(seconds=2.0*24.0*60.0*60.0)
start_date = start_date.replace(tzinfo=pytz.UTC)
end_date = datetime.now() + timedelta(seconds=2.0*24.0*60.0*60.0)
end_date = end_date.replace(tzinfo=pytz.UTC)
obs_list = [
{'pk':16880, 'grp_id': 'REALO20180422T20.59162096', 'track_id': '633617',
'timestamp': datetime(2018, 4, 22, 20, 45, 29),
'time_expire': datetime(2018, 4, 23, 20, 45, 29),
'status': 'AC'},
{'pk': 16881, 'grp_id': 'REALO20180422T20.59207874', 'track_id': '633618',
'timestamp': datetime(2018, 4, 22, 20, 45, 31),
'time_expire': datetime(2018, 4, 23, 20, 45, 31),
'status': 'AC'}
]
active_obs = lco_api_tools.get_status_active_obs_subrequests(obs_list,
token,
user_id,
start_date,
end_date)
assert type(active_obs) == type({})
for grp_id, item in active_obs.items():
assert type(grp_id) == type('foo')
assert type(item) == type({})
assert 'obsrequest' in item.keys()
assert 'subrequests' in item.keys()
for sr in item['subrequests']:
assert type(sr) == type(test_sr)
def test_lco_userrequest_query(token,user_id):
"""Function to test the function to query Valhalla for userrequests status"""
response = lco_api_tools.lco_userrequest_query(token,user_id)
assert 'results' in response.keys()
print 'Query on user ID:'
for item in response['results']:
print item['group_id'], item['id']
for sr in item['requests']:
print '--> ',sr['id'],sr['state']
start_date = datetime.now() - timedelta(days=5.0)
end_date = datetime.now() + timedelta(days=5.0)
start_date = datetime.strptime('2018-05-01T00:00:00',"%Y-%m-%dT%H:%M:%S")
end_date = datetime.strptime('2018-05-30T00:00:00',"%Y-%m-%dT%H:%M:%S")
response = lco_api_tools.lco_userrequest_query(token,user_id,
created_after=start_date,
created_before=end_date)
assert 'results' in response.keys()
print '\nQuery within date range:'
for item in response['results']:
print item['group_id'], item['id']
for sr in item['requests']:
print '--> ',sr['id'],sr['state']
if __name__ == '__main__':
run_tests()
| gpl-2.0 |
silverpapa/py-flask-signup | tests/application-tests.py | 1 | 1643 | # Copyright 2013. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import application
import unittest
from application import application
from flask import Flask, current_app, request, Response
""" Main test cases for our application """
class AppTestCase(unittest.TestCase):
#application = Flask(__name__)
def setUp(self):
application.testing = True
with application.app_context():
self.client = current_app.test_client()
def test_load_config(self):
""" Test that we can load our config properly """
self.assertTrue(1)
def test_get_test(self):
""" Test hitting /test and that we get a correct HTTP response """
self.assertTrue(1)
def test_get_form(self):
""" Test that we can get a signup form XXX"""
self.assertTrue(1)
def test_get_user(self):
""" Test that we can get a user context """
self.assertTrue(1)
def test_login(self):
""" Test that we can authenticate as a user """
self.assertTrue(1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mistoll/ros_buildfarm | scripts/release/run_release_reconfigure_job.py | 3 | 2469 | #!/usr/bin/env python3
# Copyright 2014-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import sys
from ros_buildfarm.argument import add_argument_build_name
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import \
add_argument_distribution_repository_key_files
from ros_buildfarm.argument import add_argument_distribution_repository_urls
from ros_buildfarm.argument import add_argument_dockerfile_dir
from ros_buildfarm.argument import add_argument_dry_run
from ros_buildfarm.argument import add_argument_groovy_script
from ros_buildfarm.argument import add_argument_package_names
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.common import get_distribution_repository_keys
from ros_buildfarm.common import get_user_id
from ros_buildfarm.templates import create_dockerfile
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Run the 'release' job")
add_argument_config_url(parser)
add_argument_rosdistro_name(parser)
add_argument_build_name(parser, 'source')
add_argument_distribution_repository_urls(parser)
add_argument_distribution_repository_key_files(parser)
add_argument_groovy_script(parser)
add_argument_dockerfile_dir(parser)
add_argument_dry_run(parser)
add_argument_package_names(parser)
args = parser.parse_args(argv)
data = copy.deepcopy(args.__dict__)
data.update({
'distribution_repository_urls': args.distribution_repository_urls,
'distribution_repository_keys': get_distribution_repository_keys(
args.distribution_repository_urls,
args.distribution_repository_key_files),
'uid': get_user_id(),
})
create_dockerfile(
'release/release_create_reconfigure_task.Dockerfile.em',
data, args.dockerfile_dir)
if __name__ == '__main__':
main()
| apache-2.0 |
rawWhipIT/p22-goldant-buildpack | vendor/setuptools-7.0/setuptools/tests/test_upload_docs.py | 522 | 2139 | """build_ext tests
"""
import sys, os, shutil, tempfile, unittest, site, zipfile
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestUploadDocsTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.upload_dir = os.path.join(self.dir, 'build')
os.mkdir(self.upload_dir)
# A test document.
f = open(os.path.join(self.upload_dir, 'index.html'), 'w')
f.write("Hello world.")
f.close()
# An empty folder.
os.mkdir(os.path.join(self.upload_dir, 'empty'))
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_create_zipfile(self):
# Test to make sure zipfile creation handles common cases.
# This explicitly includes a folder containing an empty folder.
dist = Distribution()
cmd = upload_docs(dist)
cmd.upload_dir = self.upload_dir
cmd.target_dir = self.upload_dir
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, 'foo.zip')
try:
zip_file = cmd.create_zipfile(tmp_file)
assert zipfile.is_zipfile(tmp_file)
            zip_file = zipfile.ZipFile(tmp_file)  # reopen to verify contents
assert zip_file.namelist() == ['index.html']
zip_file.close()
finally:
shutil.rmtree(tmp_dir)
| mit |
SUSE/azurectl | azurectl/utils/xz.py | 1 | 2078 | # Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import lzma
import os
class XZ(object):
"""
Implements decompression of lzma compressed files
"""
LZMA_STREAM_BUFFER_SIZE = 8192
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.lzma_stream.close()
def __init__(self, lzma_stream, buffer_size=LZMA_STREAM_BUFFER_SIZE):
self.buffer_size = int(buffer_size)
self.lzma = lzma.LZMADecompressor()
self.lzma_stream = lzma_stream
self.buffered_bytes = b''
def read(self, size):
if self.lzma.eof and not self.buffered_bytes:
return None
chunks = self.buffered_bytes
bytes_uncompressed = len(chunks)
while not self.lzma.eof and bytes_uncompressed < size:
chunks += self.lzma.decompress(
self.lzma.unused_data + self.lzma_stream.read(self.buffer_size)
)
bytes_uncompressed = len(chunks)
self.buffered_bytes = chunks[size:]
return chunks[:size]
@classmethod
def close(self):
self.lzma_stream.close()
@classmethod
def open(self, file_name, buffer_size=LZMA_STREAM_BUFFER_SIZE):
self.lzma_stream = open(file_name, 'rb')
return XZ(self.lzma_stream, buffer_size)
@classmethod
def uncompressed_size(self, file_name):
with lzma.open(file_name) as lzma_stream:
lzma_stream.seek(0, os.SEEK_END)
return lzma_stream.tell()
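# Usage sketch (hedged: 'archive.xz' is a hypothetical file). XZ.open returns
# a decompressing reader that supports chunked read() and the with-statement:
def _example_read_xz():
    with XZ.open('archive.xz') as reader:
        chunk = reader.read(XZ.LZMA_STREAM_BUFFER_SIZE)
        while chunk:
            chunk = reader.read(XZ.LZMA_STREAM_BUFFER_SIZE)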
| apache-2.0 |
eLBati/odoo | openerp/tools/cache.py | 41 | 4676 | import lru
import logging
logger = logging.getLogger(__name__)
class ormcache(object):
""" LRU cache decorator for orm methods,
"""
def __init__(self, skiparg=2, size=8192, multi=None, timeout=None):
self.skiparg = skiparg
self.size = size
self.method = None
self.stat_miss = 0
self.stat_hit = 0
self.stat_err = 0
def __call__(self,m):
self.method = m
def lookup(self2, cr, *args, **argv):
r = self.lookup(self2, cr, *args, **argv)
return r
lookup.clear_cache = self.clear
return lookup
def stat(self):
return "lookup-stats hit=%s miss=%s err=%s ratio=%.1f" % (self.stat_hit,self.stat_miss,self.stat_err, (100*float(self.stat_hit))/(self.stat_miss+self.stat_hit) )
def lru(self, self2):
try:
ormcache = getattr(self2, '_ormcache')
except AttributeError:
ormcache = self2._ormcache = {}
try:
d = ormcache[self.method]
except KeyError:
d = ormcache[self.method] = lru.LRU(self.size)
return d
def lookup(self, self2, cr, *args, **argv):
d = self.lru(self2)
key = args[self.skiparg-2:]
try:
r = d[key]
self.stat_hit += 1
return r
except KeyError:
self.stat_miss += 1
value = d[key] = self.method(self2, cr, *args)
return value
except TypeError:
self.stat_err += 1
return self.method(self2, cr, *args)
def clear(self, self2, *args):
""" Remove *args entry from the cache or all keys if *args is undefined
"""
d = self.lru(self2)
if args:
logger.warn("ormcache.clear arguments are deprecated and ignored "
"(while clearing caches on (%s).%s)",
self2._name, self.method.__name__)
d.clear()
self2.pool._any_cache_cleared = True
class ormcache_context(ormcache):
def __init__(self, skiparg=2, size=8192, accepted_keys=()):
super(ormcache_context,self).__init__(skiparg,size)
self.accepted_keys = accepted_keys
def lookup(self, self2, cr, *args, **argv):
d = self.lru(self2)
context = argv.get('context', {})
ckey = filter(lambda x: x[0] in self.accepted_keys, context.items())
ckey.sort()
d = self.lru(self2)
key = args[self.skiparg-2:]+tuple(ckey)
try:
r = d[key]
self.stat_hit += 1
return r
except KeyError:
self.stat_miss += 1
value = d[key] = self.method(self2, cr, *args, **argv)
return value
except TypeError:
self.stat_err += 1
return self.method(self2, cr, *args, **argv)
class ormcache_multi(ormcache):
def __init__(self, skiparg=2, size=8192, multi=3):
super(ormcache_multi,self).__init__(skiparg,size)
self.multi = multi - 2
def lookup(self, self2, cr, *args, **argv):
d = self.lru(self2)
args = list(args)
multi = self.multi
ids = args[multi]
r = {}
miss = []
for i in ids:
args[multi] = i
key = tuple(args[self.skiparg-2:])
try:
r[i] = d[key]
self.stat_hit += 1
except Exception:
self.stat_miss += 1
miss.append(i)
if miss:
args[multi] = miss
r.update(self.method(self2, cr, *args))
for i in miss:
args[multi] = i
key = tuple(args[self.skiparg-2:])
d[key] = r[i]
return r
class dummy_cache(object):
""" Cache decorator replacement to actually do no caching.
"""
def __init__(self, *l, **kw):
pass
def __call__(self, fn):
fn.clear_cache = self.clear
return fn
def clear(self, *l, **kw):
pass
if __name__ == '__main__':
class A():
@ormcache()
def m(self,a,b):
print "A::m(", self,a,b
return 1
@ormcache_multi(multi=3)
def n(self,cr,uid,ids):
print "m", self,cr,uid,ids
return dict([(i,i) for i in ids])
a=A()
r=a.m(1,2)
r=a.m(1,2)
r=a.n("cr",1,[1,2,3,4])
r=a.n("cr",1,[1,2])
print r
for i in a._ormcache:
print a._ormcache[i].d
a.m.clear_cache()
a.n.clear_cache(a,1,1)
r=a.n("cr",1,[1,2])
print r
r=a.n("cr",1,[1,2])
# For backward compatibility
cache = ormcache
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kevin-coder/tensorflow-fork | tensorflow/python/data/experimental/ops/resampling.py | 10 | 11934 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.rejection_resample")
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
"""A transformation that resamples a dataset to achieve a target distribution.
**NOTE** Resampling is performed via rejection sampling; some fraction
of the input values will be dropped.
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
target_dist_t = ops.convert_to_tensor(target_dist, name="target_dist")
class_values_ds = dataset.map(class_func)
# Get initial distribution.
if initial_dist is not None:
initial_dist_t = ops.convert_to_tensor(initial_dist, name="initial_dist")
acceptance_dist, prob_of_original = (
_calculate_acceptance_probs_with_mixing(initial_dist_t,
target_dist_t))
initial_dist_ds = dataset_ops.Dataset.from_tensors(
initial_dist_t).repeat()
acceptance_dist_ds = dataset_ops.Dataset.from_tensors(
acceptance_dist).repeat()
prob_of_original_ds = dataset_ops.Dataset.from_tensors(
prob_of_original).repeat()
else:
initial_dist_ds = _estimate_initial_dist_ds(
target_dist_t, class_values_ds)
acceptance_and_original_prob_ds = initial_dist_ds.map(
lambda initial: _calculate_acceptance_probs_with_mixing( # pylint: disable=g-long-lambda
initial, target_dist_t))
acceptance_dist_ds = acceptance_and_original_prob_ds.map(
lambda accept_prob, _: accept_prob)
prob_of_original_ds = acceptance_and_original_prob_ds.map(
lambda _, prob_original: prob_original)
filtered_ds = _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds,
class_values_ds, seed)
# Prefetch filtered dataset for speed.
filtered_ds = filtered_ds.prefetch(3)
prob_original_static = _get_prob_original_static(
initial_dist_t, target_dist_t) if initial_dist is not None else None
if prob_original_static == 1:
return dataset_ops.Dataset.zip((class_values_ds, dataset))
elif prob_original_static == 0:
return filtered_ds
else:
return interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.zip((class_values_ds, dataset)), filtered_ds],
weights=prob_of_original_ds.map(lambda prob: [(prob, 1.0 - prob)]),
seed=seed)
return _apply_fn
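# A minimal usage sketch (hedged: the labels and distributions below are
# illustrative assumptions). The resampled dataset yields (class, data) pairs:
def _example_rejection_resample():
    import tensorflow as tf  # deferred import; assumes the public TF API
    labels = tf.data.Dataset.from_tensor_slices([0, 0, 0, 0, 1])
    return labels.apply(
        tf.data.experimental.rejection_resample(
            class_func=lambda label: label,
            target_dist=[0.5, 0.5],
            initial_dist=[0.8, 0.2]))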
def _get_prob_original_static(initial_dist_t, target_dist_t):
"""Returns the static probability of sampling from the original.
`tensor_util.constant_value(prob_of_original)` returns `None` if it encounters
an Op that it isn't defined for. We have some custom logic to avoid this.
Args:
initial_dist_t: A tensor of the initial distribution.
target_dist_t: A tensor of the target distribution.
Returns:
The probability of sampling from the original distribution as a constant,
if it is a constant, or `None`.
"""
init_static = tensor_util.constant_value(initial_dist_t)
target_static = tensor_util.constant_value(target_dist_t)
if init_static is None or target_static is None:
return None
else:
return np.min(target_static / init_static)
def _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_values_ds,
seed):
"""Filters a dataset based on per-class acceptance probabilities.
Args:
dataset: The dataset to be filtered.
acceptance_dist_ds: A dataset of acceptance probabilities.
initial_dist_ds: A dataset of the initial probability distribution, given or
estimated.
class_values_ds: A dataset of the corresponding classes.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A dataset of (class value, data) after filtering.
"""
def maybe_warn_on_large_rejection(accept_dist, initial_dist):
proportion_rejected = math_ops.reduce_sum((1 - accept_dist) * initial_dist)
return control_flow_ops.cond(
math_ops.less(proportion_rejected, .5),
lambda: accept_dist,
lambda: logging_ops.Print( # pylint: disable=g-long-lambda
accept_dist, [proportion_rejected, initial_dist, accept_dist],
message="Proportion of examples rejected by sampler is high: ",
summarize=100,
first_n=10))
acceptance_dist_ds = (dataset_ops.Dataset.zip((acceptance_dist_ds,
initial_dist_ds))
.map(maybe_warn_on_large_rejection))
def _gather_and_copy(class_val, acceptance_prob, data):
return class_val, array_ops.gather(acceptance_prob, class_val), data
current_probabilities_and_class_and_data_ds = dataset_ops.Dataset.zip(
(class_values_ds, acceptance_dist_ds, dataset)).map(_gather_and_copy)
filtered_ds = (
current_probabilities_and_class_and_data_ds
.filter(lambda _1, p, _2: random_ops.random_uniform([], seed=seed) < p))
return filtered_ds.map(lambda class_value, _, data: (class_value, data))
def _estimate_initial_dist_ds(
target_dist_t, class_values_ds, dist_estimation_batch_size=32,
smoothing_constant=10):
num_classes = (target_dist_t.shape[0] or array_ops.shape(target_dist_t)[0])
initial_examples_per_class_seen = array_ops.fill(
[num_classes], np.int64(smoothing_constant))
def update_estimate_and_tile(num_examples_per_class_seen, c):
updated_examples_per_class_seen, dist = _estimate_data_distribution(
c, num_examples_per_class_seen)
tiled_dist = array_ops.tile(
array_ops.expand_dims(dist, 0), [dist_estimation_batch_size, 1])
return updated_examples_per_class_seen, tiled_dist
initial_dist_ds = (class_values_ds.batch(dist_estimation_batch_size)
.apply(scan_ops.scan(initial_examples_per_class_seen,
update_estimate_and_tile))
.apply(batching.unbatch()))
return initial_dist_ds
def _get_target_to_initial_ratio(initial_probs, target_probs):
# Add tiny to initial_probs to avoid divide by zero.
denom = (initial_probs + np.finfo(initial_probs.dtype.as_numpy_dtype).tiny)
return target_probs / denom
def _estimate_data_distribution(c, num_examples_per_class_seen):
"""Estimate data distribution as labels are seen.
Args:
c: The class labels. Type `int32`, shape `[batch_size]`.
num_examples_per_class_seen: Type `int64`, shape `[num_classes]`,
containing counts.
Returns:
    num_examples_per_class_seen: Updated counts. Type `int64`, shape
`[num_classes]`.
dist: The updated distribution. Type `float32`, shape `[num_classes]`.
"""
num_classes = num_examples_per_class_seen.get_shape()[0]
# Update the class-count based on what labels are seen in batch.
num_examples_per_class_seen = math_ops.add(
num_examples_per_class_seen, math_ops.reduce_sum(
array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0))
init_prob_estimate = math_ops.truediv(
num_examples_per_class_seen,
math_ops.reduce_sum(num_examples_per_class_seen))
dist = math_ops.cast(init_prob_estimate, dtypes.float32)
return num_examples_per_class_seen, dist
def _calculate_acceptance_probs_with_mixing(initial_probs, target_probs):
"""Calculates the acceptance probabilities and mixing ratio.
In this case, we assume that we can *either* sample from the original data
distribution with probability `m`, or sample from a reshaped distribution
that comes from rejection sampling on the original distribution. This
rejection sampling is done on a per-class basis, with `a_i` representing the
probability of accepting data from class `i`.
This method is based on solving the following analysis for the reshaped
distribution:
Let F be the probability of a rejection (on any example).
Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i be the rate at which the rejection sampler should *accept* class i
  Let t_i be the target proportion in the minibatches for class i (target_probs)
```
F = sum_i(p_i * (1-a_i))
= 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1
```
An example with class `i` will be accepted if `k` rejections occur, then an
example with class `i` is seen by the rejector, and it is accepted. This can
be written as follows:
```
t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F) using geometric series identity, since 0 <= F < 1
= p_i * a_i / sum_j(p_j * a_j) using F from above
```
Note that the following constraints hold:
```
0 <= p_i <= 1, sum_i(p_i) = 1
0 <= a_i <= 1
0 <= t_i <= 1, sum_i(t_i) = 1
```
A solution for a_i in terms of the other variables is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
If we try to minimize the amount of data rejected, we get the following:
M_max = max_i [ t_i / p_i ]
M_min = min_i [ t_i / p_i ]
The desired probability of accepting data if it comes from class `i`:
a_i = (t_i/p_i - m) / (M_max - m)
The desired probability of pulling a data element from the original dataset,
rather than the filtered one:
m = M_min
Args:
initial_probs: A Tensor of the initial probability distribution, given or
estimated.
target_probs: A Tensor of the corresponding classes.
Returns:
(A 1D Tensor with the per-class acceptance probabilities, the desired
probability of pull from the original distribution.)
"""
ratio_l = _get_target_to_initial_ratio(initial_probs, target_probs)
max_ratio = math_ops.reduce_max(ratio_l)
min_ratio = math_ops.reduce_min(ratio_l)
# Target prob to sample from original distribution.
m = min_ratio
# TODO(joelshor): Simplify fraction, if possible.
a_i = (ratio_l - m) / (max_ratio - m)
return a_i, m
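# Worked example (hedged, hand-computed from the formulas above): with
# initial_probs = [0.9, 0.1] and target_probs = [0.5, 0.5], the ratios
# t_i / p_i are [0.556, 5.0], so m = 0.556 (sample from the original dataset
# about 56% of the time) and a = [0.0, 1.0]: the over-represented class is
# never accepted by the rejection sampler, while the rare class always is.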
| apache-2.0 |
ColorFuzzy/tornado | maint/vm/windows/bootstrap.py | 13 | 3239 | r"""Installs files needed for tornado testing on windows.
These instructions are compatible with the VMs provided by http://modern.ie.
The bootstrapping script works on the WinXP/IE6 and Win8/IE10 configurations,
although tornado's tests do not pass on XP.
1) Install virtualbox guest additions (from the device menu in virtualbox)
2) Set up a shared folder to the root of your tornado repo. It must be a
read-write mount to use tox, although the tests can be run directly
in a read-only mount. This will probably assign drive letter E:.
3) Install Python 2.7 from python.org.
4) Run this script by double-clicking it, or running
"c:\python27\python.exe bootstrap.py" in a shell.
To run the tests by hand, cd to e:\ and run
c:\python27\python.exe -m tornado.test.runtests
To run the tests with tox, cd to e:\maint\vm\windows and run
c:\python27\scripts\tox
To run under cygwin (which must be installed separately), run
cd /cygdrive/e; python -m tornado.test.runtests
"""
import os
import subprocess
import sys
import urllib
TMPDIR = r'c:\tornado_bootstrap'
PYTHON_VERSIONS = [
(r'c:\python27\python.exe', 'http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi'),
(r'c:\python33\python.exe', 'http://www.python.org/ftp/python/3.3.0/python-3.3.0.msi'),
]
SCRIPTS_DIR = r'c:\python27\scripts'
EASY_INSTALL = os.path.join(SCRIPTS_DIR, 'easy_install.exe')
PY_PACKAGES = ['tox', 'virtualenv', 'pip']
def download_to_cache(url, local_name=None):
if local_name is None:
local_name = url.split('/')[-1]
filename = os.path.join(TMPDIR, local_name)
if not os.path.exists(filename):
data = urllib.urlopen(url).read()
with open(filename, 'wb') as f:
f.write(data)
return filename
def main():
if not os.path.exists(TMPDIR):
os.mkdir(TMPDIR)
os.chdir(TMPDIR)
for exe, url in PYTHON_VERSIONS:
if os.path.exists(exe):
print "%s already exists, skipping" % exe
continue
print "Installing %s" % url
filename = download_to_cache(url)
# http://blog.jaraco.com/2012/01/how-i-install-python-on-windows.html
subprocess.check_call(['msiexec', '/i', filename,
'ALLUSERS=1', '/passive'])
if not os.path.exists(EASY_INSTALL):
filename = download_to_cache('http://python-distribute.org/distribute_setup.py')
subprocess.check_call([sys.executable, filename])
subprocess.check_call([EASY_INSTALL] + PY_PACKAGES)
# cygwin's setup.exe doesn't like being run from a script (looks
# UAC-related). If it did, something like this might install it.
# (install python, python-setuptools, python3, and easy_install
# unittest2 (cygwin's python 2 is 2.6))
#filename = download_to_cache('http://cygwin.com/setup.exe')
#CYGTMPDIR = os.path.join(TMPDIR, 'cygwin')
#if not os.path.exists(CYGTMPDIR):
# os.mkdir(CYGTMPDIR)
## http://www.jbmurphy.com/2011/06/16/powershell-script-to-install-cygwin/
#CYGWIN_ARGS = [filename, '-q', '-l', CYGTMPDIR,
# '-s', 'http://mirror.nyi.net/cygwin/', '-R', r'c:\cygwin']
#subprocess.check_call(CYGWIN_ARGS)
if __name__ == '__main__':
main()
| apache-2.0 |
tealover/nova | nova/tests/unit/api/openstack/compute/test_plugins/basic.py | 46 | 1179 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Basic Test Extension"""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = 'test-basic'
class BasicController(wsgi.Controller):
def index(self, req):
data = {'param': 'val'}
return data
class Basic(extensions.V3APIExtensionBase):
"""Basic Test Extension."""
name = "BasicTest"
alias = ALIAS
version = 1
def get_resources(self):
resource = extensions.ResourceExtension('test', BasicController())
return [resource]
def get_controller_extensions(self):
return []
| apache-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/dateutil/tzwin.py | 111 | 6149 | # This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six.moves import winreg
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
return TZKEYNAME
TZKEYNAME = _settzkeyname()
class tzwinbase(datetime.tzinfo):
"""tzinfo class based on win32's timezones available in the registry."""
def utcoffset(self, dt):
if self._isdst(dt):
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
def dst(self, dt):
if self._isdst(dt):
minutes = self._dstoffset - self._stdoffset
return datetime.timedelta(minutes=minutes)
else:
return datetime.timedelta(0)
def tzname(self, dt):
if self._isdst(dt):
return self._dstname
else:
return self._stdname
def list():
"""Return a list of all time zones known to the system."""
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, TZKEYNAME)
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
list = staticmethod(list)
def display(self):
return self._display
def _isdst(self, dt):
if not self._dstmonth:
# dstmonth == 0 signals the zone has no daylight saving time
return False
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
if dston < dstoff:
return dston <= dt.replace(tzinfo=None) < dstoff
else:
return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
# multiple contexts only possible in 2.7 and 3.1, we still support 2.6
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle,
"%s\%s" % (TZKEYNAME, name)) as tzkey:
keydict = valuestodict(tzkey)
self._stdname = keydict["Std"].encode("iso-8859-1")
self._dstname = keydict["Dlt"].encode("iso-8859-1")
self._display = keydict["Display"]
        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
# for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
def __init__(self):
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
keydict = valuestodict(tzlocalkey)
self._stdname = keydict["StandardName"].encode("iso-8859-1")
self._dstname = keydict["DaylightName"].encode("iso-8859-1")
try:
with winreg.OpenKey(
handle, "%s\%s" % (TZKEYNAME, self._stdname)) as tzkey:
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
except OSError:
self._display = None
self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:6]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:6]
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek 5 means last instance"""
first = datetime.datetime(year, month, 1, hour, minute)
weekdayone = first.replace(day=((dayofweek-first.isoweekday()) % 7+1))
for n in range(whichweek):
dt = weekdayone+(whichweek-n)*ONEWEEK
if dt.month == month:
return dt
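# Usage sketch (hedged, hand-checked against this implementation): a rule such
# as "last Sunday in October at 02:00" is encoded as month=10, dayofweek=0,
# whichweek=5, and for 2015 evaluates to datetime(2015, 10, 25, 2, 0):
def _example_picknthweekday():
    return picknthweekday(2015, 10, 0, 2, 0, 5)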
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dict = {}
size = winreg.QueryInfoKey(key)[1]
for i in range(size):
data = winreg.EnumValue(key, i)
dict[data[0]] = data[1]
return dict
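# Usage sketch (hedged: only works on Windows, and the zone name must exist in
# the local registry; "Eastern Standard Time" is an assumption):
def _example_tzwin():
    print(tzwin.list())  # all time zone names known to the registry
    eastern = tzwin('Eastern Standard Time')
    return datetime.datetime(2015, 7, 1, tzinfo=eastern)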
| gpl-2.0 |
Tesora/tesora-project-config | nodepool/scripts/prepare_tempest_testrepository.py | 3 | 1655 | #!/usr/bin/env python2
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from subunit2sql.db import api
from subunit2sql import shell
from subunit2sql import write_subunit
DB_URI = 'mysql+pymysql://query:query@logstash.openstack.org/subunit2sql'
if len(sys.argv) == 2:
TEMPEST_PATH = sys.argv[1]
elif len(sys.argv) > 2:
TEMPEST_PATH = sys.argv[1]
DB_URI = sys.argv[2]
else:
TEMPEST_PATH = '/opt/stack/new/tempest'
def main():
shell.parse_args([])
shell.CONF.set_override('connection', DB_URI, group='database')
session = api.get_session()
runs = api.get_recent_successful_runs_by_run_metadata(
'build_name', 'gate-tempest-dsvm-neutron-full',
num_runs=10, session=session)
session.close()
preseed_path = os.path.join(TEMPEST_PATH, 'preseed-streams')
if not os.path.isdir(preseed_path):
os.mkdir(preseed_path)
for run in runs:
with open(os.path.join(preseed_path, run.uuid + '.subunit'), 'w') as fd:
write_subunit.sql2subunit(run.uuid, fd)
if __name__ == '__main__':
main()
| apache-2.0 |
deniederhut/weather_report | weather_report/classifiers.py | 2 | 4370 | #!/usr/bin/env python
"""
Sentiment classifiers
"""
import collections
import datetime
import json
from nltk import word_tokenize
from nltk.corpus import wordnet as wn
import os
from pkg_resources import resource_string
from textblob import TextBlob
import time
class Classifier(object):
"""MetaClass for classifier objects"""
def __init__(self):
self.data = {}
__type__ = 'meta'
now = datetime.datetime.now()
city = 'Pythopolis'
items = 0
terms = 0
def write(self, filepath):
if not os.path.isfile(filepath):
with open(filepath, 'w') as f:
f.write(','.join([
'city', 'year', 'month', 'mday', 'wday', 'hour', 'source', 'type', 'variable', 'value', 'n_items', 'n_terms'
]))
for variable in self.data:
with open(filepath, 'a') as f:
f.write('\n' + ','.join([str(item) for item in [
self.city,
self.now.year,
self.now.month,
self.now.day,
self.now.weekday(),
self.now.hour,
self.__class__(),
self.type,
variable,
self.data[variable],
self.items,
self.terms
]]))
class CountDict(Classifier):
"""A simple dictionary method for mood analysis"""
def __class__(self):
return "CountDict"
def __init__(self):
f = resource_string(__name__, 'data/emo_dict.json')
lookup = json.loads(f.decode('utf-8'))
self.data = {key:0 for key in lookup}
self.lookup = lookup
self.type = 'count'
def classify(self, text):
"""Count number of matches for emotion categories"""
if type(text) == str:
text = [text]
if type(text) == tuple:
text = list(text)
for item in text:
self.items += 1
self.terms += len(item.split(' '))
for key in self.data:
self.data[key] += len(set(item.lower().split()) & set(self.lookup[key]))
return self
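# Usage sketch (hedged: the sample text and 'data.csv' path are assumptions).
# classify() returns self, so counting and writing can be chained:
def _example_count_dict():
    CountDict().classify(['what a happy sunny day']).write('data.csv')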
class PolarSummary(Classifier):
"""
A summary of sentiment and subjectivity using pattern's classifier (via TextBlob)
"""
def __class__(self):
return "PolarSummary"
def __init__(self):
self.data = {'polarity':0, 'subjectivity':0}
self.type = 'polarity'
def classify(self, text):
"""Calculate sentiment summary"""
if type(text) == str:
text = [text]
if type(text) == tuple:
text = list(text)
for item in text:
self.items += 1
self.terms += len(item.split(' '))
item = TextBlob(item)
            n = float(self.items)  # items already includes the current one
            self.data['polarity'] = self.data['polarity'] * (n - 1) / n + item.sentiment.polarity / n
            self.data['subjectivity'] = self.data['subjectivity'] * (n - 1) / n + item.sentiment.subjectivity / n
return self
class WordNetDict(Classifier):
"""
Unsupervised mood extraction using WordNet's hypernym paths
"""
def __class__(self):
return "WordNetDict"
def __init__(self):
self.data = {}
self.type = 'count'
self.emotion = wn.synset('emotion.n.01')
def classify(self, text):
"""Count number/kind of emotion terms"""
if type(text) == str:
text = [text]
if type(text) == tuple:
text = list(text)
for item in text:
self.items += 1
self.terms += len(item.split())
for term in word_tokenize(item):
for syn in wn.synsets(term):
for path in syn.hypernym_paths():
if self.emotion in path:
self.update_from_path(path)
return self
def update_from_path(self, path):
index = path.index(self.emotion)
if len(path) > index + 2:
self.inc(self.name_from_synset(path[index + 2]))
else:
pass
def inc(self, key):
if key in self.data:
self.data[key] += 1
else:
self.data.update({key : 1})
@staticmethod
def name_from_synset(syn):
return syn.name().split('.')[0].replace('_', ' ')
| bsd-2-clause |
fitermay/intellij-community | python/helpers/python-skeletons/StringIO.py | 80 | 2737 | """Skeleton for 'StringIO' stdlib module."""
import StringIO as _StringIO
class StringIO(object):
"""Reads and writes a string buffer (also known as memory files)."""
def __init__(self, buffer=None):
"""When a StringIO object is created, it can be initialized to an existing
string by passing the string to the constructor.
:type buffer: T <= bytes | unicode
:rtype: _StringIO.StringIO[T]
"""
self.closed = False
def getvalue(self):
"""Retrieve the entire contents of the "file" at any time before the
StringIO object's close() method is called.
:rtype: T
"""
pass
def close(self):
"""Free the memory buffer.
:rtype: None
"""
pass
def flush(self):
"""Flush the internal buffer.
:rtype: None
"""
pass
def isatty(self):
"""Return True if the file is connected to a tty(-like) device,
else False.
:rtype: bool
"""
return False
def __iter__(self):
"""Return an iterator over lines.
:rtype: _StringIO.StringIO[T]
"""
return self
def next(self):
"""Returns the next input line.
:rtype: T
"""
pass
def read(self, size=-1):
"""Read at most size bytes or characters from the buffer.
:type size: numbers.Integral
:rtype: T
"""
pass
def readline(self, size=-1):
"""Read one entire line from the buffer.
:type size: numbers.Integral
:rtype: T
"""
pass
def readlines(self, sizehint=-1):
"""Read until EOF using readline() and return a list containing the
lines thus read.
:type sizehint: numbers.Integral
:rtype: list[T]
"""
pass
def seek(self, offset, whence=0):
"""Set the buffer's current position, like stdio's fseek().
:type offset: numbers.Integral
:type whence: numbers.Integral
:rtype: None
"""
pass
def tell(self):
"""Return the buffer's current position, like stdio's ftell().
:rtype: int
"""
pass
def truncate(self, size=-1):
"""Truncate the buffer's size.
:type size: numbers.Integral
:rtype: None
"""
pass
def write(self, str):
""""Write bytes or a string to the buffer.
:type str: T
:rtype: None
"""
pass
def writelines(self, sequence):
"""Write a sequence of bytes or strings to the buffer.
:type sequence: collections.Iterable[T]
:rtype: None
"""
pass
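# Usage sketch (hedged: illustrates the interface this skeleton documents; the
# actual behavior comes from the real StringIO implementation):
def _example_stringio():
    buf = StringIO('hello world')
    first = buf.read(5)  # -> 'hello'
    buf.seek(0)
    value = buf.getvalue()  # entire buffer, independent of position
    buf.close()
    return first, value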
| apache-2.0 |
welltempered/rpy2-heroku | rpy/rlike/tests/test_functional.py | 5 | 1117 | import unittest
import itertools
import rpy2.rlike.functional as rlf
class TapplyTestCase(unittest.TestCase):
def testSumByString(self):
seq = ( 1, 2, 3, 4, 5, 6)
tags = ('a', 'b', 'a', 'c', 'b', 'a')
expected = {'a': 1+3+6,
'b': 2+5,
'c': 4}
res = rlf.tapply(seq, tags, sum)
for k, v in res:
self.assertEqual(expected[k], v)
class VectorizeTestCase(unittest.TestCase):
def simpleFunction(self, subject_fun):
def f(x):
return x ** 2
f_iter = subject_fun(f)
seq = (1, 2, 3)
res = f_iter(seq)
for va, vb in itertools.izip(seq, res):
self.assertEqual(va ** 2, vb)
def testIterify(self):
self.simpleFunction(rlf.iterify)
def testListify(self):
self.simpleFunction(rlf.listify)
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TapplyTestCase)
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(VectorizeTestCase))
return suite
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
robinro/ansible-modules-extras | network/f5/bigip_pool.py | 32 | 19028 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_pool
short_description: "Manages F5 BIG-IP LTM pools"
description:
- Manages F5 BIG-IP LTM pools via iControl SOAP API
version_added: 1.2
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
requirements:
- bigsuds
options:
state:
description:
- Pool/pool member state
required: false
default: present
choices:
- present
- absent
aliases: []
name:
description:
- Pool name
required: true
default: null
choices: []
aliases:
- pool
partition:
description:
- Partition of pool/pool member
required: false
default: 'Common'
choices: []
aliases: []
lb_method:
description:
- Load balancing method
version_added: "1.3"
required: False
default: 'round_robin'
choices:
- round_robin
- ratio_member
- least_connection_member
- observed_member
- predictive_member
- ratio_node_address
- least_connection_node_address
- fastest_node_address
- observed_node_address
- predictive_node_address
- dynamic_ratio
- fastest_app_response
- least_sessions
- dynamic_ratio_member
- l3_addr
- weighted_least_connection_member
- weighted_least_connection_node_address
- ratio_session
- ratio_least_connection_member
- ratio_least_connection_node_address
aliases: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "1.3"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
slow_ramp_time:
description:
- Sets the ramp-up time (in seconds) to gradually ramp up the load on
newly added or freshly detected up pool members
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
reselect_tries:
description:
- Sets the number of times the system tries to contact a pool member
after a passive failure
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
service_down_action:
description:
- Sets the action to take when node goes down in pool
version_added: "1.3"
required: False
default: null
choices:
- none
- reset
- drop
- reselect
aliases: []
host:
description:
- "Pool member IP"
required: False
default: null
choices: []
aliases:
- address
port:
description:
- Pool member port
required: False
default: null
choices: []
aliases: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Create pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "least_connection_member"
slow_ramp_time: 120
delegate_to: localhost
- name: Modify load balancer method
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "round_robin"
- name: Add pool member
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4["address"] }}"
port: 80
- name: Remove pool member from pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4["address"] }}"
port: 80
- name: Delete pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
'''
RETURN = '''
'''
def pool_exists(api, pool):
# hack to determine if pool exists
result = False
try:
api.LocalLB.Pool.get_object_status(pool_names=[pool])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_pool(api, pool, lb_method):
# create requires lb_method but we don't want to default
# to a value on subsequent runs
if not lb_method:
lb_method = 'round_robin'
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method],
members=[[]])
def remove_pool(api, pool):
api.LocalLB.Pool.delete_pool(pool_names=[pool])
def get_lb_method(api, pool):
lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
def set_lb_method(api, pool, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method])
def get_monitors(api, pool):
result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule']
monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
quorum = result['quorum']
monitor_templates = result['monitor_templates']
return (monitor_type, quorum, monitor_templates)
def set_monitors(api, pool, monitor_type, quorum, monitor_templates):
monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule}
api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association])
def get_slow_ramp_time(api, pool):
result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0]
return result
def set_slow_ramp_time(api, pool, seconds):
api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds])
def get_reselect_tries(api, pool):
result = api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0]
return result
def set_reselect_tries(api, pool, tries):
api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries])
def get_action_on_service_down(api, pool):
result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0]
result = result.split("SERVICE_DOWN_ACTION_")[-1].lower()
return result
def set_action_on_service_down(api, pool, action):
action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper()
api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action])
def member_exists(api, pool, address, port):
# hack to determine if member exists
result = False
try:
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
members=[members])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def delete_node_address(api, address):
result = False
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
else:
# genuine exception
raise
return result
def remove_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members])
def add_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members])
def main():
lb_method_choices = ['round_robin', 'ratio_member',
'least_connection_member', 'observed_member',
'predictive_member', 'ratio_node_address',
'least_connection_node_address',
'fastest_node_address', 'observed_node_address',
'predictive_node_address', 'dynamic_ratio',
'fastest_app_response', 'least_sessions',
'dynamic_ratio_member', 'l3_addr',
'weighted_least_connection_member',
'weighted_least_connection_node_address',
'ratio_session', 'ratio_least_connection_member',
'ratio_least_connection_node_address']
monitor_type_choices = ['and_list', 'm_of_n']
service_down_choices = ['none', 'reset', 'drop', 'reselect']
argument_spec = f5_argument_spec()
meta_args = dict(
name=dict(type='str', required=True, aliases=['pool']),
lb_method=dict(type='str', choices=lb_method_choices),
monitor_type=dict(type='str', choices=monitor_type_choices),
quorum=dict(type='int'),
monitors=dict(type='list'),
slow_ramp_time=dict(type='int'),
reselect_tries=dict(type='int'),
service_down_action=dict(type='str', choices=service_down_choices),
host=dict(type='str', aliases=['address']),
port=dict(type='int')
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
name = module.params['name']
pool = fq_name(partition, name)
lb_method = module.params['lb_method']
if lb_method:
lb_method = lb_method.lower()
monitor_type = module.params['monitor_type']
if monitor_type:
monitor_type = monitor_type.lower()
quorum = module.params['quorum']
monitors = module.params['monitors']
if monitors:
monitors = []
for monitor in module.params['monitors']:
monitors.append(fq_name(partition, monitor))
slow_ramp_time = module.params['slow_ramp_time']
reselect_tries = module.params['reselect_tries']
service_down_action = module.params['service_down_action']
if service_down_action:
service_down_action = service_down_action.lower()
host = module.params['host']
address = fq_name(partition, host)
port = module.params['port']
# sanity check user supplied values
if (host and port is None) or (port is not None and not host):
module.fail_json(msg="both host and port must be supplied")
if port is not None and (0 > port or port > 65535):
module.fail_json(msg="valid ports must be in range 0 - 65535")
if monitors:
if len(monitors) == 1:
# set default required values for single monitor
quorum = 0
monitor_type = 'single'
elif len(monitors) > 1:
if not monitor_type:
module.fail_json(msg="monitor_type required for monitors > 1")
if monitor_type == 'm_of_n' and not quorum:
module.fail_json(msg="quorum value required for monitor_type m_of_n")
if monitor_type != 'm_of_n':
quorum = 0
elif monitor_type:
# no monitors specified but monitor_type exists
module.fail_json(msg="monitor_type require monitors parameter")
elif quorum is not None:
# no monitors specified but quorum exists
module.fail_json(msg="quorum requires monitors parameter")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
if host and port and pool:
# member removal takes precedent
if pool_exists(api, pool) and member_exists(api, pool, address, port):
if not module.check_mode:
remove_pool_member(api, pool, address, port)
deleted = delete_node_address(api, address)
result = {'changed': True, 'deleted': deleted}
else:
result = {'changed': True}
elif pool_exists(api, pool):
# no host/port supplied, must be pool removal
if not module.check_mode:
# hack to handle concurrent runs of module
# pool might be gone before we actually remove it
try:
remove_pool(api, pool)
result = {'changed': True}
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = {'changed': False}
else:
# genuine exception
raise
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
update = False
if not pool_exists(api, pool):
# pool does not exist -- need to create it
if not module.check_mode:
                # a bit of a hack to handle concurrent runs of this module.
                # even though we've checked the pool doesn't exist,
                # it may exist by the time we run create_pool(), so
                # catch the "already exists" error and fall through
                # to the attribute-update path below
try:
create_pool(api, pool, lb_method)
result = {'changed': True}
except bigsuds.OperationFailed as e:
if "already exists" in str(e):
update = True
else:
# genuine exception
raise
else:
if monitors:
set_monitors(api, pool, monitor_type, quorum, monitors)
if slow_ramp_time:
set_slow_ramp_time(api, pool, slow_ramp_time)
if reselect_tries:
set_reselect_tries(api, pool, reselect_tries)
if service_down_action:
set_action_on_service_down(api, pool, service_down_action)
if host and port:
add_pool_member(api, pool, address, port)
else:
# check-mode return value
result = {'changed': True}
else:
# pool exists -- potentially modify attributes
update = True
if update:
if lb_method and lb_method != get_lb_method(api, pool):
if not module.check_mode:
set_lb_method(api, pool, lb_method)
result = {'changed': True}
if monitors:
t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool)
if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
if not module.check_mode:
set_monitors(api, pool, monitor_type, quorum, monitors)
result = {'changed': True}
if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool):
if not module.check_mode:
set_slow_ramp_time(api, pool, slow_ramp_time)
result = {'changed': True}
if reselect_tries and reselect_tries != get_reselect_tries(api, pool):
if not module.check_mode:
set_reselect_tries(api, pool, reselect_tries)
result = {'changed': True}
if service_down_action and service_down_action != get_action_on_service_down(api, pool):
if not module.check_mode:
set_action_on_service_down(api, pool, service_down_action)
result = {'changed': True}
if (host and port) and not member_exists(api, pool, address, port):
if not module.check_mode:
add_pool_member(api, pool, address, port)
result = {'changed': True}
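            # port 0 is falsy, so the check above skips it;
            # handle wildcard port 0 members explicitly here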
if (host and port == 0) and not member_exists(api, pool, address, port):
if not module.check_mode:
add_pool_member(api, pool, address, port)
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
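# Illustrative usage (not part of the module; server, credentials and pool
# names below are hypothetical):
#
#   - name: Create a pool and add a member
#     bigip_pool:
#       server: lb.example.com
#       user: admin
#       password: secret
#       state: present
#       name: web-pool
#       lb_method: round_robin
#       host: 10.0.0.10
#       port: 80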
| gpl-3.0 |
copperleaftech/django-import-export | tests/core/models.py | 2 | 2427 | from __future__ import unicode_literals
import random
import string
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
birthday = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(
max_length=100,
unique=True,
)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField('Book name', max_length=100)
author = models.ForeignKey(Author, blank=True, null=True, on_delete=models.CASCADE)
author_email = models.EmailField('Author email', max_length=75, blank=True)
imported = models.BooleanField(default=False)
published = models.DateField('Published', blank=True, null=True)
published_time = models.TimeField('Time published', blank=True, null=True)
price = models.DecimalField(max_digits=10, decimal_places=2, null=True,
blank=True)
categories = models.ManyToManyField(Category, blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Child(models.Model):
parent = models.ForeignKey(Parent, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
def __str__(self):
return '%s - child of %s' % (self.name, self.parent.name)
class Profile(models.Model):
user = models.OneToOneField('auth.User', on_delete=models.CASCADE)
is_private = models.BooleanField(default=True)
class Entry(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
class WithDefault(models.Model):
name = models.CharField('Default', max_length=75, blank=True,
default='foo_bar')
def random_name():
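    """Generate a random 100-character lowercase string (callable default)."""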
chars = string.ascii_lowercase
return ''.join(random.SystemRandom().choice(chars) for _ in range(100))
class WithDynamicDefault(models.Model):
name = models.CharField('Dyn Default', max_length=100,
default=random_name)
class WithFloatField(models.Model):
f = models.FloatField(blank=True, null=True)
| bsd-2-clause |
sahana/Turkey | modules/templates/IFRC/config.py | 4 | 201775 | # -*- coding: utf-8 -*-
import datetime
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
from controllers import deploy_index
def config(settings):
"""
Template settings for IFRC's Resource Management System
http://eden.sahanafoundation.org/wiki/Deployments/IFRC
"""
T = current.T
# -----------------------------------------------------------------------------
# Pre-Populate
#settings.base.prepopulate += ("IFRC", "IFRC/Train", "IFRC/Demo")
settings.base.prepopulate += ("IFRC", "IFRC/Train")
settings.base.system_name = T("Resource Management System")
settings.base.system_name_short = T("RMS")
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 8 # Delegations
settings.security.map = True
# Authorization Settings
settings.auth.registration_requires_approval = True
settings.auth.registration_requires_verification = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = True
settings.auth.registration_link_user_to = {"staff": T("Staff"),
"volunteer": T("Volunteer"),
"member": T("Member")
}
settings.auth.record_approval = True
    # @ToDo: Should we fall back to organisation_id if site_id is None?
settings.auth.registration_roles = {"site_id": ["reader",
],
}
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = True
settings.auth.person_realm_member_org = True
# Activate entity role manager tabs for OrgAdmins
settings.auth.entity_role_manager = True
def ifrc_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
tablename = table._tablename
# Do not apply realms for Master Data
# @ToDo: Restore Realms and add a role/functionality support for Master Data
if tablename in ("hrm_certificate",
"hrm_department",
"hrm_job_title",
"hrm_course",
"hrm_programme",
"member_membership_type",
"vol_award",
):
return None
db = current.db
s3db = current.s3db
# Entity reference fields
EID = "pe_id"
OID = "organisation_id"
SID = "site_id"
#GID = "group_id"
PID = "person_id"
# Owner Entity Foreign Key
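        # (maps tablename to the FK field, or to a list of (instance-type, FK)
        #  tuples, from whose referenced record the realm entity is inherited)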
realm_entity_fks = dict(pr_contact = [("org_organisation", EID),
("po_household", EID),
("pr_person", EID),
],
pr_contact_emergency = EID,
pr_physical_description = EID,
pr_address = [("org_organisation", EID),
("pr_person", EID),
],
pr_image = EID,
pr_identity = PID,
pr_education = PID,
pr_group = OID,
pr_note = PID,
hrm_human_resource = SID,
hrm_training = PID,
inv_recv = SID,
inv_send = SID,
inv_track_item = "track_org_id",
inv_adj_item = "adj_id",
req_req_item = "req_id",
org_capacity_assessment_data = "assessment_id",
po_household = "area_id",
po_organisation_area = "area_id",
)
# Default Foreign Keys (ordered by priority)
default_fks = ("household_id",
"catalog_id",
"project_id",
"project_location_id",
)
# Link Tables
#realm_entity_link_table = dict(
# project_task = Storage(tablename = "project_task_project",
# link_key = "task_id"
# )
# )
#if tablename in realm_entity_link_table:
# # Replace row with the record from the link table
# link_table = realm_entity_link_table[tablename]
# table = s3db[link_table.tablename]
# rows = db(table[link_table.link_key] == row.id).select(table.id,
# limitby=(0, 1))
# if rows:
# # Update not Create
# row = rows.first()
# Check if there is a FK to inherit the realm_entity
realm_entity = 0
fk = realm_entity_fks.get(tablename, None)
fks = [fk] if not isinstance(fk, list) else list(fk)
fks.extend(default_fks)
for default_fk in fks:
if isinstance(default_fk, tuple):
instance_type, fk = default_fk
else:
instance_type, fk = None, default_fk
if fk not in table.fields:
continue
# Inherit realm_entity from parent record
if fk == EID:
if instance_type:
ftable = s3db.table(instance_type)
if not ftable:
continue
else:
ftable = s3db.pr_person
query = (ftable[EID] == row[EID])
else:
ftablename = table[fk].type[10:] # reference tablename
ftable = s3db[ftablename]
query = (table.id == row.id) & \
(table[fk] == ftable.id)
record = db(query).select(ftable.realm_entity,
limitby=(0, 1)).first()
if record:
realm_entity = record.realm_entity
break
#else:
# Continue to loop through the rest of the default_fks
# Fall back to default get_realm_entity function
use_user_organisation = False
#use_user_root_organisation = False
# Suppliers & Partners are owned by the user's organisation
# @note: when the organisation record is first written, no
# type-link would exist yet, so this needs to be
# called again every time the type-links for an
# organisation change in order to be effective
if realm_entity == 0 and tablename == "org_organisation":
ottable = s3db.org_organisation_type
ltable = db.org_organisation_organisation_type
query = (ltable.organisation_id == row.id) & \
(ottable.id == ltable.organisation_type_id) & \
(ottable.name == "Red Cross / Red Crescent")
rclink = db(query).select(ltable.id, limitby=(0, 1)).first()
if not rclink:
use_user_organisation = True
# Facilities & Requisitions are owned by the user's organisation
elif tablename in ("org_facility", "req_req"):
use_user_organisation = True
elif tablename == "hrm_training":
# Inherit realm entity from the related HR record
htable = s3db.hrm_human_resource
query = (table.id == row.id) & \
(htable.person_id == table.person_id) & \
(htable.deleted != True)
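            # fetch up to 2 rows so that ambiguity can be detected cheaply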
rows = db(query).select(htable.realm_entity, limitby=(0, 2))
if len(rows) == 1:
realm_entity = rows.first().realm_entity
else:
# Ambiguous => try course organisation
ctable = s3db.hrm_course
otable = s3db.org_organisation
query = (table.id == row.id) & \
(ctable.id == table.course_id) & \
(otable.id == ctable.organisation_id)
row = db(query).select(otable.pe_id,
limitby=(0, 1)).first()
if row:
realm_entity = row.pe_id
# otherwise: inherit from the person record
elif realm_entity == 0 and tablename == "pr_group":
# Groups are owned by the user's organisation if not linked to an Organisation directly
use_user_organisation = True
auth = current.auth
user = auth.user
if user:
if use_user_organisation:
# @ToDo - this might cause issues if the user's org is different from the realm that gave them permissions to create the Org
realm_entity = s3db.pr_get_pe_id("org_organisation",
user.organisation_id)
#elif use_user_root_organisation:
# realm_entity = s3db.pr_get_pe_id("org_organisation",
# auth.root_org())
return realm_entity
settings.auth.realm_entity = ifrc_realm_entity
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "IFRC"
settings.base.xtheme = "IFRC/xtheme-ifrc.css"
# Formstyle
settings.ui.formstyle = "table"
settings.ui.filter_formstyle = "table_inline"
# Uncomment to disable responsive behavior of datatables
settings.ui.datatables_responsive = False
settings.ui.custom_icons = {
"male": "icon-male",
"female": "icon-female",
"medical": "icon-plus-sign-alt",
}
settings.gis.map_height = 600
settings.gis.map_width = 869
# Display Resources recorded to Admin-Level Locations on the map
# @ToDo: Move into gis_config?
settings.gis.display_L0 = True
# GeoNames username
settings.gis.geonames_username = "rms_dev"
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("ar", "العربية"),
("en-gb", "English"),
("es", "Español"),
("fr", "Français"),
("km", "ភាសាខ្មែរ"), # Khmer
        ("mg", "Malagasy"),
("mn", "Монгол хэл"), # Mongolian
("my", "မြန်မာစာ"), # Burmese
("ne", "नेपाली"), # Nepali
("prs", "دری"), # Dari
("ps", "پښتو"), # Pashto
#("th", "ภาษาไทย"), # Thai
("vi", "Tiếng Việt"), # Vietnamese
("zh-cn", "中文 (简体)"),
])
# Default Language
settings.L10n.default_language = "en-gb"
# Default timezone for users
settings.L10n.utc_offset = "+0700"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Unsortable 'pretty' date format (for use in English)
# For a potential way to sort, see http://datatables.net/manual/orthogonal-data
settings.L10n.date_format = "%d-%b-%Y"
# Uncomment this to Translate Layer Names
settings.L10n.translate_gis_layer = True
# Translate Location Names
settings.L10n.translate_gis_location = True
# Uncomment this for Alternate Location Names
settings.L10n.name_alt_gis_location = True
# Uncomment this to Translate Organisation Names/Acronyms
settings.L10n.translate_org_organisation = True
# Names of Orgs with specific settings
ARCS = "Afghan Red Crescent Society"
AURC = "Australian Red Cross"
BRCS = "Bangladesh Red Crescent Society"
CRMADA = "Malagasy Red Cross Society"
CVTL = "Timor-Leste Red Cross Society (Cruz Vermelha de Timor-Leste)"
IRCS = "Iraqi Red Crescent Society"
NRCS = "Nepal Red Cross Society"
NZRC = "New Zealand Red Cross"
PMI = "Indonesian Red Cross Society (Palang Merah Indonesia)"
PRC = "Philippine Red Cross"
VNRC = "Viet Nam Red Cross"
# -----------------------------------------------------------------------------
def airegex(default):
""" NS-specific settings for accent-insensitive searching """
root_org = current.auth.root_org_name()
if root_org == VNRC:
return True
else:
return False
settings.database.airegex = airegex
# -----------------------------------------------------------------------------
# Finance settings
#
def currencies(default):
""" RMS- and NS-specific currencies """
# Currencies that are common for all NS
currencies = {"EUR" : "Euros",
"CHF" : "Swiss Francs",
"USD" : "United States Dollars",
}
# NS-specific currencies
root_org = current.auth.root_org_name()
if root_org == ARCS:
currencies["AFN"] = "Afghani"
elif root_org == AURC:
currencies["AUD"] = "Australian Dollars"
elif root_org == BRCS:
currencies["BDT"] = "Taka"
elif root_org == CRMADA:
currencies["CAD"] = "Canadian Dollars"
currencies["MGA"] = "Malagasy Ariary"
currencies["NOK"] = "Norwegian Krone"
elif root_org == IRCS:
currencies["IQD"] = "Iraqi Dinar"
elif root_org == NRCS:
currencies["NPR"] = "Nepalese Rupee"
elif root_org == NZRC:
currencies["NZD"] = "New Zealand Dollars"
elif root_org == PMI:
currencies["IDR"] = "Indonesian Rupiah"
elif root_org == PRC:
currencies["PHP"] = "Philippine Pesos"
elif root_org == VNRC:
currencies["VND"] = "Vietnamese Dong"
else:
currencies["GBP"] = "Great British Pounds"
currencies["CAD"] = "Canadian Dollars"
return currencies
settings.fin.currencies = currencies
def currency_default(default):
""" NS-specific default currencies """
root_org = current.auth.root_org_name()
if root_org == ARCS:
default = "AFN"
elif root_org == AURC:
default = "AUD"
elif root_org == BRCS:
default = "BDT"
elif root_org == CRMADA:
default = "MGA"
elif root_org == IRCS:
default = "IQD"
elif root_org == NRCS:
default = "NPR"
elif root_org == NZRC:
default = "NZD"
elif root_org == PMI:
default = "IDR"
elif root_org == PRC:
default = "PHP"
elif root_org == VNRC:
default = "VND"
#else:
#default = "USD"
return default
settings.fin.currency_default = currency_default
# -----------------------------------------------------------------------------
def pdf_bidi(default):
""" NS-specific selection of whether to support BiDi in PDF output """
root_org = current.auth.root_org_name()
if root_org in (ARCS, IRCS):
default = True
return default
settings.L10n.pdf_bidi = pdf_bidi
# -----------------------------------------------------------------------------
def pdf_export_font(default):
""" NS-specific selection of which font to use in PDF output """
root_org = current.auth.root_org_name()
if root_org in (ARCS, IRCS):
# Use Unifont even in English since there is data stored with non-English characters
default = ["unifont", "unifont"]
return default
settings.L10n.pdf_export_font = pdf_export_font
# -----------------------------------------------------------------------------
def postcode_selector(default):
""" NS-specific selection of whether to show Postcode """
root_org = current.auth.root_org_name()
if root_org in (ARCS, IRCS, VNRC):
default = False
return default
settings.gis.postcode_selector = postcode_selector
# -----------------------------------------------------------------------------
def label_fullname(default):
""" NS-specific selection of label for the AddPersonWidget2's Name field """
if current.session.s3.language == "mg":
# Allow for better localisation
default = "Full Name"
return default
settings.pr.label_fullname = label_fullname
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
settings.ui.camp = True
# -----------------------------------------------------------------------------
# Filter Manager
settings.search.filter_manager = False
# -----------------------------------------------------------------------------
# Default Summary
settings.ui.summary = ({"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}],
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report", "ajax_init": True}],
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
)
# -----------------------------------------------------------------------------
# Content Management
#
# Uncomment this to hide CMS from module index pages
settings.cms.hide_index = True
settings.cms.richtext = True
# -----------------------------------------------------------------------------
# Messaging
# Parser
settings.msg.parser = "IFRC"
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Organisation Management
# Enable the use of Organisation Branches
settings.org.branches = True
# Set the length of the auto-generated org/site code the default is 10
settings.org.site_code_len = 3
# Set the label for Sites
settings.org.site_label = "Office/Warehouse/Facility"
# Enable certain fields just for specific Organisations
# @ToDo: Make these Lazy settings
settings.org.dependent_fields = \
{"pr_person.middle_name" : (CVTL, VNRC),
"pr_person_details.mother_name" : (BRCS, ),
"pr_person_details.father_name" : (ARCS, BRCS, IRCS),
"pr_person_details.grandfather_name" : (ARCS, IRCS),
"pr_person_details.year_of_birth" : (ARCS, ),
"pr_person_details.affiliations" : (PRC, ),
"pr_person_details.company" : (PRC, ),
"vol_details.card" : (ARCS, ),
"vol_volunteer_cluster.vol_cluster_type_id" : (PRC, ),
"vol_volunteer_cluster.vol_cluster_id" : (PRC, ),
"vol_volunteer_cluster.vol_cluster_position_id" : (PRC, ),
}
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to filter certificates by (root) Organisation & hence not allow Certificates from other orgs to be added to a profile (except by Admin)
settings.hrm.filter_certificates = True
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to allow HRs to have multiple Job Titles
settings.hrm.multiple_job_titles = True
# Uncomment to have each root Org use a different Job Title Catalog
settings.hrm.org_dependent_job_titles = True
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to enable the use of HR Education
settings.hrm.use_education = True
# Custom label for Organisations in HR module
settings.hrm.organisation_label = "National Society / Branch"
# Uncomment to consolidate tabs into a single CV
settings.hrm.cv_tab = True
# Uncomment to consolidate tabs into Staff Record (set to False to hide the tab)
settings.hrm.record_tab = "record"
# Uncomment to do a search for duplicates in AddPersonWidget2
settings.pr.lookup_duplicates = True
# RDRT
settings.deploy.hr_label = "Member"
settings.customise_deploy_home = deploy_index
# Enable the use of Organisation Regions
settings.org.regions = True
# Make Organisation Regions Hierarchical
settings.org.regions_hierarchical = True
# Uncomment to allow hierarchical categories of Skills, which each need their own set of competency levels.
settings.hrm.skill_types = True
# RDRT overrides these within controller:
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Activity types for experience record
settings.hrm.activity_types = {"rdrt": "RDRT Mission"}
# -----------------------------------------------------------------------------
# Projects
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
settings.project.mode_drr = True
# Uncomment this to use Activity Types for Activities & Projects
settings.project.activity_types = True
# Uncomment this to use Codes for projects
settings.project.codes = True
# Uncomment this to call project locations 'Communities'
settings.project.community = True
# Uncomment this to enable Demographics in 3W projects
settings.project.demographics = True
# Uncomment this to enable Hazards in 3W projects
settings.project.hazards = True
# Uncomment this to use multiple Budgets per project
settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Uncomment this to enable Programmes in projects
settings.project.programmes = True
# Uncomment this to enable Themes in 3W projects
settings.project.themes = True
# Uncomment this to customise
# Links to Filtered Components for Donors & Partners
settings.project.organisation_roles = {
1: T("Host National Society"),
2: T("Partner"),
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier"),
9: T("Partner National Society"),
}
# -----------------------------------------------------------------------------
# Inventory Management
settings.inv.show_mode_of_transport = True
settings.inv.send_show_time_in = True
#settings.inv.collapse_tabs = True
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
settings.inv.direct_stock_edits = True
settings.inv.org_dependent_warehouse_types = True
# Settings for HNRC:
#settings.inv.stock_count = False
#settings.inv.item_status = {#0: current.messages["NONE"], # Not defined yet
# 0: T("Good"),
# 1: T("Damaged"),
# #1: T("Dump"),
# #2: T("Sale"),
# #3: T("Reject"),
# #4: T("Surplus")
# }
#settings.inv.recv_types = {#0: current.messages["NONE"], In Shipment Types
# #11: T("Internal Shipment"), In Shipment Types
# 32: T("Donation"),
# 34: T("Purchase"),
# 36: T("Consignment"), # Borrowed
# 37: T("In Transit"), # Loaning warehouse space to another agency
# }
# -----------------------------------------------------------------------------
# Request Management
# Uncomment to disable Inline Forms in Requests module
settings.req.inline_forms = False
settings.req.req_type = ["Stock"]
settings.req.use_commit = False
# Should Requests ask whether Transportation is required?
settings.req.ask_transport = True
settings.req.pack_values = False
# Disable Request Matching as we don't want users making requests to see what stock is available
#settings.req.prompt_match = False # HNRC
# Uncomment to disable Recurring Request
#settings.req.recurring = False # HNRC
# =============================================================================
# Template Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "RMS",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
#module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
#module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
#module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
#module_type = None,
)),
# Uncomment to enable internal support requests
("support", Storage(
name_nice = T("Support"),
#description = "Support Requests",
restricted = True,
#module_type = None # This item is handled separately for the menu
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
#module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
#module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
#module_type = 1
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
#module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
#module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
#module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
#module_type = 4
)),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
#module_type = 5,
)),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
#module_type = 10,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
#module_type = 2
)),
("budget", Storage(
name_nice = T("Budgets"),
#description = "Tracking of Budgets",
restricted = True,
#module_type = None
)),
("survey", Storage(
name_nice = T("Assessments"),
#description = "Create, enter, and manage surveys.",
restricted = True,
#module_type = 5,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Events",
restricted = True,
#module_type = 10
)),
("member", Storage(
name_nice = T("Members"),
#description = "Membership Management System",
restricted = True,
#module_type = 10,
)),
("deploy", Storage(
name_nice = T("Regional Disaster Response Teams"),
#description = "Alerting and Deployment of Disaster Response Teams",
restricted = True,
#module_type = 10,
)),
("dvr", Storage(
name_nice = T("Disaster Victim Registry"),
#description = "Population Outreach",
restricted = True,
#module_type = 10,
)),
("po", Storage(
name_nice = T("Recovery Outreach"),
#description = "Population Outreach",
restricted = True,
#module_type = 10,
)),
("stats", Storage(
name_nice = T("Statistics"),
#description = "Manages statistics",
restricted = True,
#module_type = None,
)),
("vulnerability", Storage(
name_nice = T("Vulnerability"),
#description = "Manages vulnerability indicators",
restricted = True,
#module_type = 10,
)),
])
# -------------------------------------------------------------------------
# Functions which are local to this Template
# -------------------------------------------------------------------------
def ns_only(tablename,
fieldname = "organisation_id",
required = True,
branches = True,
updateable = True,
limit_filter_opts = True,
hierarchy = True,
):
"""
Function to configure an organisation_id field to be restricted to just
NS/Branch
@param required: Field is mandatory
@param branches: Include Branches
@param updateable: Limit to Orgs which the user can update
@param limit_filter_opts: Also limit the Filter options
@param hierarchy: Use the hierarchy widget (unsuitable for use in Inline Components)
NB If limit_filter_opts=True, apply in customise_xx_controller inside prep,
after standard_prep is run
"""
# Lookup organisation_type_id for Red Cross
db = current.db
s3db = current.s3db
ttable = s3db.org_organisation_type
try:
type_id = db(ttable.name == "Red Cross / Red Crescent").select(ttable.id,
limitby=(0, 1),
cache = s3db.cache,
).first().id
except:
# No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
return
# Load standard model
f = s3db[tablename][fieldname]
if limit_filter_opts:
            # Find the relevant filter widget & limit its options
filter_widgets = s3db.get_config(tablename, "filter_widgets")
filter_widget = None
if filter_widgets:
from s3 import FS, S3HierarchyFilter
for w in filter_widgets:
if isinstance(w, S3HierarchyFilter) and \
w.field == "organisation_id":
filter_widget = w
break
if filter_widget is not None:
selector = FS("organisation_organisation_type.organisation_type_id")
filter_widget.opts["filter"] = (selector == type_id)
# Label
if branches:
f.label = T("National Society / Branch")
else:
f.label = T("National Society")
# Requires
# Filter by type
ltable = db.org_organisation_organisation_type
rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
filter_opts = [row.organisation_id for row in rows]
auth = current.auth
s3_has_role = auth.s3_has_role
Admin = s3_has_role("ADMIN")
if branches:
if Admin:
parent = True
else:
# @ToDo: Set the represent according to whether the user can see resources of just a single NS or multiple
# @ToDo: Consider porting this into core
user = auth.user
if user:
realms = user.realms
#delegations = user.delegations
if realms:
parent = True
else:
parent = False
else:
parent = True
else:
# Keep the represent function as simple as possible
parent = False
# Exclude branches
btable = s3db.org_organisation_branch
rows = db((btable.deleted != True) &
(btable.branch_id.belongs(filter_opts))).select(btable.branch_id)
filter_opts = list(set(filter_opts) - set(row.branch_id for row in rows))
organisation_represent = s3db.org_OrganisationRepresent
represent = organisation_represent(parent=parent)
f.represent = represent
from s3 import IS_ONE_OF
requires = IS_ONE_OF(db, "org_organisation.id",
represent,
filterby = "id",
filter_opts = filter_opts,
updateable = updateable,
orderby = "org_organisation.name",
sort = True)
if not required:
from gluon import IS_EMPTY_OR
requires = IS_EMPTY_OR(requires)
f.requires = requires
if parent and hierarchy:
# Use hierarchy-widget
from s3 import FS, S3HierarchyWidget
# No need for parent in represent (it's a hierarchy view)
node_represent = organisation_represent(parent=False)
# Filter by type
# (no need to exclude branches - we wouldn't be here if we didn't use branches)
selector = FS("organisation_organisation_type.organisation_type_id")
f.widget = S3HierarchyWidget(lookup="org_organisation",
filter=(selector == type_id),
represent=node_represent,
multiple=False,
leafonly=False,
)
else:
# Dropdown not Autocomplete
f.widget = None
# Comment
if (Admin or s3_has_role("ORG_ADMIN")):
# Need to do import after setting Theme
from s3layouts import S3PopupLink
from s3 import S3ScriptItem
add_link = S3PopupLink(c = "org",
f = "organisation",
vars = {"organisation_type.name":"Red Cross / Red Crescent"},
label = T("Create National Society"),
title = T("National Society"),
)
comment = f.comment
if not comment or isinstance(comment, S3PopupLink):
f.comment = add_link
elif isinstance(comment[1], S3ScriptItem):
# Don't overwrite scripts
f.comment[0] = add_link
else:
f.comment = add_link
else:
# Not allowed to add NS/Branch
f.comment = ""
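    # Illustrative call, mirroring the customise_* functions below:
    #   ns_only("asset_asset", required=True, branches=True)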
# -----------------------------------------------------------------------------
def user_org_default_filter(selector, tablename=None):
"""
Default filter for organisation_id:
* Use the user's organisation if logged-in and associated with an
organisation.
"""
auth = current.auth
user_org_id = auth.is_logged_in() and auth.user.organisation_id
if user_org_id:
return user_org_id
else:
# no default
return {}
# -----------------------------------------------------------------------------
def user_org_and_children_default_filter(selector, tablename=None):
"""
Default filter for organisation_id:
            * Use the user's organisation and all its descendant branches if
              logged-in and associated with an organisation.
"""
auth = current.auth
user_org_id = auth.is_logged_in() and auth.user.organisation_id
if user_org_id:
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
org = db(otable.id == user_org_id).select(otable.pe_id,
limitby=(0, 1)
).first()
if org:
pe_id = org.pe_id
pe_ids = s3db.pr_get_descendants((pe_id,),
entity_types=("org_organisation",))
rows = db(otable.pe_id.belongs(pe_ids)).select(otable.id)
ids = [row.id for row in rows]
ids.append(user_org_id)
return ids
else:
return user_org_id
else:
# no default
return {}
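    # (both default-filter functions are wired up via s3_set_default_filter
    #  inside the customise_* controllers below, e.g. for "asset_asset")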
# -----------------------------------------------------------------------------
# Org-dependent settings
# => lazy settings because they require user authentication and we want them to
# work across multiple controllers (inc menus) without too much code
#
def auth_realm_entity_types(default):
""" Which entity types to use as realm entities in role manager """
auth = current.auth
        if auth.s3_has_role(auth.get_system_roles().ADMIN) or \
           auth.root_org_name() == NZRC:
return list(default) + ["po_area"]
return default
settings.auth.realm_entity_types = auth_realm_entity_types
def hide_third_gender(default):
""" Whether to hide the third person gender """
root_org = current.auth.root_org_name()
if root_org == NRCS:
return False
return True
settings.pr.hide_third_gender = hide_third_gender
def location_filter_bulk_select_option(default):
""" Whether to show a bulk select option in location filters """
root_org = current.auth.root_org_name()
if root_org == VNRC:
return True
return default
settings.ui.location_filter_bulk_select_option = location_filter_bulk_select_option
def mandatory_last_name(default):
""" Whether the Last Name is Mandatory """
root_org = current.auth.root_org_name()
if root_org in (ARCS, IRCS, CRMADA):
return False
return True
settings.L10n.mandatory_lastname = mandatory_last_name
def hrm_use_certificates(default):
""" Whether to use Certificates """
root_org = current.auth.root_org_name()
if root_org == IRCS:
if current.request.controller == "vol":
return True
else:
return False
elif root_org == VNRC:
return False
return True
settings.hrm.use_certificates = hrm_use_certificates
def hrm_use_code(default):
""" Whether to use Staff-ID/Volunteer-ID """
root_org = current.auth.root_org_name()
if root_org in (ARCS, IRCS):
return True # use for both staff and volunteers
return False
settings.hrm.use_code = hrm_use_code
def hrm_use_skills(default):
""" Whether to use Skills """
root_org = current.auth.root_org_name()
if root_org in (ARCS, CRMADA, IRCS, PMI, VNRC):
return True
return False
settings.hrm.use_skills = hrm_use_skills
def hrm_teams(default):
""" Whether to use Teams """
if current.request.controller == "vol":
root_org = current.auth.root_org_name()
if root_org == IRCS:
return False
return default
settings.hrm.teams = hrm_teams
def hrm_teams_orgs(default):
""" Whether Teams should link to 1 or more Orgs """
root_org = current.auth.root_org_name()
if root_org == VNRC:
# Multiple Orgs
return 2
# Single Org
return default
settings.hrm.teams_orgs = hrm_teams_orgs
def hrm_trainings_external(default):
""" Whether Training Courses should be split into Internal & External """
root_org = current.auth.root_org_name()
if root_org == CRMADA:
return True
return False
settings.hrm.trainings_external = hrm_trainings_external
def hrm_vol_active(default):
""" Whether & How to track Volunteers as Active """
root_org = current.auth.root_org_name()
if root_org in (ARCS, IRCS):
# Simple checkbox
return True
elif root_org in (CVTL, PMI, PRC):
# Use formula based on hrm_programme
return vol_programme_active
elif root_org in (CRMADA, ):
# Use formula based on vol_activity
return vol_activity_active
return False
settings.hrm.vol_active = hrm_vol_active
def pr_person_availability_options(default):
root_org = current.auth.root_org_name()
if root_org == VNRC:
# Doesn't seem used anyway...perhaps a bug in hrm_Record?
return {1: T("No Restrictions"),
2: T("Weekends only"),
3: T("School Holidays only"),
}
elif root_org == CRMADA:
return {1: "%s, %s" % (T("Frequent"), T("1-2 times per week")),
2: "%s, %s" % (T("Sometimes"), T("When needed")),
3: "%s, %s" % (T("Projects"), T("1-3 times per month")),
4: T("Once per month"),
5: T("Exceptional Cases"),
6: T("Other"),
}
# Default to Off
return None
settings.pr.person_availability_options = pr_person_availability_options
def hrm_vol_availability_tab(default):
""" Whether to show Volunteer Availability Tab """
root_org = current.auth.root_org_name()
if root_org == CRMADA:
return True
# Default to Off
return None
settings.hrm.vol_availability_tab = hrm_vol_availability_tab
def hrm_vol_departments(default):
""" Whether to use Volunteer Departments """
root_org = current.auth.root_org_name()
if root_org == IRCS:
return True
return False
settings.hrm.vol_departments = hrm_vol_departments
def hrm_vol_experience(default):
""" What type(s) of experience to use for Volunteers """
root_org = current.auth.root_org_name()
if root_org in (IRCS, PMI, VNRC):
return "both"
elif root_org == CRMADA:
return "activity"
return default
settings.hrm.vol_experience = hrm_vol_experience
def hrm_vol_roles(default):
""" Whether to use Volunteer Roles """
root_org = current.auth.root_org_name()
if root_org in (IRCS, VNRC):
return False
return True
settings.hrm.vol_roles = hrm_vol_roles
def pr_name_format(default):
""" Format to use to expand peoples' names """
root_org = current.auth.root_org_name()
if root_org == VNRC:
return "%(last_name)s %(middle_name)s %(first_name)s"
#elif root_org == CRMADA:
# return "%(last_name)s %(first_name)s %(middle_name)s"
return default
settings.pr.name_format = pr_name_format
def pr_request_father_name(default):
""" Whether to request Father's Name in AddPersonWidget2 """
root_org = current.auth.root_org_name()
if root_org in (ARCS, BRCS, IRCS):
return True
return False
settings.pr.request_father_name = pr_request_father_name
def pr_request_grandfather_name(default):
""" Whether to request GrandFather's Name in AddPersonWidget2 """
root_org = current.auth.root_org_name()
if root_org in (ARCS, BRCS, IRCS):
return True
return False
settings.pr.request_grandfather_name = pr_request_grandfather_name
def training_instructors(default):
""" Whether to track internal/external training instructors """
root_org = current.auth.root_org_name()
if root_org == NRCS:
return "both"
return default
settings.hrm.training_instructors = training_instructors
#def ui_autocomplete_delay(default):
# """ Delay in milliseconds before autocomplete starts searching """
# root_org = current.auth.root_org_name()
# if root_org == ARCS:
# return 800
# return default
#settings.ui.autocomplete_delay = ui_autocomplete_delay
def l10n_calendar(default):
""" Which calendar to use """
root_org = current.auth.root_org_name()
if root_org == ARCS:
return "Afghan"
return default
settings.L10n.calendar = l10n_calendar
# -------------------------------------------------------------------------
def customise_asset_asset_controller(**attr):
tablename = "asset_asset"
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_and_children_default_filter,
tablename = tablename)
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Organisation needs to be an NS/Branch
ns_only(tablename,
required = True,
branches = True,
limit_filter_opts = True,
)
# Set the NS filter as Visible so that the default filter works
filter_widgets = current.s3db.get_config(tablename, "filter_widgets")
for widget in filter_widgets:
if widget.field == "organisation_id":
widget.opts.hidden = False
break
return result
s3.prep = custom_prep
return attr
settings.customise_asset_asset_controller = customise_asset_asset_controller
# -----------------------------------------------------------------------------
def customise_asset_asset_resource(r, tablename):
# Load standard model
s3db = current.s3db
table = s3db.asset_asset
# Custom CRUD Form to allow ad-hoc Kits & link to Teams
from s3 import S3SQLCustomForm, S3SQLInlineComponent
table.kit.readable = table.kit.writable = True
crud_form = S3SQLCustomForm("number",
"type",
"item_id",
"organisation_id",
"site_id",
"kit",
# If not ad-hoc Kit
"sn",
"supply_org_id",
"purchase_date",
"purchase_price",
"purchase_currency",
# If ad-hoc Kit
S3SQLInlineComponent(
"item",
label = T("Items"),
fields = ("item_id",
"quantity",
"sn",
# These are too wide for the screen & hence hide the AddResourceLinks
#"supply_org_id",
#"purchase_date",
#"purchase_price",
#"purchase_currency",
"comments",
),
),
S3SQLInlineComponent(
"group",
label = T("Team"),
fields = [("", "group_id")],
filterby = dict(field = "group_type",
options = 3
),
multiple = False,
),
"comments",
)
from s3 import S3OptionsFilter
filter_widgets = s3db.get_config(tablename, "filter_widgets")
filter_widgets.insert(-2, S3OptionsFilter("group.group_id",
label = T("Team"),
represent = "%(name)s",
hidden = True,
))
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_asset_asset_resource = customise_asset_asset_resource
# -----------------------------------------------------------------------------
def customise_auth_user_controller(**attr):
"""
Customise admin/user() and default/user() controllers
"""
#if "arg" in attr and attr["arg"] == "register":
# Organisation needs to be an NS/Branch
ns_only("auth_user",
required = True,
branches = True,
updateable = False, # Need to see all Orgs in Registration screens
)
# Different settings for different NS
# Not possible for registration form, so fake with language!
root_org = current.auth.root_org_name()
if root_org == VNRC or current.session.s3.language == "vi":
# Too late to do via settings
#settings.org.site_label = "Office/Center"
current.db.auth_user.site_id.label = T("Office/Center")
return attr
settings.customise_auth_user_controller = customise_auth_user_controller
# -----------------------------------------------------------------------------
def customise_deploy_alert_resource(r, tablename):
current.s3db.deploy_alert_recipient.human_resource_id.label = T("Member")
settings.customise_deploy_alert_resource = customise_deploy_alert_resource
# -----------------------------------------------------------------------------
def customise_deploy_application_resource(r, tablename):
r.table.human_resource_id.label = T("Member")
settings.customise_deploy_application_resource = customise_deploy_application_resource
# -----------------------------------------------------------------------------
def _customise_assignment_fields(**attr):
MEMBER = T("Member")
from gluon.html import DIV
hr_comment = \
DIV(_class="tooltip",
_title="%s|%s" % (MEMBER,
current.messages.AUTOCOMPLETE_HELP))
from s3 import IS_ONE_OF
s3db = current.s3db
atable = s3db.deploy_assignment
atable.human_resource_id.label = MEMBER
atable.human_resource_id.comment = hr_comment
field = atable.job_title_id
field.comment = None
field.label = T("Sector")
field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
field.represent,
filterby = "type",
filter_opts = (4,),
)
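        # (type 4 appears to be the deployment/RDRT job title type,
        #  cf. the "Sector" label above)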
# Default activity_type when creating experience records from assignments
activity_type = s3db.hrm_experience.activity_type
activity_type.default = activity_type.update = "rdrt"
return
# -----------------------------------------------------------------------------
def customise_deploy_assignment_controller(**attr):
s3db = current.s3db
table = s3db.deploy_assignment
# Labels
table.job_title_id.label = T("RDRT Type")
table.start_date.label = T("Deployment Date")
#table.end_date.label = T("EOM")
# List fields
list_fields = [(T("Mission"), "mission_id$name"),
(T("Appeal Code"), "mission_id$code"),
(T("Country"), "mission_id$location_id"),
(T("Disaster Type"), "mission_id$event_type_id"),
# @todo: replace by date of first alert?
(T("Date"), "mission_id$created_on"),
"job_title_id",
(T("Member"), "human_resource_id$person_id"),
(T("Deploying NS"), "human_resource_id$organisation_id"),
"start_date",
"end_date",
"appraisal.rating",
# @todo: Comments of the mission (=>XLS only)
]
# Report options
report_fact = [(T("Number of Deployments"), "count(human_resource_id)"),
(T("Average Rating"), "avg(appraisal.rating)"),
]
report_axis = [(T("Appeal Code"), "mission_id$code"),
(T("Country"), "mission_id$location_id"),
(T("Disaster Type"), "mission_id$event_type_id"),
(T("RDRT Type"), "job_title_id"),
(T("Deploying NS"), "human_resource_id$organisation_id"),
]
report_options = Storage(
rows=report_axis,
cols=report_axis,
fact=report_fact,
defaults=Storage(rows="mission_id$location_id",
cols="mission_id$event_type_id",
fact="count(human_resource_id)",
totals=True
)
)
s3db.configure("deploy_assignment",
list_fields = list_fields,
report_options = report_options,
)
# CRUD Strings
current.response.s3.crud_strings["deploy_assignment"] = Storage(
label_create = T("Add Deployment"),
title_display = T("Deployment Details"),
title_list = T("Deployments"),
title_update = T("Edit Deployment Details"),
title_upload = T("Import Deployments"),
label_list_button = T("List Deployments"),
label_delete_button = T("Delete Deployment"),
msg_record_created = T("Deployment added"),
msg_record_modified = T("Deployment Details updated"),
msg_record_deleted = T("Deployment deleted"),
msg_list_empty = T("No Deployments currently registered"))
_customise_assignment_fields()
# Restrict Location to just Countries
from s3 import S3Represent
field = s3db.deploy_mission.location_id
field.represent = S3Represent(lookup="gis_location", translate=True)
return attr
settings.customise_deploy_assignment_controller = customise_deploy_assignment_controller
# -----------------------------------------------------------------------------
def customise_deploy_mission_controller(**attr):
db = current.db
s3db = current.s3db
s3 = current.response.s3
MEMBER = T("Member")
from gluon.html import DIV
hr_comment = \
DIV(_class="tooltip",
_title="%s|%s" % (MEMBER,
current.messages.AUTOCOMPLETE_HELP))
table = s3db.deploy_mission
table.code.label = T("Appeal Code")
table.event_type_id.label = T("Disaster Type")
table.organisation_id.readable = table.organisation_id.writable = False
# Restrict Location to just Countries
from s3 import S3Represent, S3MultiSelectWidget
field = table.location_id
field.label = current.messages.COUNTRY
field.requires = s3db.gis_country_requires
field.widget = S3MultiSelectWidget(multiple=False)
field.represent = S3Represent(lookup="gis_location", translate=True)
rtable = s3db.deploy_response
rtable.human_resource_id.label = MEMBER
rtable.human_resource_id.comment = hr_comment
_customise_assignment_fields()
# Report options
report_fact = [(T("Number of Missions"), "count(id)"),
(T("Number of Countries"), "count(location_id)"),
(T("Number of Disaster Types"), "count(event_type_id)"),
(T("Number of Responses"), "sum(response_count)"),
(T("Number of Deployments"), "sum(hrquantity)"),
]
report_axis = ["code",
"location_id",
"event_type_id",
"status",
]
report_options = Storage(rows = report_axis,
cols = report_axis,
fact = report_fact,
defaults = Storage(rows = "location_id",
cols = "event_type_id",
fact = "sum(hrquantity)",
totals = True,
),
)
s3db.configure("deploy_mission",
report_options = report_options,
)
# CRUD Strings
s3.crud_strings["deploy_assignment"] = Storage(
label_create = T("New Deployment"),
title_display = T("Deployment Details"),
title_list = T("Deployments"),
title_update = T("Edit Deployment Details"),
title_upload = T("Import Deployments"),
label_list_button = T("List Deployments"),
label_delete_button = T("Delete Deployment"),
msg_record_created = T("Deployment added"),
msg_record_modified = T("Deployment Details updated"),
msg_record_deleted = T("Deployment deleted"),
msg_list_empty = T("No Deployments currently registered"))
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive and not current.auth.s3_has_role("RDRT_ADMIN"):
# Limit write-access to these fields to RDRT Admins:
fields = ("name",
"event_type_id",
"location_id",
"code",
"status",
)
table = r.resource.table
for f in fields:
if f in table:
table[f].writable = False
if not r.component and r.method == "create":
# Org is always IFRC
otable = s3db.org_organisation
query = (otable.name == "International Federation of Red Cross and Red Crescent Societies")
organisation = db(query).select(otable.id,
limitby = (0, 1),
).first()
if organisation:
r.table.organisation_id.default = organisation.id
return result
s3.prep = custom_prep
return attr
settings.customise_deploy_mission_controller = customise_deploy_mission_controller
# -----------------------------------------------------------------------------
def poi_marker_fn(record):
"""
Function to decide which Marker to use for PoI KML export
"""
db = current.db
table = db.gis_poi_type
ptype = db(table.id == record.poi_type_id).select(table.name,
limitby=(0, 1)
).first()
if ptype:
marker = ptype.name.lower().replace(" ", "_")\
.replace("_cccm", "_CCCM")\
.replace("_nfi_", "_NFI_")\
.replace("_ngo_", "_NGO_")\
.replace("_wash", "_WASH")
marker = "OCHA/%s_40px.png" % marker
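            # e.g. a PoI type named "Water Source" yields
            # "OCHA/water_source_40px.png"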
else:
# Fallback
marker = "marker_red.png"
return Storage(image = marker)
# -----------------------------------------------------------------------------
def customise_gis_poi_resource(r, tablename):
if r.representation == "kml":
# Custom Marker function
current.s3db.configure("gis_poi",
marker_fn = poi_marker_fn,
)
settings.customise_gis_poi_resource = customise_gis_poi_resource
# -----------------------------------------------------------------------------
def customise_hrm_certificate_controller(**attr):
# Organisation needs to be an NS
ns_only("hrm_certificate",
required = False,
branches = False,
)
return attr
settings.customise_hrm_certificate_controller = customise_hrm_certificate_controller
# -----------------------------------------------------------------------------
def customise_hrm_course_controller(**attr):
tablename = "hrm_course"
# Organisation needs to be an NS
ns_only(tablename,
required = False,
branches = False,
)
# Different settings for different NS
root_org = current.auth.root_org_name()
if root_org == VNRC:
# Keep things simple
return attr
# Load standard model
s3db = current.s3db
table = s3db.hrm_course
list_fields = ["code",
"name",
]
ADMIN = current.session.s3.system_roles.ADMIN
if current.auth.s3_has_role(ADMIN):
list_fields.append("organisation_id")
if settings.get_hrm_trainings_external():
list_fields.append("external")
list_fields.append((T("Sectors"), "course_sector.sector_id"))
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm("code",
"name",
"external",
"organisation_id",
S3SQLInlineLink("sector",
field = "sector_id",
label = T("Sectors"),
),
)
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
return attr
settings.customise_hrm_course_controller = customise_hrm_course_controller
# -----------------------------------------------------------------------------
def customise_hrm_credential_controller(**attr):
# Currently just used by RDRT
table = current.s3db.hrm_credential
field = table.job_title_id
field.comment = None
field.label = T("Sector")
from s3 import IS_ONE_OF
field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
field.represent,
filterby = "type",
filter_opts = (4,),
)
table.organisation_id.readable = table.organisation_id.writable = False
table.performance_rating.readable = table.performance_rating.writable = False
table.start_date.readable = table.start_date.writable = False
table.end_date.readable = table.end_date.writable = False
return attr
settings.customise_hrm_credential_controller = customise_hrm_credential_controller
# -----------------------------------------------------------------------------
def customise_hrm_department_controller(**attr):
# Organisation needs to be an NS
ns_only("hrm_department",
required = False,
branches = False,
)
return attr
settings.customise_hrm_department_controller = customise_hrm_department_controller
# -----------------------------------------------------------------------------
def customise_hrm_experience_controller(**attr):
s3 = current.response.s3
root_org = current.auth.root_org_name()
vnrc = False
if root_org == VNRC:
vnrc = True
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
if not standard_prep(r):
return False
if vnrc:
department_id = r.table.department_id
department_id.readable = department_id.writable = True
if r.controller == "deploy":
# Popups in RDRT Member Profile
table = r.table
job_title_id = table.job_title_id
job_title_id.label = T("Sector / Area of Expertise")
job_title_id.comment = None
jtable = current.s3db.hrm_job_title
query = (jtable.type == 4)
if r.method == "update" and r.record.job_title_id:
# Allow to keep the current value
query |= (jtable.id == r.record.job_title_id)
from s3 import IS_ONE_OF
job_title_id.requires = IS_ONE_OF(current.db(query),
"hrm_job_title.id",
job_title_id.represent,
)
job_title = table.job_title
job_title.readable = job_title.writable = True
return True
s3.prep = custom_prep
return attr
settings.customise_hrm_experience_controller = customise_hrm_experience_controller
# -----------------------------------------------------------------------------
def rdrt_member_profile_header(r):
""" Custom profile header to allow update of RDRT roster status """
record = r.record
if not record:
return ""
person_id = record.person_id
from s3 import s3_fullname, s3_avatar_represent
name = s3_fullname(person_id)
table = r.table
# Organisation
comments = table.organisation_id.represent(record.organisation_id)
from s3 import s3_unicode
from gluon.html import A, DIV, H2, LABEL, P, SPAN
# Add job title if present
job_title_id = record.job_title_id
if job_title_id:
comments = (SPAN("%s, " % \
s3_unicode(table.job_title_id.represent(job_title_id))),
comments)
# Determine the current roster membership status (active/inactive)
atable = current.s3db.deploy_application
status = atable.active
query = atable.human_resource_id == r.id
row = current.db(query).select(atable.id,
atable.active,
limitby=(0, 1)).first()
if row:
active = 1 if row.active else 0
status_id = row.id
roster_status = status.represent(row.active)
else:
active = None
status_id = None
roster_status = current.messages.UNKNOWN_OPT
if status_id and \
current.auth.s3_has_permission("update",
"deploy_application",
record_id=status_id):
# Make inline-editable
roster_status = A(roster_status,
data = {"status": active},
_id = "rdrt-roster-status",
_title = T("Click to edit"),
)
s3 = current.response.s3
script = "/%s/static/themes/IFRC/js/rdrt.js" % r.application
if script not in s3.scripts:
s3.scripts.append(script)
script = '''$.rdrtStatus('%(url)s','%(active)s','%(inactive)s','%(submit)s')'''
from gluon import URL
options = {"url": URL(c="deploy", f="application",
args=["%s.s3json" % status_id]),
"active": status.represent(True),
"inactive": status.represent(False),
"submit": T("Save"),
}
s3.jquery_ready.append(script % options)
else:
# Read-only
roster_status = SPAN(roster_status)
# Render profile header
return DIV(A(s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object",
),
_class="pull-left",
),
H2(name),
P(comments),
DIV(LABEL(status.label + ": "), roster_status),
_class="profile-header",
)
# -----------------------------------------------------------------------------
def emergency_contact_represent(row):
"""
Representation of Emergency Contacts (S3Represent label renderer)
@param row: the row
"""
items = [row["pr_contact_emergency.name"]]
relationship = row["pr_contact_emergency.relationship"]
if relationship:
items.append(" (%s)" % relationship)
phone_number = row["pr_contact_emergency.phone"]
if phone_number:
items.append(": %s" % phone_number)
return "".join(items)
# -----------------------------------------------------------------------------
def customise_vol_activity_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("vol_activity",
required = False,
branches = True,
)
return attr
settings.customise_vol_activity_controller = customise_vol_activity_controller
# -----------------------------------------------------------------------------
def customise_vol_volunteer_award_resource(r, tablename):
root_org = current.auth.root_org_name()
if root_org == IRCS:
table = current.s3db.vol_volunteer_award
table.award_id.label = T("Recommendation Letter Type")
table.award_id.comment = None
table.number.readable = table.number.writable = True
table.file.readable = table.file.writable = True
current.response.s3.crud_strings["vol_volunteer_award"] = Storage(
label_create = T("Add Recommendation Letter"),
title_display = T("Recommendation Letter Details"),
title_list = T("Recommendation Letters"),
title_update = T("Edit Recommendation Letter"),
label_list_button = T("List Recommendation Letters"),
label_delete_button = T("Delete Recommendation Letter"),
msg_record_created = T("Recommendation Letter added"),
msg_record_modified = T("Recommendation Letter updated"),
msg_record_deleted = T("Recommendation Letter removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no recommendation letters registered"))
settings.customise_vol_volunteer_award_resource = customise_vol_volunteer_award_resource
# -----------------------------------------------------------------------------
def customise_vol_award_resource(r, tablename):
root_org = current.auth.root_org_name()
if root_org == IRCS:
current.response.s3.crud_strings["vol_award"] = Storage(
label_create = T("Add Recommendation Letter Type"),
title_display = T("Recommendation Letter Type Details"),
title_list = T("Recommendation Letter Types"),
title_update = T("Edit Recommendation Letter Type"),
label_list_button = T("List Recommendation Letter Types"),
label_delete_button = T("Delete Recommendation Letter Type"),
msg_record_created = T("Recommendation Letter Type added"),
msg_record_modified = T("Recommendation Letter Type updated"),
msg_record_deleted = T("Recommendation Letter Type removed"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no recommendation letter types registered"))
settings.customise_vol_award_resource = customise_vol_award_resource
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
controller = r.controller
if controller == "vol":
T = current.T
root_org = current.auth.root_org_name()
if root_org == IRCS:
s3db = current.s3db
table = s3db.hrm_human_resource
table.start_date.label = T("Appointment Date")
def vol_service_record_manager(default):
from s3 import s3_fullname
return s3_fullname(current.auth.s3_logged_in_person())
settings.hrm.vol_service_record_manager = vol_service_record_manager
from s3 import IS_ADD_PERSON_WIDGET2, S3SQLCustomForm, S3SQLInlineComponent
table.person_id.requires = IS_ADD_PERSON_WIDGET2(first_name_only = True)
table.code.label = T("Appointment Number")
phtable = s3db.hrm_programme_hours
phtable.date.label = T("Direct Date")
phtable.contract.label = T("Direct Number")
phtable.contract.readable = phtable.contract.writable = True
crud_form = S3SQLCustomForm("organisation_id",
"person_id",
S3SQLInlineComponent("home_address",
label = T("Address"),
fields = [("", "location_id"),
],
default = {"type": 1}, # Current Home Address
link = False,
multiple = False,
),
"department_id",
"start_date",
"code",
S3SQLInlineComponent("programme_hours",
label = T("Contract"),
fields = ["programme_id",
"date",
(T("End Date"), "end_date"),
"contract",
],
link = False,
multiple = False,
),
S3SQLInlineComponent("education",
label = T("Education"),
fields = [(T("Education Level"), "level_id"),
"institute",
"year",
],
link = False,
multiple = False,
),
"details.active",
)
s3db.configure("hrm_human_resource",
crud_form = crud_form
)
elif root_org == NRCS:
# Expose volunteer_type field with these options:
types = {"PROGRAMME": T("Program Volunteer"),
"GOVERNANCE": T("Governance Volunteer"),
}
field = current.s3db.vol_details.volunteer_type
field.readable = field.writable = True
from gluon.validators import IS_EMPTY_OR, IS_IN_SET
field.requires = IS_EMPTY_OR(IS_IN_SET(types))
from s3 import S3Represent
field.represent = S3Represent(options=types)
elif controller == "hrm":
root_org = current.auth.root_org_name()
if root_org == IRCS:
T = current.T
s3db = current.s3db
table = s3db.hrm_human_resource
table.start_date.label = T("Appointment Date")
# All staff have open-ended contracts
table.end_date.readable = table.end_date.writable = False
from s3 import IS_ADD_PERSON_WIDGET2, S3SQLCustomForm, S3SQLInlineComponent
table.person_id.requires = IS_ADD_PERSON_WIDGET2(first_name_only = True)
table.code.label = T("Appointment Number")
hrm_status_opts = s3db.hrm_status_opts
hrm_status_opts[3] = T("End Service")
            table.status.represent = lambda opt: \
                hrm_status_opts.get(opt, UNKNOWN_OPT)
from gluon.validators import IS_IN_SET
table.status.requires = IS_IN_SET(hrm_status_opts,
zero=None)
ctable = s3db.hrm_contract
ctable.name.label = T("Direct Number")
ctable.date.label = T("Direct Date")
crud_fields = ["organisation_id",
"site_id",
"person_id",
"job_title_id",
"department_id",
"start_date",
"code",
S3SQLInlineComponent("contract",
label=T("Contract"),
fields=["name",
"date"
],
multiple=True,
),
"comments",
]
method = r.method
            if method and method in ("record", "update"):
crud_fields.append("status")
s3db.configure("hrm_human_resource",
crud_form = S3SQLCustomForm(*crud_fields),
)
settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
s3db = current.s3db
# Special cases for different NS
arcs = False
vnrc = False
root_org = current.auth.root_org_name()
controller = current.request.controller
if controller != "deploy" and root_org != CRMADA: # CRMADA have too many branches which causes issues
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_and_children_default_filter,
tablename = "hrm_human_resource")
if root_org == VNRC:
vnrc = True
# @ToDo: Make this use the same lookup as in ns_only to check if user can see HRs from multiple NS
settings.org.regions = False
s3db.hrm_human_resource.site_id.represent = s3db.org_SiteRepresent(show_type = False)
if controller == "vol":
if root_org == ARCS:
arcs = True
settings.pr.request_email = False
settings.pr.request_year_of_birth = True
#elif vnrc:
# settings.org.site_label = "Office/Center"
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
# Organisation needs to be an NS/Branch
ns_only("hrm_human_resource",
required = True,
branches = True,
limit_filter_opts = True,
)
table = s3db.hrm_human_resource
if arcs:
field = s3db.vol_details.card
field.readable = field.writable = True
elif vnrc:
field = table.job_title_id
field.readable = field.writable = False
if not vnrc:
from s3 import S3OptionsFilter
filter_widgets = s3db.get_config("hrm_human_resource",
"filter_widgets")
filter_widgets.insert(-1, S3OptionsFilter("training.course_id$course_sector.sector_id",
label = T("Training Sector"),
hidden = True,
))
resource = r.resource
get_config = resource.get_config
if controller == "vol":
if root_org == ARCS:
from s3 import IS_ADD_PERSON_WIDGET2, S3SQLCustomForm, S3SQLInlineComponent
table.person_id.requires = IS_ADD_PERSON_WIDGET2(first_name_only = True)
table.code.label = T("Volunteer ID")
# Emergency Contact Name isn't required
s3db.pr_contact_emergency.name.requires = None
crud_form = S3SQLCustomForm("organisation_id",
"code",
"person_id",
S3SQLInlineComponent("contact_emergency",
label = T("Emergency Contact Number"),
fields = [("", "phone"),
],
link = False,
multiple = False,
),
S3SQLInlineComponent("home_address",
label = T("Address"),
fields = [("", "location_id"),
],
default = {"type": 1}, # Current Home Address
link = False,
multiple = False,
),
S3SQLInlineComponent("education",
label = T("Education"),
fields = [(T("Education Level"), "level_id"),
"institute",
],
link = False,
multiple = False,
),
"job_title_id",
"start_date",
"details.active",
(T("Remarks"), "comments"),
)
s3db.configure("hrm_human_resource",
crud_form = crud_form,
)
elif root_org == CRMADA:
# Add Activity Type & Tweak Order
list_fields = ["person_id",
"organisation_id",
"job_title_id",
(settings.get_ui_label_mobile_phone(), "phone.value"),
(T("Trainings"), "training.course_id"),
(T("Activity Types"), "person_id$activity_hours.activity_hours_activity_type.activity_type_id"),
(T("Activities"), "person_id$activity_hours.activity_id"),
(T("Certificates"), "person_id$certification.certificate_id"),
(T("Email"), "email.value"),
"location_id",
"details.active",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
elif root_org == IRCS:
list_fields = ["person_id",
"details.active",
"code",
"start_date",
"programme_hours.contract",
"programme_hours.date",
"programme_hours.programme_id",
(T("Training"), "training.course_id"),
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
elif root_org == NRCS:
pos = 6
# Add volunteer type to list_fields
list_fields = get_config("list_fields")
list_fields.insert(pos, "details.volunteer_type")
# Add volunteer type to report options
report_options = get_config("report_options")
if "details.volunteer_type" not in report_options["rows"]:
report_options["rows"].insert(pos, "details.volunteer_type")
if "details.volunteer_type" not in report_options["cols"]:
report_options["cols"].insert(pos, "details.volunteer_type")
# Add filter widget for volunteer type
filter_widgets = s3db.get_config("hrm_human_resource", "filter_widgets")
filter_widgets.insert(-1, S3OptionsFilter("details.volunteer_type",
hidden = True,
))
elif root_org == VNRC:
# Add extra list_fields
list_fields = get_config("list_fields")
list_fields += [(T("ID Number"), "person_id$identity.value"),
(T("Province"), "location_id$L1"),
(T("District"), "location_id$L2"),
(T("Commune"), "location_id$L3"),
]
elif controller == "hrm":
if root_org == IRCS:
list_fields = ["person_id",
"code",
"start_date",
"contract.name",
"contract.date",
"job_title_id",
"department_id",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
elif controller == "deploy":
# Custom setting for RDRT
# Custom profile widgets for hrm_competency ("skills"):
from s3 import FS
subsets = (("Computer", "Computer Skills", "Add Computer Skills"),
("Language", "Language Skills", "Add Language Skills"),
)
widgets = []
append_widget = widgets.append
profile_widgets = get_config("profile_widgets")
contacts_filter = None
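            # Rebuild the profile widget list: split the generic skills widget
            # into one widget per skill type, route "add experience" through
            # the deploy controller, and remember the contacts filter for the
            # emergency-contacts widget added below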
while profile_widgets:
widget = profile_widgets.pop(0)
if widget["tablename"] == "hrm_competency":
for skill_type, label, label_create in subsets:
query = widget["filter"] & \
(FS("skill_id$skill_type_id$name") == skill_type)
new_widget = dict(widget)
new_widget["label"] = label
new_widget["label_create"] = label_create
new_widget["filter"] = query
append_widget(new_widget)
elif widget["tablename"] == "hrm_experience":
new_widget = dict(widget)
new_widget["create_controller"] = "deploy"
append_widget(new_widget)
else:
append_widget(widget)
if widget["tablename"] == "pr_contact":
contacts_filter = widget["filter"]
# Emergency contacts
if contacts_filter is not None:
emergency_widget = {"label": "Emergency Contacts",
"label_create": "Add Emergency Contact",
"tablename": "pr_contact_emergency",
"type": "datalist",
"filter": contacts_filter,
"icon": "phone",
}
append_widget(emergency_widget)
if r.record:
widgets.insert(0, {"label": "Personal Details",
"tablename": "pr_person",
"type": "datalist",
"insert": False,
"list_fields": ["first_name",
"middle_name",
"last_name",
"date_of_birth",
"gender",
"person_details.nationality",
"physical_description.blood_type",
],
"filter": FS("id") == r.record.person_id,
"icon": "user",
})
            # Remove unneeded filter widgets
filters = []
append_widget = filters.append
filter_widgets = get_config("filter_widgets")
while filter_widgets:
widget = filter_widgets.pop(0)
if widget.field not in ("location_id",
"site_id",
#"group_membership.group_id",
):
append_widget(widget)
from s3 import S3OptionsFilter
# Add gender-filter
gender_opts = dict(s3db.pr_gender_opts)
del gender_opts[1]
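            # NB option 1 is assumed to be the "unknown" choice in
            # pr_gender_opts, hence it is excluded from the filter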
append_widget(S3OptionsFilter("person_id$gender",
options = gender_opts,
cols = 3,
hidden = True,
))
# Add Roster status filter
append_widget(S3OptionsFilter("application.active",
cols = 2,
default = True,
# Don't hide otherwise default
# doesn't apply:
#hidden = False,
label = T("Status"),
options = {"True": T("active"),
"False": T("inactive"),
},
))
# Representation of emergency contacts
from s3 import S3Represent
field = s3db.pr_contact_emergency.id
field.represent = S3Represent(lookup="pr_contact_emergency",
fields=("name", "relationship", "phone"),
labels=emergency_contact_represent,
)
# Custom list fields for RDRT
phone_label = settings.get_ui_label_mobile_phone()
s3db.org_organisation.root_organisation.label = T("National Society")
list_fields = ["person_id",
(T("Sectors"), "credential.job_title_id"),
# @todo: Languages?
# @todo: Skills?
(T("Trainings"), "training.course_id"),
"organisation_id$root_organisation",
"type",
"job_title_id",
# @todo: Education?
(T("Status"), "application.active"),
(T("Email"), "email.value"),
(phone_label, "phone.value"),
(T("Address"), "person_id$address.location_id"),
"person_id$date_of_birth",
"person_id$gender",
"person_id$person_details.nationality",
(T("Passport Number"), "person_id$passport.value"),
(T("Passport Issuer"), "person_id$passport.ia_name"),
(T("Passport Date"), "person_id$passport.valid_from"),
(T("Passport Expires"), "person_id$passport.valid_until"),
(T("Emergency Contacts"), "person_id$contact_emergency.id"),
"person_id$physical_description.blood_type",
]
resource.configure(filter_widgets = filters,
list_fields = list_fields,
profile_widgets = widgets,
profile_header = rdrt_member_profile_header,
)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if isinstance(output, dict):
if controller == "deploy" and \
"title" in output:
output["title"] = T("RDRT Members")
elif vnrc and \
r.method != "report" and \
"form" in output and \
(controller == "vol" or \
r.component_name == "human_resource"):
# Remove the injected Programme field
del output["form"][0].components[4]
del output["form"][0].components[4]
return output
s3.postp = custom_postp
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
table = current.s3db.hrm_job_title
controller = current.request.controller
if controller == "deploy":
# Filter to just deployables
s3.filter = (table.type == 4)
else:
# Organisation needs to be an NS
ns_only("hrm_job_title",
required = False,
branches = False,
)
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if controller == "deploy":
field = table.type
field.default = 4
field.readable = field.writable = False
table.organisation_id.readable = False
table.organisation_id.writable = False
#help = T("If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.")
s3.crud_strings["hrm_job_title"] = Storage(
label_create=T("Create Sector"),
title_display=T("Sector Details"),
title_list=T("Sectors"),
title_update=T("Edit Sector"),
label_list_button=T("List Sectors"),
label_delete_button=T("Delete Sector"),
msg_record_created=T("Sector added"),
msg_record_modified=T("Sector updated"),
msg_record_deleted=T("Sector deleted"),
msg_list_empty=T("No Sectors currently registered"))
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_hrm_programme_controller(**attr):
# Organisation needs to be an NS
ns_only("hrm_programme",
required = False,
branches = False,
)
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org in (CVTL, PMI, PRC):
settings.hrm.vol_active_tooltip = "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year"
elif root_org == IRCS:
table = current.s3db.hrm_programme_hours
table.date.label = T("Direct Date")
table.contract.label = T("Direct Number")
table.contract.readable = table.contract.writable = True
table.hours.readable = table.hours.writable = False
#elif root_org == VNRC:
# @ToDo
# def vn_age_group(age):
# settings.pr.age_group = vn_age_group
return attr
settings.customise_hrm_programme_controller = customise_hrm_programme_controller
# -----------------------------------------------------------------------------
def customise_hrm_programme_hours_controller(**attr):
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.person_id$human_resource.organisation_id",
user_org_default_filter,
tablename = "hrm_programme_hours")
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == IRCS:
table = current.s3db.hrm_programme_hours
table.date.label = T("Direct Date")
table.contract.label = T("Direct Number")
table.contract.readable = table.contract.writable = True
table.hours.readable = table.hours.writable = False
elif root_org == VNRC:
# Remove link to download Template
attr["csv_template"] = "hide"
return attr
settings.customise_hrm_programme_hours_controller = customise_hrm_programme_hours_controller
# -----------------------------------------------------------------------------
def customise_hrm_training_controller(**attr):
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.person_id$human_resource.organisation_id",
user_org_default_filter,
tablename = "hrm_training")
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == VNRC:
# Remove link to download Template
attr["csv_template"] = "hide"
return attr
settings.customise_hrm_training_controller = customise_hrm_training_controller
# -----------------------------------------------------------------------------
def customise_hrm_training_event_controller(**attr):
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == NRCS:
# Don't allow creating of Persons here
from gluon import DIV
T = current.T
current.s3db.hrm_training.person_id.comment = \
DIV(_class="tooltip",
_title="%s|%s" % (T("Participant"),
T("Type the first few characters of one of the Participant's names.")))
elif root_org == VNRC:
# Remove link to download Template
attr["csv_template"] = "hide"
return attr
settings.customise_hrm_training_event_controller = customise_hrm_training_event_controller
# -----------------------------------------------------------------------------
def customise_inv_home():
"""
Homepage for the Inventory module
"""
from gluon import URL
from s3 import s3_redirect_default
# Redirect to Warehouse Summary Page
s3_redirect_default(URL(c="inv", f="warehouse", args="summary"))
settings.customise_inv_home = customise_inv_home
# -----------------------------------------------------------------------------
def customise_inv_inv_item_resource(r, tablename):
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org in (IRCS, AURC, CRMADA):
        # Iraqi, Australian & Malagasy RC use proper Logistics workflow
settings.inv.direct_stock_edits = False
current.s3db.configure("inv_inv_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
settings.customise_inv_inv_item_resource = customise_inv_inv_item_resource
# -----------------------------------------------------------------------------
def customise_inv_send_resource(r, tablename):
current.s3db.configure("inv_send",
list_fields = ["id",
"send_ref",
"req_ref",
#"sender_id",
"site_id",
"date",
"recipient_id",
"delivery_date",
"to_site_id",
"status",
#"driver_name",
#"driver_phone",
#"vehicle_plate_no",
#"time_out",
"comments",
],
)
settings.customise_inv_send_resource = customise_inv_send_resource
# -----------------------------------------------------------------------------
def customise_inv_warehouse_resource(r, tablename):
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org in (ARCS, AURC, CRMADA):
        # Afghan, Australian & Malagasy RC use proper Logistics workflow
settings.inv.direct_stock_edits = False
if root_org != NRCS:
# Only Nepal RC use Warehouse Types
s3db = current.s3db
field = s3db.inv_warehouse.warehouse_type_id
field.readable = field.writable = False
list_fields = s3db.get_config("inv_warehouse", "list_fields")
try:
list_fields.remove("warehouse_type_id")
        except (AttributeError, ValueError):
# Already removed
pass
settings.customise_inv_warehouse_resource = customise_inv_warehouse_resource
# -----------------------------------------------------------------------------
def member_membership_paid(row):
"""
Simplified variant of the original function in s3db/member.py,
with just "paid"/"unpaid"/"exempted" as possible values
"""
T = current.T
if hasattr(row, "member_membership"):
row = row.member_membership
try:
exempted = row.fee_exemption
except AttributeError:
exempted = False
if exempted:
return T("exempted")
try:
start_date = row.start_date
except AttributeError:
start_date = None
try:
paid_date = row.membership_paid
except AttributeError:
paid_date = None
if start_date:
now = current.request.utcnow.date()
if not paid_date:
due = datetime.date(start_date.year + 1,
start_date.month,
start_date.day)
else:
due = datetime.date(paid_date.year,
start_date.month,
start_date.day)
if due < paid_date:
due = datetime.date(paid_date.year + 1, due.month, due.day)
result = T("paid") if now < due else T("unpaid")
else:
result = current.messages["NONE"]
return result
# -----------------------------------------------------------------------------
def customise_member_membership_controller(**attr):
s3db = current.s3db
tablename = "member_membership"
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_and_children_default_filter,
tablename = tablename)
# Special cases for different NS
root_org = current.auth.root_org_name()
nrcs = vnrc = False
if root_org == NRCS:
nrcs = True
s3db.member_membership.membership_paid.label = T("Membership Approved")
elif root_org == VNRC:
vnrc = True
# Remove link to download Template
attr["csv_template"] = "hide"
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Organisation needs to be an NS/Branch
ns_only(tablename,
required = True,
branches = True,
limit_filter_opts = True,
)
# Set the NS filter as Visible so that the default filter works
filter_widgets = s3db.get_config(tablename, "filter_widgets")
for widget in filter_widgets:
if widget.field == "organisation_id":
widget.opts.hidden = False
break
if nrcs:
# Remove the Paid Filter (they use 'Approved' instead)
filter_widgets = r.resource.get_config("filter_widgets")
if filter_widgets:
found = False
index = 0
for filter_widget in filter_widgets:
if filter_widget.field == "paid":
found = True
break
index += 1
if found:
filter_widgets.pop(index)
elif vnrc:
# Modify the Paid Filter
table = r.table
from gluon import Field
table["paid"] = Field.Method("paid", member_membership_paid)
filter_options = {T("paid"): T("paid"),
T("unpaid"): T("unpaid"),
T("exempted"): T("exempted"),
}
filter_widgets = r.resource.get_config("filter_widgets")
if filter_widgets:
for filter_widget in filter_widgets:
if filter_widget.field == "paid":
filter_widget.opts.options = filter_options
break
return result
s3.prep = custom_prep
return attr
settings.customise_member_membership_controller = customise_member_membership_controller
# -----------------------------------------------------------------------------
def customise_member_membership_type_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("member_membership_type",
required = False,
branches = False,
)
return attr
settings.customise_member_membership_type_controller = customise_member_membership_type_controller
# -----------------------------------------------------------------------------
def customise_org_capacity_assessment_controller(**attr):
# Organisation needs to be an NS/Branch
user = current.auth.user
organisation_id = user.organisation_id if user else None
if organisation_id:
from s3 import IS_ONE_OF
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
rows = db(otable.root_organisation == organisation_id).select(otable.id)
filter_opts = [row.id for row in rows if row.id != organisation_id]
f = s3db.org_capacity_assessment.organisation_id
f.label = T("Branch")
f.widget = None
f.requires = IS_ONE_OF(db, "org_organisation.id",
s3db.org_OrganisationRepresent(parent=False, acronym=False),
filterby = "id",
filter_opts = filter_opts,
orderby = "org_organisation.name",
sort = True)
else:
ns_only("org_capacity_assessment",
required = True,
branches = True,
)
return attr
settings.customise_org_capacity_assessment_controller = customise_org_capacity_assessment_controller
# -----------------------------------------------------------------------------
def customise_org_office_resource(r, tablename):
# Organisation needs to be an NS/Branch
ns_only("org_office",
required = True,
branches = True,
limit_filter_opts = True,
)
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == IRCS:
table = current.s3db.org_office
table.code.readable = table.code.writable = False
table.office_type_id.readable = table.office_type_id.writable = False
elif root_org == VNRC:
# Limit office type dropdown to just the VNRC options, not the global ones as well
field = current.s3db.org_office.office_type_id
from gluon import IS_EMPTY_OR
from s3 import IS_ONE_OF
field.requires = IS_EMPTY_OR(
IS_ONE_OF(current.db, "org_office_type.id",
field.represent,
filterby="organisation_id",
filter_opts=(current.auth.root_org(),)
))
settings.customise_org_office_resource = customise_org_office_resource
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
if not r.component or r.component_name == "branch":
resource = r.resource
type_label = T("Type")
if r.get_vars.get("caller") == "org_facility_organisation_id":
# Simplify
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("name",
"acronym",
"phone",
"comments",
)
resource.configure(crud_form=crud_form,
)
elif r.controller == "po":
# Referral Agencies in PO module
list_fields = ("name",
"acronym",
"organisation_organisation_type.organisation_type_id",
"website",
)
resource.configure(list_fields=list_fields)
# Default country
root_org = current.auth.root_org_name()
if root_org == NZRC:
resource.table.country.default = "NZ"
# Custom CRUD form
if r.interactive:
from s3 import S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent
# Filter inline address for type "office address", also sets default
OFFICE = {"field": "type", "options": 3}
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
label = type_label,
multiple = False,
),
S3SQLInlineComponent("address",
fields = [("", "location_id")],
multiple = False,
filterby = (OFFICE,),
),
"phone",
"website",
"logo",
"comments",
)
# Remove unwanted filters
# @todo: add a location filter for office address
unwanted_filters = ("sector_organisation.sector_id",
"country",
)
filter_widgets = [widget
for widget in resource.get_config("filter_widgets")
if widget.field not in unwanted_filters]
resource.configure(crud_form = crud_form,
filter_widgets = filter_widgets,
)
else:
# Organisations in org module
list_fields = ["name",
"acronym",
"organisation_organisation_type.organisation_type_id",
"country",
"website",
]
type_filter = r.get_vars.get("organisation_type.name", None)
if type_filter:
type_names = type_filter.split(",")
if len(type_names) == 1:
# Strip Type from list_fields
list_fields.remove("organisation_organisation_type.organisation_type_id")
type_label = ""
if type_filter == "Red Cross / Red Crescent":
# Modify filter_widgets
filter_widgets = resource.get_config("filter_widgets")
# Remove type (always 'RC')
filter_widgets.pop(1)
# Modify CRUD Strings
s3.crud_strings.org_organisation = Storage(
label_create = T("Create National Society"),
title_display = T("National Society Details"),
title_list = T("Red Cross & Red Crescent National Societies"),
title_update = T("Edit National Society"),
title_upload = T("Import Red Cross & Red Crescent National Societies"),
label_list_button = T("List Red Cross & Red Crescent National Societies"),
label_delete_button = T("Delete National Society"),
msg_record_created = T("National Society added"),
msg_record_modified = T("National Society updated"),
msg_record_deleted = T("National Society deleted"),
msg_list_empty = T("No Red Cross & Red Crescent National Societies currently registered")
)
# Add Region to list_fields
list_fields.insert(-1, "region_id")
# Region is required
r.table.region_id.requires = r.table.region_id.requires.other
else:
r.table.region_id.readable = r.table.region_id.writable = False
resource.configure(list_fields=list_fields)
if r.interactive:
r.table.country.label = T("Country")
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
label = type_label,
multiple = False,
#widget = "hierarchy",
),
"region_id",
"country",
"phone",
"website",
"logo",
"comments",
)
resource.configure(crud_form=crud_form)
return result
s3.prep = custom_prep
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_pr_contact_resource(r, tablename):
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == VNRC:
table = current.s3db.pr_contact
# Hard to translate in Vietnamese
table.value.label = ""
# Restrict options to just those wanted by VNRC
from gluon import IS_IN_SET
table.contact_method.requires = IS_IN_SET({"EMAIL": T("Email"),
"HOME_PHONE": T("Home Phone"),
"SMS": T("Mobile Phone"),
"WORK_PHONE": T("Work phone"),
},
zero=None)
settings.customise_pr_contact_resource = customise_pr_contact_resource
# -----------------------------------------------------------------------------
def customise_pr_contact_emergency_resource(r, tablename):
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == ARCS:
# Name isn't required
r.table.name.requires = None
elif root_org == VNRC:
address = r.table.address
address.readable = address.writable = True
settings.customise_pr_contact_emergency_resource = customise_pr_contact_emergency_resource
# -----------------------------------------------------------------------------
def customise_pr_person_availability_resource(r, tablename):
T = current.T
s3db = current.s3db
# Construct slot options
# NB this relies on prepopulation of date/time formulae with
# these names, as well as one pr_slot per combination:
dow = ("Mondays",
"Tuesdays",
"Wednesdays",
"Thursdays",
"Fridays",
"Saturdays",
"Sundays",
)
tod = ("Morning", "Afternoon", "Evening")
stable = s3db.pr_slot
dtable = s3db.pr_date_formula
ttable = s3db.pr_time_formula
join = [dtable.on((dtable.id == stable.date_formula_id) &
(dtable.name.belongs(dow))),
ttable.on((ttable.id == stable.time_formula_id) &
(ttable.name.belongs(tod))),
]
dtname = str(dtable)
ttname = str(ttable)
stname = str(stable)
key = lambda row: "%s %s" % (row[dtname]["name"], row[ttname]["name"])
query = (stable.deleted != True)
slots = current.db(query).select(stable.id,
stable.name,
dtable.name,
ttable.name,
join = join
).as_dict(key=key)
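    # "slots" is now a dict keyed by "<day> <time>" combinations,
    # e.g. "Mondays Morning" (assuming the prepop described above)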
opts = []
add_option = opts.append
for t in tod:
for d in dow:
slot = slots.get("%s %s" % (d, t))
if slot:
add_option((slot[stname]["id"],
T(slot[stname]["name"]),
))
# @ToDo: Make prettier
# - reduce labels to just Rows/Columns
from s3 import S3SQLCustomForm, S3SQLInlineLink
from gluon.validators import IS_IN_SET
crud_form = S3SQLCustomForm(
"options",
S3SQLInlineLink("slot",
cols = len(tod),
field = "slot_id",
label = T("Available on"),
requires = IS_IN_SET(opts,
sort = False,
zero = None,
),
sort = False,
),
"comments",
)
s3db.configure("pr_person_availability",
crud_form = crud_form,
)
settings.customise_pr_person_availability_resource = customise_pr_person_availability_resource
# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("org_organisation_team",
required = False,
branches = True,
hierarchy = False,
)
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.component_name == "group_membership":
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == VNRC:
# Update the represent as already set
s3db = current.s3db
s3db.pr_group_membership.person_id.represent = s3db.pr_PersonRepresent()
return result
s3.prep = custom_prep
return attr
settings.customise_pr_group_controller = customise_pr_group_controller
# =============================================================================
def vol_programme_active(person_id):
"""
Whether a Volunteer counts as 'Active' based on the number of hours
they've done (both Trainings & Programmes) per month, averaged over
the last year.
If nothing recorded for the last 3 months, don't penalise as assume
that data entry hasn't yet been done.
@ToDo: This should be based on the HRM record, not Person record
- could be active with Org1 but not with Org2
@ToDo: allow to be calculated differently per-Org
"""
now = current.request.utcnow
# Time spent on Programme work
htable = current.s3db.hrm_programme_hours
query = (htable.deleted == False) & \
(htable.person_id == person_id) & \
(htable.date != None)
programmes = current.db(query).select(htable.hours,
htable.date,
orderby=htable.date)
if programmes:
# Ignore up to 3 months of records
three_months_prior = (now - datetime.timedelta(days=92))
end = max(programmes.last().date, three_months_prior.date())
last_year = end - datetime.timedelta(days=365)
# Is this the Volunteer's first year?
if programmes.first().date > last_year:
# Only start counting from their first month
start = programmes.first().date
else:
# Start from a year before the latest record
start = last_year
# Total hours between start and end
programme_hours = 0
for programme in programmes:
if programme.date >= start and programme.date <= end and programme.hours:
programme_hours += programme.hours
# Average hours per month
months = max(1, (end - start).days / 30.5)
average = programme_hours / months
# Active?
if average >= 8:
return True
return False
# =============================================================================
def vol_activity_active(person_id):
"""
Whether a Volunteer counts as 'Active' based on the number of hours
they've done on Volunteer Activities (inc Trainings, but not Project Activities)
in the last month.
"""
from dateutil.relativedelta import relativedelta
now = current.request.utcnow
# Time spent on Volunteer Activities in the last month
htable = current.s3db.vol_activity_hours
query = (htable.deleted == False) & \
(htable.person_id == person_id) & \
(htable.date >= (now - relativedelta(months=1)))
activities = current.db(query).select(htable.hours,
)
if activities:
# Total hours between start and end
hours = 0
for activity in activities:
if activity.hours:
hours += activity.hours
# Active?
if hours >= 4:
return True
return False
# -----------------------------------------------------------------------------
def vnrc_cv_form(r):
from s3 import S3FixedOptionsWidget, S3SQLCustomForm
T = current.T
ptewidget = S3FixedOptionsWidget(("Primary",
"Intermediate",
"Advanced",
"Bachelor",
),
translate = True,
sort = False,
)
smewidget = S3FixedOptionsWidget(("Officer",
"Principal Officer",
"Senior Officer",
),
translate = True,
sort = False,
)
crud_form = S3SQLCustomForm((T("Political Theory Education"),
"pte.value",
ptewidget,
),
(T("State Management Education"),
"sme.value",
smewidget,
)
)
current.s3db.configure("pr_person", crud_form=crud_form)
return dict(label = T("Other Education"),
type = "form",
tablename = "pr_person",
context = ("id", "id"),
)
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
# Special cases for different NS
arcs = False
vnrc = False
root_org = current.auth.root_org_name()
if root_org == CRMADA:
table = s3db.pr_person
table.initials.readable = table.initials.writable = False
table.local_name.readable = table.local_name.writable = False
table.preferred_name.readable = table.preferred_name.writable = False
dtable = s3db.pr_person_details
dtable.religion.readable = dtable.religion.writable = False
dtable.nationality.default = "MG"
# Simplify UI: Just have 1 Address
s3db.add_components("pr_person",
pr_address = {"joinby": "pe_id",
"multiple": False,
},
)
elif root_org == IRCS:
settings.hrm.activity_types = None
settings.hrm.use_id = False
table = s3db.pr_person
table.initials.readable = table.initials.writable = False
table.preferred_name.readable = table.preferred_name.writable = False
elif root_org == PMI:
settings.hrm.staff_experience = "experience"
settings.hrm.vol_active_tooltip = "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year"
elif root_org in (CVTL, PRC):
settings.hrm.vol_active_tooltip = "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year"
if root_org == CVTL:
settings.member.cv_tab = True
elif root_org == VNRC:
# Custom components
add_components = s3db.add_components
PTE_TAG = "PoliticalTheoryEducation"
SME_TAG = "StateManagementEducation"
add_components("pr_person",
pr_identity = {"name": "idcard",
"joinby": "person_id",
"filterby": "type",
"filterfor": (2,),
"multiple": False,
},
pr_person_tag = ({"name": "pte",
"joinby": "person_id",
"filterby": "tag",
"filterfor": (PTE_TAG,),
"multiple": False,
"defaults": {"tag": PTE_TAG,
},
},
{"name": "sme",
"joinby": "person_id",
"filterby": "tag",
"filterfor": (SME_TAG,),
"multiple": False,
"defaults": {"tag": SME_TAG,
},
},
),
)
add_components("hrm_human_resource",
hrm_insurance = ({"name": "social_insurance",
"joinby": "human_resource_id",
"filterby": "type",
"filterfor": "SOCIAL",
},
{"name": "health_insurance",
"joinby": "human_resource_id",
"filterby": "type",
"filterfor": "HEALTH",
}),
)
vnrc = True
# Remove 'Commune' level for Addresses
#gis = current.gis
#gis.get_location_hierarchy()
#try:
# gis.hierarchy_levels.pop("L3")
#except:
# # Must be already removed
# pass
settings.modules.pop("asset", None)
if current.request.controller == "deploy":
# Replace default title in imports:
attr["retitle"] = lambda r: {"title": T("Import Members")} \
if r.method == "import" else None
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
component_name = r.component_name
method = r.method
if component_name == "address":
if root_org == CRMADA:
ctable = r.component.table
ctable.type.readable = ctable.type.writable = False
elif component_name == "appraisal":
atable = r.component.table
atable.organisation_id.readable = atable.organisation_id.writable = False
# Organisation needs to be an NS
#ns_only("hrm_appraisal",
# required = True,
# branches = False,
# )
field = atable.supervisor_id
field.readable = field.writable = False
field = atable.job_title_id
field.comment = None
field.label = T("Sector") # RDRT-specific
from s3 import IS_ONE_OF
field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
field.represent,
filterby = "type",
filter_opts = (4,),
)
elif component_name == "experience":
if root_org == IRCS:
ctable = r.component.table
ctable.hours.readable = ctable.hours.writable = False
ctable.job_title_id.readable = ctable.job_title_id.writable = False
elif component_name == "physical_description":
from gluon import DIV
ctable = r.component.table
if root_org in (CRMADA, IRCS):
ctable.ethnicity.readable = ctable.ethnicity.writable = False
ctable.medical_conditions.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Medical Conditions"),
T("Chronic Illness, Disabilities, Mental/Psychological Condition etc.")))
elif component_name == "identity":
if root_org == CRMADA:
controller = r.controller
table = r.component.table
# Set default to National ID Card
table.type.default = 2
# Relabel
table.valid_from.label = T("Date of Delivery")
field = table.place
field.label = T("Place of Delivery")
field.readable = field.writable = True
# Hide unneeded fields
# @ToDo: Do this dynamically in JS based on Type
hide_fields = ("description", "valid_until", "country_code", "ia_name")
for fname in hide_fields:
field = table[fname]
field.readable = field.writable = False
list_fields = s3db.get_config("pr_identity", "list_fields")
hide_fields = set(hide_fields)
list_fields = (fs for fs in list_fields if fs not in hide_fields)
s3db.configure("pr_identity", list_fields = list_fields)
elif method == "cv" or component_name == "education":
if vnrc:
etable = s3db.pr_education
# Don't enable Legacy Freetext field
# Hide the 'Name of Award' field
field = etable.award
field.readable = field.writable = False
# Limit education-level dropdown to the 3 specific options initially uploaded
                # @ToDo: Use the VNRC data in the table instead (dynamic
                # options shouldn't be hardcoded here) - although per-Org
                # options would make cross-Org reporting harder, and these
                # labels need a hardcoded l10n anyway
field = s3db.pr_education.level_id
levels = ("High School",
"University / College",
"Post Graduate",
)
from gluon import IS_EMPTY_OR
from s3 import IS_ONE_OF
field.requires = IS_EMPTY_OR(
IS_ONE_OF(current.db, "pr_education_level.id",
field.represent,
filterby = "name",
filter_opts = levels,
))
# Disallow adding of new education levels
field.comment = None
elif arcs:
# Don't enable Legacy Freetext field
# Only Highest-level of Education is captured
s3db.pr_education.level_id.label = T("Education Level")
else:
# Enable Legacy Freetext field
field = s3db.pr_education.level
field.readable = field.writable = True
field.label = T("Other Level")
field.comment = T("Use main dropdown whenever possible")
elif method =="record" or component_name == "human_resource":
# Organisation needs to be an NS/Branch
ns_only("hrm_human_resource",
required = True,
branches = True,
)
if method == "record":
if r.controller == "vol" and root_org == NRCS:
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("organisation_id",
"details.volunteer_type",
"job_title_id",
"start_date",
"end_date",
"status",
"comments",
)
s3db.configure("hrm_human_resource",
crud_form = crud_form,
)
else:
# Use default form (legacy)
s3db.clear_config("hrm_human_resource", "crud_form")
if arcs:
controller = r.controller
if controller == "vol" and not r.component:
# Hide unwanted fields
table = r.resource.table
for field in ("initials", "preferred_name", "local_name"):
table[field].writable = table[field].readable = False
table = s3db.pr_person_details
for field in ("religion",):
table[field].writable = table[field].readable = False
elif r.component_name == "physical_description":
# Hide unwanted fields
field = r.component.table.ethnicity
field.readable = field.writable = False
elif vnrc:
controller = r.controller
if not r.component:
crud_fields = ["first_name",
"middle_name",
"last_name",
"date_of_birth",
"gender",
"person_details.marital_status",
"person_details.nationality",
]
from gluon import IS_EMPTY_OR, IS_IN_SET
from s3 import IS_ONE_OF
db = current.db
dtable = s3db.pr_person_details
# Context-dependent form fields
if controller in ("pr", "hrm", "vol"):
# Provinces of Viet Nam
ltable = s3db.gis_location
ptable = ltable.with_alias("gis_parent_location")
dbset = db((ltable.level == "L1") & \
(ptable.name == "Viet Nam"))
left = ptable.on(ltable.parent == ptable.id)
vn_provinces = IS_EMPTY_OR(IS_ONE_OF(dbset, "gis_location.name",
"%(name)s",
left=left,
))
# Place Of Birth
field = dtable.place_of_birth
field.readable = field.writable = True
field.requires = vn_provinces
# Home Town
field = dtable.hometown
field.readable = field.writable = True
field.requires = vn_provinces
# Use a free-text version of religion field
# @todo: make religion a drop-down list of options
field = dtable.religion_other
field.label = T("Religion")
field.readable = field.writable = True
crud_fields.extend(["person_details.place_of_birth",
"person_details.hometown",
"person_details.religion_other",
"person_details.mother_name",
"person_details.father_name",
"person_details.affiliations",
])
else:
# ID Card Number inline
from s3 import S3SQLInlineComponent
idcard_number = S3SQLInlineComponent("idcard",
label = T("ID Card Number"),
fields = (("", "value"),),
default = {"type": 2,
},
multiple = False,
)
# @todo: make ethnicity a drop-down list of options
crud_fields.extend(["physical_description.ethnicity",
idcard_number,
])
# Standard option for nationality
field = dtable.nationality
VN = "VN"
field.default = VN
vnrc_only = False
try:
options = dict(field.requires.options())
except AttributeError:
pass
else:
opts = [VN]
if r.record:
# Get the nationality from the current record
query = (r.table.id == r.id)
left = dtable.on(dtable.person_id == r.id)
row = db(query).select(dtable.nationality,
left = left,
limitby = (0, 1)).first()
if row and row.nationality:
opts.append(row.nationality)
                        # Check whether this person is only VNRC-associated
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (htable.person_id == r.id) & \
(htable.deleted != True) & \
(otable.id == htable.organisation_id) & \
(otable.name != VNRC)
row = db(query).select(htable.id, limitby=(0, 1)).first()
if not row:
vnrc_only = True
opts = dict((k, options[k]) for k in opts if k in options)
if vnrc_only:
# Person is only associated with VNRC => enforce update,
# and limit options to either current value or VN
field.requires = IS_IN_SET(opts, zero=None)
else:
# Person is (also) associated with another org
# => can't enforce update, so just limit options
field.requires = IS_EMPTY_OR(IS_IN_SET(opts))
# Also hide some other fields
crud_fields.append("comments")
from s3 import S3SQLCustomForm
s3db.configure("pr_person",
crud_form = S3SQLCustomForm(*crud_fields),
)
if method == "record" or component_name == "human_resource":
# Hide unwanted fields in human_resource
htable = s3db.hrm_human_resource
for fname in ["job_title_id",
"code",
"essential",
"site_contact",
"start_date",
"end_date",
]:
field = htable[fname]
field.readable = field.writable = False
if method == "record" and controller == "hrm":
# Custom config for method handler
from s3 import FS
# RC employment history
org_type_name = "organisation_id$organisation_organisation_type.organisation_type_id$name"
widget_filter = (FS(org_type_name) == "Red Cross / Red Crescent") & \
(FS("organisation") == None)
org_experience = {"label": T("Red Cross Employment History"),
"label_create": T("Add Employment"),
"list_fields": ["start_date",
"end_date",
"organisation",
"department_id",
"job_title",
"employment_type",
],
"filter": widget_filter,
}
# Non-RC employment history
widget_filter = FS("organisation") != None
other_experience = {"label": T("Other Employments"),
"label_create": T("Add Employment"),
"list_fields": ["start_date",
"end_date",
"organisation",
"job_title",
],
"filter": widget_filter,
}
s3db.set_method("pr", "person",
method = "record",
action = s3db.hrm_Record(salary=True,
awards=True,
disciplinary_record=True,
org_experience=org_experience,
other_experience=other_experience,
))
# Custom list_fields for hrm_salary (exclude monthly amount)
stable = s3db.hrm_salary
stable.salary_grade_id.label = T("Grade Code")
s3db.configure("hrm_salary",
list_fields = ["staff_level_id",
"salary_grade_id",
"start_date",
"end_date",
],
)
# Custom list_fields for hrm_award
s3db.configure("hrm_award",
list_fields = ["date",
"awarding_body",
"award_type_id",
],
orderby = "hrm_award.date desc"
)
# Custom list_fields for hrm_disciplinary_action
s3db.configure("hrm_disciplinary_action",
list_fields = ["date",
"disciplinary_body",
"disciplinary_type_id",
],
orderby = "hrm_disciplinary_action.date desc"
)
# Custom form for hrm_human_resource
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_fields = ["organisation_id",
"site_id",
"department_id",
"status",
S3SQLInlineComponent("contract",
label=T("Contract Details"),
fields=["term",
(T("Hours Model"), "hours"),
],
multiple=False,
),
S3SQLInlineComponent("social_insurance",
label=T("Social Insurance"),
name="social",
fields=["insurance_number",
"insurer",
],
default={"type": "SOCIAL"},
multiple=False,
),
S3SQLInlineComponent("health_insurance",
label=T("Health Insurance"),
name="health",
fields=["insurance_number",
"provider",
],
default={"type": "HEALTH"},
multiple=False,
),
"comments",
]
s3db.configure("hrm_human_resource",
crud_form = S3SQLCustomForm(*crud_fields),
)
elif component_name == "address":
settings.gis.building_name = False
settings.gis.latlon_selector = False
settings.gis.map_selector = False
elif method == "contacts":
table = s3db.pr_contact_emergency
table.address.readable = table.address.writable = True
elif component_name == "identity":
controller = r.controller
table = r.component.table
# Limit options for identity document type
pr_id_type_opts = {1: T("Passport"),
2: T("National ID Card"),
}
from gluon.validators import IS_IN_SET
table.type.requires = IS_IN_SET(pr_id_type_opts, zero=None)
if controller == "hrm":
# For staff, set default for ID document type and do not
# allow selection of other options
table.type.default = 2
table.type.writable = False
hide_fields = ("description", "valid_until", "country_code", "ia_name")
else:
hide_fields = ("description",)
# Hide unneeded fields
for fname in hide_fields:
field = table[fname]
field.readable = field.writable = False
list_fields = s3db.get_config("pr_identity", "list_fields")
hide_fields = set(hide_fields)
list_fields = (fs for fs in list_fields if fs not in hide_fields)
s3db.configure("pr_identity", list_fields = list_fields)
elif component_name == "physical_description" or \
method == "import":
# Add the less-specific blood types (as that's all the data currently available in some cases)
field = s3db.pr_physical_description.blood_type
from gluon.validators import IS_EMPTY_OR, IS_IN_SET
blood_type_opts = ("A+", "A-", "B+", "B-", "AB+", "AB-", "O+", "O-", "A", "B", "AB", "O")
field.requires = IS_EMPTY_OR(IS_IN_SET(blood_type_opts))
elif method == "cv" or component_name == "experience":
table = s3db.hrm_experience
# Use simple free-text variants
table.organisation_id.default = None # should not default in this case
table.organisation.readable = True
table.organisation.writable = True
table.job_title.readable = True
table.job_title.writable = True
table.comments.label = T("Main Duties")
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("organisation",
"job_title",
"comments",
"start_date",
"end_date",
)
s3db.configure("hrm_experience",
crud_form = crud_form,
list_fields = ["organisation",
"job_title",
"comments",
"start_date",
"end_date",
],
)
if method == "cv":
# Customize CV
s3db.set_method("pr", "person",
method = "cv",
action = s3db.hrm_CV(form=vnrc_cv_form))
elif component_name == "salary":
stable = s3db.hrm_salary
stable.salary_grade_id.label = T("Grade Code")
field = stable.monthly_amount
field.readable = field.writable = False
elif component_name == "competency":
ctable = s3db.hrm_competency
# Hide confirming organisation (defaults to VNRC)
ctable.organisation_id.readable = False
elif component_name == "membership":
field = s3db.member_membership.fee_exemption
field.readable = field.writable = True
PROGRAMMES = T("Programs")
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm("organisation_id",
"code",
"membership_type_id",
"start_date",
"end_date",
"membership_fee",
"membership_paid",
"fee_exemption",
S3SQLInlineLink("programme",
field="programme_id",
label=PROGRAMMES,
),
)
list_fields = ["organisation_id",
"membership_type_id",
"start_date",
(T("Paid"), "paid"),
(T("Email"), "email.value"),
(T("Phone"), "phone.value"),
(PROGRAMMES, "membership_programme.programme_id"),
]
s3db.configure("member_membership",
crud_form = crud_form,
list_fields = list_fields,
)
return True
s3.prep = custom_prep
attr["rheader"] = lambda r, vnrc=vnrc: pr_rheader(r, vnrc)
if vnrc:
# Link to customised download Template
#attr["csv_template"] = ("../../themes/IFRC/formats", "volunteer_vnrc")
# Remove link to download Template
attr["csv_template"] = "hide"
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def pr_rheader(r, vnrc):
"""
Custom rheader for vol/person for vnrc
"""
controller = current.request.controller
    if vnrc:
if controller == "vol":
# Simplify RHeader
settings.hrm.vol_experience = None
if controller == "member":
return current.s3db.member_rheader(r)
else:
s3db = current.s3db
s3db.hrm_vars()
return s3db.hrm_rheader(r)
# -----------------------------------------------------------------------------
def customise_survey_series_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("survey_series",
required = False,
branches = True,
)
return attr
settings.customise_survey_series_controller = customise_survey_series_controller
# -------------------------------------------------------------------------
def household_inject_form_script(r, record):
"""
Inject JS for progressive revelation of household form,
to be called from prep
@param r: the S3Request
@param record: the household record
"""
if r.interactive:
s3 = current.response.s3
script = "/%s/static/themes/IFRC/js/po.js" % current.request.application
if script not in s3.scripts:
s3.scripts.append(script)
if record and record.followup:
            s3.jquery_ready.append('''$.showHouseholdComponents(true)''')
return
# -------------------------------------------------------------------------
def customise_po_household_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Do not require international phone number format
settings = current.deployment_settings
settings.msg.require_international_phone_numbers = False
# Inject JS for household form
household_inject_form_script(r, r.record)
# Geocode imported household addresses
if r.method == "import" and "job" in r.post_vars:
settings.gis.geocode_imported_addresses = True
settings.gis.ignore_geocode_errors = True
return result
s3.prep = custom_prep
return attr
settings.customise_po_household_controller = customise_po_household_controller
# -------------------------------------------------------------------------
def customise_po_area_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Do not require international phone number format
settings = current.deployment_settings
settings.msg.require_international_phone_numbers = False
if r.component_name == "household":
# Inject JS for household form
record = None
if r.component_id:
records = r.component.load()
if records:
record = records[0]
household_inject_form_script(r, record)
return result
s3.prep = custom_prep
return attr
settings.customise_po_area_controller = customise_po_area_controller
# -------------------------------------------------------------------------
def project_project_postprocess(form):
"""
        When Budget Monitoring is in use (i.e. CRMADA), create the monitoring entries
"""
db = current.db
s3db = current.s3db
project_id = form.vars.id
# Read Budget Entity ID, Start Date and End Date
ptable = s3db.project_project
project = db(ptable.id == project_id).select(ptable.budget_entity_id,
ptable.name,
ptable.start_date,
ptable.end_date,
limitby=(0, 1)
).first()
if not project:
return
# Copy Project Name to Budget Name
budget_entity_id = project.budget_entity_id
btable = s3db.budget_budget
query = (btable.budget_entity_id == budget_entity_id)
budget = db(query).select(btable.id, # Needed for update_record
# If we want to provide smoothed default expected values
#btable.total_budget,
btable.currency,
# Assume Monthly
#btable.monitoring_frequency,
limitby=(0, 1)
).first()
if not budget:
return
try:
budget.update_record(name = project.name)
    except Exception:
# unique=True violation
budget.update_record(name = "Budget for %s" % project.name)
mtable = s3db.budget_monitoring
exists = db(mtable.budget_entity_id == budget_entity_id).select(mtable.id,
limitby=(0, 1))
if not exists:
# Create Monitoring Data entries
start_date = project.start_date
end_date = project.end_date
if not start_date or not end_date:
return
# Assume Monthly
#monitoring_frequency = budget.monitoring_frequency
#if not monitoring_frequency:
# return
#total_budget = budget.total_budget
currency = budget.currency
# Create entries for the 1st of every month between start_date and end_date
from dateutil import rrule
dates = list(rrule.rrule(rrule.MONTHLY, bymonthday=1, dtstart=start_date, until=end_date))
for d in dates:
mtable.insert(budget_entity_id = budget_entity_id,
# @ToDo: This needs to be modified whenever entries are manually edited
# Set/update this in budget_monitoring_onaccept
# - also check here that we don't exceed overall budget
start_date = start_date,
end_date = d,
currency = currency,
)
# Start date relates to previous entry
start_date = d
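        # Illustrative example (not in the original code): for a project
        # running 2015-01-15..2015-04-10, the rrule above yields the 1st of
        # Feb, Mar and Apr 2015, producing monitoring periods
        # 2015-01-15..2015-02-01, 2015-02-01..2015-03-01 and
        # 2015-03-01..2015-04-01.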
# -----------------------------------------------------------------------------
def customise_project_programme_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("project_programme",
required = True,
branches = False,
updateable = True,
)
return attr
settings.customise_project_programme_controller = customise_project_programme_controller
# -----------------------------------------------------------------------------
def customise_project_project_controller(**attr):
tablename = "project_project"
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_default_filter,
tablename = "project_project")
# Load standard model
s3db = current.s3db
table = s3db[tablename]
# @ToDo: S3SQLInlineComponent for Project orgs
# Get IDs for PartnerNS/Partner-Donor
# db = current.db
# ttable = db.org_organisation_type
# rows = db(ttable.deleted != True).select(ttable.id,
# ttable.name,
# )
# rc = []
# not_rc = []
# nappend = not_rc.append
# for row in rows:
# if row.name == "Red Cross / Red Crescent":
# rc.append(row.id)
# elif row.name == "Supplier":
# pass
# else:
# nappend(row.id)
# Custom Fields
table.organisation_id.label = T("Host National Society")
# Custom Crud Form
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
if settings.get_project_programmes():
# Inject inline link for programmes including AddResourceLink
#from s3layouts import S3PopupLink
comment = s3db.project_programme_id.attr.comment
comment.vars = {"caller": "link_defaultprogramme",
"prefix": "project",
"parent": "programme_project",
}
programme = S3SQLInlineLink("programme",
label = T("Program"),
field = "programme_id",
multiple = False,
comment = comment,
)
else:
programme = None
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == CRMADA:
settings.project.details_tab = True
#settings.project.community_volunteers = True
HFA = None
# Done in a more structured way instead
objectives = None
outputs = None
settings.project.goals = True
settings.project.indicators = True
settings.project.outcomes = True
settings.project.outputs = True
# Use Budget module instead of ProjectAnnualBudget
settings.project.multiple_budgets = False
settings.project.budget_monitoring = True
# Require start/end dates
table.start_date.requires = table.start_date.requires.other
table.end_date.requires = table.end_date.requires.other
budget = S3SQLInlineComponent("budget",
label = T("Budget"),
#link = False,
multiple = False,
fields = ["total_budget",
"currency",
#"monitoring_frequency",
],
)
btable = s3db.budget_budget
# Need to provide a name
import random, string
btable.name.default = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(16))
btable.monitoring_frequency.default = 3 # Monthly
postprocess = project_project_postprocess
list_fields = s3db.get_config("project_project", "list_fields")
list_fields += [(T("Monthly Status"), "current_status_by_indicators"),
(T("Cumulative Status"), "overall_status_by_indicators"),
]
else:
postprocess = None
HFA = "drr.hfa"
budget = None
objectives = "objectives"
outputs = S3SQLInlineComponent(
"output",
label = T("Outputs"),
fields = ["name", "status"],
)
crud_form = S3SQLCustomForm(
"organisation_id",
programme,
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
budget,
#S3SQLInlineComponent(
# "location",
# label = T("Locations"),
# fields = ["location_id"],
#),
# Outputs
outputs,
S3SQLInlineLink(
"hazard",
label = T("Hazards"),
field = "hazard_id",
help_field = s3db.project_hazard_help_fields,
cols = 4,
translate = True,
),
S3SQLInlineLink(
"sector",
label = T("Sectors"),
field = "sector_id",
cols = 4,
translate = True,
),
S3SQLInlineLink(
"theme",
label = T("Themes"),
field = "theme_id",
help_field = s3db.project_theme_help_fields,
cols = 4,
translate = True,
# Filter Theme by Sector
filterby = "theme_id:project_theme_sector.sector_id",
match = "sector_project.sector_id",
script = '''
$.filterOptionsS3({
'trigger':{'alias':'sector','name':'sector_id','inlineType':'link'},
'target':{'alias':'theme','name':'theme_id','inlineType':'link'},
'lookupPrefix':'project',
'lookupResource':'theme',
'lookupKey':'theme_id:project_theme_sector.sector_id',
'showEmptyField':false,
'tooltip':'project_theme_help_fields(id,name)'
})'''
),
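            # The filterOptionsS3 script above narrows the Theme options
            # to those linked (via project_theme_sector) to the Sectors
            # selected in the inline Sector widget.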
HFA,
objectives,
"human_resource_id",
# Disabled since we need organisation_id filtering to either organisation_type_id == RC or NOT
# & also hiding Branches from RCs
# & also rewriting for organisation_type_id via link table
# Partner NS
# S3SQLInlineComponent(
# "organisation",
# name = "partnerns",
# label = T("Partner National Societies"),
# fields = ["organisation_id",
# "comments",
# ],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": rc,
# }],
# filterby = dict(field = "role",
# options = [9])
# ),
# Partner Orgs
# S3SQLInlineComponent(
# "organisation",
# name = "partner",
# label = T("Partner Organizations"),
# fields = ["organisation_id",
# "comments",
# ],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": not_rc,
# }],
# filterby = dict(field = "role",
# options = [2])
# ),
# Donors
# S3SQLInlineComponent(
# "organisation",
# name = "donor",
# label = T("Donor(s)"),
# fields = ["organisation_id",
# "amount",
# "currency"],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": not_rc,
# }],
# filterby = dict(field = "role",
# options = [3])
# ),
#"budget",
#"currency",
"comments",
postprocess = postprocess,
)
s3db.configure(tablename,
crud_form = crud_form,
)
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.component:
if r.component_name == "organisation":
component_id = r.component_id
if component_id:
# No r.component.record :/
ctable = s3db.project_organisation
crecord = current.db(ctable.id == component_id).select(ctable.role,
limitby=(0, 1)
).first()
if crecord.role == settings.get_project_organisation_lead_role():
ns_only("project_organisation",
required = True,
branches = False,
updateable = True,
)
else:
# Lead Organisation needs to be an NS (not a branch)
ns_only(tablename,
required = True,
branches = False,
limit_filter_opts = True,
)
# Set the Host NS filter as Visible so that the default filter works
filter_widgets = s3db.get_config(tablename, "filter_widgets")
for widget in filter_widgets:
if widget.field == "organisation_id":
widget.opts.hidden = False
break
return result
s3.prep = custom_prep
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
def customise_project_beneficiary_resource(r, tablename):
"""
Link Project Beneficiaries to Activity Type
"""
if r.interactive and r.component:
if r.tablename == "project_project":
# We are a component of the Project
project_id = r.id
elif r.tablename == "project_location":
# We are a component of the Project Location
project_id = r.record.project_id
else:
# Unknown!
return
db = current.db
s3db = current.s3db
# Filter Activity Type by Sector
ltable = s3db.project_sector_project
rows = db(ltable.project_id == project_id).select(ltable.sector_id)
sectors = [row.sector_id for row in rows]
ltable = s3db.project_activity_type_sector
rows = db(ltable.sector_id.belongs(sectors)).select(ltable.activity_type_id)
filteropts = [row.activity_type_id for row in rows]
def postprocess(form):
# Update project_location.activity_type
beneficiary_id = form.vars.get("id", None)
table = db.project_beneficiary
row = db(table.id == beneficiary_id).select(table.project_location_id,
limitby = (0, 1)
).first()
if not row:
return
project_location_id = row.project_location_id
if not project_location_id:
return
ltable = db.project_beneficiary_activity_type
row = db(ltable.beneficiary_id == beneficiary_id).select(ltable.activity_type_id,
limitby = (0, 1)
).first()
if not row:
return
activity_type_id = row.activity_type_id
ltable = s3db.project_activity_type_location
query = (ltable.project_location_id == project_location_id) & \
(ltable.activity_type_id == activity_type_id)
exists = db(query).select(ltable.id,
limitby = (0, 1)
).first()
if not exists:
ltable.insert(project_location_id = project_location_id,
activity_type_id = activity_type_id,
)
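        # Net effect of postprocess: after saving a beneficiary, its
        # activity type is mirrored onto the parent project location,
        # creating the project_activity_type_location link if missing.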
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm(#"project_id",
"project_location_id",
S3SQLInlineLink("activity_type",
field = "activity_type_id",
filterby = "id",
options = filteropts,
label = T("Activity Type"),
multiple = False,
),
"parameter_id",
"value",
"target_value",
"date",
"end_date",
"comments",
postprocess = postprocess,
)
s3db.configure(tablename,
crud_form = crud_form,
)
elif not r.component:
# Report
from s3 import S3OptionsFilter
resource = r.resource
filter_widgets = resource.get_config("filter_widgets")
filter_widgets.insert(1,
S3OptionsFilter("beneficiary_activity_type.activity_type_id",
label = T("Activity Type"),
))
report_options = resource.get_config("report_options")
report_options.rows.append("beneficiary_activity_type.activity_type_id")
# Same object so would be added twice
#report_options.cols.append("beneficiary_activity_type.activity_type_id")
resource.configure(filter_widgets = filter_widgets,
report_options = report_options,
)
settings.customise_project_beneficiary_resource = customise_project_beneficiary_resource
# -----------------------------------------------------------------------------
def customise_project_location_resource(r, tablename):
from s3 import S3LocationSelector, S3SQLCustomForm, S3SQLInlineComponentCheckbox
s3db = current.s3db
s3db.project_location.location_id.widget = \
S3LocationSelector(show_postcode = False,
show_latlon = False,
show_map = False,
)
crud_form = S3SQLCustomForm(
"project_id",
"location_id",
# @ToDo: Grouped Checkboxes
S3SQLInlineComponentCheckbox(
"activity_type",
label = T("Activity Types"),
field = "activity_type_id",
cols = 3,
# Filter Activity Type by Sector
filter = {"linktable": "project_activity_type_sector",
"lkey": "activity_type_id",
"rkey": "sector_id",
"lookuptable": "project_project",
"lookupkey": "project_id",
},
translate = True,
),
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_project_location_resource = customise_project_location_resource
# -----------------------------------------------------------------------------
def customise_req_commit_controller(**attr):
# Request is mandatory
field = current.s3db.req_commit.req_id
field.requires = field.requires.other
return attr
settings.customise_req_commit_controller = customise_req_commit_controller
# -----------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):
s3db = current.s3db
# Request is mandatory
field = s3db.req_commit.req_id
field.requires = field.requires.other
table = s3db.req_req
table.req_ref.represent = lambda v, show_link=True, pdf=True: \
s3db.req_ref_represent(v, show_link, pdf)
table.site_id.label = T("Deliver To")
# Hide Drivers list_field
list_fields = s3db.get_config("req_req", "list_fields")
try:
list_fields.remove((T("Drivers"), "drivers"))
    except ValueError:
# Already removed
pass
settings.customise_req_req_resource = customise_req_req_resource
# -----------------------------------------------------------------------------
def customise_vulnerability_data_resource(r, tablename):
# Date is required: We don't store modelled data
requires = r.table.date.requires
if hasattr(requires, "other"):
r.table.date.requires = requires.other
settings.customise_vulnerability_data_resource = customise_vulnerability_data_resource
# END =========================================================================
| mit |
glwu/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_dist.py | 47 | 11394 | """Tests for distutils.dist."""
import os
import io
import sys
import unittest
import warnings
import textwrap
from distutils.dist import Distribution, fix_help_options
from distutils.cmd import Command
from test.support import TESTFN, captured_stdout, run_unittest
from distutils.tests import support
class test_dist(Command):
"""Sample distutils extension command."""
user_options = [
("sample-option=", "S", "help text"),
]
def initialize_options(self):
self.sample_option = None
class TestDistribution(Distribution):
"""Distribution subclasses that avoids the default search for
configuration files.
The ._config_files attribute must be set before
.parse_config_files() is called.
"""
def find_config_files(self):
return self._config_files
class DistributionTestCase(support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(DistributionTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
del sys.argv[1:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(DistributionTestCase, self).tearDown()
def create_distribution(self, configfiles=()):
d = TestDistribution()
d._config_files = configfiles
d.parse_config_files()
d.parse_command_line()
return d
def test_command_packages_unspecified(self):
sys.argv.append("build")
d = self.create_distribution()
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_command_packages_cmdline(self):
from distutils.tests.test_dist import test_dist
sys.argv.extend(["--command-packages",
"foo.bar,distutils.tests",
"test_dist",
"-Ssometext",
])
d = self.create_distribution()
# let's actually try to load our test command:
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "distutils.tests"])
cmd = d.get_command_obj("test_dist")
self.assertTrue(isinstance(cmd, test_dist))
self.assertEqual(cmd.sample_option, "sometext")
def test_command_packages_configfile(self):
sys.argv.append("build")
self.addCleanup(os.unlink, TESTFN)
f = open(TESTFN, "w")
try:
print("[global]", file=f)
print("command_packages = foo.bar, splat", file=f)
finally:
f.close()
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "splat"])
# ensure command line overrides config:
sys.argv[1:] = ["--command-packages", "spork", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "spork"])
# Setting --command-packages to '' should cause the default to
# be used even if a config file specified something else:
sys.argv[1:] = ["--command-packages", "", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_empty_options(self):
# an empty options dictionary should not stay in the
# list of attributes
klass = Distribution
# catching warnings
warns = []
def _warn(msg):
warns.append(msg)
old_warn = warnings.warn
warnings.warn = _warn
try:
dist = klass(attrs={'author': 'xxx',
'name': 'xxx',
'version': 'xxx',
'url': 'xxxx',
'options': {}})
finally:
warnings.warn = old_warn
self.assertEqual(len(warns), 0)
def test_finalize_options(self):
attrs = {'keywords': 'one,two',
'platforms': 'one,two'}
dist = Distribution(attrs=attrs)
dist.finalize_options()
# finalize_option splits platforms and keywords
self.assertEqual(dist.metadata.platforms, ['one', 'two'])
self.assertEqual(dist.metadata.keywords, ['one', 'two'])
def test_get_command_packages(self):
dist = Distribution()
self.assertEqual(dist.command_packages, None)
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command'])
self.assertEqual(dist.command_packages,
['distutils.command'])
dist.command_packages = 'one,two'
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command', 'one', 'two'])
def test_announce(self):
# make sure the level is known
dist = Distribution()
args = ('ok',)
kwargs = {'level': 'ok2'}
self.assertRaises(ValueError, dist.announce, args, kwargs)
class MetadataTestCase(support.TempdirManager, support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(MetadataTestCase, self).tearDown()
def test_simple_metadata(self):
attrs = {"name": "package",
"version": "1.0"}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.0" in meta)
self.assertTrue("provides:" not in meta.lower())
self.assertTrue("requires:" not in meta.lower())
self.assertTrue("obsoletes:" not in meta.lower())
def test_provides(self):
attrs = {"name": "package",
"version": "1.0",
"provides": ["package", "package.sub"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_provides(),
["package", "package.sub"])
self.assertEqual(dist.get_provides(),
["package", "package.sub"])
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.1" in meta)
self.assertTrue("requires:" not in meta.lower())
self.assertTrue("obsoletes:" not in meta.lower())
def test_provides_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"provides": ["my.pkg (splat)"]})
def test_requires(self):
attrs = {"name": "package",
"version": "1.0",
"requires": ["other", "another (==1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_requires(),
["other", "another (==1.0)"])
self.assertEqual(dist.get_requires(),
["other", "another (==1.0)"])
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.1" in meta)
self.assertTrue("provides:" not in meta.lower())
self.assertTrue("Requires: other" in meta)
self.assertTrue("Requires: another (==1.0)" in meta)
self.assertTrue("obsoletes:" not in meta.lower())
def test_requires_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"requires": ["my.pkg (splat)"]})
def test_obsoletes(self):
attrs = {"name": "package",
"version": "1.0",
"obsoletes": ["other", "another (<1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_obsoletes(),
["other", "another (<1.0)"])
self.assertEqual(dist.get_obsoletes(),
["other", "another (<1.0)"])
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.1" in meta)
self.assertTrue("provides:" not in meta.lower())
self.assertTrue("requires:" not in meta.lower())
self.assertTrue("Obsoletes: other" in meta)
self.assertTrue("Obsoletes: another (<1.0)" in meta)
def test_obsoletes_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"obsoletes": ["my.pkg (splat)"]})
def format_metadata(self, dist):
sio = io.StringIO()
dist.metadata.write_pkg_file(sio)
return sio.getvalue()
def test_custom_pydistutils(self):
# fixes #2166
# make sure pydistutils.cfg is found
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
temp_dir = self.mkdtemp()
user_filename = os.path.join(temp_dir, user_filename)
f = open(user_filename, 'w')
try:
f.write('.')
finally:
f.close()
try:
dist = Distribution()
# linux-style
if sys.platform in ('linux', 'darwin'):
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertTrue(user_filename in files)
# win32-style
if sys.platform == 'win32':
# home drive should be found
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertTrue(user_filename in files,
'%r not found in %r' % (user_filename, files))
finally:
os.remove(user_filename)
def test_fix_help_options(self):
help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)]
fancy_options = fix_help_options(help_tuples)
self.assertEqual(fancy_options[0], ('a', 'b', 'c'))
self.assertEqual(fancy_options[1], (1, 2, 3))
def test_show_help(self):
# smoke test, just makes sure some help is displayed
dist = Distribution()
sys.argv = []
dist.help = 1
dist.script_name = 'setup.py'
with captured_stdout() as s:
dist.parse_command_line()
output = [line for line in s.getvalue().split('\n')
if line.strip() != '']
self.assertTrue(len(output) > 0)
def test_long_description(self):
long_desc = textwrap.dedent("""\
example::
We start here
and continue here
and end here.""")
attrs = {"name": "package",
"version": "1.0",
"long_description": long_desc}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
meta = meta.replace('\n' + 8 * ' ', '\n')
self.assertTrue(long_desc in meta)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DistributionTestCase))
suite.addTest(unittest.makeSuite(MetadataTestCase))
return suite
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 |
longfin/sider | docs/conf.py | 2 | 8883 | # -*- coding: utf-8 -*-
#
# Sider documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 15 06:45:22 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, os.path, glob, datetime
# Whether it is built by ReadTheDocs.org
readthedocs = os.environ.get('READTHEDOCS', '') == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path[:0] = [os.path.abspath('.'), os.path.abspath('..')]
sys.path[2:2] = glob.glob(os.path.abspath(os.path.join('..', 'examples', '*')))
from siderdocs import lower_sprintf_str
from sider.version import VERSION
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.inheritance_diagram',
'sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sider'
copyright = str(datetime.date.today().year) + u', Hong Minhee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# - solarized: https://github.com/altercation/solarized
# - solarized-dark-pygments: https://github.com/gthank/solarized-dark-pygments
#pygments_style = 'solarized.SolarizedStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'solarized'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'codebgcolor': '#002B36', 'codetextcolor': '#586E75'}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Sider ' + VERSION
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Siderdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Sider.tex', u'Sider Documentation',
u'Hong Minhee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sider', u'Sider Documentation', [u'Hong Minhee'], 1)
]
if readthedocs:
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/v2.7.2/', None),
'greenlet': ('http://greenlet.readthedocs.org/en/latest/', None),
'eventlet': ('http://eventlet.net/doc/', None),
'gevent': ('http://gevent.org/', None),
'werkzeug': ('http://werkzeug.readthedocs.org/en/latest/', None),
'jinja2': ('http://jinja2.readthedocs.org/en/latest/', None)
}
else:
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'greenlet': ('http://greenlet.readthedocs.org/en/latest/', None),
'eventlet': ('http://eventlet.net/doc/', None),
'gevent': ('http://gevent.org/', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'jinja2': ('http://jinja.pocoo.org/docs/', None)
}
extlinks = {
'redis': (lower_sprintf_str('http://redis.io/commands/%s'), ''),
'issue': ('https://github.com/dahlia/sider/issues/%s', 'issue #'),
'commit': ('https://github.com/dahlia/sider/commit/%s', ''),
'branch': ('https://github.com/dahlia/sider/compare/master...%s', '')
}
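# Illustrative usage of the shortcuts above (lower_sprintf_str presumably
# lower-cases the interpolated argument): :redis:`LPUSH` links to
# http://redis.io/commands/lpush, and :issue:`42` renders as "issue #42"
# linking to the project's GitHub issue tracker.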
todo_include_todos = not readthedocs
inheritance_graph_attrs = {'bgcolor': 'transparent'}
| mit |
heeraj123/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_interpreted.py | 16 | 10332 | #! /usr/bin/env python
# $Id: test_interpreted.py 7514 2012-09-14 14:27:12Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for interpreted text in docutils/parsers/rst/states.py.
"""
from __init__ import DocutilsTestSupport
from docutils.utils.code_analyzer import with_pygments
def suite():
s = DocutilsTestSupport.ParserTestSuite()
if not with_pygments:
del(totest['code-parsing'])
s.generateTests(totest)
return s
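# Each totest entry below pairs a reStructuredText input string with the
# pseudo-XML document tree the parser is expected to produce;
# generateTests() above turns every pair into a test case.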
totest = {}
totest['basics'] = [
["""\
`interpreted`
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
interpreted
"""],
["""\
:title:`interpreted`
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
interpreted
"""],
["""\
`interpreted`:title:
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
interpreted
"""],
["""\
`interpreted \`title``
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
interpreted `title`
"""],
["""\
:title:`:not-role: interpreted`
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
:not-role: interpreted
"""],
["""\
`interpreted` but not \\`interpreted` [`] or ({[`] or [`]}) or `
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
interpreted
but not `interpreted` [`] or ({[`] or [`]}) or `
"""],
["""\
`interpreted`-text `interpreted`: text `interpreted`:text `text`'s interpreted
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
interpreted
-text \n\
<title_reference>
interpreted
: text \n\
<title_reference>
interpreted
:text \n\
<title_reference>
text
's interpreted
"""],
["""\
`interpreted without closing backquote
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
`
interpreted without closing backquote
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline interpreted text or phrase reference start-string without end-string.
"""],
["""\
`interpreted`:not a role if it contains whitespace:
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
interpreted
:not a role if it contains whitespace:
"""],
["""\
:title:`` (empty interpreted text not recognized)
""",
"""\
<document source="test data">
<paragraph>
        :title:`` (empty interpreted text not recognized)
"""],
["""\
:title:`\ ` (interpreted text containing empty string)
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
         (interpreted text containing empty string)
"""],
["""\
`\ `:title: (interpreted text containing empty string (postfix))
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
         (interpreted text containing empty string (postfix))
"""],
["""\
:title:`\ non-empty`
""",
"""\
<document source="test data">
<paragraph>
<title_reference>
non-empty
"""],
["""\
:title:`\ ` (trailing unquoted space)
""",
"""\
<document source="test data">
<paragraph>
:title:
<problematic ids="id2" refid="id1">
`
` (trailing unquoted space)
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline interpreted text or phrase reference start-string without end-string.
"""],
["""\
Explicit roles for standard inline markup:
:emphasis:`emphasis`,
:strong:`strong`,
:literal:`inline literal text`.
""",
"""\
<document source="test data">
<paragraph>
Explicit roles for standard inline markup:
<emphasis>
emphasis
,
<strong>
strong
,
<literal>
inline literal text
.
"""],
["""\
Simple explicit roles:
:ab:`abbreviation`,
:ac:`acronym`,
:sup:`superscript`,
:sub:`subscript`,
:title:`title reference`.
""",
"""\
<document source="test data">
<paragraph>
Simple explicit roles:
<abbreviation>
abbreviation
,
<acronym>
acronym
,
<superscript>
superscript
,
<subscript>
subscript
,
<title_reference>
title reference
.
"""],
]
totest['code'] = [
["""\
Code role for inline code snippets:
:code:`$\alpha = \int_0^\infty f(x) dx$`.
""",
"""\
<document source="test data">
<paragraph>
Code role for inline code snippets:
<literal classes="code">
$\x07lpha = \\int_0^\\infty f(x) dx$
.
"""],
]
totest['code-parsing'] = [
["""\
.. role:: tex(code)
:language: latex
Custom role based on code role:
:tex:`$\alpha = f(x)$`.
""",
"""\
<document source="test data">
<paragraph>
Custom role based on code role:
<literal classes="code tex latex">
<inline classes="literal string">
$
<inline classes="name builtin">
\x07lpha \n\
<inline classes="operator">
=
<inline classes="name builtin">
f
<inline classes="operator">
(
<inline classes="name builtin">
x
<inline classes="operator">
)
<inline classes="literal string">
$
.
"""],
["""\
Custom role based on code role:
.. role:: python(code)
:language: python
:class: testclass
Python code :python:`print("The end")`.
""",
"""\
<document source="test data">
<paragraph>
Custom role based on code role:
<paragraph>
Python code \n\
<literal classes="code testclass python">
<inline classes="keyword">
print
<inline classes="punctuation">
(
<inline classes="literal string">
"The end"
<inline classes="punctuation">
)
.
"""],
]
totest['references'] = [
["""\
:PEP:`0`
""",
"""\
<document source="test data">
<paragraph>
<reference refuri="http://www.python.org/dev/peps/pep-0000">
PEP 0
"""],
["""\
:PEP:`-1`
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
:PEP:`-1`
<system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR">
<paragraph>
PEP number must be a number from 0 to 9999; "-1" is invalid.
"""],
["""\
:RFC:`2822`
""",
"""\
<document source="test data">
<paragraph>
<reference refuri="http://www.faqs.org/rfcs/rfc2822.html">
RFC 2822
"""],
["""\
:RFC:`0`
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
:RFC:`0`
<system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR">
<paragraph>
RFC number must be a number greater than or equal to 1; "0" is invalid.
"""],
]
totest['unknown_roles'] = [
["""\
:role:`interpreted`
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
:role:`interpreted`
<system_message level="1" line="1" source="test data" type="INFO">
<paragraph>
No role entry for "role" in module "docutils.parsers.rst.languages.en".
Trying "role" as canonical role name.
<system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR">
<paragraph>
Unknown interpreted text role "role".
"""],
["""\
`interpreted`:role:
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
`interpreted`:role:
<system_message level="1" line="1" source="test data" type="INFO">
<paragraph>
No role entry for "role" in module "docutils.parsers.rst.languages.en".
Trying "role" as canonical role name.
<system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR">
<paragraph>
Unknown interpreted text role "role".
"""],
["""\
:role:`interpreted`:role:
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
:role:`interpreted`:role:
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Multiple roles in interpreted text (both prefix and suffix present; only one allowed).
"""],
["""\
:very.long-role_name:`interpreted`
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
:very.long-role_name:`interpreted`
<system_message level="1" line="1" source="test data" type="INFO">
<paragraph>
No role entry for "very.long-role_name" in module "docutils.parsers.rst.languages.en".
Trying "very.long-role_name" as canonical role name.
<system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR">
<paragraph>
Unknown interpreted text role "very.long-role_name".
"""],
["""\
:restructuredtext-unimplemented-role:`interpreted`
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
:restructuredtext-unimplemented-role:`interpreted`
<system_message level="1" line="1" source="test data" type="INFO">
<paragraph>
No role entry for "restructuredtext-unimplemented-role" in module "docutils.parsers.rst.languages.en".
Trying "restructuredtext-unimplemented-role" as canonical role name.
<system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR">
<paragraph>
Interpreted text role "restructuredtext-unimplemented-role" not implemented.
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 |
AndBicScadMedia/YouCompleteMe | python/ycm/client/omni_completion_request.py | 48 | 1204 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm.client.completion_request import CompletionRequest
class OmniCompletionRequest( CompletionRequest ):
def __init__( self, omni_completer, request_data ):
super( OmniCompletionRequest, self ).__init__( request_data )
self._omni_completer = omni_completer
def Start( self ):
self._results = self._omni_completer.ComputeCandidates( self.request_data )
def Done( self ):
return True
def Response( self ):
return self._results
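# Illustrative flow (hypothetical caller, not part of this file):
#
#   request = OmniCompletionRequest( omni_completer, request_data )
#   request.Start()        # computes the candidates synchronously
#   if request.Done():
#       candidates = request.Response()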
| gpl-3.0 |
Rob-Johnson/marathon-python | marathon/models/endpoint.py | 8 | 1689 | try:
import json
except ImportError:
import simplejson as json
from .base import MarathonObject
class MarathonEndpoint(MarathonObject):
"""Marathon Endpoint helper object for service discovery. It describes a single port mapping for a running task.
:param str app_id: application id
:param str host: mesos slave running the task
:param str task_id: task id
:param int service_port: application service port
:param int task_port: port allocated on the slave
"""
def __repr__(self):
return "{clazz}::{app_id}::{service_port}::{task_id}::{task_port}".format(
clazz=self.__class__.__name__,
app_id=self.app_id,
service_port=self.service_port,
task_id=self.task_id,
task_port=self.task_port
)
def __init__(self, app_id=None, service_port=None, host=None, task_id=None, task_port=None):
self.app_id = app_id
self.service_port = service_port
self.host = host
self.task_id = task_id
self.task_port = task_port
@classmethod
def from_tasks(cls, tasks):
"""Construct a list of MarathonEndpoints from a list of tasks.
:param list[:class:`marathon.models.MarathonTask`] tasks: list of tasks to parse
:rtype: list[:class:`MarathonEndpoint`]
"""
endpoints = [
[
MarathonEndpoint(task.app_id, task.service_ports[port_index], task.host, task.id, port)
for port_index, port in enumerate(task.ports)
]
for task in tasks
]
# Flatten result
return [item for sublist in endpoints for item in sublist]
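# Illustrative usage (hypothetical client object, not part of this module):
#
#   tasks = marathon_client.list_tasks()
#   for ep in MarathonEndpoint.from_tasks(tasks):
#       print("service port %s -> %s:%s" % (ep.service_port, ep.host, ep.task_port))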
| mit |
2ndy/RaspIM | usr/lib/python2.6/fileinput.py | 224 | 14143 | """Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default, you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
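For example (an illustrative sketch, assuming input filenames are given
on the command line), an in-place filter could be written as:

    import fileinput
    import sys
    for line in fileinput.input(inplace=1):
        sys.stdout.write(line.replace("old", "new"))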
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
_state = None
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
"""input([files[, inplace[, backup[, mode[, openhook]]]]])
Create an instance of the FileInput class. The instance will be used
as global state for the functions of this module, and is also returned
to use during iteration. The parameters to this function will be passed
along to the constructor of the FileInput class.
"""
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup, bufsize, mode, openhook)
return _state
def close():
"""Close the sequence."""
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
"""
Close the current file so that the next iteration will read the first
line from the next file (if any); lines not read from the file will
not count towards the cumulative line count. The filename is not
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
"""
Return the name of the file currently being read.
Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filelineno()
def fileno():
"""
Return the file number of the current file. When no file is currently
opened, returns -1.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.fileno()
def isfirstline():
"""
    Returns true if the line just read is the first line of its file,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
"""class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
Class FileInput is the implementation of the module; its methods
filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(),
nextfile() and close() correspond to the functions of the same name
in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
if isinstance(files, basestring):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._bufsize = bufsize or DEFAULT_BUFSIZE
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = None
self._buffer = []
self._bufindex = 0
# restrict mode argument to reading modes
if mode not in ('r', 'rU', 'U', 'rb'):
raise ValueError("FileInput opening mode must be one of "
"'r', 'rU', 'U' and 'rb'")
self._mode = mode
if inplace and openhook:
raise ValueError("FileInput cannot use an opening hook in inplace mode")
elif openhook and not hasattr(openhook, '__call__'):
raise ValueError("FileInput openhook must be callable")
self._openhook = openhook
def __del__(self):
self.close()
def close(self):
self.nextfile()
self._files = ()
def __iter__(self):
return self
def next(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
line = self.readline()
if not line:
raise StopIteration
return line
def __getitem__(self, i):
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
try:
return self.next()
except StopIteration:
raise IndexError, "end of input reached"
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except OSError: pass
self._isstdin = False
self._buffer = []
self._bufindex = 0
def readline(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = True
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or os.extsep+"bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, self._mode)
try:
perm = os.fstat(self._file.fileno()).st_mode
except OSError:
self._output = open(self._filename, "w")
else:
fd = os.open(self._filename,
os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
perm)
self._output = os.fdopen(fd, "w")
try:
if hasattr(os, 'chmod'):
os.chmod(self._filename, perm)
except OSError:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
if self._openhook:
self._file = self._openhook(self._filename, self._mode)
else:
self._file = open(self._filename, self._mode)
self._buffer = self._file.readlines(self._bufsize)
self._bufindex = 0
if not self._buffer:
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
def lineno(self):
return self._lineno
def filelineno(self):
return self._filelineno
def fileno(self):
if self._file:
try:
return self._file.fileno()
except ValueError:
return -1
else:
return -1
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode)
elif ext == '.bz2':
import bz2
return bz2.BZ2File(filename, mode)
else:
return open(filename, mode)
def hook_encoded(encoding):
import codecs
def openhook(filename, mode):
return codecs.open(filename, mode, encoding)
return openhook
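# Illustrative use of the hooks above:
#
#   for line in input(files, openhook=hook_encoded("utf-8")):
#       ...  # each file is opened via codecs.open(), so lines are unicode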
def _test():
import getopt
inplace = 0
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
_test()
| gpl-2.0 |
rwaldron/jenkins | ips/proto.py | 8 | 2169 | # The MIT License
#
# Copyright (c) 2004-2009, Sun Microsystems, Inc., Kohsuke Kawaguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# definition of the IPS package.
# see https://updatecenter2.dev.java.net/maven-makepkgs-plugin/ for more about this
import builder
# IPS can't do SNAPSHOT
version = builder.props['version']
if version.endswith("-SNAPSHOT"):
    version = version[:-9]
pkg = builder.build_pkg(name="jenkins", version=version+",0-0")
pkg.update({
"attributes" : {
"pkg.summary" : "Jenkins",
"pkg.description" : "Extensible continuous integration system",
}
})
# restart_fmri instructs IPS to reload the manifest
pkg.addfile("/usr/local/bin/jenkins.war",{"file":"./target/jenkins.war"})
pkg.addfile("/var/svc/manifest/application/jenkins.xml",{"file":"../ips/jenkins.xml","restart_fmri":"svc:/system/manifest-import:default"})
# this is the Jenkins (formerly Hudson) home directory
pkg.mkdirs("/var/lib/jenkins")
# TODO: register SMF when the feature is available?
# see http://www.pauloswald.com/article/29/hudson-solaris-smf-manifest
# see http://blogs.sun.com/wittyman/entry/postgresql_packages_from_ips_repository
| mit |
limao4223127/NCR_Pay | Agent Lead Spanish/AKMII.DMRA.Test/Python Script/check.py | 2 | 1387 | import os
def ScanFolder(path):
fileList = []
for item in os.listdir(path):
filePath = os.path.join(path, item)
if os.path.isfile(filePath):
fileList.append(filePath)
if os.path.isdir(filePath):
ScanFolder(filePath)
if (len(fileList) > 0):
potentialFiles = filter(lambda c:c.find('Potential') > -1 and c.find('.pdf') > -1, fileList)
detailFiles = filter(lambda c:c.find('DetailRpt') > -1 and c.find('.pdf') > -1, fileList)
powerFiles = filter(lambda c:c.find('PowerName') > -1 and c.find('.pdf') > -1, fileList)
mapFiles = filter(lambda c:c.find('OverviewMap') > -1 and c.find('.pdf') > -1, fileList)
inputFiles = potentialFiles + detailFiles + powerFiles + mapFiles
if (len(inputFiles) > 0):
if not potentialFiles:
print 'Miss potential at ' + path
if not detailFiles:
print 'Miss detail at ' + path
## if not powerFiles:
## print 'Miss power at ' + path
## if not mapFiles:
## print 'Miss map at ' + path
if __name__ == "__main__":
reportPath = 'D:\\Agent Lead\\Site\\download\\Report'
try:
ScanFolder(reportPath)
except Exception as e:
print e
| gpl-2.0 |
L-Insomnia-P/kernel-msm | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
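        # Each IA-64 bundle is 16 bytes wide and encodes three instruction
        # slots, hence the 3/16 scaling below.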
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
flit/pyOCD | pyocd/utility/autoflush.py | 3 | 1642 | # pyOCD debugger
# Copyright (c) 2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core import exceptions
class Autoflush(object):
"""! @brief Context manager for performing flushes.
Pass a Target instance to the constructor, and when the context exits, the target will be
automatically flushed. If a TransferError or subclass, such as TransferFaultError, is raised
within the context, then the flush will be skipped.
The parameter passed to the constructor can actually be any object with a `flush()` method,
due to Python's dynamic dispatch.
"""
def __init__(self, target):
"""! @brief Constructor.
@param self The object.
@param target Object on which the flush will be performed. Normally this is a Target
instance.
"""
self._target = target
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None or not issubclass(type, exceptions.TransferError):
self._target.flush()
return False
| apache-2.0 |
sabi0/intellij-community | python/lib/Lib/site-packages/django/conf/locale/sv/formats.py | 232 | 1365 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y', # '10/25/06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| apache-2.0 |
louietsai/python-for-android | python3-alpha/extra_modules/gdata/apps/migration/__init__.py | 119 | 8582 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'google-apps-apis@googlegroups.com'
import atom
import gdata
# XML namespaces which are often used in Google Apps entity.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class Rfc822Msg(atom.AtomBase):
"""The Migration rfc822Msg element."""
_tag = 'rfc822Msg'
_namespace = APPS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['encoding'] = 'encoding'
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
self.text = text
self.encoding = 'base64'
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
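# A minimal construction sketch (``raw_rfc822_bytes`` is an illustrative
# name): the text payload must already be base64-encoded, matching the
# fixed ``encoding`` attribute set in __init__.
#
#   import base64
#   msg = Rfc822Msg(text=base64.b64encode(raw_rfc822_bytes))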
def Rfc822MsgFromString(xml_string):
"""Parse in the Rrc822 message from the XML definition."""
return atom.CreateClassFromXMLString(Rfc822Msg, xml_string)
class MailItemProperty(atom.AtomBase):
"""The Migration mailItemProperty element."""
_tag = 'mailItemProperty'
_namespace = APPS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, value=None, extension_elements=None,
extension_attributes=None, text=None):
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def MailItemPropertyFromString(xml_string):
"""Parse in the MailItemProperiy from the XML definition."""
return atom.CreateClassFromXMLString(MailItemProperty, xml_string)
class Label(atom.AtomBase):
"""The Migration label element."""
_tag = 'label'
_namespace = APPS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['labelName'] = 'label_name'
def __init__(self, label_name=None,
extension_elements=None, extension_attributes=None,
text=None):
self.label_name = label_name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def LabelFromString(xml_string):
"""Parse in the mailItemProperty from the XML definition."""
return atom.CreateClassFromXMLString(Label, xml_string)
class MailEntry(gdata.GDataEntry):
"""A Google Migration flavor of an Atom Entry."""
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
_children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
[MailItemProperty])
_children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
rfc822_msg=None, mail_item_property=None, label=None,
extended_property=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.GDataEntry.__init__(self, author=author, category=category,
content=content,
atom_id=atom_id, link=link, published=published,
title=title, updated=updated)
self.rfc822_msg = rfc822_msg
self.mail_item_property = mail_item_property
self.label = label
self.extended_property = extended_property or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def MailEntryFromString(xml_string):
"""Parse in the MailEntry from the XML definition."""
return atom.CreateClassFromXMLString(MailEntry, xml_string)
class BatchMailEntry(gdata.BatchEntry):
"""A Google Migration flavor of an Atom Entry."""
_tag = gdata.BatchEntry._tag
_namespace = gdata.BatchEntry._namespace
_children = gdata.BatchEntry._children.copy()
_attributes = gdata.BatchEntry._attributes.copy()
_children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
_children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
[MailItemProperty])
_children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
rfc822_msg=None, mail_item_property=None, label=None,
batch_operation=None, batch_id=None, batch_status=None,
extended_property=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.BatchEntry.__init__(self, author=author, category=category,
content=content,
atom_id=atom_id, link=link, published=published,
batch_operation=batch_operation,
batch_id=batch_id, batch_status=batch_status,
title=title, updated=updated)
self.rfc822_msg = rfc822_msg or None
self.mail_item_property = mail_item_property or []
self.label = label or []
self.extended_property = extended_property or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def BatchMailEntryFromString(xml_string):
"""Parse in the BatchMailEntry from the XML definition."""
return atom.CreateClassFromXMLString(BatchMailEntry, xml_string)
class BatchMailEventFeed(gdata.BatchFeed):
"""A Migration event feed flavor of an Atom Feed."""
_tag = gdata.BatchFeed._tag
_namespace = gdata.BatchFeed._namespace
_children = gdata.BatchFeed._children.copy()
_attributes = gdata.BatchFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry])
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, interrupted=None, extension_elements=None,
extension_attributes=None, text=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
interrupted=interrupted,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
class MailEntryProperties(object):
"""Represents a mail message and its attributes."""
def __init__(self, mail_message=None, mail_item_properties=None,
mail_labels=None, identifier=None):
self.mail_message = mail_message
self.mail_item_properties = mail_item_properties or []
self.mail_labels = mail_labels or []
self.identifier = identifier
def BatchMailEventFeedFromString(xml_string):
"""Parse in the BatchMailEventFeed from the XML definition."""
return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string)
| apache-2.0 |
BrainTech/pre-pisak | modules/spellers/speller1c.py | 1 | 22723 | #!/bin/env python2.7
# -*- coding: utf-8 -*-
# This file is part of AT-Platform.
#
# AT-Platform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AT-Platform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AT-Platform. If not, see <http://www.gnu.org/licenses/>.
import wxversion
wxversion.select( '2.8' )
import glob, os, time
import wx, alsaaudio
import wx.lib.buttons as bt
from pymouse import PyMouse
from string import maketrans
from pygame import mixer
#=============================================================================
class speller( wx.Frame ):
def __init__(self, parent, id):
self.winWidth, self.winHeight = wx.DisplaySize( )
wx.Frame.__init__( self , parent , id , 'ATPlatform Speller' )
style = self.GetWindowStyle( )
self.SetWindowStyle( style | wx.STAY_ON_TOP )
self.parent = parent
self.Maximize( True )
self.Centre( True )
self.MakeModal( True )
self.initializeParameters( )
self.initializeBitmaps( )
self.createGui( )
self.initializeTimer( )
self.createBindings( )
#-------------------------------------------------------------------------
def initializeParameters(self):
with open( './.pathToATPlatform' ,'r' ) as textFile:
self.pathToATPlatform = textFile.readline( )
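        # The parameters file is expected to hold one "name = value" entry
        # per line; the sample values below are illustrative (they match the
        # fallback defaults assigned further down):
        #   timeGap = 1500
        #   backgroundColour = white
        #   scanningColour = #E7FAFD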
with open( self.pathToATPlatform + 'parameters', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'timeGap':
self.timeGap = int( line[ line.rfind('=')+2:-1 ] )
elif line[ :line.find('=')-1 ] == 'backgroundColour':
self.backgroundColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'textColour':
self.textColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'scanningColour':
self.scanningColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'selectionColour':
self.selectionColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'filmVolume':
self.filmVolumeLevel = int( line[ line.rfind('=')+2:-1 ] )
elif line[ :line.find('=')-1 ] == 'musicVolume':
self.musicVolumeLevel = int( line[ line.rfind('=')+2:-1 ] )
elif not line.isspace( ):
print 'Niewłaściwie opisane parametry'
print 'Błąd w pliku parameters w linii', line
self.timeGap = 1500
self.backgroundColour = 'white'
self.textColour = 'black'
self.scanningColour = '#E7FAFD'
self.selectionColour = '#9EE4EF'
self.filmVolumeLevel = 100
self.musicVolumeLevel = 40
with open( self.pathToATPlatform + 'spellerParameters', 'r' ) as parametersFile:
for line in parametersFile:
if line[ :line.find('=')-1 ] == 'voice':
self.voice = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'vowelColour':
self.vowelColour = line[ line.rfind('=')+2:-1 ]
elif line[ :line.find('=')-1 ] == 'polishLettersColour':
self.polishLettersColour = line[ line.rfind('=')+2:-1 ]
elif not line.isspace( ):
print 'Niewłaściwie opisane parametry'
print 'Błąd w pliku spellerParameters w linii', line
self.voice = 'False'
self.vowelColour = 'False'
self.polishLettersColour = 'False'
self.labels = [ 'A E B C D F G H I O J K L M N P U Y R S T W Z SPECIAL_CHARACTERS UNDO SPEAK SAVE SPACJA OPEN EXIT'.split( ), '1 2 3 4 5 6 7 8 9 0 + - * / = % $ & . , ; : " ? ! @ # ( ) [ ] { } < > ~ UNDO SPEAK SAVE SPACJA OPEN EXIT'.split( ) ]
self.colouredLabels = [ 'A', 'E', 'I', 'O', 'U', 'Y' ]
self.numberOfRows = [ 4, 5 ]
self.numberOfColumns = [ 8, 9 ]
self.flag = 'row'
self.rowIteration = 0
self.columnIteration = 0
self.countRows = 0
self.countColumns = 0
self.maxNumberOfRows = 2
self.maxNumberOfColumns = 2
self.numberOfPresses = 1
self.subSizerNumber = 0
self.mouseCursor = PyMouse( )
self.mousePosition = self.winWidth - 8, self.winHeight - 8
self.mouseCursor.move( *self.mousePosition )
mixer.init( )
self.typewriterKeySound = mixer.Sound( self.pathToATPlatform + 'sounds/typewriter_key.wav' )
self.typewriterForwardSound = mixer.Sound( self.pathToATPlatform + 'sounds/typewriter_forward.wav' )
self.typewriterSpaceSound = mixer.Sound( self.pathToATPlatform + 'sounds/typewriter_space.wav' )
if self.voice == 'True':
self.phones = glob.glob( self.pathToATPlatform + 'sounds/phone/*' )
self.phoneLabels = [ item[ item.rfind( '/' )+1 : item.rfind( '.' ) ] for item in self.phones ]
self.sounds = [ mixer.Sound( self.sound ) for self.sound in self.phones ]
self.SetBackgroundColour( 'black' )
#-------------------------------------------------------------------------
def initializeBitmaps(self):
labelFiles = [ self.pathToATPlatform + file for file in [ 'icons/speller/special_characters.png', 'icons/speller/undo.png', 'icons/speller/speak.png', 'icons/speller/save.png', 'icons/speller/open.png', 'icons/speller/exit.png', ] ]
self.labelBitmaps = { }
labelBitmapIndex = [ self.labels[ 0 ].index( self.labels[ 0 ][ -7 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -6 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -5 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -4 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -2 ] ), self.labels[ 0 ].index( self.labels[ 0 ][ -1 ] ) ]
for labelFilesIndex, labelIndex in enumerate( labelBitmapIndex ):
self.labelBitmaps[ self.labels[ 0 ][ labelIndex ] ] = wx.BitmapFromImage( wx.ImageFromStream( open( labelFiles[ labelFilesIndex ], 'rb' )) )
self.labelBitmaps2 = { }
labelBitmapIndex2 = [ self.labels[ 1 ].index( self.labels[ 1 ][ -6 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -5 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -4 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -2 ] ), self.labels[ 1 ].index( self.labels[ 1 ][ -1 ] ) ]
for labelFilesIndex2, labelIndex2 in enumerate( labelBitmapIndex2 ):
self.labelBitmaps2[ self.labels[ 1 ][ labelIndex2 ] ] = wx.BitmapFromImage( wx.ImageFromStream( open( labelFiles[ -5: ][ labelFilesIndex2 ], 'rb' )) )
#-------------------------------------------------------------------------
def createGui(self):
self.mainSizer = wx.BoxSizer( wx.VERTICAL )
self.textField = wx.TextCtrl( self, style = wx.TE_LEFT, size = ( self.winWidth, 0.2 * self.winHeight ) )
self.textField.SetFont( wx.Font( 60, wx.SWISS, wx.NORMAL, wx.NORMAL ) )
self.mainSizer.Add( self.textField, flag = wx.EXPAND | wx.TOP | wx.BOTTOM, border = 3 )
self.subSizers = [ ]
subSizer = wx.GridBagSizer( 3, 3 )
for index_1, item in enumerate( self.labels[ 0 ][ :-7 ] ):
b = bt.GenButton( self, -1, item, name = item, size = ( 0.985*self.winWidth / self.numberOfColumns[ 0 ], 0.745 * self.winHeight / self.numberOfRows[ 0 ] ) )
b.SetFont( wx.Font( 35, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
b.SetBezelWidth( 3 )
b.SetBackgroundColour( self.backgroundColour )
if item in self.colouredLabels and self.vowelColour != 'False':
b.SetForegroundColour( self.vowelColour )
else:
b.SetForegroundColour( self.textColour )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer.Add( b, ( index_1 / self.numberOfColumns[ 0 ], index_1 % self.numberOfColumns[ 0 ] ), wx.DefaultSpan, wx.EXPAND )
for index_2, item in enumerate( self.labels[ 0 ][ -7 : -3 ], start = 1 ):
b = bt.GenBitmapButton( self, -1, bitmap = self.labelBitmaps[ item ] )
b.SetBackgroundColour( self.backgroundColour )
b.SetBezelWidth( 3 )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer.Add( b, ( ( index_1 + index_2 ) / self.numberOfColumns[ 0 ], ( index_1 + index_2 ) % self.numberOfColumns[ 0 ] ), wx.DefaultSpan, wx.EXPAND )
for item in ( self.labels[ 0 ][ -3 ], ):
b = bt.GenButton( self, -1, item, name = item, size = ( 3 * 0.985*self.winWidth / self.numberOfColumns[ 0 ], 0.745 * self.winHeight / self.numberOfRows[ 0 ] ) )
b.SetFont( wx.Font( 35, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
b.SetBezelWidth( 3 )
b.SetBackgroundColour( self.backgroundColour )
b.SetForegroundColour( self.textColour )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer.Add( b, ( ( index_1 + index_2 ) / self.numberOfColumns[ 0 ], ( index_1 + index_2 + 1 ) % self.numberOfColumns[ 0 ] ), ( 1, 3 ), wx.EXPAND )
for index_3, item in enumerate( self.labels[ 0 ][ -2: ], start = 4 ):
b = bt.GenBitmapButton( self, -1, bitmap = self.labelBitmaps[ item ] )
b.SetBackgroundColour( self.backgroundColour )
b.SetBezelWidth( 3 )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer.Add( b, ( ( index_1 + index_2 + index_3 ) / self.numberOfColumns[ 0 ], ( index_1 + index_2 + index_3 ) % self.numberOfColumns[ 0 ] ), wx.DefaultSpan, wx.EXPAND )
self.subSizers.append( subSizer )
self.mainSizer.Add( self.subSizers[ 0 ], proportion = 1, flag = wx.EXPAND )
self.SetSizer( self.mainSizer )
self.Center( )
subSizer2 = wx.GridBagSizer( 3, 3 )
for index_1, item in enumerate( self.labels[ 1 ][ :-6 ] ):
b = bt.GenButton( self, -1, item, name = item, size = ( 0.985*self.winWidth / self.numberOfColumns[ 1 ], 0.7 * self.winHeight / self.numberOfRows[ 1 ] ) )
b.SetFont( wx.Font( 35, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
b.SetBezelWidth( 3 )
b.SetBackgroundColour( self.backgroundColour )
b.SetForegroundColour( self.textColour )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer2.Add( b, ( index_1 / self.numberOfColumns[ 1 ], index_1 % self.numberOfColumns[ 1 ] ), wx.DefaultSpan, wx.EXPAND )
for index_2, item in enumerate( self.labels[ 1 ][ -6 : -3 ], start = 1 ):
b = bt.GenBitmapButton( self, -1, bitmap = self.labelBitmaps2[ item ] )
b.SetBackgroundColour( self.backgroundColour )
b.SetBezelWidth( 3 )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer2.Add( b, ( ( index_1 + index_2 ) / self.numberOfColumns[ 1 ], ( index_1 + index_2 ) % self.numberOfColumns[ 1 ] ), wx.DefaultSpan, wx.EXPAND )
for item in ( self.labels[ 1 ][ -3 ], ):
b = bt.GenButton( self, -1, item, name = item, size = ( 3 * 0.985*self.winWidth / self.numberOfColumns[ 1 ], 0.7 * self.winHeight / self.numberOfRows[ 1 ] ) )
b.SetFont( wx.Font( 35, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
b.SetBezelWidth( 3 )
b.SetBackgroundColour( self.backgroundColour )
b.SetForegroundColour( self.textColour )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer2.Add( b, ( ( index_1 + index_2 ) / self.numberOfColumns[ 1 ], ( index_1 + index_2 + 1 ) % self.numberOfColumns[ 1 ] ), ( 1, 4 ), wx.EXPAND )
for index_3, item in enumerate( self.labels[ 1 ][ -2: ], start = 5 ):
b = bt.GenBitmapButton( self, -1, bitmap = self.labelBitmaps2[ item ] )
b.SetBackgroundColour( self.backgroundColour )
b.SetBezelWidth( 3 )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
subSizer2.Add( b, ( ( index_1 + index_2 + index_3 ) / self.numberOfColumns[ 1 ], ( index_1 + index_2 + index_3 ) % self.numberOfColumns[ 1 ] ), wx.DefaultSpan, wx.EXPAND )
self.subSizers.append( subSizer2 )
self.mainSizer.Add( self.subSizers[ 1 ], proportion = 1, flag = wx.EXPAND )
self.mainSizer.Show( item = self.subSizers[ 1 ], show = False, recursive = True )
self.SetSizer( self.mainSizer )
self.Center( )
#-------------------------------------------------------------------------
def initializeTimer(self):
self.stoper = wx.Timer( self )
self.Bind( wx.EVT_TIMER, self.timerUpdate, self.stoper )
self.stoper.Start( self.timeGap )
#-------------------------------------------------------------------------
def createBindings(self):
self.Bind( wx.EVT_CLOSE, self.OnCloseWindow )
#-------------------------------------------------------------------------
def OnCloseWindow(self, event):
self.mousePosition = self.winWidth/1.85, self.winHeight/1.85
self.mouseCursor.move( *self.mousePosition )
dial = wx.MessageDialog(None, 'Czy napewno chcesz wyjść z programu?', 'Wyjście',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION | wx.STAY_ON_TOP)
ret = dial.ShowModal()
if ret == wx.ID_YES:
if __name__ == '__main__':
self.Destroy()
else:
self.parent.Destroy( )
self.Destroy( )
else:
event.Veto()
self.mousePosition = self.winWidth - 8, self.winHeight - 8
self.mouseCursor.move( *self.mousePosition )
#-------------------------------------------------------------------------
def onExit(self):
if __name__ == '__main__':
self.stoper.Stop ( )
self.Destroy( )
else:
self.stoper.Stop( )
self.MakeModal( False )
self.parent.Show( True )
self.parent.stoper.Start( self.parent.timeGap )
self.Destroy( )
#-------------------------------------------------------------------------
def onPress(self, event):
self.numberOfPresses += 1
if self.numberOfPresses == 1:
if self.flag == 'rest':
self.flag = 'row'
self.rowIteration = 0
elif self.flag == 'row':
if self.rowIteration != self.numberOfRows[ self.subSizerNumber ]:
buttonsToHighlight = range( ( self.rowIteration - 1 ) * self.numberOfColumns[ self.subSizerNumber ], ( self.rowIteration - 1 ) * self.numberOfColumns[ self.subSizerNumber ] + self.numberOfColumns[ self.subSizerNumber ] )
else:
buttonsToHighlight = range( ( self.rowIteration - 1 ) * self.numberOfColumns[ self.subSizerNumber ], ( self.rowIteration - 1 ) * self.numberOfColumns[ self.subSizerNumber ] + 6 )
for button in buttonsToHighlight:
item = self.subSizers[ self.subSizerNumber ].GetItem( button )
b = item.GetWindow( )
b.SetBackgroundColour( self.selectionColour )
b.SetFocus( )
b.Update( )
self.flag = 'columns'
self.rowIteration -= 1
self.columnIteration = 0
elif self.flag == 'columns' and self.rowIteration != self.numberOfRows[ self.subSizerNumber ] - 1:
item = self.subSizers[ self.subSizerNumber ].GetItem( ( self.rowIteration ) * self.numberOfColumns[ self.subSizerNumber ] + self.columnIteration - 1 )
b = item.GetWindow( )
b.SetBackgroundColour( self.selectionColour )
b.SetFocus( )
b.Update( )
label = self.labels[ self.subSizerNumber ][ self.rowIteration * self.numberOfColumns[ self.subSizerNumber ] + self.columnIteration - 1 ]
if label == 'SPECIAL_CHARACTERS':
self.subSizerNumber = 1
self.mainSizer.Show( item = self.subSizers[ 1 ], show = True, recursive = True )
self.mainSizer.Show( item = self.subSizers[ 0 ], show = False, recursive = True )
self.SetSizer( self.mainSizer )
self.Layout( )
else:
self.typewriterKeySound.play( )
#self.typingSound.Play(wx.SOUND_ASYNC) doesn't work. Wonder why
self.textField.AppendText( label )
self.flag = 'row'
self.rowIteration = 0
self.columnIteration = 0
self.countColumns = 0
elif self.flag == 'columns' and self.rowIteration == self.numberOfRows[ self.subSizerNumber ] - 1:
item = self.subSizers[ self.subSizerNumber ].GetItem( ( self.rowIteration ) * self.numberOfColumns[ self.subSizerNumber ] + self.columnIteration-1 )
b = item.GetWindow( )
b.SetBackgroundColour( self.selectionColour )
b.SetFocus( )
b.Update( )
label = self.labels[ self.subSizerNumber ][ self.rowIteration * self.numberOfColumns[ self.subSizerNumber ] + self.columnIteration-1 ]
if label == 'UNDO':
self.typewriterForwardSound.play( )
self.textField.Remove( self.textField.GetLastPosition( ) - 1, self.textField.GetLastPosition( ) )
elif label == 'SPEAK':
text = str( self.textField.GetValue( ) )
if text == '' or text.isspace( ):
pass
else:
inputTable = '~!#$&( )[]{}<>;:"\|'
outputTable = ' ' * len( inputTable )
translateTable = maketrans( inputTable, outputTable )
textToSpeech = text.translate( translateTable )
replacements = { '-' : ' minus ', '+' : ' plus ', '*' : ' razy ', '/' : ' podzielić na ', '=' : ' równa się ', '%' : ' procent ' }
textToSpeech = reduce( lambda text, replacer: text.replace( *replacer ), replacements.iteritems( ), textToSpeech )
time.sleep( 1 )
os.system( 'milena_say %s' %textToSpeech )
elif label == 'SAVE':
text = str( self.textField.GetValue( ) )
if text == '':
pass
else:
f = open( 'myTextFile.txt', 'w' )
f.write( self.textField.GetValue( ) )
f.close( )
elif label == 'SPACJA':
self.typewriterSpaceSound.play( )
self.textField.AppendText( ' ' )
elif label == 'OPEN':
try:
                        textToLoad = open( 'myTextFile.txt' ).read( )
self.textField.Clear( )
self.textField.AppendText( textToLoad )
except IOError:
pass
elif label == 'EXIT':
if self.subSizerNumber == 0:
self.onExit( )
else:
self.mainSizer.Show( item = self.subSizers[ self.subSizerNumber ], show = False, recursive = True )
self.subSizerNumber = 0
self.mainSizer.Show( item = self.subSizers[ self.subSizerNumber ], show = True, recursive = True )
self.SetSizer( self.mainSizer )
self.Layout( )
self.flag = 'row'
self.rowIteration = 0
self.columnIteration = 0
self.countRows = 0
self.countColumns = 0
else:
            event.Skip( )  # only skip the event in this else branch
#-------------------------------------------------------------------------
def timerUpdate(self, event):
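        # Scanning state machine: in 'row' mode each timer tick highlights
        # the next row; a button press switches to 'columns' mode, where
        # ticks walk along the chosen row and a second press activates the
        # focused key. After maxNumberOfRows/maxNumberOfColumns unanswered
        # sweeps the scanner falls back to 'rest'/'row' respectively.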
self.mouseCursor.move( *self.mousePosition )
self.numberOfPresses = 0
if self.flag == 'row':
if self.countRows == self.maxNumberOfRows:
self.flag = 'rest'
self.countRows = 0
items = self.subSizers[ self.subSizerNumber ].GetChildren( )
for item in items:
b = item.GetWindow( )
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus( )
b.Update( )
else:
self.rowIteration = self.rowIteration % self.numberOfRows[ self.subSizerNumber ]
items = self.subSizers[ self.subSizerNumber ].GetChildren( )
for item in items:
b = item.GetWindow( )
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus( )
b.Update( )
if self.rowIteration == self.numberOfRows[ self.subSizerNumber ] - 1:
self.countRows += 1
buttonsToHighlight = range( self.rowIteration * self.numberOfColumns[ self.subSizerNumber ], self.rowIteration * self.numberOfColumns[ self.subSizerNumber ] + 6 )
else:
buttonsToHighlight = range( self.rowIteration * self.numberOfColumns[ self.subSizerNumber ], self.rowIteration * self.numberOfColumns[ self.subSizerNumber ] + self.numberOfColumns[ self.subSizerNumber ] )
for button in buttonsToHighlight:
item = self.subSizers[ self.subSizerNumber ].GetItem( button )
b = item.GetWindow( )
b.SetBackgroundColour( self.scanningColour )
b.SetFocus( )
b.Update( )
self.rowIteration += 1
if self.voice == 'True':
os.system( 'milena_say %i' % ( self.rowIteration ) )
elif self.flag == 'columns':
if self.countColumns == self.maxNumberOfColumns:
self.flag = 'row'
item = self.subSizers[ self.subSizerNumber ].GetItem( self.rowIteration * self.numberOfColumns[ self.subSizerNumber ] + self.columnIteration - 1 )
b = item.GetWindow( )
b.SetBackgroundColour( self.backgroundColour )
self.rowIteration = 0
self.columnIteration = 0
self.countColumns = 0
else:
if self.columnIteration == self.numberOfColumns[ self.subSizerNumber ] - 1 or (self.subSizerNumber == 0 and self.columnIteration == self.numberOfColumns[ self.subSizerNumber ] - 3 and self.rowIteration == self.numberOfRows[ self.subSizerNumber ] - 1 ) or ( self.subSizerNumber == 1 and self.columnIteration == self.numberOfColumns[ self.subSizerNumber ] - 4 and self.rowIteration == self.numberOfRows[ self.subSizerNumber ] - 1 ):
self.countColumns += 1
if self.columnIteration == self.numberOfColumns[ self.subSizerNumber ] or ( self.subSizerNumber == 0 and self.columnIteration == self.numberOfColumns[ self.subSizerNumber ] - 2 and self.rowIteration == self.numberOfRows[ self.subSizerNumber ] - 1 ) or ( self.subSizerNumber == 1 and self.columnIteration == self.numberOfColumns[ self.subSizerNumber ] - 3 and self.rowIteration == self.numberOfRows[ self.subSizerNumber ] - 1 ):
self.columnIteration = 0
items = self.subSizers[ self.subSizerNumber ].GetChildren( )
for item in items:
b = item.GetWindow( )
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus( )
b.Update( )
item = self.subSizers[ self.subSizerNumber ].GetItem( self.rowIteration * self.numberOfColumns[ self.subSizerNumber ] + self.columnIteration )
b = item.GetWindow( )
b.SetBackgroundColour( self.scanningColour )
b.SetFocus( )
b.Update( )
if self.voice == 'True':
label = self.labels[ self.subSizerNumber ][ self.rowIteration * self.numberOfColumns[ self.subSizerNumber ] + self.columnIteration ]
try:
soundIndex = self.phoneLabels.index( [ item for item in self.phoneLabels if item == label ][ 0 ] )
sound = self.sounds[ soundIndex ]
sound.play( )
except IndexError:
pass
self.columnIteration += 1
else:
pass
#=============================================================================
if __name__ == '__main__':
app = wx.PySimpleApp( )
frame = speller( parent = None, id = -1 )
frame.Show( True )
app.MainLoop( )
| gpl-3.0 |
piasek1906/Piasek.4.3.JWR | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
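# A minimal sketch of how the flag helpers are used (the event and field
# names below are illustrative): register the known values once, then
# decode raw field values as they arrive.
#
#   define_flag_field("example:event", "flags", "|")
#   define_flag_value("example:event", "flags", 1, "ENABLED")
#   define_flag_value("example:event", "flags", 2, "PENDING")
#   flag_str("example:event", "flags", 3)   # -> "ENABLED | PENDING"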
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
holmrenser/holmrenser.github.io | _scripts/table.py | 1 | 1345 | #!/usr/bin/python
__author__ = 'rensholmer'
__created__ = '04/09/15'
import sys
import glob
import json
import copy
_sp = { 'al':0,'at':0,'ar':0,'br':0,
'cp':0,'fv':1,'gm':1,'gr':0,
'mt':1,'pt':1,'sl':0,'st':0,
'ta':0,'vv':0,'os':0,'lj':0,
'rc':1,'zm':0,'ph':1,'md':1,
'cr':0}
def main():
files = glob.glob('../json/*json')
print 'name,file,num_genes,num_links,num_in_cluster,num_in_nfc,num_clusters,max_cluster'
for f in files:
filename = f.split('/')[-1]
name = filename.split('.')[0]
clusters = {}
in_nfc = 0
out_nfc = 0
in_cluster = 0
with open(f,'rU') as fh:
dic = json.load(fh)
num_genes = len(dic['nodes'])
num_links = len([x for x in dic['links'] if 'block_score' in x])
for node in dic['nodes']:
cluster_id = node['clusters']['1.2']
if cluster_id > 0:
in_cluster += 1
if cluster_id in clusters:
clusters[cluster_id] += 1
else:
clusters[cluster_id] = 1
if node['anchor'] == 1:
if cluster_id > 0:
anchor_in_cluster = 1
else:
anchor_in_cluster = 0
if _sp[node['species']] == 1:
in_nfc += 1
else:
out_nfc += 1
fields = [name,filename,num_genes,num_links,in_cluster,in_nfc,len(clusters),max(clusters.values())]
print ','.join([str(x) for x in fields])
if __name__ == '__main__':
main() | mit |
lazaronixon/enigma2 | tools/create_picon_links.py | 192 | 1273 | #
# create links for picon
# usage: create_picon_links lamedb
# run in picon directory.
# It will read the servicenames from the lamedb and create symlinks
# for the servicereference names.
import os, sys
f = open(sys.argv[1]).readlines()
f = f[f.index("services\n")+1:-3]
while len(f):
ref = [int(x, 0x10) for x in f[0][:-1].split(':')]
name = f[1][:-1]
name = name.replace('\xc2\x87', '').replace('\xc2\x86', '')
# SID:NS:TSID:ONID:STYPE:UNUSED(channelnumber in enigma1)
# X X X X D D
# REFTYPE:FLAGS:STYPE:SID:TSID:ONID:NS:PARENT_SID:PARENT_TSID:UNUSED
# D D X X X X X X X X
refstr = "1:0:%X:%X:%X:%X:%X:0:0:0" % (ref[4], ref[0], ref[2], ref[3], ref[1])
refstr = refstr.replace(':', '_')
filename = name + ".png"
linkname = refstr + ".png"
filename = filename.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '')
filename = filename.replace('\n', '')
for i in range(len(filename)):
if ord(filename[i]) > 127:
filename = filename[0:i] + '_' + filename[i + 1:]
if os.access(filename, os.F_OK) and not os.access(linkname, os.F_OK):
os.symlink(filename, linkname)
else:
print "could not find %s (%s)" % (filename, name)
	f = f[3:]
| gpl-2.0 |
ales-erjavec/scipy | scipy/optimize/tests/test_cobyla.py | 100 | 3562 | from __future__ import division, print_function, absolute_import
import math
import numpy as np
from numpy.testing import assert_allclose, TestCase, run_module_suite, \
assert_
from scipy.optimize import fmin_cobyla, minimize
class TestCobyla(TestCase):
def setUp(self):
self.x0 = [4.95, 0.66]
self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
'maxiter': 100}
def fun(self, x):
return x[0]**2 + abs(x[1])**3
def con1(self, x):
return x[0]**2 + x[1]**2 - 25
def con2(self, x):
return -self.con1(x)
def test_simple(self):
x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
rhoend=1e-5, iprint=0, maxfun=100)
assert_allclose(x, self.solution, atol=1e-4)
def test_minimize_simple(self):
# Minimize with method='COBYLA'
cons = ({'type': 'ineq', 'fun': self.con1},
{'type': 'ineq', 'fun': self.con2})
sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
options=self.opts)
assert_allclose(sol.x, self.solution, atol=1e-4)
assert_(sol.success, sol.message)
assert_(sol.maxcv < 1e-5, sol)
assert_(sol.nfev < 70, sol)
assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
def test_minimize_constraint_violation(self):
np.random.seed(1234)
pb = np.random.rand(10, 10)
spread = np.random.rand(10)
def p(w):
return pb.dot(w)
def f(w):
return -(w * spread).sum()
def c1(w):
return 500 - abs(p(w)).sum()
def c2(w):
return 5 - abs(p(w).sum())
def c3(w):
return 5 - abs(p(w)).max()
cons = ({'type': 'ineq', 'fun': c1},
{'type': 'ineq', 'fun': c2},
{'type': 'ineq', 'fun': c3})
w0 = np.zeros((10, 1))
sol = minimize(f, w0, method='cobyla', constraints=cons,
options={'catol': 1e-6})
assert_(sol.maxcv > 1e-6)
assert_(not sol.success)
def test_vector_constraints():
# test that fmin_cobyla and minimize can take a combination
# of constraints, some returning a number and others an array
def fun(x):
return (x[0] - 1)**2 + (x[1] - 2.5)**2
def fmin(x):
return fun(x) - 1
def cons1(x):
a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] +
a[i, 2] for i in range(len(a))])
def cons2(x):
return x # identity, acts as bounds x > 0
x0 = np.array([2, 0])
cons_list = [fun, cons1, cons2]
xsol = [1.4, 1.7]
fsol = 0.8
# testing fmin_cobyla
sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5, iprint=0)
assert_allclose(sol, xsol, atol=1e-4)
sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5, iprint=0)
assert_allclose(fun(sol), 1, atol=1e-4)
# testing minimize
constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.x, xsol, atol=1e-4)
assert_(sol.success, sol.message)
assert_allclose(sol.fun, fsol, atol=1e-4)
constraints = {'type': 'ineq', 'fun': fmin}
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.fun, 1, atol=1e-4)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
glyph/imaginary | imaginary/test/test_wiring.py | 1 | 3706 |
"""
Tests for L{imaginary.wiring}
These tests are not particularly good at the moment. They are, however, a
minor step up from nothing.
"""
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from axiom.store import Store
from axiom.dependency import installOn
from axiom.userbase import LoginSystem, getAccountNames
from xmantissa.ixmantissa import ITerminalServerFactory
from xmantissa.offering import installOffering
from xmantissa.terminal import _AuthenticatedShellViewer
from axiom.plugins.mantissacmd import Mantissa
from imaginary.world import ImaginaryWorld
from imaginary.wiring.textserver import ImaginaryApp
from xmantissa.plugins.imaginaryoff import imaginaryOffering
from twisted.test.proto_helpers import StringTransport
from twisted.conch.insults.insults import ServerProtocol
from characteristic import attributes
@attributes("proto world".split())
class TestWorld(object):
"""
    A fixture for testing a terminal protocol.
"""
def buildWorld(testCase):
"""
Build a L{TestWorld}.
"""
# XXX This is too many stores for a unit test to need to create.
siteStore = Store(filesdir=FilePath(testCase.mktemp()))
Mantissa().installSite(siteStore, u'example.com', u'', False)
installOffering(siteStore, imaginaryOffering, {})
login = siteStore.findUnique(LoginSystem)
account = login.addAccount(u'alice', u'example.com', u'password')
userStore = account.avatars.open()
app = ImaginaryApp(store=userStore)
installOn(app, userStore)
imaginary = login.accountByAddress(u'Imaginary', None).avatars.open()
world = imaginary.findUnique(ImaginaryWorld)
# Alice connects to her own ImaginaryApp (all that is possible at the
# moment).
viewer = _AuthenticatedShellViewer(getAccountNames(userStore))
return TestWorld(proto=app.buildTerminalProtocol(viewer),
world=world)
class ImaginaryAppTests(TestCase):
"""
Tests for L{ImaginaryApp}, which provides access to Imaginary via
L{ShellServer}, the top-level Mantissa SSH handler.
"""
def test_interface(self):
"""
L{ImaginaryApp} implements L{ITerminalServerFactory}
"""
self.assertTrue(verifyObject(ITerminalServerFactory, ImaginaryApp()))
def test_powerup(self):
"""
L{installOn} powers up the target for L{ITerminalServerFactory} with
L{ImaginaryApp}.
"""
store = Store()
app = ImaginaryApp(store=store)
installOn(app, store)
self.assertIdentical(ITerminalServerFactory(store), app)
def test_buildTerminalProtocol(self):
"""
L{ImaginaryApp.buildTerminalProtocol} returns a
L{CharacterSelectionTextServer} instance with a role representing the
store it is in, a reference to the L{ImaginaryWorld} installed on the
Imaginary application store, and a list of L{Thing} items shared to the
role.
"""
testWorld = buildWorld(self)
self.assertIdentical(testWorld.proto.world, testWorld.world)
self.assertEqual(testWorld.proto.role.externalID, u'alice@example.com')
self.assertEqual(testWorld.proto.choices, [])
def test_connectionMadePrompt(self):
"""
L{CharacterSelectionTextServer} prompts the player upon connection,
giving them the option to create a character.
"""
testWorld = buildWorld(self)
transport = StringTransport()
terminal = ServerProtocol(lambda: testWorld.proto)
terminal.makeConnection(transport)
self.assertIn("0) Create", transport.io.getvalue())
| mit |
mliu7/django-avatar | avatar/migrations/0002_auto__add_field_avatar_existing_thumbnail_sizes.py | 1 | 4545 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Avatar.existing_thumbnail_sizes'
db.add_column('avatar_avatar', 'existing_thumbnail_sizes', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(default='', max_length=1024, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Avatar.existing_thumbnail_sizes'
db.delete_column('avatar_avatar', 'existing_thumbnail_sizes')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 31, 0, 57, 55, 749131)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 31, 0, 57, 55, 749029)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'avatar.avatar': {
'Meta': {'object_name': 'Avatar'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'blank': 'True'}),
'date_uploaded': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'existing_thumbnail_sizes': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['avatar']
| bsd-3-clause |
python-pillow/pillow-perf | testsuite/cases/pillow.py | 1 | 2653 | import math
from PIL import Image, ImageFilter, ImageOps
from .base import BaseTestCase, root
Image.LANCZOS = Image.ANTIALIAS
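# Older Pillow releases expose only ANTIALIAS; alias it so the cases below
# can refer to LANCZOS uniformly (newer Pillow makes LANCZOS the canonical
# name for the same filter).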
class PillowTestCase(BaseTestCase):
filter_ids = {
0: 'ner', 1: 'lzs', 2: 'bil', 3: 'bic',
4: 'box', 5: 'hmn', 6: 'mtc',
}
def create_test_data(self):
im = Image.open(root('resources', 'color_circle.png'))
im = im.convert(self.mode)
im = self.resize(im, self.size, Image.BICUBIC)
assert im.size == tuple(self.size)
return [im]
@classmethod
def resize(cls, self, size, resample=Image.NEAREST):
"""
This implementation uses convolution resampling on Pillow 2.6 and below.
"""
self.load()
if self.size == size:
return self._new(self.im)
if self.mode in ("1", "P"):
resample = Image.NEAREST
if self.mode == 'RGBA':
return cls.resize(self.convert('RGBa'),
size, resample).convert('RGBA')
if self.mode == 'LA':
return cls.resize(self.convert('La'),
size, resample).convert('LA')
if resample == Image.NEAREST or not hasattr(self.im, 'stretch'):
im = self.im.resize(size, resample)
else:
im = self.im.stretch(size, resample)
return self._new(im)
@classmethod
def gaussian_blur(cls, self, radius, n=3):
if self.mode == 'RGBA':
return cls.gaussian_blur(self.convert('RGBa'),
radius, n).convert('RGBA')
if self.mode == 'LA':
return cls.gaussian_blur(self.convert('La'),
radius, n).convert('LA')
if not hasattr(self.im, 'box_blur'):
# Pillow 2.6 used different radius formula
return self._new(ImageOps.gaussian_blur(self, radius * 2.5))
# https://www.mia.uni-saarland.de/Publications/gwosdek-ssvm11.pdf
# [7] Box length.
L = math.sqrt(12.0 * float(radius) * radius / n + 1.0)
# [11] Box radius.
l = (L - 1.0) / 2.0
# Integer part.
li = math.floor(l)
# Reduce the fractional part in accordance with tests.
a = math.e ** (2.5 * (l - li) / (li + 1)) - 1
a /= math.e ** (2.5 / (li + 1)) - 1
box_radius = li + a
self.load()
return self._new(self.im.box_blur(box_radius, n))
@classmethod
def getchannel(cls, self, channel):
if hasattr(self, 'getchannel'):
return self.getchannel(channel)
else:
return self.split()[channel]
| mit |