repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀)
---|---|---|---|---|
mcanthony/nupic | refs/heads/master | src/nupic/support/enum.py | 26 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import re
import keyword
import functools
__IDENTIFIER_PATTERN = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
def __isidentifier(s):
if s in keyword.kwlist:
return False
return __IDENTIFIER_PATTERN.match(s) is not None
def Enum(*args, **kwargs):
"""
Utility function for creating enumerations in python
Example Usage:
>> Color = Enum("Red", "Green", "Blue", "Magenta")
>> print Color.Red
>> 0
>> print Color.Green
>> 1
>> print Color.Blue
>> 2
>> print Color.Magenta
>> 3
>> Color.Violet
>> 'violet'
>> Color.getLabel(Color.Red)
>> 'Red'
>> Color.getLabel(2)
>> 'Blue'
"""
def getLabel(cls, val):
""" Get a string label for the current value of the enum """
return cls.__labels[val]
def validate(cls, val):
""" Returns True if val is a valid value for the enumeration """
return val in cls.__values
def getValues(cls):
""" Returns a list of all the possible values for this enum """
return list(cls.__values)
def getLabels(cls):
"""Returns a list of all possible labels for this enum """
return list(cls.__labels.values())
for arg in list(args)+kwargs.keys():
if type(arg) is not str:
raise TypeError("Enum arg {0} must be a string".format(arg))
if not __isidentifier(arg):
raise ValueError("Invalid enum value '{0}'. "\
"'{0}' is not a valid identifier".format(arg))
#kwargs.update(zip(args, range(len(args))))
kwargs.update(zip(args, args))
newType = type("Enum", (object,), kwargs)
newType.__labels = dict( (v,k) for k,v in kwargs.iteritems())
newType.__values = set(newType.__labels.keys())
newType.getLabel = functools.partial(getLabel, newType)
newType.validate = functools.partial(validate, newType)
newType.getValues = functools.partial(getValues, newType)
newType.getLabels = functools.partial(getLabels, newType)
return newType
if __name__ == '__main__':
Color = Enum("Red", "Blue")
Shape = Enum("Square", "Triangle")
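# Editor's addition: a hedged usage sketch, not part of the original nupic
# module. Because the implementation above maps each name to itself via
# zip(args, args), an enum attribute holds its own label string, so
# getLabel() and validate() round-trip on those strings. The "Fruit" enum
# below is purely illustrative.
if __name__ == '__main__':
    Fruit = Enum("Apple", "Pear")
    assert Fruit.Apple == "Apple"
    assert Fruit.getLabel(Fruit.Apple) == "Apple"
    assert Fruit.validate("Apple") and not Fruit.validate("Kiwi")
    assert sorted(Fruit.getValues()) == ["Apple", "Pear"]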
|
sebadiaz/rethinkdb | refs/heads/next | external/v8_3.30.33.16/tools/js2c.py | 36 | #!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a utility for converting JavaScript source code into C-style
# char arrays. It is used for embedded JavaScript code in the V8
# library.
import os, re, sys, string
import optparse
import jsmin
import bz2
import textwrap
class Error(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def ToCArray(byte_sequence):
result = []
for chr in byte_sequence:
result.append(str(ord(chr)))
joined = ", ".join(result)
return textwrap.fill(joined, 80)
def RemoveCommentsAndTrailingWhitespace(lines):
lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
return lines
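# Editor's addition: a hedged, never-called sketch of what the filter above
# does; the JavaScript snippet is invented for illustration only.
def _remove_comments_sketch():
    src = "var x = 1; // comment\n/* block */var y = 2;  \n"
    # Line and block comments are dropped, trailing whitespace is collapsed.
    assert RemoveCommentsAndTrailingWhitespace(src) == "var x = 1;\nvar y = 2;\n"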
def ReadFile(filename):
file = open(filename, "rt")
try:
lines = file.read()
finally:
file.close()
return lines
EVAL_PATTERN = re.compile(r'\beval\s*\(')
WITH_PATTERN = re.compile(r'\bwith\s*\(')
def Validate(lines):
# Because of simplified context setup, eval and with are not
# allowed in the natives files.
if EVAL_PATTERN.search(lines):
raise Error("Eval disallowed in natives.")
if WITH_PATTERN.search(lines):
raise Error("With statements disallowed in natives.")
# Pass lines through unchanged.
return lines
def ExpandConstants(lines, constants):
for key, value in constants:
lines = key.sub(str(value), lines)
return lines
def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
pattern_match = name_pattern.search(lines, pos)
while pattern_match is not None:
# Scan over the arguments
height = 1
start = pattern_match.start()
end = pattern_match.end()
assert lines[end - 1] == '('
last_match = end
arg_index = [0] # Wrap state into array, to work around Python "scoping"
mapping = { }
def add_arg(str):
# Remember to expand recursively in the arguments
replacement = expander(str.strip())
mapping[macro.args[arg_index[0]]] = replacement
arg_index[0] += 1
while end < len(lines) and height > 0:
# We don't count commas at higher nesting levels.
if lines[end] == ',' and height == 1:
add_arg(lines[last_match:end])
last_match = end + 1
elif lines[end] in ['(', '{', '[']:
height = height + 1
elif lines[end] in [')', '}', ']']:
height = height - 1
end = end + 1
# Remember to add the last match.
add_arg(lines[last_match:end-1])
result = macro.expand(mapping)
# Replace the occurrence of the macro with the expansion
lines = lines[:start] + result + lines[end:]
pattern_match = name_pattern.search(lines, start + len(result))
return lines
def ExpandMacros(lines, macros):
# We allow macros to depend on the previously declared macros, but
# we don't allow self-dependencies or recursion.
for name_pattern, macro in reversed(macros):
def expander(s):
return ExpandMacros(s, macros)
lines = ExpandMacroDefinition(lines, 0, name_pattern, macro, expander)
return lines
class TextMacro:
def __init__(self, args, body):
self.args = args
self.body = body
def expand(self, mapping):
result = self.body
for key, value in mapping.items():
result = result.replace(key, value)
return result
class PythonMacro:
def __init__(self, args, fun):
self.args = args
self.fun = fun
def expand(self, mapping):
args = []
for arg in self.args:
args.append(mapping[arg])
return str(self.fun(*args))
CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
def ReadMacros(lines):
constants = []
macros = []
for line in lines.split('\n'):
hash = line.find('#')
if hash != -1: line = line[:hash]
line = line.strip()
if len(line) is 0: continue
const_match = CONST_PATTERN.match(line)
if const_match:
name = const_match.group(1)
value = const_match.group(2).strip()
constants.append((re.compile("\\b%s\\b" % name), value))
else:
macro_match = MACRO_PATTERN.match(line)
if macro_match:
name = macro_match.group(1)
args = [match.strip() for match in macro_match.group(2).split(',')]
body = macro_match.group(3).strip()
macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
else:
python_match = PYTHON_MACRO_PATTERN.match(line)
if python_match:
name = python_match.group(1)
args = [match.strip() for match in python_match.group(2).split(',')]
body = python_match.group(3).strip()
fun = eval("lambda " + ",".join(args) + ': ' + body)
macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun)))
else:
raise Error("Illegal line: " + line)
return (constants, macros)
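# Editor's addition: a hedged, never-called sketch of how ReadMacros feeds
# ExpandConstants and ExpandMacros; the FOUR/ADD definitions are invented
# for illustration and are not real V8 macros.
def _macro_expansion_sketch():
    consts, macros = ReadMacros("const FOUR = 4;\nmacro ADD(a, b) = (a + b);")
    assert ExpandConstants("x = FOUR;", consts) == "x = 4;"
    # Arguments are expanded recursively before the body is substituted.
    assert ExpandMacros("ADD(1, ADD(2, 3))", macros) == "(1 + (2 + 3))"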
INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
def ExpandInlineMacros(lines):
pos = 0
while True:
macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
if macro_match is None:
# no more macros
return lines
name = macro_match.group(1)
args = [match.strip() for match in macro_match.group(2).split(',')]
end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end());
if end_macro_match is None:
raise Error("Macro %s unclosed" % name)
body = lines[macro_match.end():end_macro_match.start()]
# remove macro definition
lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
name_pattern = re.compile("\\b%s\\(" % name)
macro = TextMacro(args, body)
# advance position to where the macro definition was
pos = macro_match.start()
def non_expander(s):
return s
lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
INLINE_CONSTANT_PATTERN = re.compile(r'const\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+)[;\n]')
def ExpandInlineConstants(lines):
pos = 0
while True:
const_match = INLINE_CONSTANT_PATTERN.search(lines, pos)
if const_match is None:
# no more constants
return lines
name = const_match.group(1)
replacement = const_match.group(2)
name_pattern = re.compile("\\b%s\\b" % name)
# remove constant definition and replace
lines = (lines[:const_match.start()] +
re.sub(name_pattern, replacement, lines[const_match.end():]))
# advance position to where the constant definition was
pos = const_match.start()
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
// This file was generated from .js source files by GYP. If you
// want to make changes to this file you should either change the
// javascript source files or the GYP script.
#include "src/v8.h"
#include "src/natives.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
%(sources_declaration)s\
%(raw_sources_declaration)s\
template <>
int NativesCollection<%(type)s>::GetBuiltinsCount() {
return %(builtin_count)i;
}
template <>
int NativesCollection<%(type)s>::GetDebuggerCount() {
return %(debugger_count)i;
}
template <>
int NativesCollection<%(type)s>::GetIndex(const char* name) {
%(get_index_cases)s\
return -1;
}
template <>
int NativesCollection<%(type)s>::GetRawScriptsSize() {
return %(raw_total_length)i;
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetRawScriptSource(int index) {
%(get_raw_script_source_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptName(int index) {
%(get_script_name_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const byte> NativesCollection<%(type)s>::GetScriptsSource() {
return Vector<const byte>(sources, %(total_length)i);
}
template <>
void NativesCollection<%(type)s>::SetRawScriptsSource(Vector<const char> raw_source) {
DCHECK(%(raw_total_length)i == raw_source.length());
raw_sources = raw_source.start();
}
} // internal
} // v8
"""
SOURCES_DECLARATION = """\
static const byte sources[] = { %s };
"""
RAW_SOURCES_COMPRESSION_DECLARATION = """\
static const char* raw_sources = NULL;
"""
RAW_SOURCES_DECLARATION = """\
static const char* raw_sources = reinterpret_cast<const char*>(sources);
"""
GET_INDEX_CASE = """\
if (strcmp(name, "%(id)s") == 0) return %(i)i;
"""
GET_RAW_SCRIPT_SOURCE_CASE = """\
if (index == %(i)i) return Vector<const char>(raw_sources + %(offset)i, %(raw_length)i);
"""
GET_SCRIPT_NAME_CASE = """\
if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
"""
def BuildFilterChain(macro_filename):
"""Build the chain of filter functions to be applied to the sources.
Args:
macro_filename: Name of the macro file, if any.
Returns:
A function (string -> string) that reads a source file and processes it.
"""
filter_chain = [ReadFile]
if macro_filename:
(consts, macros) = ReadMacros(ReadFile(macro_filename))
filter_chain.append(lambda l: ExpandConstants(l, consts))
filter_chain.append(lambda l: ExpandMacros(l, macros))
filter_chain.extend([
RemoveCommentsAndTrailingWhitespace,
ExpandInlineMacros,
ExpandInlineConstants,
Validate,
jsmin.JavaScriptMinifier().JSMinify
])
def chain(f1, f2):
return lambda x: f2(f1(x))
return reduce(chain, filter_chain)
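# Editor's note (hedged): reduce(chain, [f1, f2, f3]) above builds
# lambda x: f3(f2(f1(x))), so the filters run left to right, starting with
# ReadFile and ending with the JavaScript minifier.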
class Sources:
def __init__(self):
self.names = []
self.modules = []
self.is_debugger_id = []
def IsDebuggerFile(filename):
return filename.endswith("-debugger.js")
def IsMacroFile(filename):
return filename.endswith("macros.py")
def PrepareSources(source_files):
"""Read, prepare and assemble the list of source files.
Args:
source_files: List of JavaScript-ish source files. A file named macros.py
will be treated as a list of macros.
Returns:
An instance of Sources.
"""
macro_file = None
macro_files = filter(IsMacroFile, source_files)
assert len(macro_files) in [0, 1]
if macro_files:
source_files.remove(macro_files[0])
macro_file = macro_files[0]
filters = BuildFilterChain(macro_file)
# Sort 'debugger' sources first.
source_files = sorted(source_files,
lambda l,r: IsDebuggerFile(r) - IsDebuggerFile(l))
result = Sources()
for source in source_files:
try:
lines = filters(source)
except Error as e:
raise Error("In file %s:\n%s" % (source, str(e)))
result.modules.append(lines);
is_debugger = IsDebuggerFile(source)
result.is_debugger_id.append(is_debugger);
name = os.path.basename(source)[:-3]
result.names.append(name if not is_debugger else name[:-9]);
return result
def BuildMetadata(sources, source_bytes, native_type):
"""Build the meta data required to generate a libaries file.
Args:
sources: A Sources instance with the prepared sources.
source_bytes: A list of source bytes.
(The concatenation of all sources; might be compressed.)
native_type: The parameter for the NativesCollection template.
Returns:
A dictionary for use with HEADER_TEMPLATE.
"""
total_length = len(source_bytes)
raw_sources = "".join(sources.modules)
# The sources are expected to be ASCII-only.
assert not filter(lambda value: ord(value) >= 128, raw_sources)
# Loop over modules and build up indices into the source blob:
get_index_cases = []
get_script_name_cases = []
get_raw_script_source_cases = []
offset = 0
for i in xrange(len(sources.modules)):
native_name = "native %s.js" % sources.names[i]
d = {
"i": i,
"id": sources.names[i],
"name": native_name,
"length": len(native_name),
"offset": offset,
"raw_length": len(sources.modules[i]),
}
get_index_cases.append(GET_INDEX_CASE % d)
get_script_name_cases.append(GET_SCRIPT_NAME_CASE % d)
get_raw_script_source_cases.append(GET_RAW_SCRIPT_SOURCE_CASE % d)
offset += len(sources.modules[i])
assert offset == len(raw_sources)
# If we have the raw sources we can declare them accordingly.
have_raw_sources = source_bytes == raw_sources
raw_sources_declaration = (RAW_SOURCES_DECLARATION
if have_raw_sources else RAW_SOURCES_COMPRESSION_DECLARATION)
metadata = {
"builtin_count": len(sources.modules),
"debugger_count": sum(sources.is_debugger_id),
"sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
"raw_sources_declaration": raw_sources_declaration,
"raw_total_length": sum(map(len, sources.modules)),
"total_length": total_length,
"get_index_cases": "".join(get_index_cases),
"get_raw_script_source_cases": "".join(get_raw_script_source_cases),
"get_script_name_cases": "".join(get_script_name_cases),
"type": native_type,
}
return metadata
def CompressMaybe(sources, compression_type):
"""Take the prepared sources and generate a sequence of bytes.
Args:
sources: A Sources instance with the prepared sources.
compression_type: string, describing the desired compression.
Returns:
A sequence of bytes.
"""
sources_bytes = "".join(sources.modules)
if compression_type == "off":
return sources_bytes
elif compression_type == "bz2":
return bz2.compress(sources_bytes)
else:
raise Error("Unknown compression type %s." % compression_type)
def PutInt(blob_file, value):
assert(value >= 0 and value < (1 << 28))
if (value < 1 << 6):
size = 1
elif (value < 1 << 14):
size = 2
elif (value < 1 << 22):
size = 3
else:
size = 4
value_with_length = (value << 2) | (size - 1)
byte_sequence = bytearray()
for i in xrange(size):
byte_sequence.append(value_with_length & 255)
value_with_length >>= 8;
blob_file.write(byte_sequence)
def PutStr(blob_file, value):
PutInt(blob_file, len(value));
blob_file.write(value);
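# Editor's addition: a hedged, never-called sketch of the PutInt encoding,
# which packs (value << 2) | (byte_count - 1) as 1-4 little-endian bytes.
def _put_int_sketch():
    import io
    buf = io.BytesIO()
    PutInt(buf, 1000)                    # 1000 < 1 << 14, so two bytes
    data = bytearray(buf.getvalue())
    assert (data[0] & 3) + 1 == 2        # low two bits carry the byte count
    decoded = 0
    for i, byte in enumerate(data):
        decoded |= byte << (8 * i)
    assert decoded >> 2 == 1000          # remaining bits carry the value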
def WriteStartupBlob(sources, startup_blob):
"""Write a startup blob, as expected by V8 Initialize ...
TODO(vogelheim): Add proper method name.
Args:
sources: A Sources instance with the prepared sources.
startup_blob: Name of file to write the blob to.
"""
output = open(startup_blob, "wb")
debug_sources = sum(sources.is_debugger_id);
PutInt(output, debug_sources)
for i in xrange(debug_sources):
PutStr(output, sources.names[i]);
PutStr(output, sources.modules[i]);
PutInt(output, len(sources.names) - debug_sources)
for i in xrange(debug_sources, len(sources.names)):
PutStr(output, sources.names[i]);
PutStr(output, sources.modules[i]);
output.close()
def JS2C(source, target, native_type, compression_type, raw_file, startup_blob):
sources = PrepareSources(source)
sources_bytes = CompressMaybe(sources, compression_type)
metadata = BuildMetadata(sources, sources_bytes, native_type)
# Optionally emit raw file.
if raw_file:
output = open(raw_file, "w")
output.write(sources_bytes)
output.close()
if startup_blob:
WriteStartupBlob(sources, startup_blob);
# Emit resulting source file.
output = open(target, "w")
output.write(HEADER_TEMPLATE % metadata)
output.close()
def main():
parser = optparse.OptionParser()
parser.add_option("--raw", action="store",
help="file to write the processed sources array to.")
parser.add_option("--startup_blob", action="store",
help="file to write the startup blob to.")
parser.set_usage("""js2c out.cc type compression sources.js ...
out.cc: C code to be generated.
type: type parameter for NativesCollection template.
compression: type of compression used. [off|bz2]
sources.js: JS internal sources or macros.py.""")
(options, args) = parser.parse_args()
JS2C(args[3:], args[0], args[1], args[2], options.raw, options.startup_blob)
if __name__ == "__main__":
main()
|
eoyilmaz/anima | refs/heads/master | anima/ui/progress_dialog.py | 1 | # -*- coding: utf-8 -*-
from anima.base import Singleton
class ProgressCaller(object):
"""A simple object to hold caller data for ProgressDialogManager
"""
def __init__(self, max_steps=0, title=''):
self.max_steps = max_steps
self.title = title
self.current_step = 0
self.manager = None
def step(self, step_size=1, message=''):
"""A shortcut for the ProgressDialogManager.step() method
"""
self.manager.step(self, step=step_size, message=message)
def end_progress(self):
"""A shortcut fro the ProgressDialogManager.end_progress() method
"""
self.manager.end_progress(self)
class ProgressDialogManager(object):
"""A wrapper for the QtGui.QProgressDialog where it can be called from
multiple other branches of the code.
This is a wrapper for the QProgressDialog window. It is able to track more
than one process. The current usage is as follows::
pm = ProgressDialogManager()
# register a new caller which will have 100 steps
caller = pm.register(100)
for i in range(100):
caller.step()
So calling ``register`` will register a new caller for the progress window.
The ProgressDialogManager will store the caller and will kill the
QProgressDialog when all of the callers are completed.
"""
__metaclass__ = Singleton
def __init__(self, parent=None, dialog=None):
self.in_progress = False
self.dialog = dialog
self.callers = []
if not hasattr(self, 'use_ui'):
# prevent resetting the use_ui to True
self.use_ui = True
self.parent = parent
self.title = ''
self.max_steps = 0
self.current_step = 0
def create_dialog(self):
"""creates the progressWindow
"""
if self.use_ui:
if self.dialog is None:
from anima.ui.lib import QtWidgets
self.dialog = \
QtWidgets.QProgressDialog(self.parent)
# QtGui.QProgressDialog(None, QtCore.Qt.WindowStaysOnTopHint)
# self.dialog.setMinimumDuration(2000)
self.dialog.setRange(0, self.max_steps)
self.dialog.setLabelText(self.title)
# self.dialog.setAutoClose(True)
# self.dialog.setAutoReset(True)
self.center_window()
self.dialog.show()
# also set the Manager to in progress
self.in_progress = True
def was_cancelled(self):
if self.dialog:
return self.dialog.wasCanceled()
return False
def close(self):
"""kills the progressWindow
"""
if self.dialog is not None:
self.dialog.close()
# re initialize self
self.__init__(dialog=self.dialog)
def register(self, max_iteration, title=''):
"""registers a new caller
:return: ProgressCaller instance
"""
caller = ProgressCaller(max_steps=max_iteration, title=title)
caller.manager = self
self.max_steps += max_iteration
if self.use_ui:
if not self.in_progress:
self.create_dialog()
else:
# update the maximum
if self.dialog:
self.create_dialog()
self.dialog.setRange(0, self.max_steps)
self.dialog.setValue(self.current_step)
# self. center_window()
# also store this
self.callers.append(caller)
return caller
def center_window(self):
"""recenters the dialog window to the screen
"""
if self.dialog is not None:
from anima.ui.lib import QtGui, QtWidgets
desktop = QtWidgets.QApplication.desktop()
cursor_pos = QtGui.QCursor.pos()
desktop_number = desktop.screenNumber(cursor_pos)
desktop_rect = desktop.screenGeometry(desktop_number)
size = self.dialog.geometry()
if size:
dr_width = desktop_rect.width()
dr_left = desktop_rect.left()
dr_height = desktop_rect.height()
dr_top = desktop_rect.top()
self.dialog.move(
(dr_width - size.width()) * 0.5 + dr_left,
(dr_height - size.height()) * 0.5 + dr_top
)
def step(self, caller, step=1, message=''):
"""Increments the progress by the given mount
:param caller: A :class:`.ProgressCaller` instance, generally returned
by the :meth:`.register` method.
:param step: The step size to increment, the default value is 1.
:param str message: The progress message as string.
"""
caller.current_step += step
self.current_step += step
if self.dialog:
self.dialog.setValue(self.current_step)
self.dialog.setLabelText('%s : %s' % (caller.title, message))
# self.center_window()
if caller.current_step >= caller.max_steps:
# kill the caller
self.end_progress(caller)
from anima.ui.lib import QtWidgets
try:
qApp = QtWidgets.qApp
except AttributeError:
qApp = QtWidgets.QApplication
qApp.processEvents()
def end_progress(self, caller):
"""Ends the progress for the given caller
:param caller: A :class:`.ProgressCaller` instance
:return: None
"""
# remove the caller from the callers list
if caller in self.callers:
self.callers.remove(caller)
# also reduce the max_steps counter
# in case of an early kill
steps_left = caller.max_steps - caller.current_step
if steps_left > 0:
self.max_steps -= steps_left
if len(self.callers) == 0:
self.close()
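# Editor's note: a hedged usage sketch, not part of the original module; it
# assumes a running Qt application, since step() pumps Qt events.
#
#   pm = ProgressDialogManager()
#   caller_a = pm.register(10, title='export')  # dialog now spans 10 steps
#   caller_b = pm.register(5, title='render')   # range grows to 15 steps
#   for _ in range(10):
#       caller_a.step(message='exporting')      # advances the shared dialog
#   caller_b.end_progress()                     # early finish; the dialog
#                                               # closes once no callers remain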
|
apocalypsebg/odoo | refs/heads/8.0 | addons/website_mail_group/controllers/main.py | 306 | # -*- coding: utf-8 -*-
import datetime
from dateutil import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.website.models.website import slug
from openerp.addons.web.http import request
class MailGroup(http.Controller):
_thread_per_page = 20
_replies_per_page = 10
def _get_archives(self, group_id):
MailMessage = request.registry['mail.message']
groups = MailMessage.read_group(
request.cr, request.uid, [('model', '=', 'mail.group'), ('res_id', '=', group_id)], ['subject', 'date'],
groupby="date", orderby="date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
return groups
@http.route("/groups", type='http', auth="public", website=True)
def view(self, **post):
cr, uid, context = request.cr, request.uid, request.context
group_obj = request.registry.get('mail.group')
mail_message_obj = request.registry.get('mail.message')
group_ids = group_obj.search(cr, uid, [('alias_id', '!=', False), ('alias_id.alias_name', '!=', False)], context=context)
groups = group_obj.browse(cr, uid, group_ids, context)
# compute statistics
month_date = datetime.datetime.today() - relativedelta.relativedelta(months=1)
group_data = dict()
for group in groups:
group_data[group.id] = {
'monthly_message_nbr': mail_message_obj.search(
cr, SUPERUSER_ID,
[('model', '=', 'mail.group'), ('res_id', '=', group.id), ('date', '>=', month_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))],
count=True, context=context)}
values = {'groups': groups, 'group_data': group_data}
return request.website.render('website_mail_group.mail_groups', values)
@http.route(["/groups/subscription/"], type='json', auth="user")
def subscription(self, group_id=0, action=False, **post):
""" TDE FIXME: seems dead code """
cr, uid, context = request.cr, request.uid, request.context
group_obj = request.registry.get('mail.group')
if action:
group_obj.message_subscribe_users(cr, uid, [group_id], context=context)
else:
group_obj.message_unsubscribe_users(cr, uid, [group_id], context=context)
return []
@http.route([
"/groups/<model('mail.group'):group>",
"/groups/<model('mail.group'):group>/page/<int:page>"
], type='http', auth="public", website=True)
def thread_headers(self, group, page=1, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
thread_obj = request.registry.get('mail.message')
domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id)]
if mode == 'thread':
domain += [('parent_id', '=', False)]
if date_begin and date_end:
domain += [('date', '>=', date_begin), ('date', '<=', date_end)]
thread_count = thread_obj.search_count(cr, uid, domain, context=context)
pager = request.website.pager(
url='/groups/%s' % slug(group),
total=thread_count,
page=page,
step=self._thread_per_page,
url_args={'mode': mode, 'date_begin': date_begin or '', 'date_end': date_end or ''},
)
thread_ids = thread_obj.search(cr, uid, domain, limit=self._thread_per_page, offset=pager['offset'])
messages = thread_obj.browse(cr, uid, thread_ids, context)
values = {
'messages': messages,
'group': group,
'pager': pager,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
}
return request.website.render('website_mail_group.group_messages', values)
@http.route([
'''/groups/<model('mail.group'):group>/<model('mail.message', "[('model','=','mail.group'), ('res_id','=',group[0])]"):message>''',
], type='http', auth="public", website=True)
def thread_discussion(self, group, message, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
Message = request.registry['mail.message']
if mode == 'thread':
base_domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id), ('parent_id', '=', message.parent_id and message.parent_id.id or False)]
else:
base_domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id)]
next_message = None
next_message_ids = Message.search(cr, uid, base_domain + [('date', '<', message.date)], order="date DESC", limit=1, context=context)
if next_message_ids:
next_message = Message.browse(cr, uid, next_message_ids[0], context=context)
prev_message = None
prev_message_ids = Message.search(cr, uid, base_domain + [('date', '>', message.date)], order="date ASC", limit=1, context=context)
if prev_message_ids:
prev_message = Message.browse(cr, uid, prev_message_ids[0], context=context)
values = {
'message': message,
'group': group,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
'next_message': next_message,
'prev_message': prev_message,
}
return request.website.render('website_mail_group.group_message', values)
@http.route(
'''/groups/<model('mail.group'):group>/<model('mail.message', "[('model','=','mail.group'), ('res_id','=',group[0])]"):message>/get_replies''',
type='json', auth="public", methods=['POST'], website=True)
def render_messages(self, group, message, **post):
last_displayed_id = post.get('last_displayed_id')
if not last_displayed_id:
return False
Message = request.registry['mail.message']
replies_domain = [('id', '<', int(last_displayed_id)), ('parent_id', '=', message.id)]
msg_ids = Message.search(request.cr, request.uid, replies_domain, limit=self._replies_per_page, context=request.context)
msg_count = Message.search(request.cr, request.uid, replies_domain, count=True, context=request.context)
messages = Message.browse(request.cr, request.uid, msg_ids, context=request.context)
values = {
'group': group,
'thread_header': message,
'messages': messages,
'msg_more_count': msg_count - self._replies_per_page,
'replies_per_page': self._replies_per_page,
}
return request.registry['ir.ui.view'].render(request.cr, request.uid, 'website_mail_group.messages_short', values, engine='ir.qweb', context=request.context)
@http.route("/groups/<model('mail.group'):group>/get_alias_info", type='json', auth='public', website=True)
def get_alias_info(self, group, **post):
return {
'alias_name': group.alias_id and group.alias_id.alias_name and group.alias_id.alias_domain and '%s@%s' % (group.alias_id.alias_name, group.alias_id.alias_domain) or False
}
|
vinnyrose/django-constance | refs/heads/master | tests/test_admin.py | 8 | from django.contrib import admin
from django.contrib.auth.models import User, Permission
from django.core.exceptions import PermissionDenied
from django.test import TestCase, RequestFactory
from constance.admin import settings, Config
class TestAdmin(TestCase):
model = Config
def setUp(self):
super(TestAdmin, self).setUp()
self.rf = RequestFactory()
self.superuser = User.objects.create_superuser('admin', 'nimda', 'a@a.cz')
self.normaluser = User.objects.create_user('normal', 'nimda', 'b@b.cz')
self.normaluser.is_staff = True
self.normaluser.save()
self.options = admin.site._registry[self.model]
def test_changelist(self):
self.client.login(username='admin', password='nimda')
request = self.rf.get('/admin/constance/config/')
request.user = self.superuser
response = self.options.changelist_view(request, {})
self.assertEqual(response.status_code, 200)
def test_custom_auth(self):
settings.SUPERUSER_ONLY = False
self.client.login(username='normal', password='nimda')
request = self.rf.get('/admin/constance/config/')
request.user = self.normaluser
self.assertRaises(PermissionDenied,
self.options.changelist_view,
request, {})
self.assertFalse(request.user.has_perm('constance.change_config'))
# reload user to reset permission cache
request = self.rf.get('/admin/constance/config/')
request.user = User.objects.get(pk=self.normaluser.pk)
request.user.user_permissions.add(Permission.objects.get(codename='change_config'))
self.assertTrue(request.user.has_perm('constance.change_config'))
response = self.options.changelist_view(request, {})
self.assertEqual(response.status_code, 200)
|
rmbq/caf_msm-3.10 | refs/heads/LA.BF.2.1.2_rb1.5 | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual, hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found on any parent, then create it as a new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
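# Editor's addition: a hedged, never-called sketch of parseLine() on a
# representative (hand-written) function-tracer line.
def _parse_line_sketch():
    sample = "bash-4251  [01] 10152.583854: path_put <-path_walk"
    # Returns (call time, callee, caller).
    assert parseLine(sample) == ("10152.583854", "path_put", "path_walk")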
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
esikachev/scenario | refs/heads/test_cases | sahara/plugins/vanilla/hadoop2/config.py | 1 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
import six
from sahara.i18n import _LI
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper
from sahara.plugins.vanilla import utils as vu
from sahara.swift import swift_helper as swift
from sahara.topology import topology_helper as th
from sahara.utils import files as f
from sahara.utils import proxy
from sahara.utils import xmlutils as x
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HADOOP_CONF_DIR = '/opt/hadoop/etc/hadoop'
OOZIE_CONF_DIR = '/opt/oozie/conf'
HIVE_CONF_DIR = '/opt/hive/conf'
HADOOP_USER = 'hadoop'
HADOOP_GROUP = 'hadoop'
def configure_cluster(pctx, cluster):
LOG.debug("Configuring cluster \"%s\"", cluster.name)
if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and
vu.get_hiveserver(cluster) and
c_helper.is_swift_enabled(pctx, cluster)):
cluster = proxy.create_proxy_user_for_cluster(cluster)
instances = []
for node_group in cluster.node_groups:
for instance in node_group.instances:
instances.append(instance)
configure_instances(pctx, instances)
configure_topology_data(pctx, cluster)
def configure_instances(pctx, instances):
for instance in instances:
_provisioning_configs(pctx, instance)
_post_configuration(pctx, instance)
def _provisioning_configs(pctx, instance):
xmls, env = _generate_configs(pctx, instance.node_group)
_push_xml_configs(instance, xmls)
_push_env_configs(instance, env)
def _generate_configs(pctx, node_group):
hadoop_xml_confs = _get_hadoop_configs(pctx, node_group)
user_xml_confs, user_env_confs = _get_user_configs(pctx, node_group)
xml_confs = _merge_configs(user_xml_confs, hadoop_xml_confs)
env_confs = _merge_configs(pctx['env_confs'], user_env_confs)
return xml_confs, env_confs
def _get_hadoop_configs(pctx, node_group):
cluster = node_group.cluster
nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
dirs = _get_hadoop_dirs(node_group)
confs = {
'Hadoop': {
'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
},
'HDFS': {
'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
'dfs.namenode.data.dir': ','.join(dirs['hadoop_data_dirs']),
'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
}
}
res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
if res_hostname:
confs['YARN'] = {
'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
'yarn.resourcemanager.hostname': '%s' % res_hostname,
'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
HADOOP_CONF_DIR),
'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
HADOOP_CONF_DIR)
}
confs['MapReduce'] = {
'mapreduce.framework.name': 'yarn'
}
hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
if hs_hostname:
confs['MapReduce']['mapreduce.jobhistory.address'] = (
"%s:10020" % hs_hostname)
oozie = vu.get_oozie(cluster)
if oozie:
hadoop_cfg = {
'hadoop.proxyuser.hadoop.hosts': '*',
'hadoop.proxyuser.hadoop.groups': 'hadoop'
}
confs['Hadoop'].update(hadoop_cfg)
oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
if c_helper.is_mysql_enabled(pctx, cluster):
oozie_cfg.update(o_helper.get_oozie_mysql_configs())
confs['JobFlow'] = oozie_cfg
if c_helper.is_swift_enabled(pctx, cluster):
swift_configs = {}
for config in swift.get_swift_configs():
swift_configs[config['name']] = config['value']
confs['Hadoop'].update(swift_configs)
if c_helper.is_data_locality_enabled(pctx, cluster):
confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
confs['Hadoop'].update({"topology.script.file.name":
HADOOP_CONF_DIR + "/topology.sh"})
hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
if hive_hostname:
hive_cfg = {
'hive.warehouse.subdir.inherit.perms': True,
'javax.jdo.option.ConnectionURL':
'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
}
if c_helper.is_mysql_enabled(pctx, cluster):
hive_cfg.update({
'javax.jdo.option.ConnectionURL':
'jdbc:mysql://%s/metastore' % hive_hostname,
'javax.jdo.option.ConnectionDriverName':
'com.mysql.jdbc.Driver',
'javax.jdo.option.ConnectionUserName': 'hive',
'javax.jdo.option.ConnectionPassword': 'pass',
'datanucleus.autoCreateSchema': 'false',
'datanucleus.fixedDatastore': 'true',
'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
})
proxy_configs = cluster.cluster_configs.get('proxy_configs')
if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
hive_cfg.update({
swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
})
confs['Hive'] = hive_cfg
return confs
def _get_user_configs(pctx, node_group):
ng_xml_confs, ng_env_confs = _separate_configs(node_group.node_configs,
pctx['env_confs'])
cl_xml_confs, cl_env_confs = _separate_configs(
node_group.cluster.cluster_configs, pctx['env_confs'])
xml_confs = _merge_configs(cl_xml_confs, ng_xml_confs)
env_confs = _merge_configs(cl_env_confs, ng_env_confs)
return xml_confs, env_confs
def _separate_configs(configs, all_env_configs):
xml_configs = {}
env_configs = {}
for service, params in six.iteritems(configs):
for param, value in six.iteritems(params):
if all_env_configs.get(service, {}).get(param):
if not env_configs.get(service):
env_configs[service] = {}
env_configs[service][param] = value
else:
if not xml_configs.get(service):
xml_configs[service] = {}
xml_configs[service][param] = value
return xml_configs, env_configs
def _generate_xml(configs):
xml_confs = {}
for service, confs in six.iteritems(configs):
xml_confs[service] = x.create_hadoop_xml(confs)
return xml_confs
def _push_env_configs(instance, configs):
nn_heap = configs['HDFS']['NameNode Heap Size']
snn_heap = configs['HDFS']['SecondaryNameNode Heap Size']
dn_heap = configs['HDFS']['DataNode Heap Size']
rm_heap = configs['YARN']['ResourceManager Heap Size']
nm_heap = configs['YARN']['NodeManager Heap Size']
hs_heap = configs['MapReduce']['JobHistoryServer Heap Size']
with instance.remote() as r:
r.replace_remote_string(
'%s/hadoop-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_NAMENODE_OPTS=.*',
'export HADOOP_NAMENODE_OPTS="-Xmx%dm"' % nn_heap)
r.replace_remote_string(
'%s/hadoop-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_SECONDARYNAMENODE_OPTS=.*',
'export HADOOP_SECONDARYNAMENODE_OPTS="-Xmx%dm"' % snn_heap)
r.replace_remote_string(
'%s/hadoop-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_DATANODE_OPTS=.*',
'export HADOOP_DATANODE_OPTS="-Xmx%dm"' % dn_heap)
r.replace_remote_string(
'%s/yarn-env.sh' % HADOOP_CONF_DIR,
'\\#export YARN_RESOURCEMANAGER_HEAPSIZE=.*',
'export YARN_RESOURCEMANAGER_HEAPSIZE=%d' % rm_heap)
r.replace_remote_string(
'%s/yarn-env.sh' % HADOOP_CONF_DIR,
'\\#export YARN_NODEMANAGER_HEAPSIZE=.*',
'export YARN_NODEMANAGER_HEAPSIZE=%d' % nm_heap)
r.replace_remote_string(
'%s/mapred-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=.*',
'export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=%d' % hs_heap)
def _push_xml_configs(instance, configs):
xmls = _generate_xml(configs)
service_to_conf_map = {
'Hadoop': '%s/core-site.xml' % HADOOP_CONF_DIR,
'HDFS': '%s/hdfs-site.xml' % HADOOP_CONF_DIR,
'YARN': '%s/yarn-site.xml' % HADOOP_CONF_DIR,
'MapReduce': '%s/mapred-site.xml' % HADOOP_CONF_DIR,
'JobFlow': '%s/oozie-site.xml' % OOZIE_CONF_DIR,
'Hive': '%s/hive-site.xml' % HIVE_CONF_DIR
}
xml_confs = {}
for service, confs in six.iteritems(xmls):
if service not in service_to_conf_map.keys():
continue
xml_confs[service_to_conf_map[service]] = confs
_push_configs_to_instance(instance, xml_confs)
def _push_configs_to_instance(instance, configs):
LOG.debug("Push configs to instance \"%s\"", instance.instance_name)
with instance.remote() as r:
for fl, data in six.iteritems(configs):
r.write_file_to(fl, data, run_as_root=True)
def _post_configuration(pctx, instance):
node_group = instance.node_group
dirs = _get_hadoop_dirs(node_group)
args = {
'hadoop_user': HADOOP_USER,
'hadoop_group': HADOOP_GROUP,
'hadoop_conf_dir': HADOOP_CONF_DIR,
'oozie_conf_dir': OOZIE_CONF_DIR,
'hadoop_name_dirs': " ".join(dirs['hadoop_name_dirs']),
'hadoop_data_dirs': " ".join(dirs['hadoop_data_dirs']),
'hadoop_log_dir': dirs['hadoop_log_dir'],
'hadoop_secure_dn_log_dir': dirs['hadoop_secure_dn_log_dir'],
'yarn_log_dir': dirs['yarn_log_dir']
}
post_conf_script = f.get_file_text(
'plugins/vanilla/hadoop2/resources/post_conf.template')
post_conf_script = post_conf_script.format(**args)
with instance.remote() as r:
r.write_file_to('/tmp/post_conf.sh', post_conf_script)
r.execute_command('chmod +x /tmp/post_conf.sh')
r.execute_command('sudo /tmp/post_conf.sh')
if c_helper.is_data_locality_enabled(pctx,
instance.cluster):
t_script = HADOOP_CONF_DIR + '/topology.sh'
r.write_file_to(t_script, f.get_file_text(
'plugins/vanilla/hadoop2/resources/topology.sh'),
run_as_root=True)
r.execute_command('chmod +x ' + t_script, run_as_root=True)
def _get_hadoop_dirs(node_group):
dirs = {}
storage_paths = node_group.storage_paths()
dirs['hadoop_name_dirs'] = _make_hadoop_paths(
storage_paths, '/hdfs/namenode')
dirs['hadoop_data_dirs'] = _make_hadoop_paths(
storage_paths, '/hdfs/datanode')
dirs['hadoop_log_dir'] = _make_hadoop_paths(
storage_paths, '/hadoop/logs')[0]
dirs['hadoop_secure_dn_log_dir'] = _make_hadoop_paths(
storage_paths, '/hadoop/logs/secure')[0]
dirs['yarn_log_dir'] = _make_hadoop_paths(
storage_paths, '/yarn/logs')[0]
return dirs
def _make_hadoop_paths(paths, hadoop_dir):
return [path + hadoop_dir for path in paths]
def _merge_configs(a, b):
res = {}
def update(cfg):
for service, configs in six.iteritems(cfg):
if not res.get(service):
res[service] = {}
res[service].update(configs)
update(a)
update(b)
return res
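# Editor's addition: a hedged, never-called sketch of _merge_configs; the
# service and key names are invented. For keys present in both arguments,
# the second argument wins.
def _merge_configs_sketch():
    merged = _merge_configs({'HDFS': {'x': '1'}},
                            {'HDFS': {'x': '2', 'y': '3'}})
    assert merged == {'HDFS': {'x': '2', 'y': '3'}}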
def configure_topology_data(pctx, cluster):
if c_helper.is_data_locality_enabled(pctx, cluster):
LOG.info(_LI("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False explicitly"))
tpl_map = th.generate_topology_map(cluster, is_node_awareness=False)
topology_data = "\n".join(
[k + " " + v for k, v in tpl_map.items()]) + "\n"
for ng in cluster.node_groups:
for i in ng.instances:
i.remote().write_file_to(HADOOP_CONF_DIR + "/topology.data",
topology_data, run_as_root=True)
def get_open_ports(node_group):
ports = []
if "namenode" in node_group.node_processes:
ports.append(50070)
ports.append(9000)
if "secondarynamenode" in node_group.node_processes:
ports.append(50090)
if "resourcemanager" in node_group.node_processes:
ports.append(8088)
ports.append(8032)
if "historyserver" in node_group.node_processes:
ports.append(19888)
if "datanode" in node_group.node_processes:
ports.append(50010)
ports.append(50075)
ports.append(50020)
if "nodemanager" in node_group.node_processes:
ports.append(8042)
if "oozie" in node_group.node_processes:
ports.append(11000)
if "hiveserver" in node_group.node_processes:
ports.append(9999)
ports.append(10000)
return ports
|
Dapid/scipy | refs/heads/master | scipy/special/add_newdocs.py | 8 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
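# Editor's addition: a hedged, never-called sketch of the mechanism above;
# add_newdoc() only stores text under "<place>.<name>" in docdict, which
# generate_ufuncs.py reads at build time. The entry name is invented.
def _docdict_sketch():
    add_newdoc("scipy.special", "_sketch_entry", "illustrative docstring")
    assert get("scipy.special._sketch_entry") == "illustrative docstring"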
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtrib(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
    The value ``ph`` is such that if ``u = ellipkinc(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around m = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points x.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
    known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
    Gamma distribution cumulative distribution function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
        matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
        matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
        matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
        hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
    Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
    Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
    Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
    k1(x)
    Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
    Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \text{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
    Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
    Returns the modified Struve function Lv(x) of order v at x; x must
be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
    ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
    ncfdtridfd : Calculate dfd, given CDF and iCDF values.
    ncfdtridfn : Calculate dfn, given CDF and iCDF values.
    ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central t distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
    nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
    nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
    Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \text{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \text{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
    Reciprocal of the gamma function
    Returns ``1/gamma(z)``.
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
    Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
    Computes the Struve function Hv(x) of order v at x; x must be
positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
    Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
|
kurin/py-raft | refs/heads/master | raft/client.py | 1 | from __future__ import print_function
import uuid
import msgpack
import raft.tcp as tcp
class NoConnection(Exception):
pass
class RaftClient(object):
def __init__(self, server):
self.tcp = tcp.TCP(0, 'client')
self.tcp.start()
self.msgs = {}
self.tcp.connect(server)
if not self.tcp.u2c:
            # wait briefly (0.5 s) for the connection to come up
self.tcp.recv(0.5)
if not self.tcp.u2c:
raise NoConnection
self.leader = next(iter(self.tcp.u2c.keys()))
def _send(self, rpc, msgid):
self.tcp.send(rpc, self.leader)
msgids = self.poll(0.5)
        if not msgids or msgid not in msgids:
return # XXX put real recovery logic here
msg = self.msgs[msgid][0]
if msg['type'] == 'cr_rdr':
self.leader = msg['leader']
print("redirected to %s! %s" % (self.leader, msg['addr']))
self.tcp.connect(msg['addr'])
del self.msgs[msgid]
return self._send(rpc, msgid)
def poll(self, timeout=0):
ans = self.tcp.recv(timeout)
if not ans:
return
msgids = set()
for _, msgs in ans:
for msg in msgs:
msg = msgpack.unpackb(msg, use_list=False)
msgid = msg['id']
msgids.add(msgid)
ums = self.msgs.get(msgid, [])
ums.append(msg)
self.msgs[msgid] = ums
return msgids
def send(self, data):
msgid = uuid.uuid4().hex
rpc = self.cq_rpc(data, msgid)
self._send(rpc, msgid)
return msgid
def update_hosts(self, config):
msgid = uuid.uuid4().hex
rpc = self.pu_rpc(config, msgid)
self._send(rpc, msgid)
return msgid
def cq_rpc(self, data, msgid):
# client query rpc
rpc = {
'type': 'cq',
'id': msgid,
'data': data
}
return msgpack.packb(rpc)
def pu_rpc(self, config, msgid):
# protocol update rpc
rpc = {
'type': 'pu',
'id': msgid,
'config': config
}
return msgpack.packb(rpc)
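# Editor's note: illustrative usage sketch, not part of the original file. The
# address below is hypothetical and assumes a raft node is already listening.
def _example_usage():
    client = RaftClient(('127.0.0.1', 4000))
    msgid = client.send(b'some opaque command')  # returns the query's uuid hex
    client.poll(timeout=1.0)                     # gather replies into client.msgs
    return client.msgs.get(msgid, [])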
|
171121130/SWI | refs/heads/master | venv/Lib/site-packages/pip/_vendor/packaging/version.py | 1151 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
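# Editor's note: illustrative sketch, not part of the original module. parse()
# returns a Version for valid PEP 440 strings and falls back to LegacyVersion
# otherwise; both kinds support rich comparison.
def _example_parse_dispatch():
    assert isinstance(parse("1.0.post1"), Version)
    assert isinstance(parse("not pep440"), LegacyVersion)
    assert parse("1.0") < parse("1.1.dev0") < parse("1.1")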
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version in setuptools prior to
    # its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
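# Illustrative sketch, not part of the library API: Version parses strings
# with VERSION_PATTERN above and normalizes alternate spellings, so parsed
# versions round-trip to a canonical string form.
def _example_version_parsing():
    assert str(Version("1.0alpha1")) == "1.0a1"     # "alpha" normalized to "a"
    assert str(Version("1.0-1")) == "1.0.post1"     # implicit post-release syntax
    assert Version("1.0.dev1").is_prerelease        # dev releases count as pre-releases
    assert Version("1.0+local.1").public == "1.0"   # .public strips the local segment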
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now
    # leading zeros until we come to something non-zero, then re-reverse the
    # rest back into the correct order and make it a tuple to use as our
    # sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
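# Illustrative sketch, not part of the library API: the _cmpkey tricks above
# yield the expected PEP 440 ordering when Versions are sorted (assuming the
# Infinity/-Infinity sentinels and the _BaseVersion comparisons defined
# elsewhere in this library).
def _example_pep440_sort_order():
    ordered = sorted(Version(v) for v in ("1.0", "1.0a1", "1.0.dev0", "1.0.post1"))
    assert [str(v) for v in ordered] == ["1.0.dev0", "1.0a1", "1.0", "1.0.post1"]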
|
angel511wong/nixysa | refs/heads/master | third_party/ply-3.1/ply/lex.py | 16 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley (dave@dabeaz.com)
#
# Copyright (C) 2001-2009, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
# -----------------------------------------------------------------------------
__version__ = "3.0"
__tabversion__ = "3.0" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
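# Illustrative sketch of the runtime interface described above (hypothetical
# usage; `some_rules_module` is an assumed module of token definitions, and
# the lexer itself is built by lex() further below):
#
#     lexer = lex(module=some_rules_module)
#     lexer.input("x = 42")
#     while True:
#         tok = lexer.token()
#         if not tok:
#             break
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)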
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
            # This provides a short-circuit for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # Every function must return a token; if it returns nothing, we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
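# Illustrative sketch with hypothetical token rules (not part of PLY itself):
# lex() collects the 'tokens' list and the t_-prefixed rules from the calling
# scope (or a supplied module/object) and returns a ready-to-use Lexer.
def _example_build_lexer():
    # These names are read out of the caller's namespace by lex() below.
    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'          # simple rules may be plain regex strings
    t_ignore = ' \t'        # characters silently skipped between tokens
    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t
    def t_error(t):
        t.lexer.skip(1)
    example_lexer = lex()
    example_lexer.input("1 + 22")
    # Yields [('NUMBER', 1), ('PLUS', '+'), ('NUMBER', 22)]
    return [(tok.type, tok.value) for tok in example_lexer]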
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
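# Illustrative sketch with a hypothetical rule (not part of PLY itself):
# @TOKEN attaches a regex to a rule function whose docstring cannot be written
# literally, e.g. when the pattern is assembled from other strings.
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t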
|
Morgan-Stanley/treadmill | refs/heads/master | lib/python/treadmill/supervisor/winss/scan_dir.py | 3 | """winss service directory management.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
from . import services
from .. import _scan_dir_base
from .. import _utils
_LOGGER = logging.getLogger(__name__)
class ScanDir(_scan_dir_base.ScanDir):
"""Models an winss scan directory.
"""
__slots__ = (
'_finish',
'_sigterm',
)
_CONTROL_DIR = '.winss-svscan'
_create_service = staticmethod(services.create_service)
def __init__(self, directory):
super(ScanDir, self).__init__(directory)
for attrib in self.__slots__:
setattr(self, attrib, None)
@staticmethod
def control_dir_name():
"""Gets the name of the svscan control directory.
"""
return ScanDir._CONTROL_DIR
@property
def _finish_file(self):
return os.path.join(self._control_dir, 'finish')
@property
def _sigterm_file(self):
return os.path.join(self._control_dir, 'SIGTERM')
def write(self):
"""Write down the service definition.
"""
super(ScanDir, self).write()
for attrib in self.__slots__:
if getattr(self, attrib) is not None:
attrib_filename = '%s_file' % attrib
_utils.script_write(
getattr(self, attrib_filename),
getattr(self, attrib)
)
# Define all the requirement properties
ScanDir.add_ctrlfile_props()
__all__ = (
'ScanDir',
)
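# Illustrative sketch with hypothetical paths; it assumes the control-file
# properties generated by ScanDir.add_ctrlfile_props() above expose `finish`
# and `sigterm` setters backed by the _finish_file/_sigterm_file names:
#
#     scan_dir = ScanDir('C:\\treadmill\\services')
#     scan_dir.finish = 'shutdown.cmd'     # written to .winss-svscan\finish
#     scan_dir.sigterm = 'sigterm.cmd'     # written to .winss-svscan\SIGTERM
#     scan_dir.write()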
|
CopeX/odoo | refs/heads/8.0 | addons/auth_openid/utils.py | 428 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
KEY_LENGTH = 16
SREG2AX = { # from http://www.axschema.org/types/#sreg
'nickname': 'http://axschema.org/namePerson/friendly',
'email': 'http://axschema.org/contact/email',
'fullname': 'http://axschema.org/namePerson',
'dob': 'http://axschema.org/birthDate',
'gender': 'http://axschema.org/person/gender',
'postcode': 'http://axschema.org/contact/postalCode/home',
'country': 'http://axschema.org/contact/country/home',
'language': 'http://axschema.org/pref/language',
'timezone': 'http://axschema.org/pref/timezone',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
karan1276/servo | refs/heads/master | components/script/dom/bindings/codegen/parser/tests/test_callback.py | 126 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface TestCallback {
attribute CallbackType? listener;
};
callback CallbackType = boolean (unsigned long arg);
""")
results = parser.finish()
harness.ok(True, "TestCallback interface parsed without error.")
harness.check(len(results), 2, "Should be two productions.")
iface = results[0]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestCallback", "Interface has the right QName")
harness.check(iface.identifier.name, "TestCallback", "Interface has the right name")
harness.check(len(iface.members), 1, "Expect %s members" % 1)
attr = iface.members[0]
harness.ok(isinstance(attr, WebIDL.IDLAttribute),
"Should be an IDLAttribute")
harness.ok(attr.isAttr(), "Should be an attribute")
harness.ok(not attr.isMethod(), "Attr is not an method")
harness.ok(not attr.isConst(), "Attr is not a const")
harness.check(attr.identifier.QName(), "::TestCallback::listener", "Attr has the right QName")
harness.check(attr.identifier.name, "listener", "Attr has the right name")
t = attr.type
harness.ok(not isinstance(t, WebIDL.IDLWrapperType), "Attr has the right type")
harness.ok(isinstance(t, WebIDL.IDLNullableType), "Attr has the right type")
harness.ok(t.isCallback(), "Attr has the right type")
|
CiscoSystems/quantum | refs/heads/master | neutron/plugins/nec/common/config.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
from oslo.config import cfg
from neutron.agent.common import config
from neutron.openstack.common import rpc # noqa
from neutron import scheduler
ovs_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("Integration bridge to use")),
]
agent_opts = [
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
]
ofc_opts = [
cfg.StrOpt('host', default='127.0.0.1',
help=_("Host to connect to")),
cfg.StrOpt('port', default='8888',
help=_("Port to connect to")),
cfg.StrOpt('driver', default='trema',
help=_("Driver to use")),
cfg.BoolOpt('enable_packet_filter', default=True,
help=_("Enable packet filter")),
cfg.BoolOpt('use_ssl', default=False,
help=_("Use SSL to connect")),
cfg.StrOpt('key_file', default=None,
help=_("Key file")),
cfg.StrOpt('cert_file', default=None,
help=_("Certificate file")),
]
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
cfg.CONF.register_opts(ofc_opts, "OFC")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(scheduler.AGENTS_SCHEDULER_OPTS)
# shortcuts
CONF = cfg.CONF
OVS = cfg.CONF.OVS
AGENT = cfg.CONF.AGENT
OFC = cfg.CONF.OFC
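# Illustrative sketch (not part of the module): once the option groups above
# are registered, values are available as attributes of the shortcut objects,
# e.g. after oslo.config has parsed the configuration files:
#
#     from neutron.plugins.nec.common import config
#     ofc_host = config.OFC.host               # '127.0.0.1' unless overridden
#     bridge = config.OVS.integration_bridge   # 'br-int' by default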
|
joshloyal/scikit-learn | refs/heads/master | sklearn/manifold/tests/test_t_sne.py | 28 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
    # Make sure translation between 1D and N-D indices is preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
def test_n_iter_without_progress():
# Make sure that the parameter n_iter_without_progress is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last 2 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # Once the computation is finished, the last gradient norm value is
        # just repeated, so we do not need to store it
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
|
tersmitten/ansible | refs/heads/devel | test/units/parsing/test_metadata.py | 125 | # coding: utf-8
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import pytest
from ansible.parsing import metadata as md
LICENSE = b"""# some license text boilerplate
# That we have at the top of files
"""
FUTURE_IMPORTS = b"""
from __future__ import (absolute_import, division, print_function)
"""
REGULAR_IMPORTS = b"""
import test
from foo import bar
"""
STANDARD_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
"""
TEXT_STD_METADATA = b"""
ANSIBLE_METADATA = u'''
metadata_version: '1.1'
status:
- 'stableinterface'
supported_by: 'core'
'''
"""
BYTES_STD_METADATA = b"""
ANSIBLE_METADATA = b'''
metadata_version: '1.1'
status:
- 'stableinterface'
supported_by: 'core'
'''
"""
TRAILING_COMMENT_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'} # { Testing }
"""
MULTIPLE_STATEMENTS_METADATA = b"""
DOCUMENTATION = "" ; ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'} ; RETURNS = ""
"""
EMBEDDED_COMMENT_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
# { Testing }
'supported_by': 'core'}
"""
HASH_SYMBOL_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1 # 4',
'status': ['stableinterface'],
'supported_by': 'core # Testing '}
"""
HASH_COMBO_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1 # 4',
'status': ['stableinterface'],
# { Testing }
'supported_by': 'core'} # { Testing }
"""
METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'}
HASH_SYMBOL_METADATA = {'metadata_version': '1.1 # 4', 'status': ['stableinterface'], 'supported_by': 'core'}
METADATA_EXAMPLES = (
# Standard import
(LICENSE + FUTURE_IMPORTS + STANDARD_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + STANDARD_METADATA.rstrip(),
(METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file
(STANDARD_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])),
# Standard import with a trailing comment
(LICENSE + FUTURE_IMPORTS + TRAILING_COMMENT_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with a trailing comment
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + TRAILING_COMMENT_METADATA.rstrip(),
(METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with a trailing comment
(TRAILING_COMMENT_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])),
# FIXME: Current code cannot handle multiple statements on the same line.
# This is bad style so we're just going to ignore it for now
# Standard import with other statements on the same line
# (LICENSE + FUTURE_IMPORTS + MULTIPLE_STATEMENTS_METADATA + REGULAR_IMPORTS,
# (METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with other statements on the same line
# (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + MULTIPLE_STATEMENTS_METADATA.rstrip(),
# (METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with other statements on the same line
# (MULTIPLE_STATEMENTS_METADATA + LICENSE + REGULAR_IMPORTS,
# (METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])),
# Standard import with comment inside the metadata
(LICENSE + FUTURE_IMPORTS + EMBEDDED_COMMENT_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 8, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with comment inside the metadata
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + EMBEDDED_COMMENT_METADATA.rstrip(),
(METADATA, 8, 0, 11, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with comment inside the metadata
(EMBEDDED_COMMENT_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 4, 42, ['ANSIBLE_METADATA'])),
# FIXME: Current code cannot handle hash symbols in the last element of
# the metadata. Fortunately, the metadata currently fully specifies all
# the strings inside of metadata and none of them can contain a hash.
# Need to fix this to future-proof it against strings containing hashes
# Standard import with hash symbol in metadata
# (LICENSE + FUTURE_IMPORTS + HASH_SYMBOL_METADATA + REGULAR_IMPORTS,
# (HASH_SYMBOL_METADATA, 5, 0, 7, 53, ['ANSIBLE_METADATA'])),
# Metadata at end of file with hash symbol in metadata
# (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + HASH_SYMBOL_HASH_SYMBOL_METADATA.rstrip(),
# (HASH_SYMBOL_METADATA, 8, 0, 10, 53, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with hash symbol in metadata
# (HASH_SYMBOL_HASH_SYMBOL_METADATA + LICENSE + REGULAR_IMPORTS,
# (HASH_SYMBOL_METADATA, 1, 0, 3, 53, ['ANSIBLE_METADATA'])),
# Standard import with a bunch of hashes everywhere
(LICENSE + FUTURE_IMPORTS + HASH_COMBO_METADATA + REGULAR_IMPORTS,
(HASH_SYMBOL_METADATA, 5, 0, 8, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with a bunch of hashes everywhere
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + HASH_COMBO_METADATA.rstrip(),
(HASH_SYMBOL_METADATA, 8, 0, 11, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with a bunch of hashes everywhere
(HASH_COMBO_METADATA + LICENSE + REGULAR_IMPORTS,
(HASH_SYMBOL_METADATA, 1, 0, 4, 42, ['ANSIBLE_METADATA'])),
# Standard import with a junk ANSIBLE_METADATA as well
(LICENSE + FUTURE_IMPORTS + b"\nANSIBLE_METADATA = 10\n" + HASH_COMBO_METADATA + REGULAR_IMPORTS,
(HASH_SYMBOL_METADATA, 7, 0, 10, 42, ['ANSIBLE_METADATA'])),
)
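# Hedged usage sketch (not one of the original tests; the helper name is
# invented here): it shows how the extract_metadata call exercised by the
# parametrized tests below unpacks into the pieces packed into each
# METADATA_EXAMPLES tuple -- the metadata dict, its start/end line and column
# offsets, and the names of the variables the metadata was read from.
def _example_extract_metadata_usage():
    code = LICENSE + FUTURE_IMPORTS + STANDARD_METADATA + REGULAR_IMPORTS
    extracted = md.extract_metadata(module_data=code, offsets=True)
    metadata, start_line, start_col, end_line, end_col, targets = extracted
    # For this input the first METADATA_EXAMPLES entry above expects
    # metadata == METADATA, (start_line, start_col) == (5, 0),
    # (end_line, end_col) == (7, 42) and targets == ['ANSIBLE_METADATA'].
    return metadata, (start_line, start_col), (end_line, end_col), targets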
# FIXME: String/yaml metadata is not implemented yet. Need more test cases once it is implemented
STRING_METADATA_EXAMPLES = (
# Standard import
(LICENSE + FUTURE_IMPORTS + TEXT_STD_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 10, 3, ['ANSIBLE_METADATA'])),
# Metadata at end of file
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + TEXT_STD_METADATA.rstrip(),
(METADATA, 8, 0, 13, 3, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file
(TEXT_STD_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 6, 3, ['ANSIBLE_METADATA'])),
# Standard import
(LICENSE + FUTURE_IMPORTS + BYTES_STD_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 10, 3, ['ANSIBLE_METADATA'])),
# Metadata at end of file
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + BYTES_STD_METADATA.rstrip(),
(METADATA, 8, 0, 13, 3, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file
(BYTES_STD_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 6, 3, ['ANSIBLE_METADATA'])),
)
@pytest.mark.parametrize("code, expected", METADATA_EXAMPLES)
def test_dict_metadata(code, expected):
assert md.extract_metadata(module_data=code, offsets=True) == expected
@pytest.mark.parametrize("code, expected", STRING_METADATA_EXAMPLES)
def test_string_metadata(code, expected):
# FIXME: String/yaml metadata is not implemented yet.
with pytest.raises(NotImplementedError):
assert md.extract_metadata(module_data=code, offsets=True) == expected
def test_required_params():
with pytest.raises(TypeError, message='One of module_ast or module_data must be given'):
assert md.extract_metadata()
def test_module_data_param_given_with_offset():
with pytest.raises(TypeError, message='If offsets is True then module_data must also be given'):
assert md.extract_metadata(module_ast='something', offsets=True)
def test_invalid_dict_metadata():
with pytest.raises(SyntaxError):
assert md.extract_metadata(module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1",\n' + REGULAR_IMPORTS)
with pytest.raises(md.ParseError, message='Unable to find the end of dictionary'):
assert md.extract_metadata(module_ast=ast.parse(LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1"}\n' + REGULAR_IMPORTS),
module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1",\n' + REGULAR_IMPORTS,
offsets=True)
def test_multiple_statements_limitation():
with pytest.raises(md.ParseError, message='Multiple statements per line confuses the module metadata parser.'):
assert md.extract_metadata(module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1"}; a=b\n' + REGULAR_IMPORTS,
offsets=True)
|
markYoungH/chromium.src | refs/heads/nw12 | mojo/public/tools/bindings/pylib/mojom_tests/parse/run_parser.py | 99 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple testing utility to just run the mojom parser."""
import os.path
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, os.path.pardir))
from mojom.parse.parser import Parse, ParseError
def main(argv):
if len(argv) < 2:
print "usage: %s filename" % argv[0]
return 0
for filename in argv[1:]:
with open(filename) as f:
print "%s:" % filename
try:
print Parse(f.read(), filename)
except ParseError, e:
print e
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
opensourcechipspark/platform_external_chromium_org | refs/heads/master | third_party/tlslite/tlslite/__init__.py | 409 | """
TLS Lite is a free python library that implements SSL v3, TLS v1, and
TLS v1.1. TLS Lite supports non-traditional authentication methods
such as SRP, shared keys, and cryptoIDs, in addition to X.509
certificates. TLS Lite is pure python, however it can access OpenSSL,
cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite
integrates with httplib, xmlrpclib, poplib, imaplib, smtplib,
SocketServer, asyncore, and Twisted.
To use, do::
from tlslite.api import *
Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket,
or use one of the integration classes in L{tlslite.integration}.
@version: 0.3.8
"""
__version__ = "0.3.8"
__all__ = ["api",
"BaseDB",
"Checker",
"constants",
"errors",
"FileObject",
"HandshakeSettings",
"mathtls",
"messages",
"Session",
"SessionCache",
"SharedKeyDB",
"TLSConnection",
"TLSRecordLayer",
"VerifierDB",
"X509",
"X509CertChain",
"integration",
"utils"]
|
robertwb/incubator-beam | refs/heads/master | sdks/python/apache_beam/testing/benchmarks/chicago_taxi/trainer/task.py | 5 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer for the chicago_taxi demo."""
# pytype: skip-file
import argparse
import os
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from trainer import model
from trainer import taxi
SERVING_MODEL_DIR = 'serving_model_dir'
EVAL_MODEL_DIR = 'eval_model_dir'
TRAIN_BATCH_SIZE = 40
EVAL_BATCH_SIZE = 40
# Number of nodes in the first layer of the DNN
FIRST_DNN_LAYER_SIZE = 100
NUM_DNN_LAYERS = 4
DNN_DECAY_FACTOR = 0.7
def train_and_maybe_evaluate(hparams):
"""Run the training and evaluate using the high level API.
Args:
hparams: Holds hyperparameters used to train the model as name/value pairs.
Returns:
The estimator that was used for training (and maybe eval)
"""
schema = taxi.read_schema(hparams.schema_file)
tf_transform_output = tft.TFTransformOutput(hparams.tf_transform_dir)
train_input = lambda: model.input_fn(
hparams.train_files, tf_transform_output, batch_size=TRAIN_BATCH_SIZE)
eval_input = lambda: model.input_fn(
hparams.eval_files, tf_transform_output, batch_size=EVAL_BATCH_SIZE)
train_spec = tf.estimator.TrainSpec(
train_input, max_steps=hparams.train_steps)
serving_receiver_fn = lambda: model.example_serving_receiver_fn(
tf_transform_output, schema)
exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
eval_spec = tf.estimator.EvalSpec(
eval_input,
steps=hparams.eval_steps,
exporters=[exporter],
name='chicago-taxi-eval')
run_config = tf.estimator.RunConfig(
save_checkpoints_steps=999, keep_checkpoint_max=1)
serving_model_dir = os.path.join(hparams.output_dir, SERVING_MODEL_DIR)
run_config = run_config.replace(model_dir=serving_model_dir)
estimator = model.build_estimator(
tf_transform_output,
      # Construct layer sizes with exponential decay
hidden_units=[
max(2, int(FIRST_DNN_LAYER_SIZE * DNN_DECAY_FACTOR**i))
for i in range(NUM_DNN_LAYERS)
],
config=run_config)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
return estimator
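# Small illustrative helper (not called by the trainer): the hidden_units
# passed to model.build_estimator above form a geometrically decaying stack,
# FIRST_DNN_LAYER_SIZE * DNN_DECAY_FACTOR**i for layer i, floored at 2 units.
# With the constants defined above this comes out to roughly
# [100, 70, 49, 34]; exact values depend on int() truncation of the
# floating-point products.
def _example_hidden_units():
  return [
      max(2, int(FIRST_DNN_LAYER_SIZE * DNN_DECAY_FACTOR**i))
      for i in range(NUM_DNN_LAYERS)
  ]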
def run_experiment(hparams):
"""Train the model then export it for tf.model_analysis evaluation.
Args:
hparams: Holds hyperparameters used to train the model as name/value pairs.
"""
estimator = train_and_maybe_evaluate(hparams)
schema = taxi.read_schema(hparams.schema_file)
tf_transform_output = tft.TFTransformOutput(hparams.tf_transform_dir)
# Save a model for tfma eval
eval_model_dir = os.path.join(hparams.output_dir, EVAL_MODEL_DIR)
receiver_fn = lambda: model.eval_input_receiver_fn(
tf_transform_output, schema)
tfma.export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=eval_model_dir,
eval_input_receiver_fn=receiver_fn)
def main():
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--train-files',
help='GCS or local paths to training data',
nargs='+',
required=True)
parser.add_argument(
'--tf-transform-dir',
help='Tf-transform directory with model from preprocessing step',
required=True)
parser.add_argument(
'--output-dir',
help="""\
      Directory under which the serving model (under /serving_model_dir)\
      and the tf-model-analysis model (under /eval_model_dir) will be written\
""",
required=True)
parser.add_argument(
'--eval-files',
help='GCS or local paths to evaluation data',
nargs='+',
required=True)
# Training arguments
parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
required=True)
# Argument to turn on all logging
parser.add_argument(
'--verbosity',
choices=['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'],
default='INFO',
)
# Experiment arguments
parser.add_argument(
'--train-steps',
help='Count of steps to run the training job for',
required=True,
type=int)
parser.add_argument(
'--eval-steps',
      help='Number of steps to run evaluation for at each checkpoint',
default=100,
type=int)
parser.add_argument(
'--schema-file', help='File holding the schema for the input data')
args = parser.parse_args()
# Set python level verbosity
tf.compat.v1.logging.set_verbosity(args.verbosity)
# Set C++ Graph Execution level verbosity
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
getattr(tf.compat.v1.logging, args.verbosity) / 10)
# Run the training job
hparams = tf.contrib.training.HParams(**args.__dict__)
run_experiment(hparams)
if __name__ == '__main__':
main()
|
Moriadry/tensorflow | refs/heads/master | tensorflow/contrib/boosted_trees/python/utils/losses.py | 21 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses for Gtflow Estimator and Batch Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def per_example_logistic_loss(labels, weights, predictions):
"""Logistic loss given labels, example weights and predictions.
Args:
labels: Rank 2 (N, 1) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, 1) tensor of per-example predictions.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example logistic loss.
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.to_float(labels)
unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
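# Hedged NumPy sketch of the same per-example logistic loss (illustrative only,
# not part of the TF graph above). It uses the numerically stable form of
# sigmoid cross entropy, max(x, 0) - x*z + log(1 + exp(-|x|)), which is the
# quantity nn.sigmoid_cross_entropy_with_logits computes.
def _numpy_logistic_loss_sketch(labels, weights, predictions):
  import numpy as np
  x = np.asarray(predictions, dtype=np.float64)
  z = np.asarray(labels, dtype=np.float64)
  unweighted = np.maximum(x, 0.0) - x * z + np.log1p(np.exp(-np.abs(x)))
  return unweighted * np.asarray(weights, dtype=np.float64)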
# This is the classical form of maximum entropy loss, which is twice
# differentiable (sparse_softmax_cross_entropy, which we would otherwise use,
# is not twice differentiable).
def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
"""Maximum entropy loss for multiclass problems.
Maximum entropy is a generalization of logistic loss for the case when more
than 2 classes are present.
Args:
labels: Rank 2 (N, 1) or Rank 1 (N) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
logits: Rank 2 (N, K) tensor of per-example predictions, K - num of
classes.
num_classes: number of classes in classification task. Used to expand label
indices into one-hot encodings.
eps: tolerance, used as a minimum possible value.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example maxent loss
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.to_int64(labels)
# If labels are of rank 1, make them rank 2.
labels_shape = labels.get_shape()
if len(labels_shape) != 2:
labels = array_ops.expand_dims(labels, 1)
# Labels are indices of classes, convert them to one hot encodings.
target_one_hot = array_ops.one_hot(indices=labels, depth=num_classes)
labels = math_ops.reduce_sum(
input_tensor=target_one_hot, reduction_indices=[1])
labels = math_ops.to_float(labels)
# Calculate softmax probabilities for each class.
unnormalized_probs = math_ops.exp(logits)
normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keep_dims=True)
softmax_predictions = math_ops.divide(unnormalized_probs,
math_ops.add(normalizers, eps))
# Pull out the probabilities for real label.
probs_for_real_class = math_ops.reduce_sum(labels * softmax_predictions, 1)
# Add handling for values near 0 and 1.
zeros = array_ops.zeros_like(probs_for_real_class, dtype=logits.dtype) + eps
one_minus_eps = array_ops.ones_like(
probs_for_real_class, dtype=logits.dtype) - eps
# Take maximum(eps, pred)
cond = (probs_for_real_class >= eps)
probs_for_real_class = array_ops.where(cond, probs_for_real_class, zeros)
# Take minimum(1-eps, pred)
cond = (probs_for_real_class <= 1 - eps)
probs_for_real_class = array_ops.where(cond, probs_for_real_class,
one_minus_eps)
unweighted_loss = array_ops.expand_dims(-math_ops.log(probs_for_real_class),
1)
return unweighted_loss * weights, control_flow_ops.no_op()
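# Hedged NumPy sketch of the maxent computation above (illustrative only, not
# part of the TF graph): softmax the logits, pick the probability assigned to
# the true class, clip it to [eps, 1 - eps], take -log, then apply the
# per-example weights.
def _numpy_maxent_loss_sketch(labels, weights, logits, eps=1e-15):
  import numpy as np
  logits = np.asarray(logits, dtype=np.float64)
  unnormalized = np.exp(logits)
  probs = unnormalized / (unnormalized.sum(axis=1, keepdims=True) + eps)
  rows = np.arange(probs.shape[0])
  probs_real = probs[rows, np.asarray(labels, dtype=np.int64).ravel()]
  probs_real = np.clip(probs_real, eps, 1.0 - eps)
  return -np.log(probs_real)[:, None] * np.asarray(weights, dtype=np.float64)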
def per_example_squared_loss(labels, weights, predictions):
"""Squared loss given labels, example weights and predictions.
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example squared loss.
update_op: An update operation to update the loss's internal state.
"""
unweighted_loss = math_ops.reduce_sum(
math_ops.square(predictions - labels), 1, keep_dims=True)
return unweighted_loss * weights, control_flow_ops.no_op()
def per_example_exp_loss(labels, weights, predictions, name=None, eps=0.1):
"""Exponential loss given labels, example weights and predictions.
Note that this is only for binary classification.
  Whereas logistic loss tries to make sure that the classifier is certain of
  its predictions, exp loss says: "as long as it got it correct, even barely, I
  don't care". Can be used on noisy data, or when you don't care about getting
  the actual probabilities from the model, just the correct label.
  The loss returned is exp(-targets*modified_predictions), where
  modified_predictions are 1 if sigmoid is >= 0.5+eps (e.g. we predict the
  positive class), -1 if sigmoid < 0.5-eps (e.g. we predict the negative class)
  and a*x+b on the interval (0.5-eps, 0.5+eps), where a = 1/eps, b = -1/(2*eps).
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
name: A name for the operation (optional).
eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example exp loss
update_op: An update operation to update the loss's internal state.
"""
def exp_with_logits(name, eps, labels=None, logits=None):
"""Computes exponential loss given `logits`.
    The loss returned is exp(-targets*modified_predictions), where
    modified_predictions are 1 if sigmoid is >= 0.5+eps (e.g. we predict the
    positive class), -1 if sigmoid < 0.5-eps (e.g. we predict the negative
    class) and a*x+b on the interval (0.5-eps, 0.5+eps), where a = 1/eps,
    b = -1/(2*eps).
Args:
name: A name for the operation (optional).
eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
exponential losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)"
% (logits.get_shape(), labels.get_shape()))
# Default threshold to switch between classes
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
ones = array_ops.ones_like(logits, dtype=logits.dtype)
neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)
# Convert labels to 1 and -1
cond_labels = (labels > zeros)
labels_converted = array_ops.where(cond_labels, ones, neg_ones)
# Convert predictions to 1 and -1
# The loss we build is min(1, max(-1,ax+b))
# where a=1/eps, b=-1/2eps.
a = 1.0 / eps
b = -1.0 / 2 / eps
probs = math_ops.sigmoid(logits)
y = a * probs + b
# Build max(-1, ax+b)
cond = (y < -1)
max_res = array_ops.where(cond, neg_ones, y)
# Build min part
cond = (max_res > 1)
min_res = array_ops.where(cond, ones, max_res)
preds_converted = min_res
return math_ops.exp(-preds_converted * labels_converted)
labels = math_ops.to_float(labels)
unweighted_loss = exp_with_logits(
name=name, eps=eps, labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
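# Hedged scalar sketch of the piecewise "modified prediction" described in the
# docstring above (illustrative only): -1 when sigmoid(logit) <= 0.5 - eps,
# +1 when sigmoid(logit) >= 0.5 + eps, and linear (a*p + b with a = 1/eps,
# b = -1/(2*eps)) in between, so it is exactly 0 at sigmoid(logit) == 0.5.
# The exp loss is then exp(-label * modified_prediction) with labels in {-1, 1}.
def _exp_loss_modified_prediction_sketch(logit, eps=0.1):
  import math
  p = 1.0 / (1.0 + math.exp(-logit))
  a = 1.0 / eps
  b = -1.0 / (2.0 * eps)
  return min(1.0, max(-1.0, a * p + b))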
|
britcey/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_zone.py | 22 | #!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_zone
short_description: Manage OpenStack DNS zones
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
- Manage OpenStack DNS zones. Zones can be created, deleted or
updated. Only the I(email), I(description), I(ttl) and I(masters) values
can be updated.
options:
name:
description:
- Zone name
required: true
zone_type:
description:
- Zone type
choices: [primary, secondary]
default: None
email:
description:
- Email of the zone owner (only applies if zone_type is primary)
required: false
description:
description:
- Zone description
required: false
default: None
ttl:
description:
- TTL (Time To Live) value in seconds
required: false
default: None
masters:
description:
- Master nameservers (only applies if zone_type is secondary)
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
        - Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a zone named "example.net"
- os_zone:
cloud: mycloud
state: present
name: example.net.
zone_type: primary
email: test@example.net
description: Test zone
ttl: 3600
# Update the TTL on existing "example.net." zone
- os_zone:
cloud: mycloud
state: present
name: example.net.
ttl: 7200
# Delete zone named "example.net."
- os_zone:
cloud: mycloud
state: absent
name: example.net.
'''
RETURN = '''
zone:
description: Dictionary describing the zone.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Unique zone ID
type: string
sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
name:
description: Zone name
type: string
sample: "example.net."
type:
description: Zone type
type: string
sample: "PRIMARY"
email:
description: Zone owner email
type: string
sample: "test@example.net"
description:
description: Zone description
type: string
sample: "Test description"
ttl:
description: Zone TTL value
type: int
sample: 3600
masters:
description: Zone master nameservers
type: list
sample: []
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _system_state_change(state, email, description, ttl, masters, zone):
if state == 'present':
if not zone:
return True
if email is not None and zone.email != email:
return True
if description is not None and zone.description != description:
return True
if ttl is not None and zone.ttl != ttl:
return True
if masters is not None and zone.masters != masters:
return True
if state == 'absent' and zone:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
        zone_type=dict(required=False, choices=['primary', 'secondary']),
email=dict(required=False, default=None),
description=dict(required=False, default=None),
ttl=dict(required=False, default=None, type='int'),
masters=dict(required=False, default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.8.0'):
        module.fail_json(msg="To utilize this module, the installed version of "
"the shade library MUST be >=1.8.0")
name = module.params.get('name')
state = module.params.get('state')
try:
cloud = shade.openstack_cloud(**module.params)
zone = cloud.get_zone(name)
if state == 'present':
zone_type = module.params.get('zone_type')
email = module.params.get('email')
description = module.params.get('description')
ttl = module.params.get('ttl')
masters = module.params.get('masters')
if module.check_mode:
module.exit_json(changed=_system_state_change(state, email,
description, ttl,
masters, zone))
if zone is None:
zone = cloud.create_zone(
name=name, zone_type=zone_type, email=email,
description=description, ttl=ttl, masters=masters)
changed = True
else:
if masters is None:
masters = []
pre_update_zone = zone
changed = _system_state_change(state, email,
description, ttl,
masters, pre_update_zone)
if changed:
zone = cloud.update_zone(
name, email=email,
description=description,
ttl=ttl, masters=masters)
module.exit_json(changed=changed, zone=zone)
elif state == 'absent':
if module.check_mode:
module.exit_json(changed=_system_state_change(state, None,
None, None,
None, zone))
if zone is None:
                changed = False
else:
cloud.delete_zone(name)
                changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
signed/intellij-community | refs/heads/master | python/lib/Lib/htmlentitydefs.py | 390 | """HTML character entity references."""
# maps the HTML entity name to the Unicode codepoint
name2codepoint = {
'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
'Alpha': 0x0391, # greek capital letter alpha, U+0391
'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
'Beta': 0x0392, # greek capital letter beta, U+0392
'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
'Chi': 0x03a7, # greek capital letter chi, U+03A7
'Dagger': 0x2021, # double dagger, U+2021 ISOpub
'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
'Eta': 0x0397, # greek capital letter eta, U+0397
'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
'Iota': 0x0399, # greek capital letter iota, U+0399
'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
'Kappa': 0x039a, # greek capital letter kappa, U+039A
'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
'Mu': 0x039c, # greek capital letter mu, U+039C
'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
'Nu': 0x039d, # greek capital letter nu, U+039D
'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
'Omicron': 0x039f, # greek capital letter omicron, U+039F
'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
'Rho': 0x03a1, # greek capital letter rho, U+03A1
'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
'Tau': 0x03a4, # greek capital letter tau, U+03A4
'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
'Zeta': 0x0396, # greek capital letter zeta, U+0396
'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
'amp': 0x0026, # ampersand, U+0026 ISOnum
'and': 0x2227, # logical and = wedge, U+2227 ISOtech
'ang': 0x2220, # angle, U+2220 ISOamso
'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
'cap': 0x2229, # intersection = cap, U+2229 ISOtech
'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
'cent': 0x00a2, # cent sign, U+00A2 ISOnum
'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
'cong': 0x2245, # approximately equal to, U+2245 ISOtech
'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
'cup': 0x222a, # union = cup, U+222A ISOtech
'curren': 0x00a4, # currency sign, U+00A4 ISOnum
'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
'dagger': 0x2020, # dagger, U+2020 ISOpub
'darr': 0x2193, # downwards arrow, U+2193 ISOnum
'deg': 0x00b0, # degree sign, U+00B0 ISOnum
'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
'diams': 0x2666, # black diamond suit, U+2666 ISOpub
'divide': 0x00f7, # division sign, U+00F7 ISOnum
'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
'emsp': 0x2003, # em space, U+2003 ISOpub
'ensp': 0x2002, # en space, U+2002 ISOpub
'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
'equiv': 0x2261, # identical to, U+2261 ISOtech
'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
'euro': 0x20ac, # euro sign, U+20AC NEW
'exist': 0x2203, # there exists, U+2203 ISOtech
'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
'forall': 0x2200, # for all, U+2200 ISOtech
'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
'frasl': 0x2044, # fraction slash, U+2044 NEW
'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
'gt': 0x003e, # greater-than sign, U+003E ISOnum
'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
'harr': 0x2194, # left right arrow, U+2194 ISOamsa
'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
'infin': 0x221e, # infinity, U+221E ISOtech
'int': 0x222b, # integral, U+222B ISOtech
'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
'isin': 0x2208, # element of, U+2208 ISOtech
'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
'le': 0x2264, # less-than or equal to, U+2264 ISOtech
'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
'loz': 0x25ca, # lozenge, U+25CA ISOpub
'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
'lt': 0x003c, # less-than sign, U+003C ISOnum
'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
'mdash': 0x2014, # em dash, U+2014 ISOpub
'micro': 0x00b5, # micro sign, U+00B5 ISOnum
'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
'minus': 0x2212, # minus sign, U+2212 ISOtech
'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
'ndash': 0x2013, # en dash, U+2013 ISOpub
'ne': 0x2260, # not equal to, U+2260 ISOtech
'ni': 0x220b, # contains as member, U+220B ISOtech
'not': 0x00ac, # not sign, U+00AC ISOnum
'notin': 0x2209, # not an element of, U+2209 ISOtech
'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
'oline': 0x203e, # overline = spacing overscore, U+203E NEW
'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
'or': 0x2228, # logical or = vee, U+2228 ISOtech
'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
'part': 0x2202, # partial differential, U+2202 ISOtech
'permil': 0x2030, # per mille sign, U+2030 ISOtech
'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
'pound': 0x00a3, # pound sign, U+00A3 ISOnum
'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
'prop': 0x221d, # proportional to, U+221D ISOtech
'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
'radic': 0x221a, # square root = radical sign, U+221A ISOtech
'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
'rfloor': 0x230b, # right floor, U+230B ISOamsc
'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
'sect': 0x00a7, # section sign, U+00A7 ISOnum
'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
'spades': 0x2660, # black spade suit, U+2660 ISOpub
'sub': 0x2282, # subset of, U+2282 ISOtech
'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
    'sum': 0x2211, # n-ary summation, U+2211 ISOamsb
'sup': 0x2283, # superset of, U+2283 ISOtech
'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
'there4': 0x2234, # therefore, U+2234 ISOtech
'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
'thinsp': 0x2009, # thin space, U+2009 ISOpub
'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1
'tilde': 0x02dc, # small tilde, U+02DC ISOdia
'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
'trade': 0x2122, # trade mark sign, U+2122 ISOnum
'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
}
# maps the Unicode codepoint to the HTML entity name
codepoint2name = {}
# maps the HTML entity name to the character
# (or a character reference if the character is outside the Latin-1 range)
entitydefs = {}
for (name, codepoint) in name2codepoint.iteritems():
codepoint2name[codepoint] = name
if codepoint <= 0xff:
entitydefs[name] = chr(codepoint)
else:
entitydefs[name] = '&#%d;' % codepoint
del name, codepoint
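# Hedged usage sketch of the three tables built above (not part of the original
# module): name2codepoint maps an entity name to its codepoint, codepoint2name
# is the inverse, and entitydefs maps an entity name to the Latin-1 character
# or, for codepoints above 0xff, to a numeric character reference.
def _example_entity_lookups():
    assert name2codepoint['gt'] == 0x003e
    assert codepoint2name[0x00e9] == 'eacute'
    assert entitydefs['amp'] == chr(0x0026) # '&'
    assert entitydefs['alpha'] == '&#%d;' % 0x03b1 # outside the Latin-1 range
    return True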
|
stutivarshney/Bal-Aveksha | refs/heads/master | WebServer/BalAvekshaEnv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2928 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
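# Roughly how the group prober is expected to wire all of this together (an
# illustrative sketch only -- the single-byte prober and model names below are
# assumptions and may differ in this copy of the library):
#
#     hebrew_prober = HebrewProber()
#     logical = SingleByteCharSetProber(Win1255HebrewModel, False, hebrew_prober)
#     visual = SingleByteCharSetProber(Win1255HebrewModel, True, hebrew_prober)
#     hebrew_prober.set_model_probers(logical, visual)
#
# The group prober then feeds all three probers and, on a positive hit from
# either model prober, asks the HebrewProber for the final charset name.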
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
        # The last two characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
        # causing the Non-Final tsadi to appear at the end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
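        # Illustrative example (not part of the original source): the word
        # "shalom" is Shin 0xf9, Lamed 0xec, Vav 0xe5, final Mem 0xed. Followed
        # by a space, the logical byte order ends the word with the final Mem,
        # so case (1) bumps the logical score; the same word stored visually
        # (bytes reversed) starts with the final Mem, so case (3) bumps the
        # visual score instead.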
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
|
MikeOfNoTrades/netmiko | refs/heads/master | examples/scp_example.py | 18 | #!/usr/bin/env python
'''
Cisco IOS only
Requires scp https://github.com/jbardin/scp.py
'''
from netmiko import ConnectHandler, SCPConn
from SECRET_DEVICE_CREDS import cisco_881
def main():
'''
SCP transfer cisco_logging.txt to network device
Use ssh_conn as ssh channel into network device
scp_conn must be closed after file transfer
'''
ssh_conn = ConnectHandler(**cisco_881)
scp_conn = SCPConn(ssh_conn)
s_file = 'cisco_logging.txt'
d_file = 'cisco_logging.txt'
print "\n\n"
scp_conn.scp_transfer_file(s_file, d_file)
scp_conn.close()
output = ssh_conn.send_command("show flash: | inc cisco_logging")
print ">> " + output + '\n'
# Disable file copy confirmation
output = ssh_conn.send_config_set(["file prompt quiet"])
# Execute config merge
print "Performing config merge\n"
output = ssh_conn.send_command("copy flash:cisco_logging.txt running-config")
# Verify change
print "Verifying logging buffer change"
output = ssh_conn.send_command("show run | inc logging buffer")
print ">> " + output + '\n'
# Restore copy confirmation
output = ssh_conn.send_config_set(["file prompt alert"])
if __name__ == "__main__":
main()
|
qedi-r/home-assistant | refs/heads/dev | tests/components/radarr/__init__.py | 36 | """Tests for the radarr component."""
|
linvictor88/vse-lbaas-driver | refs/heads/master | quantum/tests/unit/mlnx/__init__.py | 139 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jnerin/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/_nxos_portchannel.py | 8 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_portchannel
extends_documentation_fragment: nxos
version_added: "2.2"
deprecated:
removed_in: "2.9"
why: Replaced with common C(*_linkagg) network modules.
alternative: Use M(nxos_linkagg) instead.
short_description: Manages port-channel interfaces.
description:
- Manages port-channel specific configuration parameters.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the portchannel config and interface if it
already exists. If members to be removed are not explicitly
passed, all existing members (if any), are removed.
- Members must be a list.
- LACP needs to be enabled first if active/passive modes are used.
options:
group:
description:
- Channel-group number for the port-channel.
required: true
mode:
description:
- Mode for the port-channel, i.e. on, active, passive.
required: false
default: on
choices: ['active','passive','on']
min_links:
description:
- Min links required to keep portchannel up.
required: false
default: null
members:
description:
- List of interfaces that will be managed in a given portchannel.
required: false
default: null
force:
description:
- When true it forces port-channel members to match what is
declared in the members param. This can be used to remove
members.
required: false
choices: ['true', 'false']
default: false
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Ensure port-channel99 is created, add two members, and set to mode on
- nxos_portchannel:
group: 99
members: ['Ethernet1/1','Ethernet1/2']
mode: 'active'
state: present
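# Illustrative only (not from the original module documentation):
# remove the port-channel and its member configuration
- nxos_portchannel:
    group: 99
    state: absent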
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["interface Ethernet2/6", "no channel-group 12",
"interface Ethernet2/5", "no channel-group 12",
"interface Ethernet2/6", "channel-group 12 mode on",
"interface Ethernet2/5", "channel-group 12 mode on"]
'''
import collections
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
def get_value(arg, config, module):
param_to_command_keymap = {
'min_links': 'lacp min-links'
}
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(param_to_command_keymap[arg]), re.M)
value = ''
if param_to_command_keymap[arg] in config:
value = REGEX.search(config).group('value')
return value
def check_interface(module, netcfg):
config = str(netcfg)
REGEX = re.compile(r'\s+interface port-channel{0}$'.format(module.params['group']), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
return value
def get_custom_value(arg, config, module):
REGEX = re.compile(r'\s+member vni {0} associate-vrf\s*$'.format(
module.params['vni']), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
return value
def execute_show_command(command, module):
device_info = get_capabilities(module)
network_api = device_info.get('network_api', 'nxapi')
if network_api == 'cliconf':
if 'show port-channel summary' in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif network_api == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def get_portchannel_members(pchannel):
try:
members = pchannel['TABLE_member']['ROW_member']
except KeyError:
members = []
return members
def get_portchannel_mode(interface, protocol, module, netcfg):
if protocol != 'LACP':
mode = 'on'
else:
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = ['interface {0}'.format(interface.capitalize())]
body = netcfg.get_section(parents)
mode_list = body.split('\n')
for line in mode_list:
this_line = line.strip()
if this_line.startswith('channel-group'):
find = this_line
if 'mode' in find:
if 'passive' in find:
mode = 'passive'
elif 'active' in find:
mode = 'active'
return mode
def get_portchannel(module, netcfg=None):
command = 'show port-channel summary'
portchannel = {}
portchannel_table = {}
members = []
try:
body = execute_show_command(command, module)[0]
pc_table = body['TABLE_channel']['ROW_channel']
if isinstance(pc_table, dict):
pc_table = [pc_table]
for pc in pc_table:
if pc['group'] == module.params['group']:
portchannel_table = pc
elif module.params['group'].isdigit() and pc['group'] == int(module.params['group']):
portchannel_table = pc
except (KeyError, AttributeError, TypeError, IndexError):
return {}
if portchannel_table:
portchannel['group'] = portchannel_table['group']
protocol = portchannel_table['prtcl']
members_list = get_portchannel_members(portchannel_table)
if isinstance(members_list, dict):
members_list = [members_list]
member_dictionary = {}
for each_member in members_list:
interface = each_member['port']
members.append(interface)
pc_member = {}
pc_member['status'] = str(each_member['port-status'])
pc_member['mode'] = get_portchannel_mode(interface,
protocol, module, netcfg)
member_dictionary[interface] = pc_member
portchannel['members'] = members
portchannel['members_detail'] = member_dictionary
# Ensure each member have the same mode.
modes = set()
for each, value in member_dictionary.items():
modes.update([value['mode']])
if len(modes) == 1:
portchannel['mode'] = value['mode']
else:
portchannel['mode'] = 'unknown'
return portchannel
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
interface_exist = check_interface(module, netcfg)
if interface_exist:
parents = ['interface port-channel{0}'.format(module.params['group'])]
config = netcfg.get_section(parents)
if config:
existing['min_links'] = get_value('min_links', config, module)
existing.update(get_portchannel(module, netcfg=netcfg))
return existing, interface_exist
def config_portchannel(proposed, mode, group, force):
commands = []
# NOTE: Leading whitespace for force option is important
force = ' force' if force else ''
config_args = {
'mode': 'channel-group {group}{force} mode {mode}',
'min_links': 'lacp min-links {min_links}',
}
for member in proposed.get('members', []):
commands.append('interface {0}'.format(member))
commands.append(config_args.get('mode').format(group=group, force=force, mode=mode))
min_links = proposed.get('min_links', None)
if min_links:
command = 'interface port-channel {0}'.format(group)
commands.append(command)
commands.append(config_args.get('min_links').format(
min_links=min_links))
return commands
def get_commands_to_add_members(proposed, existing, force, module):
try:
proposed_members = proposed['members']
except KeyError:
proposed_members = []
try:
existing_members = existing['members']
except KeyError:
existing_members = []
members_to_add = list(set(proposed_members).difference(existing_members))
commands = []
# NOTE: Leading whitespace for force option is important
force = ' force' if force else ''
if members_to_add:
for member in members_to_add:
commands.append('interface {0}'.format(member))
commands.append('channel-group {0}{1} mode {2}'.format(
existing['group'], force, proposed['mode']))
return commands
def get_commands_to_remove_members(proposed, existing, module):
try:
proposed_members = proposed['members']
except KeyError:
proposed_members = []
try:
existing_members = existing['members']
except KeyError:
existing_members = []
members_to_remove = list(set(existing_members).difference(proposed_members))
commands = []
if members_to_remove:
for member in members_to_remove:
commands.append('interface {0}'.format(member))
commands.append('no channel-group {0}'.format(existing['group']))
return commands
def get_commands_if_mode_change(proposed, existing, group, mode, force, module):
try:
proposed_members = proposed['members']
except KeyError:
proposed_members = []
try:
existing_members = existing['members']
except KeyError:
existing_members = []
try:
members_dict = existing['members_detail']
except KeyError:
members_dict = {}
members_to_remove = set(existing_members).difference(proposed_members)
members_with_mode_change = []
if members_dict:
for interface, values in members_dict.items():
if (interface in proposed_members and
(interface not in members_to_remove)):
if values['mode'] != mode:
members_with_mode_change.append(interface)
commands = []
# NOTE: Leading whitespace for force option is important
force = ' force' if force else ''
if members_with_mode_change:
for member in members_with_mode_change:
commands.append('interface {0}'.format(member))
commands.append('no channel-group {0}'.format(group))
for member in members_with_mode_change:
commands.append('interface {0}'.format(member))
commands.append('channel-group {0}{1} mode {2}'.format(group, force, mode))
return commands
def get_commands_min_links(existing, proposed, group, min_links, module):
commands = []
try:
if (existing['min_links'] is None or
(existing['min_links'] != proposed['min_links'])):
commands.append('interface port-channel{0}'.format(group))
            commands.append('lacp min-links {0}'.format(min_links))
except KeyError:
commands.append('interface port-channel{0}'.format(group))
        commands.append('lacp min-links {0}'.format(min_links))
return commands
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def state_present(module, existing, proposed, interface_exist, force, warnings):
commands = []
group = str(module.params['group'])
mode = module.params['mode']
min_links = module.params['min_links']
if not interface_exist:
command = config_portchannel(proposed, mode, group, force)
commands.append(command)
commands.insert(0, 'interface port-channel{0}'.format(group))
warnings.append("The proposed port-channel interface did not "
"exist. It's recommended to use nxos_interface to "
"create all logical interfaces.")
elif existing and interface_exist:
if force:
command = get_commands_to_remove_members(proposed, existing, module)
commands.append(command)
command = get_commands_to_add_members(proposed, existing, force, module)
commands.append(command)
mode_command = get_commands_if_mode_change(proposed, existing, group, mode, force, module)
commands.insert(0, mode_command)
if min_links:
command = get_commands_min_links(existing, proposed, group, min_links, module)
commands.append(command)
return commands
def state_absent(module, existing, proposed):
commands = []
group = str(module.params['group'])
commands.append(['no interface port-channel{0}'.format(group)])
return commands
def main():
argument_spec = dict(
group=dict(required=True, type='str'),
mode=dict(required=False, choices=['on', 'active', 'passive'], default='on', type='str'),
min_links=dict(required=False, default=None, type='str'),
members=dict(required=False, default=None, type='list'),
force=dict(required=False, default='false', type='str', choices=['true', 'false']),
state=dict(required=False, choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
results = dict(changed=False, warnings=warnings)
group = str(module.params['group'])
mode = module.params['mode']
min_links = module.params['min_links']
members = module.params['members']
state = module.params['state']
if str(module.params['force']).lower() == 'true':
force = True
elif module.params['force'] == 'false':
force = False
if ((min_links or mode) and
(not members and state == 'present')):
module.fail_json(msg='"members" is required when state=present and '
'"min_links" or "mode" are provided')
args = [
'group',
'members',
'min_links',
'mode'
]
existing, interface_exist = get_existing(module, args)
proposed = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
commands = []
if state == 'absent' and existing:
commands = state_absent(module, existing, proposed)
elif state == 'present':
commands = state_present(module, existing, proposed, interface_exist, force, warnings)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(**results)
else:
load_config(module, cmds)
results['changed'] = True
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
|
martinogden/djangae | refs/heads/master | djangae/db/caching.py | 6 | from google.appengine.api import datastore
from djangae.db.backends.appengine import caching, context
class DisableCache(object):
"""
    Decorator and context manager for disabling the caches.
    Passing no args disables everything, but you can pass
    memcache=False or context=False to disable only one or
    the other.
"""
def __init__(self, context=True, memcache=True):
self.func = None
self.memcache = memcache
self.context = context
def __call__(self, *args, **kwargs):
def call_func(*_args, **_kwargs):
try:
self.__enter__()
return self.func(*_args, **_kwargs)
finally:
self.__exit__()
if not self.func:
assert args and callable(args[0])
self.func = args[0]
return call_func
if self.func:
return call_func(*args, **kwargs)
def __enter__(self):
caching.ensure_context()
self.orig_memcache = caching._context.memcache_enabled
self.orig_context = caching._context.context_enabled
caching._context.memcache_enabled = not self.memcache
caching._context.context_enabled = not self.context
def __exit__(self, *args, **kwargs):
caching._context.memcache_enabled = self.orig_memcache
caching._context.context_enabled = self.orig_context
disable_cache = DisableCache
def clear_context_cache():
"""
    Resets the context cache. Don't do this inside a transaction... in fact, probably
    just don't do this.
"""
if datastore.IsInTransaction():
raise RuntimeError("Clearing the context cache inside a transaction breaks everything, we can't let you do that")
caching._context.stack = context.ContextStack()
|
burdell/CS4464-Final-Project | refs/heads/master | django/core/exceptions.py | 292 | """
Global Django exception and warning classes.
"""
class DjangoRuntimeWarning(RuntimeWarning):
pass
class ObjectDoesNotExist(Exception):
"The requested object does not exist"
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"The query returned multiple objects when only one was expected."
pass
class SuspiciousOperation(Exception):
"The user did something suspicious"
pass
class PermissionDenied(Exception):
"The user did not have permission to do that"
pass
class ViewDoesNotExist(Exception):
"The requested view does not exist"
pass
class MiddlewareNotUsed(Exception):
"This middleware is not used in this server configuration"
pass
class ImproperlyConfigured(Exception):
"Django is somehow improperly configured"
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
"""An error while validating data."""
def __init__(self, message, code=None, params=None):
import operator
from django.utils.encoding import force_unicode
"""
ValidationError can be passed any object that can be printed (usually
a string), a list of objects or a dictionary.
"""
if isinstance(message, dict):
self.message_dict = message
# Reduce each list of messages into a single list.
message = reduce(operator.add, message.values())
if isinstance(message, list):
self.messages = [force_unicode(msg) for msg in message]
else:
self.code = code
self.params = params
message = force_unicode(message)
self.messages = [message]
def __str__(self):
# This is needed because, without a __str__(), printing an exception
# instance would result in this:
# AttributeError: ValidationError instance has no attribute 'args'
# See http://www.python.org/doc/current/tut/node10.html#handling
if hasattr(self, 'message_dict'):
return repr(self.message_dict)
return repr(self.messages)
def __repr__(self):
if hasattr(self, 'message_dict'):
return 'ValidationError(%s)' % repr(self.message_dict)
return 'ValidationError(%s)' % repr(self.messages)
def update_error_dict(self, error_dict):
if hasattr(self, 'message_dict'):
if error_dict:
for k, v in self.message_dict.items():
error_dict.setdefault(k, []).extend(v)
else:
error_dict = self.message_dict
else:
error_dict[NON_FIELD_ERRORS] = self.messages
return error_dict
|
luoyetx/mxnet | refs/heads/master | example/numpy-ops/custom_softmax_rtc.py | 19 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import logging
import numpy as np
import mxnet as mx
class Softmax(mx.operator.CustomOp):
def __init__(self):
super(Softmax,self).__init__()
# Each thread processes a row (a sample in the batch).
fwd_src = r"""
template<class DType>
__global__ void fwd(const DType* x, DType* y, const int row_size, const int req) {
const int offset = row_size * threadIdx.x;
DType max = x[offset];
for(int i = 1; i < row_size; ++i) {
if(max < x[offset + i]) {
max = x[offset + i];
}
}
DType sum = 0;
for(int i = 0; i < row_size; ++i) {
sum += exp(x[offset + i] - max);
}
switch(req) {
case 1:
for(int i = 0; i < row_size; ++i) {
y[offset + i] = exp(x[offset + i] - max) / sum;
}
break;
case 2:
for(int i = 0; i < row_size; ++i) {
y[offset + i] += exp(x[offset + i] - max) / sum;
}
break;
}
}
"""
        # Each block processes a row and each thread in a block calculates an element of `dx`.
bwd_src = r"""
template<class DType>
__global__ void bwd(const DType* l, const DType* y, DType* dx, const int req) {
const int z = static_cast<int>(l[blockIdx.x]);
const int i = threadIdx.x + blockDim.x * blockIdx.x;
if(req == 1) {
dx[i] = threadIdx.x == z ? y[i] - 1 : y[i];
} else {
dx[i] += threadIdx.x == z ? y[i] - 1 : y[i];
}
}
"""
fwd_kernel_mod = mx.rtc.CudaModule(fwd_src, exports=["fwd<float>", "fwd<double>"])
bwd_kernel_mod = mx.rtc.CudaModule(bwd_src, exports=["bwd<float>", "bwd<double>"])
fwd_kernel_float_signature = "const float*, const float*, const int, const int"
self.fwd_float_kernel = fwd_kernel_mod.get_kernel("fwd<float>", fwd_kernel_float_signature)
bwd_kernel_float_signature = "const float*, const float*, float*, const int"
self.bwd_float_kernel = bwd_kernel_mod.get_kernel("bwd<float>", bwd_kernel_float_signature)
fwd_kernel_double_signature = "const double*, const double*, const int, const int"
self.fwd_double_kernel = fwd_kernel_mod.get_kernel("fwd<double>", fwd_kernel_double_signature)
bwd_kernel_double_signature = "const double*, const double*, double*, const int"
self.bwd_double_kernel = bwd_kernel_mod.get_kernel("bwd<double>", bwd_kernel_double_signature)
def forward(self, is_train, req, in_data, out_data, aux):
if req[0] == "null":
return
x = in_data[0] # input
y = out_data[0] # output
if y.dtype == np.float64:
# args, ctx, grid_shape, block_shape, shared_mem = 0
self.fwd_double_kernel.launch((x, y, x.shape[1], self._reqCode(req[0])), mx.gpu(0), (1, 1, 1), (x.shape[0], 1, 1))
else:
# args, ctx, grid_shape, block_shape, shared_mem = 0
self.fwd_float_kernel.launch((x, y, x.shape[1], self._reqCode(req[0])), mx.gpu(0), (1, 1, 1), (x.shape[0], 1, 1))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
if req[0] == "null":
return
l = in_data[1] # label
y = out_data[0] # output from the forward pass
dx = in_grad[0] # the storage for the gradient
if dx.dtype == np.float64:
# args, ctx, grid_shape, block_shape, shared_mem = 0
self.bwd_double_kernel.launch((l, y, dx, self._reqCode(req[0])), mx.gpu(0), (y.shape[0], 1, 1), (y.shape[1], 1, 1))
else:
# args, ctx, grid_shape, block_shape, shared_mem = 0
self.bwd_float_kernel.launch((l, y, dx, self._reqCode(req[0])), mx.gpu(0), (y.shape[0], 1, 1), (y.shape[1], 1, 1))
def _reqCode(self, req):
if(req == "write"):
return 1
elif(req == "add"):
return 2
elif(req == "null"):
return 0
else:
raise ValueError("Invalid value of `req`: {}".format(req))
@mx.operator.register("softmax")
class SoftmaxProp(mx.operator.CustomOpProp):
def __init__(self):
super(SoftmaxProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = (in_shape[0][0],)
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape], []
def infer_type(self, in_type):
return in_type, [in_type[0]], []
def create_operator(self, ctx, in_shapes, in_dtypes):
return Softmax()
# define mlp
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data=act1, name='fc2', num_hidden=64)
act2 = mx.symbol.Activation(data=fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data=act2, name='fc3', num_hidden=10)
#mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
mlp = mx.symbol.Custom(data=fc3, name='softmax', op_type='softmax')
# data
train, val = mx.test_utils.get_mnist_iterator(batch_size=100, input_shape=(784,))
# train
logging.basicConfig(level=logging.DEBUG)
context = mx.gpu(0)
mod = mx.mod.Module(mlp, context=context)
mod.fit(
train_data=train,
eval_data=val,
optimizer='sgd',
optimizer_params={'learning_rate':0.1, 'momentum': 0.9, 'wd': 0.00001},
num_epoch=10,
batch_end_callback=mx.callback.Speedometer(100, 100)
)
|
tgerla/ansible | refs/heads/devel | lib/ansible/plugins/lookup/url.py | 95 | # (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import urllib2
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.utils.unicode import to_unicode
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
validate_certs = kwargs.get('validate_certs', True)
ret = []
for term in terms:
self._display.vvvv("url lookup connecting to %s" % term)
try:
response = open_url(term, validate_certs=validate_certs)
except urllib2.URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
except urllib2.HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))
for line in response.read().splitlines():
ret.append(to_unicode(line))
return ret
|
angadpc/Alexa-Project- | refs/heads/master | requests/packages/urllib3/util/retry.py | 86 | from __future__ import absolute_import
import time
import logging
from collections import namedtuple
from itertools import takewhile
import email
import re
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
InvalidHeader,
)
from ..packages import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
"status", "redirect_location"])
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
Set to a ``False`` value to retry on any verb.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``method_whitelist``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
history=None, respect_retry_after_header=True):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
reversed(self.history))))
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
retry_date = time.mktime(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
""" Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
""" Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist.
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
""" Is this method/status code retryable? (Based on whitelists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (self.total and self.respect_retry_after_header and
has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
status = response.status
history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
history=history)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
|
rickerc/heat_audit | refs/heads/cis-havana-staging | heat/tests/test_autoscaling.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import copy
import mox
from testtools import skipIf
from oslo.config import cfg
from heat.common import template_format
from heat.common import exception
from heat.engine.resources import autoscaling as asc
from heat.engine.resources import loadbalancer
from heat.engine.resources import instance
from heat.engine.resources.neutron import loadbalancer as neutron_lb
from heat.engine import parser
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.resource import Metadata
from heat.openstack.common import timeutils
from heat.openstack.common.importutils import try_import
from heat.tests.common import HeatTestCase
from heat.tests import fakes
from heat.tests import utils
neutronclient = try_import('neutronclient.v2_0.client')
as_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AutoScaling Test",
"Parameters" : {
"ImageId": {"Type": "String"},
"KeyName": {"Type": "String"}
},
"Resources" : {
"WebServerGroup" : {
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "1",
"MaxSize" : "5",
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"WebServerScaleUpPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : { "Ref" : "WebServerGroup" },
"Cooldown" : "60",
"ScalingAdjustment" : "1"
}
},
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : { "Ref" : "WebServerGroup" },
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
},
"ElasticLoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : {"Ref": "ImageId"},
"InstanceType" : "bar",
}
}
}
}
'''
class AutoScalingTest(HeatTestCase):
dummy_instance_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
params = {'KeyName': 'test', 'ImageId': 'foo'}
def setUp(self):
super(AutoScalingTest, self).setUp()
utils.setup_dummy_db()
cfg.CONF.set_default('heat_waitcondition_server_url',
'http://server.test:8000/v1/waitcondition')
self.fc = fakes.FakeKeystoneClient()
def create_scaling_group(self, t, stack, resource_name):
# create the launch configuration resource
conf = stack.resources['LaunchConfig']
self.assertEqual(None, conf.validate())
scheduler.TaskRunner(conf.create)()
self.assertEqual((conf.CREATE, conf.COMPLETE), conf.state)
# create the group resource
rsrc = stack.resources[resource_name]
self.assertEqual(None, rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def create_scaling_policy(self, t, stack, resource_name):
rsrc = stack.resources[resource_name]
self.assertEqual(None, rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def _stub_validate(self):
self.m.StubOutWithMock(parser.Stack, 'validate')
parser.Stack.validate().MultipleTimes()
def _stub_create(self, num):
self._stub_validate()
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
cookie = object()
for x in range(num):
instance.Instance.handle_create().AndReturn(cookie)
instance.Instance.check_create_complete(cookie).AndReturn(False)
instance.Instance.check_create_complete(
cookie).MultipleTimes().AndReturn(True)
def _stub_lb_reload(self, num, unset=True, nochange=False):
expected_list = [self.dummy_instance_id] * num
if unset:
self.m.VerifyAll()
self.m.UnsetStubs()
if num > 0:
self.m.StubOutWithMock(instance.Instance, 'FnGetRefId')
instance.Instance.FnGetRefId().MultipleTimes().AndReturn(
self.dummy_instance_id)
self.m.StubOutWithMock(loadbalancer.LoadBalancer, 'handle_update')
if nochange:
loadbalancer.LoadBalancer.handle_update(
mox.IgnoreArg(), mox.IgnoreArg(), {}).AndReturn(None)
else:
loadbalancer.LoadBalancer.handle_update(
mox.IgnoreArg(), mox.IgnoreArg(),
{'Instances': expected_list}).AndReturn(None)
def _stub_meta_expected(self, now, data, nmeta=1):
# Stop time at now
self.m.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().MultipleTimes().AndReturn(now)
# Then set a stub to ensure the metadata update is as
# expected based on the timestamp and data
self.m.StubOutWithMock(Metadata, '__set__')
expected = {timeutils.strtime(now): data}
# Note for ScalingPolicy, we expect to get a metadata
# update for the policy and autoscaling group, so pass nmeta=2
for x in range(nmeta):
Metadata.__set__(mox.IgnoreArg(), expected).AndReturn(None)
def test_scaling_delete_empty(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['MinSize'] = '0'
properties['MaxSize'] = '0'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(0)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(None, rsrc.FnGetAtt("InstanceList"))
rsrc.delete()
self.m.VerifyAll()
def test_scaling_adjust_down_empty(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['MinSize'] = '1'
properties['MaxSize'] = '1'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Reduce the min size to 0, should complete without adjusting
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['MinSize'] = '0'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# trigger adjustment to reduce to 0, there should be no more instances
self._stub_lb_reload(0)
self._stub_meta_expected(now, 'ChangeInCapacity : -1')
self.m.ReplayAll()
rsrc.adjust(-1)
self.assertEqual([], rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_update_replace(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['AvailabilityZones'] = ['foo']
updater = scheduler.TaskRunner(rsrc.update, update_snippet)
self.assertRaises(resource.UpdateReplace, updater)
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_suspend(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(instance.Instance, 'handle_suspend')
self.m.StubOutWithMock(instance.Instance, 'check_suspend_complete')
inst_cookie = (object(), object(), object())
instance.Instance.handle_suspend().AndReturn(inst_cookie)
instance.Instance.check_suspend_complete(inst_cookie).AndReturn(False)
instance.Instance.check_suspend_complete(inst_cookie).AndReturn(True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual(rsrc.state, (rsrc.SUSPEND, rsrc.COMPLETE))
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_resume(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(instance.Instance, 'handle_resume')
self.m.StubOutWithMock(instance.Instance, 'check_resume_complete')
inst_cookie = (object(), object(), object())
instance.Instance.handle_resume().AndReturn(inst_cookie)
instance.Instance.check_resume_complete(inst_cookie).AndReturn(False)
instance.Instance.check_resume_complete(inst_cookie).AndReturn(True)
self.m.ReplayAll()
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
for i in rsrc.nested().resources.values():
i.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual(rsrc.state, (rsrc.RESUME, rsrc.COMPLETE))
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_suspend_multiple(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(instance.Instance, 'handle_suspend')
self.m.StubOutWithMock(instance.Instance, 'check_suspend_complete')
inst_cookie1 = ('foo1', 'foo2', 'foo3')
inst_cookie2 = ('bar1', 'bar2', 'bar3')
instance.Instance.handle_suspend().InAnyOrder().AndReturn(inst_cookie1)
instance.Instance.handle_suspend().InAnyOrder().AndReturn(inst_cookie2)
instance.Instance.check_suspend_complete(inst_cookie1).InAnyOrder(
).AndReturn(True)
instance.Instance.check_suspend_complete(inst_cookie2).InAnyOrder(
).AndReturn(True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual(rsrc.state, (rsrc.SUSPEND, rsrc.COMPLETE))
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_resume_multiple(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(instance.Instance, 'handle_resume')
self.m.StubOutWithMock(instance.Instance, 'check_resume_complete')
inst_cookie1 = ('foo1', 'foo2', 'foo3')
inst_cookie2 = ('bar1', 'bar2', 'bar3')
instance.Instance.handle_resume().InAnyOrder().AndReturn(inst_cookie1)
instance.Instance.handle_resume().InAnyOrder().AndReturn(inst_cookie2)
instance.Instance.check_resume_complete(inst_cookie1).InAnyOrder(
).AndReturn(True)
instance.Instance.check_resume_complete(inst_cookie2).InAnyOrder(
).AndReturn(True)
self.m.ReplayAll()
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
for i in rsrc.nested().resources.values():
i.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual(rsrc.state, (rsrc.RESUME, rsrc.COMPLETE))
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_suspend_fail(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(instance.Instance, 'handle_suspend')
self.m.StubOutWithMock(instance.Instance, 'check_suspend_complete')
instance.Instance.handle_suspend().AndRaise(Exception('oops'))
self.m.ReplayAll()
sus_task = scheduler.TaskRunner(rsrc.suspend)
self.assertRaises(exception.ResourceFailure, sus_task, ())
self.assertEqual(rsrc.state, (rsrc.SUSPEND, rsrc.FAILED))
self.assertEqual(rsrc.status_reason,
'Error: Resource suspend failed: Exception: oops')
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_resume_fail(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(instance.Instance, 'handle_resume')
self.m.StubOutWithMock(instance.Instance, 'check_resume_complete')
instance.Instance.handle_resume().AndRaise(Exception('oops'))
self.m.ReplayAll()
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
for i in rsrc.nested().resources.values():
i.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
sus_task = scheduler.TaskRunner(rsrc.resume)
self.assertRaises(exception.ResourceFailure, sus_task, ())
self.assertEqual(rsrc.state, (rsrc.RESUME, rsrc.FAILED))
self.assertEqual(rsrc.status_reason,
'Error: Resource resume failed: Exception: oops')
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_create_error(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
self._stub_validate()
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
instance.Instance.handle_create().AndRaise(Exception)
self.m.ReplayAll()
conf = stack.resources['LaunchConfig']
self.assertEqual(None, conf.validate())
scheduler.TaskRunner(conf.create)()
self.assertEqual((conf.CREATE, conf.COMPLETE), conf.state)
rsrc = stack.resources['WebServerGroup']
self.assertEqual(None, rsrc.validate())
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.assertEqual([], rsrc.get_instance_names())
self.m.VerifyAll()
def test_scaling_group_update_ok_maxsize(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['MinSize'] = '1'
properties['MaxSize'] = '3'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Reduce the max size to 2, should complete without adjusting
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['MaxSize'] = '2'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.assertEqual('2', rsrc.properties['MaxSize'])
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_update_ok_minsize(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['MinSize'] = '1'
properties['MaxSize'] = '3'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Increase min size to 2, should trigger an ExactCapacity adjust
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(1)
self.m.ReplayAll()
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['MinSize'] = '2'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual('2', rsrc.properties['MinSize'])
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_update_ok_desired(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['MinSize'] = '1'
properties['MaxSize'] = '3'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
        # Increase desired capacity to 2 via DesiredCapacity, should adjust
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(1)
self.m.ReplayAll()
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['DesiredCapacity'] = '2'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual('2', rsrc.properties['DesiredCapacity'])
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_update_ok_desired_remove(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# Remove DesiredCapacity from the updated template, which should
        # have no effect since it's an optional parameter
update_snippet = copy.deepcopy(rsrc.parsed_template())
del(update_snippet['Properties']['DesiredCapacity'])
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.assertEqual(None, rsrc.properties['DesiredCapacity'])
rsrc.delete()
self.m.VerifyAll()
def test_scaling_group_update_ok_cooldown(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['Cooldown'] = '60'
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['Cooldown'] = '61'
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual('61', rsrc.properties['Cooldown'])
rsrc.delete()
self.m.VerifyAll()
def test_lb_reload_static_resolve(self):
t = template_format.parse(as_template)
properties = t['Resources']['ElasticLoadBalancer']['Properties']
properties['AvailabilityZones'] = {'Fn::GetAZs': ''}
self.m.StubOutWithMock(parser.Stack, 'get_availability_zones')
parser.Stack.get_availability_zones().MultipleTimes().AndReturn(
['abc', 'xyz'])
# Check that the Fn::GetAZs is correctly resolved
expected = {u'Type': u'AWS::ElasticLoadBalancing::LoadBalancer',
u'Properties': {'Instances': ['WebServerGroup-0'],
u'Listeners': [{u'InstancePort': u'80',
u'LoadBalancerPort': u'80',
u'Protocol': u'HTTP'}],
u'AvailabilityZones': ['abc', 'xyz']}}
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
stack = utils.parse_stack(t, params=self.params)
lb = stack['ElasticLoadBalancer']
self.m.StubOutWithMock(lb, 'handle_update')
lb.handle_update(expected,
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(utils.PhysName(stack.name, rsrc.name),
rsrc.FnGetRefId())
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
update_snippet = copy.deepcopy(rsrc.parsed_template())
update_snippet['Properties']['Cooldown'] = '61'
scheduler.TaskRunner(rsrc.update, update_snippet)()
rsrc.delete()
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
def test_lb_reload_members(self):
t = template_format.parse(as_template)
t['Resources']['ElasticLoadBalancer'] = {
'Type': 'OS::Neutron::LoadBalancer',
'Properties': {
'protocol_port': 8080,
'pool_id': 'pool123'
}
}
expected = {
'Type': 'OS::Neutron::LoadBalancer',
'Properties': {
'protocol_port': 8080,
'pool_id': 'pool123',
'members': [u'WebServerGroup-0']}
}
self.m.StubOutWithMock(neutron_lb.LoadBalancer, 'handle_update')
neutron_lb.LoadBalancer.handle_update(expected,
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
stack = utils.parse_stack(t, params=self.params)
self.create_scaling_group(t, stack, 'WebServerGroup')
self.m.VerifyAll()
@skipIf(neutronclient is None, 'neutronclient unavailable')
def test_lb_reload_invalid_resource(self):
t = template_format.parse(as_template)
t['Resources']['ElasticLoadBalancer'] = {
'Type': 'AWS::EC2::Volume',
'Properties': {
'AvailabilityZone': 'nova'
}
}
self._stub_create(1)
self.m.ReplayAll()
stack = utils.parse_stack(t, params=self.params)
error = self.assertRaises(
exception.ResourceFailure,
self.create_scaling_group, t, stack, 'WebServerGroup')
self.assertEqual(
"Error: Unsupported resource 'ElasticLoadBalancer' in "
"LoadBalancerNames",
str(error))
self.m.VerifyAll()
def test_scaling_group_adjust(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# start with 3
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '3'
self._stub_lb_reload(3)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 3')
self._stub_create(3)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
# reduce to 1
self._stub_lb_reload(1)
self._stub_validate()
self._stub_meta_expected(now, 'ChangeInCapacity : -2')
self.m.ReplayAll()
rsrc.adjust(-2)
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# raise to 3
self._stub_lb_reload(3)
self._stub_meta_expected(now, 'ChangeInCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc.adjust(2)
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
# set to 2
self._stub_lb_reload(2)
self._stub_validate()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
rsrc.adjust(2, 'ExactCapacity')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
self.m.VerifyAll()
def test_scaling_group_scale_up_failure(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.m.VerifyAll()
self.m.UnsetStubs()
        # Scale up by 1 instance with resource failure
self.m.StubOutWithMock(instance.Instance, 'handle_create')
instance.Instance.handle_create().AndRaise(exception.Error())
self._stub_lb_reload(1, unset=False, nochange=True)
self._stub_validate()
self.m.ReplayAll()
self.assertRaises(exception.Error, rsrc.adjust, 1)
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
self.m.VerifyAll()
def test_scaling_group_truncate_adjustment(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group, 2 instances
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# raise above the max
self._stub_lb_reload(5)
self._stub_meta_expected(now, 'ChangeInCapacity : 4')
self._stub_create(3)
self.m.ReplayAll()
rsrc.adjust(4)
self._verify_instance_names(rsrc, 5)
# lower below the min
self._stub_lb_reload(1)
self._stub_validate()
self._stub_meta_expected(now, 'ChangeInCapacity : -5')
self.m.ReplayAll()
rsrc.adjust(-5)
self._verify_instance_names(rsrc, 1)
# no change
rsrc.adjust(0)
self._verify_instance_names(rsrc, 1)
rsrc.delete()
self.m.VerifyAll()
def _verify_instance_names(self, group, bound):
instance_names = group.get_instance_names()
self.assertEqual(len(instance_names), bound)
for i in xrange(bound):
self.assertEqual('WebServerGroup-%d' % i, instance_names[i])
def _do_test_scaling_group_percent(self, decrease, lowest,
increase, create, highest):
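        # Helper for the PercentChangeInCapacity tests below:
        #   decrease - percentage passed to the first adjust() call
        #   lowest   - expected instance count after scaling down
        #   increase - percentage passed to the second adjust() call
        #   create   - number of new instances expected to be launched
        #   highest  - expected instance count after scaling up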
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group, 2 instances
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(2)
self._stub_create(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self._verify_instance_names(rsrc, 2)
# reduce by decrease %
self._stub_lb_reload(lowest)
adjust = 'PercentChangeInCapacity : %d' % decrease
self._stub_meta_expected(now, adjust)
self._stub_validate()
self.m.ReplayAll()
rsrc.adjust(decrease, 'PercentChangeInCapacity')
self._verify_instance_names(rsrc, lowest)
# raise by increase %
self._stub_lb_reload(highest)
adjust = 'PercentChangeInCapacity : %d' % increase
self._stub_meta_expected(now, adjust)
self._stub_create(create)
self.m.ReplayAll()
rsrc.adjust(increase, 'PercentChangeInCapacity')
self._verify_instance_names(rsrc, highest)
rsrc.delete()
def test_scaling_group_percent(self):
self._do_test_scaling_group_percent(-50, 1, 200, 2, 3)
def test_scaling_group_percent_round_up(self):
self._do_test_scaling_group_percent(-33, 1, 33, 1, 2)
def test_scaling_group_percent_round_down(self):
self._do_test_scaling_group_percent(-66, 1, 225, 2, 3)
def test_scaling_group_cooldown_toosoon(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group, 2 instances, Cooldown 60s
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
properties['Cooldown'] = '60'
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# reduce by 50%
self._stub_lb_reload(1)
self._stub_validate()
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'],
rsrc.get_instance_names())
# Now move time on 10 seconds - Cooldown in template is 60
# so this should not update the policy metadata, and the
# scaling group instances should be unchanged
        # Note we have to stub Metadata.__get__ since the group's metadata
        # isn't stored in the DB (because the stack hasn't really been created)
previous_meta = {timeutils.strtime(now):
'PercentChangeInCapacity : -50'}
self.m.VerifyAll()
self.m.UnsetStubs()
now = now + datetime.timedelta(seconds=10)
self.m.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().MultipleTimes().AndReturn(now)
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), rsrc, mox.IgnoreArg()
).AndReturn(previous_meta)
self.m.ReplayAll()
# raise by 200%, too soon for Cooldown so there should be no change
rsrc.adjust(200, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
rsrc.delete()
def test_scaling_group_cooldown_ok(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group, 2 instances, Cooldown 60s
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
properties['Cooldown'] = '60'
self._stub_lb_reload(2)
self._stub_create(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# reduce by 50%
self._stub_lb_reload(1)
self._stub_validate()
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'],
rsrc.get_instance_names())
# Now move time on 61 seconds - Cooldown in template is 60
# so this should update the policy metadata, and the
        # scaling group instances should be updated
previous_meta = {timeutils.strtime(now):
'PercentChangeInCapacity : -50'}
self.m.VerifyAll()
self.m.UnsetStubs()
now = now + datetime.timedelta(seconds=61)
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), rsrc, mox.IgnoreArg()
).AndReturn(previous_meta)
#stub for the metadata accesses while creating the two instances
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
# raise by 200%, should work
self._stub_lb_reload(3, unset=False)
self._stub_create(2)
self._stub_meta_expected(now, 'PercentChangeInCapacity : 200')
self.m.ReplayAll()
rsrc.adjust(200, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
rsrc.delete()
def test_scaling_group_cooldown_zero(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group, 2 instances, Cooldown 0
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
properties['Cooldown'] = '0'
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# reduce by 50%
self._stub_lb_reload(1)
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
self._stub_validate()
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0'],
rsrc.get_instance_names())
# Don't move time, since cooldown is zero, it should work
previous_meta = {timeutils.strtime(now):
'PercentChangeInCapacity : -50'}
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), rsrc, mox.IgnoreArg()
).AndReturn(previous_meta)
#stub for the metadata accesses while creating the two instances
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
# raise by 200%, should work
self._stub_lb_reload(3, unset=False)
self._stub_meta_expected(now, 'PercentChangeInCapacity : 200')
self._stub_create(2)
self.m.ReplayAll()
rsrc.adjust(200, 'PercentChangeInCapacity')
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_policy_up(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Scale up one
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
alarm_url = up_policy.FnGetAtt('AlarmUrl')
self.assertNotEqual(None, alarm_url)
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_policy_down(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group, 2 instances
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 2')
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# Scale down one
self._stub_lb_reload(1)
self._stub_validate()
self._stub_meta_expected(now, 'ChangeInCapacity : -1', 2)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
down_policy = self.create_scaling_policy(t, stack,
'WebServerScaleDownPolicy')
down_policy.signal()
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_policy_cooldown_toosoon(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Scale up one
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# Now move time on 10 seconds - Cooldown in template is 60
# so this should not update the policy metadata, and the
# scaling group instances should be unchanged
# Note we have to stub Metadata.__get__ since up_policy isn't
# stored in the DB (because the stack hasn't really been created)
previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
self.m.VerifyAll()
self.m.UnsetStubs()
now = now + datetime.timedelta(seconds=10)
self.m.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().MultipleTimes().AndReturn(now)
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_policy_cooldown_ok(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Scale up one
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# Now move time on 61 seconds - Cooldown in template is 60
# so this should trigger a scale-up
previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
Metadata.__get__(mox.IgnoreArg(), rsrc, mox.IgnoreArg()
).AndReturn(previous_meta)
#stub for the metadata accesses while creating the additional instance
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
now = now + datetime.timedelta(seconds=61)
self._stub_lb_reload(3, unset=False)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_policy_cooldown_zero(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Create the scaling policy (with Cooldown=0) and scale up one
properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
properties['Cooldown'] = '0'
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# Now trigger another scale-up without changing time, should work
previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
Metadata.__get__(mox.IgnoreArg(), rsrc, mox.IgnoreArg()
).AndReturn(previous_meta)
#stub for the metadata accesses while creating the additional instance
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self._stub_lb_reload(3, unset=False)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_policy_cooldown_none(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
        # Create the scaling policy with no Cooldown property; it should
        # behave the same as when Cooldown==0
properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
del(properties['Cooldown'])
self._stub_lb_reload(2)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# Now trigger another scale-up without changing time, should work
previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
Metadata.__get__(mox.IgnoreArg(), rsrc, mox.IgnoreArg()
).AndReturn(previous_meta)
        # stub for the metadata accesses while creating the additional instance
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self._stub_lb_reload(3, unset=False)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.ReplayAll()
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'],
rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_scaling_policy_update(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=self.params)
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self.assertEqual(['WebServerGroup-0'], rsrc.get_instance_names())
# Create initial scaling policy
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
# Scale up one
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.ReplayAll()
# Trigger alarm
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1'],
rsrc.get_instance_names())
# Update scaling policy
update_snippet = copy.deepcopy(up_policy.parsed_template())
update_snippet['Properties']['ScalingAdjustment'] = '2'
scheduler.TaskRunner(up_policy.update, update_snippet)()
self.assertEqual('2',
up_policy.properties['ScalingAdjustment'])
# Now move time on 61 seconds - Cooldown in template is 60
# so this should trigger a scale-up
previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
Metadata.__get__(mox.IgnoreArg(), rsrc, mox.IgnoreArg()
).AndReturn(previous_meta)
#stub for the metadata accesses while creating the two instances
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
Metadata.__get__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
now = now + datetime.timedelta(seconds=61)
self._stub_lb_reload(4, unset=False)
self._stub_meta_expected(now, 'ChangeInCapacity : 2', 2)
self._stub_create(2)
self.m.ReplayAll()
# Trigger alarm
up_policy.signal()
self.assertEqual(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2', 'WebServerGroup-3'],
rsrc.get_instance_names())
rsrc.delete()
self.m.VerifyAll()
def test_vpc_zone_identifier(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['VPCZoneIdentifier'] = ['xxxx']
stack = utils.parse_stack(t, params=self.params)
self._stub_lb_reload(1)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
instances = rsrc.get_instances()
self.assertEqual(1, len(instances))
self.assertEqual('xxxx', instances[0].properties['SubnetId'])
rsrc.delete()
self.m.VerifyAll()
def test_invalid_vpc_zone_identifier(self):
t = template_format.parse(as_template)
properties = t['Resources']['WebServerGroup']['Properties']
properties['VPCZoneIdentifier'] = ['xxxx', 'yyyy']
stack = utils.parse_stack(t, params=self.params)
self.assertRaises(exception.NotSupported, self.create_scaling_group, t,
stack, 'WebServerGroup')
|
spennihana/h2o-3 | refs/heads/master | h2o-py/tests/testdir_munging/pyunit_entropy.py | 7 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def entropy_check():
for parse_type in ('string', 'enum'):
frame = h2o.H2OFrame.from_python(["redrum"], column_types=[parse_type])
g = frame.entropy()
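        # "redrum" is six characters with 'r' appearing twice, so the expected
        # Shannon entropy is (2/6)*log2(3) + (4/6)*log2(6) ~= 2.2516 bits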
assert abs(g[0,0] - 2.25162916739) < 1e-6
#test NA values
string = h2o.H2OFrame.from_python([["nothing"],["NA"]], column_types=['string'], na_strings=["NA"])
enum = h2o.H2OFrame.from_python([["nothing"],["NA"]], column_types=['enum'], na_strings=["NA"])
assert ((string.entropy().isna()) == h2o.H2OFrame([[0],[1]])).all()
assert ((enum.entropy().isna()) == h2o.H2OFrame([[0],[1]])).all()
# #test empty strings
string = h2o.H2OFrame.from_python([''], column_types=['string'])
enum = h2o.H2OFrame.from_python([''], column_types=['enum'])
assert string.entropy()[0,0] == 0
assert enum.entropy()[0,0] == 0
if __name__ == "__main__":
pyunit_utils.standalone_test(entropy_check)
else:
entropy_check()
|
40223144/2015cdafinal | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/_socket.py | 742 | """Implementation module for socket operations.
See the socket module for documentation."""
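# Note: in this Brython port the functions and classes below are stubs that
# mirror the CPython _socket API surface; they do not perform real network
# I/O in the browser.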
AF_APPLETALK = 16
AF_DECnet = 12
AF_INET = 2
AF_INET6 = 23
AF_IPX = 6
AF_IRDA = 26
AF_SNA = 11
AF_UNSPEC = 0
AI_ADDRCONFIG = 1024
AI_ALL = 256
AI_CANONNAME = 2
AI_NUMERICHOST = 4
AI_NUMERICSERV = 8
AI_PASSIVE = 1
AI_V4MAPPED = 2048
CAPI = '<capsule object "_socket.CAPI" at 0x00BC4F38>'
EAI_AGAIN = 11002
EAI_BADFLAGS = 10022
EAI_FAIL = 11003
EAI_FAMILY = 10047
EAI_MEMORY = 8
EAI_NODATA = 11001
EAI_NONAME = 11001
EAI_SERVICE = 10109
EAI_SOCKTYPE = 10044
INADDR_ALLHOSTS_GROUP = -536870911
INADDR_ANY = 0
INADDR_BROADCAST = -1
INADDR_LOOPBACK = 2130706433
INADDR_MAX_LOCAL_GROUP = -536870657
INADDR_NONE = -1
INADDR_UNSPEC_GROUP = -536870912
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPROTO_ICMP = 1
IPPROTO_IP = 0
IPPROTO_RAW = 255
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPV6_CHECKSUM = 26
IPV6_DONTFRAG = 14
IPV6_HOPLIMIT = 21
IPV6_HOPOPTS = 1
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_LOOP = 11
IPV6_PKTINFO = 19
IPV6_RECVRTHDR = 38
IPV6_RECVTCLASS = 40
IPV6_RTHDR = 32
IPV6_TCLASS = 39
IPV6_UNICAST_HOPS = 4
IPV6_V6ONLY = 27
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_HDRINCL = 2
IP_MULTICAST_IF = 9
IP_MULTICAST_LOOP = 11
IP_MULTICAST_TTL = 10
IP_OPTIONS = 1
IP_RECVDSTADDR = 25
IP_TOS = 3
IP_TTL = 4
MSG_BCAST = 1024
MSG_CTRUNC = 512
MSG_DONTROUTE = 4
MSG_MCAST = 2048
MSG_OOB = 1
MSG_PEEK = 2
MSG_TRUNC = 256
NI_DGRAM = 16
NI_MAXHOST = 1025
NI_MAXSERV = 32
NI_NAMEREQD = 4
NI_NOFQDN = 1
NI_NUMERICHOST = 2
NI_NUMERICSERV = 8
RCVALL_MAX = 3
RCVALL_OFF = 0
RCVALL_ON = 1
RCVALL_SOCKETLEVELONLY = 2
SHUT_RD = 0
SHUT_RDWR = 2
SHUT_WR = 1
SIO_KEEPALIVE_VALS = 2550136836
SIO_RCVALL = 2550136833
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_STREAM = 1
SOL_IP = 0
SOL_SOCKET = 65535
SOL_TCP = 6
SOL_UDP = 17
SOMAXCONN = 2147483647
SO_ACCEPTCONN = 2
SO_BROADCAST = 32
SO_DEBUG = 1
SO_DONTROUTE = 16
SO_ERROR = 4103
SO_EXCLUSIVEADDRUSE = -5
SO_KEEPALIVE = 8
SO_LINGER = 128
SO_OOBINLINE = 256
SO_RCVBUF = 4098
SO_RCVLOWAT = 4100
SO_RCVTIMEO = 4102
SO_REUSEADDR = 4
SO_SNDBUF = 4097
SO_SNDLOWAT = 4099
SO_SNDTIMEO = 4101
SO_TYPE = 4104
SO_USELOOPBACK = 64
class SocketType:
pass
TCP_MAXSEG = 4
TCP_NODELAY = 1
__loader__ = '<_frozen_importlib.ExtensionFileLoader object at 0x00CA2D90>'
def dup(*args,**kw):
"""dup(integer) -> integer
Duplicate an integer socket file descriptor. This is like os.dup(), but for
sockets; on some platforms os.dup() won't work for socket file descriptors."""
pass
class error:
pass
class gaierror:
pass
def getaddrinfo(*args,**kw):
"""getaddrinfo(host, port [, family, socktype, proto, flags]) -> list of (family, socktype, proto, canonname, sockaddr)
Resolve host and port into addrinfo struct."""
pass
def getdefaulttimeout(*args,**kw):
"""getdefaulttimeout() -> timeout
Returns the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
def gethostbyaddr(*args,**kw):
"""gethostbyaddr(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostbyname(*args,**kw):
"""gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host."""
pass
def gethostbyname_ex(*args,**kw):
"""gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostname(*args,**kw):
"""gethostname() -> string
Return the current host name."""
pass
def getnameinfo(*args,**kw):
"""getnameinfo(sockaddr, flags) --> (host, port)
Get host and port for a sockaddr."""
pass
def getprotobyname(*args,**kw):
"""getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)"""
pass
def getservbyname(*args,**kw):
"""getservbyname(servicename[, protocolname]) -> integer
Return a port number from a service name and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
def getservbyport(*args,**kw):
"""getservbyport(port[, protocolname]) -> string
Return the service name from a port number and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
has_ipv6 = True
class herror:
pass
def htonl(*args,**kw):
"""htonl(integer) -> integer
Convert a 32-bit integer from host to network byte order."""
pass
def htons(*args,**kw):
"""htons(integer) -> integer
Convert a 16-bit integer from host to network byte order."""
pass
def inet_aton(*args,**kw):
"""inet_aton(string) -> bytes giving packed 32-bit IP representation
Convert an IP address in string format (123.45.67.89) to the 32-bit packed
binary format used in low-level network functions."""
pass
def inet_ntoa(*args,**kw):
"""inet_ntoa(packed_ip) -> ip_address_string
Convert an IP address from 32-bit packed binary format to string format"""
pass
def ntohl(*args,**kw):
"""ntohl(integer) -> integer
Convert a 32-bit integer from network to host byte order."""
pass
def ntohs(*args,**kw):
"""ntohs(integer) -> integer
Convert a 16-bit integer from network to host byte order."""
pass
def setdefaulttimeout(*args,**kw):
"""setdefaulttimeout(timeout)
Set the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
class socket:
def __init__(self,*args,**kw):
pass
def bind(self,*args,**kw):
pass
def close(self):
pass
class timeout:
pass
|
timm/timmnix | refs/heads/master | pypy3-v5.5.0-linux64/lib-python/3/ctypes/test/test_arrays.py | 1 | import unittest
from ctypes import *
from test.support import impl_detail
formats = "bBhHiIlLqQfd"
# c_longdouble commented out for PyPy, look at the comment in test_longdouble
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double #, c_longdouble
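# NOTE: the format string above is immediately shadowed by this tuple of
# ctypes types, which is what the tests below actually iterate over.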
class ArrayTestCase(unittest.TestCase):
@impl_detail('long double not supported by PyPy', pypy=False)
def test_longdouble(self):
"""
This test is empty. It's just here to remind that we commented out
c_longdouble in "formats". If pypy will ever supports c_longdouble, we
should kill this test and uncomment c_longdouble inside formats.
"""
def test_simple(self):
# create classes holding simple numeric types, and check
# various properties.
init = list(range(15, 25))
for fmt in formats:
alen = len(init)
int_array = ARRAY(fmt, alen)
ia = int_array(*init)
# length of instance ok?
self.assertEqual(len(ia), alen)
# slot values ok?
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, init)
# change the items
from operator import setitem
new_values = list(range(42, 42+alen))
[setitem(ia, n, new_values[n]) for n in range(alen)]
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, new_values)
# are the items initialized to 0?
ia = int_array()
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, [0] * len(init))
# Too many initializers should be caught
self.assertRaises(IndexError, int_array, *range(alen*2))
CharArray = ARRAY(c_char, 3)
ca = CharArray(b"a", b"b", b"c")
# Should this work? It doesn't:
# CharArray("abc")
self.assertRaises(TypeError, CharArray, "abc")
self.assertEqual(ca[0], b"a")
self.assertEqual(ca[1], b"b")
self.assertEqual(ca[2], b"c")
self.assertEqual(ca[-3], b"a")
self.assertEqual(ca[-2], b"b")
self.assertEqual(ca[-1], b"c")
self.assertEqual(len(ca), 3)
# cannot delete items
from operator import delitem
self.assertRaises(TypeError, delitem, ca, 0)
def test_numeric_arrays(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4))
self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
p = create_string_buffer(b"foo")
sz = (c_char * 3).from_address(addressof(p))
self.assertEqual(sz[:], b"foo")
self.assertEqual(sz[::], b"foo")
self.assertEqual(sz[::-1], b"oof")
self.assertEqual(sz[::3], b"f")
self.assertEqual(sz[1:4:2], b"o")
self.assertEqual(sz.value, b"foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def test_from_addressW(self):
p = create_unicode_buffer("foo")
sz = (c_wchar * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
# in a WeakValueDictionary. Make sure the array type is
# removed from the cache when the itemtype goes away. This
# test will not fail, but will show a leak in the testsuite.
# Create a new type:
class my_int(c_int):
pass
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
self.assertIs(t1, t2)
def test_subclass(self):
class T(Array):
_type_ = c_int
_length_ = 13
class U(T):
pass
class V(U):
pass
class W(V):
pass
class X(T):
_type_ = c_short
class Y(T):
_length_ = 187
for c in [T, U, V, W]:
self.assertEqual(c._type_, c_int)
self.assertEqual(c._length_, 13)
self.assertEqual(c()._type_, c_int)
self.assertEqual(c()._length_, 13)
self.assertEqual(X._type_, c_short)
self.assertEqual(X._length_, 13)
self.assertEqual(X()._type_, c_short)
self.assertEqual(X()._length_, 13)
self.assertEqual(Y._type_, c_int)
self.assertEqual(Y._length_, 187)
self.assertEqual(Y()._type_, c_int)
self.assertEqual(Y()._length_, 187)
def test_bad_subclass(self):
import sys
with self.assertRaises(AttributeError):
class T(Array):
pass
with self.assertRaises(AttributeError):
class T(Array):
_type_ = c_int
with self.assertRaises(AttributeError):
class T(Array):
_length_ = 13
with self.assertRaises(OverflowError):
class T(Array):
_type_ = c_int
_length_ = sys.maxsize * 2
with self.assertRaises((AttributeError, TypeError)):
class T(Array):
_type_ = c_int
_length_ = 1.87
if __name__ == '__main__':
unittest.main()
|
jounex/hue | refs/heads/master | desktop/libs/libzookeeper/setup.py | 30 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "libzookeeper",
version = VERSION,
url = 'http://github.com/cloudera/hue',
description = "ZooKeeper Libraries",
packages = find_packages('src'),
package_dir = {'': 'src' },
install_requires = ['setuptools', 'desktop'],
# Even libraries need to be registered as desktop_apps,
# if they have configuration, like this one.
entry_points = { 'desktop.sdk.lib': 'libzookeeper=libzookeeper' },
)
|
vlinhd11/vlinhd11-android-scripting | refs/heads/master | python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/cron.py | 129 | """
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import cgi
import re
import datetime
import pickle
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import memcache
APPLICATION_PORT = '8080'
CRON_PORT = '8081'
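# Task URLs containing APPLICATION_PORT are rewritten to CRON_PORT in
# Cron.__init__ below before being fetched.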
class _AppEngineUtilities_Cron(db.Model):
"""
Model for the tasks in the datastore. This contains the scheduling and
url information, as well as a field that sets the next time the instance
should run.
"""
cron_entry = db.StringProperty()
next_run = db.DateTimeProperty()
cron_compiled = db.BlobProperty()
url = db.LinkProperty()
class Cron(object):
"""
Cron is a scheduling utility built for appengine, modeled after
crontab for unix systems. While true scheduled tasks are not
possible within the Appengine environment currently, this
    is an attempt to provide a request-based alternative. You
    configure the tasks in an included interface, and then import
    the class on any request you want capable of running tasks.
    On each request where Cron is imported, the list of tasks
    that need to be run will be pulled and run. A task is a url
    within your application. It's important to make sure that these
    requests run quickly, or you could risk timing out the actual
    request.
See the documentation for more information on configuring
your application to support Cron and setting up tasks.
"""
def __init__(self):
# Check if any tasks need to be run
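        # Tasks whose next_run time has passed are fetched, fired via
        # urlfetch, and rescheduled; memcache.add() acts as a best-effort
        # lock so overlapping requests don't run the same task twice.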
query = _AppEngineUtilities_Cron.all()
query.filter('next_run <= ', datetime.datetime.now())
results = query.fetch(1000)
if len(results) > 0:
one_second = datetime.timedelta(seconds = 1)
before = datetime.datetime.now()
for r in results:
if re.search(':' + APPLICATION_PORT, r.url):
r.url = re.sub(':' + APPLICATION_PORT, ':' + CRON_PORT, r.url)
#result = urlfetch.fetch(r.url)
diff = datetime.datetime.now() - before
if int(diff.seconds) < 1:
if memcache.add(str(r.key), "running"):
result = urlfetch.fetch(r.url)
r.next_run = self._get_next_run(pickle.loads(r.cron_compiled))
r.put()
memcache.delete(str(r.key))
else:
break
def add_cron(self, cron_string):
cron = cron_string.split(" ")
        if len(cron) != 6:
raise ValueError, 'Invalid cron string. Format: * * * * * url'
cron = {
'min': cron[0],
'hour': cron[1],
'day': cron[2],
'mon': cron[3],
'dow': cron[4],
'url': cron[5],
}
cron_compiled = self._validate_cron(cron)
next_run = self._get_next_run(cron_compiled)
cron_entry = _AppEngineUtilities_Cron()
cron_entry.cron_entry = cron_string
cron_entry.next_run = next_run
cron_entry.cron_compiled = pickle.dumps(cron_compiled)
cron_entry.url = cron["url"]
cron_entry.put()
def _validate_cron(self, cron):
"""
        Parse the field to determine whether it is an integer or a list,
        converting strings to integers where necessary. Raises a ValueError
        if passed bad values.
"""
parsers = {
'dow': self._validate_dow,
'mon': self._validate_mon,
'day': self._validate_day,
'hour': self._validate_hour,
'min': self._validate_min,
            'url': self._validate_url,
}
for el in cron:
parse = parsers[el]
cron[el] = parse(cron[el])
return cron
def _validate_type(self, v, t):
"""
Validates that the number (v) passed is in the correct range for the
type (t). Raise ValueError, if validation fails.
Valid ranges:
day of week = 0-7
month = 1-12
day = 1-31
hour = 0-23
minute = 0-59
All can * which will then return the range for that entire type.
"""
if t == "dow":
if v >= 0 and v <= 7:
return [v]
elif v == "*":
return "*"
else:
raise ValueError, "Invalid day of week."
elif t == "mon":
if v >= 1 and v <= 12:
return [v]
elif v == "*":
                return range(1, 13)
else:
raise ValueError, "Invalid month."
elif t == "day":
if v >= 1 and v <= 31:
return [v]
elif v == "*":
                return range(1, 32)
else:
raise ValueError, "Invalid day."
elif t == "hour":
if v >= 0 and v <= 23:
return [v]
elif v == "*":
                return range(0, 24)
else:
raise ValueError, "Invalid hour."
elif t == "min":
if v >= 0 and v <= 59:
return [v]
elif v == "*":
                return range(0, 60)
else:
raise ValueError, "Invalid minute."
def _validate_list(self, l, t):
"""
        Validates a crontab list. Lists are numerical values separated
by a comma with no spaces. Ex: 0,5,10,15
Arguments:
l: comma seperated list of numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = l.split(",")
return_list = []
# we have a list, validate all of them
for e in elements:
if "-" in e:
return_list.extend(self._validate_range(e, t))
else:
try:
v = int(e)
self._validate_type(v, t)
return_list.append(v)
except:
raise ValueError, "Names are not allowed in lists."
# return a list of integers
return return_list
def _validate_range(self, r, t):
"""
        Validates a crontab range. Ranges are 2 numerical values separated
by a dash with no spaces. Ex: 0-10
Arguments:
        r: dash-separated list of 2 numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = r.split('-')
# a range should be 2 elements
        if len(elements) != 2:
raise ValueError, "Invalid range passed: " + str(r)
# validate the minimum and maximum are valid for the type
for e in elements:
self._validate_type(int(e), t)
# return a list of the numbers in the range.
# +1 makes sure the end point is included in the return value
return range(int(elements[0]), int(elements[1]) + 1)
def _validate_step(self, s, t):
"""
        Validates a crontab step. Steps can be based on a range (1-10/2)
        or step through all valid values (*/2). When parsing times, always
        check for a step before checking for ranges, because this handles
        steps of ranges and returns the final list. Steps of lists are not
        supported.
Arguments:
        s: slash-separated string
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = s.split('/')
# a range should be 2 elements
        if len(elements) != 2:
raise ValueError, "Invalid step passed: " + str(s)
try:
step = int(elements[1])
except:
raise ValueError, "Invalid step provided " + str(s)
r_list = []
# if the first element is *, use all valid numbers
        if elements[0] == "*" or elements[0] == "":
r_list.extend(self._validate_type('*', t))
# check and see if there is a list of ranges
elif "," in elements[0]:
ranges = elements[0].split(",")
for r in ranges:
# if it's a range, we need to manage that
if "-" in r:
r_list.extend(self._validate_range(r, t))
else:
try:
                        r_list.append(int(r))
except:
raise ValueError, "Invalid step provided " + str(s)
elif "-" in elements[0]:
r_list.extend(self._validate_range(elements[0], t))
return range(r_list[0], r_list[-1] + 1, step)
def _validate_dow(self, dow):
"""
"""
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
        if dow == "*":
return dow
days = {
'mon': 1,
'tue': 2,
'wed': 3,
'thu': 4,
'fri': 5,
'sat': 6,
# per man crontab sunday can be 0 or 7.
'sun': [0, 7],
}
        if dow in days:
            dow = days[dow]
            # 'sun' maps to [0, 7]; avoid wrapping that list in another list
            return dow if isinstance(dow, list) else [dow]
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
        elif dow == "*":
return dow
elif "/" in dow:
return(self._validate_step(dow, "dow"))
elif "," in dow:
return(self._validate_list(dow, "dow"))
elif "-" in dow:
return(self._validate_range(dow, "dow"))
else:
valid_numbers = range(0, 8)
if not int(dow) in valid_numbers:
raise ValueError, "Invalid day of week " + str(dow)
else:
return [int(dow)]
def _validate_mon(self, mon):
months = {
'jan': 1,
'feb': 2,
'mar': 3,
'apr': 4,
'may': 5,
'jun': 6,
'jul': 7,
'aug': 8,
'sep': 9,
'oct': 10,
'nov': 11,
'dec': 12,
}
if mon in months:
mon = months[mon]
return [mon]
        elif mon == "*":
return range(1, 13)
elif "/" in mon:
return(self._validate_step(mon, "mon"))
elif "," in mon:
return(self._validate_list(mon, "mon"))
elif "-" in mon:
return(self._validate_range(mon, "mon"))
else:
valid_numbers = range(1, 13)
if not int(mon) in valid_numbers:
raise ValueError, "Invalid month " + str(mon)
else:
return [int(mon)]
def _validate_day(self, day):
if day is "*":
return range(1, 32)
elif "/" in day:
return(self._validate_step(day, "day"))
elif "," in day:
return(self._validate_list(day, "day"))
elif "-" in day:
return(self._validate_range(day, "day"))
else:
valid_numbers = range(1, 32)
if not int(day) in valid_numbers:
raise ValueError, "Invalid day " + str(day)
else:
return [int(day)]
def _validate_hour(self, hour):
if hour is "*":
return range(0, 24)
elif "/" in hour:
return(self._validate_step(hour, "hour"))
elif "," in hour:
return(self._validate_list(hour, "hour"))
elif "-" in hour:
return(self._validate_range(hour, "hour"))
else:
valid_numbers = range(0, 24)
if not int(hour) in valid_numbers:
raise ValueError, "Invalid hour " + str(hour)
else:
return [int(hour)]
def _validate_min(self, min):
if min is "*":
return range(0, 60)
elif "/" in min:
return(self._validate_step(min, "min"))
elif "," in min:
return(self._validate_list(min, "min"))
elif "-" in min:
return(self._validate_range(min, "min"))
else:
valid_numbers = range(0, 60)
if not int(min) in valid_numbers:
raise ValueError, "Invalid min " + str(min)
else:
return [int(min)]
def _validate_url(self, url):
# kludge for issue 842, right now we use request headers
# to set the host.
if url[0] is not "/":
url = "/" + url
url = 'http://' + str(os.environ['HTTP_HOST']) + url
return url
# content below is for when that issue gets fixed
#regex = re.compile("^(http|https):\/\/([a-z0-9-]\.+)*", re.IGNORECASE)
#if regex.match(url) is not None:
# return url
#else:
# raise ValueError, "Invalid url " + url
def _calc_month(self, next_run, cron):
while True:
if cron["mon"][-1] < next_run.month:
next_run = next_run.replace(year=next_run.year+1, \
month=cron["mon"][0], \
day=1,hour=0,minute=0)
else:
if next_run.month in cron["mon"]:
return next_run
else:
# datetime.timedelta cannot step by whole months, so jump to the
# first day of the next month instead
if next_run.month == 12:
next_run = next_run.replace(year=next_run.year+1, month=1, day=1, hour=0, minute=0)
else:
next_run = next_run.replace(month=next_run.month+1, day=1, hour=0, minute=0)
def _calc_day(self, next_run, cron):
# start with dow as per cron if dow and day are set
# then dow is used if it comes before day. If dow
# is *, then ignore it.
if str(cron["dow"]) != str("*"):
# convert any integers to lists in order to easily compare values
m = next_run.month
while True:
if next_run.month != m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.weekday() in cron["dow"] or next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
else:
m = next_run.month
while True:
if next_run.month != m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
# if cron["dow"] is next_run.weekday() or cron["day"] is next_run.day:
if next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
def _calc_hour(self, next_run, cron):
m = next_run.month
d = next_run.day
while True:
if next_run.month != m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.day != d:
next_run = next_run.replace(hour=0)
next_run = self._calc_day(next_run, cron)
if next_run.hour in cron["hour"]:
return next_run
else:
m = next_run.month
d = next_run.day
one_hour = datetime.timedelta(hours=1)
next_run = next_run + one_hour
def _calc_minute(self, next_run, cron):
one_minute = datetime.timedelta(minutes=1)
m = next_run.month
d = next_run.day
h = next_run.hour
while True:
if next_run.month != m:
next_run = next_run.replace(minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.day != d:
next_run = next_run.replace(minute=0)
next_run = self._calc_day(next_run, cron)
if next_run.hour != h:
next_run = next_run.replace(minute=0)
# the hour rolled over, so re-validate the hour
next_run = self._calc_hour(next_run, cron)
if next_run.minute in cron["min"]:
return next_run
else:
m = next_run.month
d = next_run.day
h = next_run.hour
next_run = next_run + one_minute
def _get_next_run(self, cron):
one_minute = datetime.timedelta(minutes=1)
# go up 1 minute because it shouldn't happen right when added
now = datetime.datetime.now() + one_minute
next_run = now.replace(second=0, microsecond=0)
# start with month, which will also help calculate year
next_run = self._calc_month(next_run, cron)
next_run = self._calc_day(next_run, cron)
next_run = self._calc_hour(next_run, cron)
next_run = self._calc_minute(next_run, cron)
return next_run
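# A minimal usage sketch: _get_next_run expects a dict of already expanded
# "min", "hour", "day", "mon" and "dow" fields, e.g. for "0 6 * * *"
# (06:00 every day):
#   cron = {"min": [0], "hour": [6], "day": range(1, 32),
#           "mon": range(1, 13), "dow": "*"}
#   next_run = self._get_next_run(cron)  # first 06:00 after now + 1 minute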
|
Ch00k/ansible | refs/heads/devel | lib/ansible/playbook/vars.py | 7690 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
pombredanne/click | refs/heads/master | tests/conftest.py | 50 | from click.testing import CliRunner
import pytest
@pytest.fixture(scope='function')
def runner(request):
return CliRunner()
|
martinghunt/Scaffolder-evaluation | refs/heads/master | Analysis-scripts/scaff_test_ctg_to_perfect_ctg.py | 1 | #!/usr/bin/env python3
import argparse
import os
import copy
import shutil
import sys
from pyfastaq import sequences, utils, intervals, tasks
# check required nucmer programs are in path
progs = ['nucmer', 'delta-filter', 'show-coords']
not_in_path = [p for p in progs if shutil.which(p) is None]
if len(not_in_path):
print('Error! Need these programs to be in your path:', file=sys.stderr)
print('\n'.join(not_in_path), file=sys.stderr)
print('Cannot continue', file=sys.stderr)
sys.exit(1)
def nucmer_file_reader(fname):
f = utils.open_file_read(fname)
in_header = True
for line in f:
if in_header:
if line.startswith('['):
in_header = False
continue
yield NucmerHit(line)
utils.close(f)
class NucmerHit:
def __init__(self, line):
# [S1] [E1] [S2] [E2] [LEN 1] [LEN 2] [% IDY] [LEN R] [LEN Q] [FRM] [TAGS]
#1162 25768 24536 4 24607 24533 99.32 640851 24536 1 -1 MAL1 NODE_25757_length_24482_cov_18.920391 [CONTAINS]
try:
l = line.rstrip().split('\t')
self.ref_start = int(l[0])
self.ref_end = int(l[1])
self.qry_start = int(l[2])
self.qry_end = int(l[3])
self.hit_length_ref = int(l[4])
self.hit_length_qry = int(l[5])
self.percent_identity = float(l[6])
self.ref_length = int(l[7])
self.qry_length = int(l[8])
self.frame = int(l[9])
self.strand = int(l[10])
self.ref_name = l[11]
self.qry_name = l[12]
if len(l) == 14:
self.tag = l[13][1:-1]
else:
self.tag = None
except:
print('Error reading this nucmer line:\n' + line, file=sys.stderr)
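# A minimal usage sketch, using the sample row from the comment above:
#   hit = NucmerHit('\t'.join(['1162', '25768', '24536', '4', '24607', '24533',
#                              '99.32', '640851', '24536', '1', '-1', 'MAL1',
#                              'NODE_25757_length_24482_cov_18.920391', '[CONTAINS]']))
#   hit.ref_name == 'MAL1'; hit.percent_identity == 99.32; hit.tag == 'CONTAINS'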
def update_perfect_contigs(nucmer_hit, ref_fasta, contigs):
id = nucmer_hit.ref_name + ":" + str(nucmer_hit.ref_start) + '-' + str(nucmer_hit.ref_end)
contig = sequences.Fasta('x', ref_fasta[nucmer_hit.ref_start-1:nucmer_hit.ref_end])
contigs[(nucmer_hit.ref_name, nucmer_hit.ref_start, nucmer_hit.ref_end)] = contig
parser = argparse.ArgumentParser(
description = 'Takes contigs and a reference sequence. Makes a new fasta file of the contigs, but they are now perfect sequences by using the reference instead',
usage = '%(prog)s [options] <contigs.fa> <reference.fa> <outprefix>')
parser.add_argument('--min_seq_length', type=int, help='Minimum length of contig to output [%(default)s]', default=200)
parser.add_argument('--nucmer_options', help='Options when running nucmer [%(default)s]', default='')
parser.add_argument('contigs_fa', help='Name of contigs fasta file', metavar='contigs.fa')
parser.add_argument('ref_fa', help='Name of reference fasta file', metavar='reference.fa')
parser.add_argument('outprefix', help='Prefix of output files')
options = parser.parse_args()
ref_seqs = {}
tasks.file_to_dict(options.ref_fa, ref_seqs)
nucmer_out_prefix = options.outprefix + '.nucmer'
nucmer_out_delta = nucmer_out_prefix + '.delta'
nucmer_out_filter = nucmer_out_prefix + '.delta-filter'
nucmer_out_coords = nucmer_out_filter + '.coords'
# run nucmer of contigs vs ref
utils.syscall(' '.join(['nucmer', options.nucmer_options, '-p', nucmer_out_prefix, options.ref_fa, options.contigs_fa]))
utils.syscall(' '.join(['delta-filter', '-i 98 -l 180 -q', nucmer_out_delta, '>', nucmer_out_filter]))
utils.syscall(' '.join(['show-coords', '-dTlro', nucmer_out_filter, '>', nucmer_out_coords]))
# load hits into hash. key=ref_name, value=another hash with key=qry_name, value=list of hit positions in that ref seq
nucmer_hits = {}
contigs_to_print = {}
nucmer_reader = nucmer_file_reader(nucmer_out_coords)
for hit in nucmer_reader:
if hit.ref_name not in nucmer_hits:
nucmer_hits[hit.ref_name] = {}
if hit.qry_name not in nucmer_hits[hit.ref_name]:
nucmer_hits[hit.ref_name][hit.qry_name] = []
nucmer_hits[hit.ref_name][hit.qry_name].append(intervals.Interval(min(hit.ref_start, hit.ref_end), max(hit.ref_start, hit.ref_end)))
# merge all the overlapping hits for each list of hits corresponding to one contig
for ref_name, d in nucmer_hits.items():
for qry_name, hits in d.items():
intervals.merge_overlapping_in_list(hits)
for hit in hits:
if hit.end - hit.start + 1 >= options.min_seq_length:
if ref_name not in contigs_to_print:
contigs_to_print[ref_name] = []
contigs_to_print[ref_name].append(copy.copy(hit))
# remove any contigs that are completely contained in another contig
for ref, l in contigs_to_print.items():
intervals.remove_contained_in_list(l)
# print the final perfect contigs
f_out = utils.open_file_write(options.outprefix + '.fa')
counter = 1
last_id = None
for ref_name in sorted(contigs_to_print):
counter = 1
for interval in contigs_to_print[ref_name]:
id = ':'.join([str(x) for x in [ref_name, counter, interval.start, interval.end]])
print(sequences.Fasta(id, ref_seqs[ref_name][interval.start - 1: interval.end]), file=f_out)
counter += 1
utils.close(f_out)
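# Note on the output naming above: each record is written as
# <ref_name>:<counter>:<start>:<end>, so a merged hit covering bases 100-5000
# of a reference named "chr1" (a hypothetical example) would appear as
# ">chr1:1:100:5000" holding the 1-based inclusive slice of that reference.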
|
huylu/support-tools | refs/heads/master | googlecode-issues-exporter/github_issue_converter.py | 4 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading Google Code issues to GitHub.
Issue migration from Google Code to GitHub.
This tools allows you to easily move your downloaded Google Code issues to
GitHub.
To use this tool:
1. Follow the instructions at https://code.google.com/p/support-tools/ to
download your issues from Google.
2. Go to https://github.com/settings/applications and create a new "Personal
Access Token".
3. Get the GitHub username of the owner of the repository and the repositories
name you wish to add the issues to. For example username: TheDoctor and
repository: SonicScrewdriver
4. (Optional) If this option is skipped all issues will be assigned to the
owner of the repo. Make a file that contains a mapping from the Google
Code email address to the GitHub username for each user you wish to assign
issues to. The file should be newline separated with one user per line.
The email address and username should be colon (':') separated. For example
a file may look like this:
<Google Code Email>:<GitHub Username>
myemail@gmail.com:coolperson
otheruser@gmail.com:userother
5. Then run the command:
python ./github_issue_converter.py \
--github_oauth_token=<oauth-token> \
--github_owner_username=<your-github-username> \
--github_repo_name=<repository-name> \
--issue_file_path=<path-to-issue-file> \
--user_file_path="<optional-path-to-user-mapping-file>"
"""
import argparse
import json
import sys
import time
import urllib
import httplib2
import issues
# The URL used for calls to GitHub.
GITHUB_API_URL = "https://api.github.com"
# The maximum number of retries to make for an HTTP request that has failed.
MAX_HTTP_REQUESTS = 3
# The time (in seconds) to wait before trying to see if more requests are
# available.
REQUEST_CHECK_TIME = 60 * 5
# A real kludge. GitHub orders the comments based on time alone, and because
# we upload ours relatively quickly we need at least a second in between
# comments to keep them in chronological order.
COMMENT_DELAY = 2
def _CheckSuccessful(response):
"""Checks if the request was successful.
Args:
response: An HTTP response that contains a mapping from 'status' to an
HTTP response code integer.
Returns:
True if the request was successful.
"""
return "status" in response and 200 <= int(response["status"]) < 300
class GitHubService(object):
"""A connection to GitHub.
Handles basic HTTP operations to the GitHub API.
Attributes:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
rate_limit: Whether or not to rate limit API calls.
"""
def __init__(self, github_owner_username, github_repo_name,
github_oauth_token, rate_limit, http_instance=None):
"""Initialize the GitHubService.
Args:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
github_oauth_token: The oauth token to use for the requests.
http_instance: The HTTP instance to use, if not set a default will be
used.
"""
self.github_owner_username = github_owner_username
self.github_repo_name = github_repo_name
self._github_oauth_token = github_oauth_token
self._rate_limit = rate_limit
self._http = http_instance if http_instance else httplib2.Http()
def _PerformHttpRequest(self, method, url, body="{}", params=None):
Attempts to make an HTTP request for the given method, url, body and params.
If the request fails try again 'MAX_HTTP_REQUESTS' number of times. If the
request fails due to the request limit being hit, wait until more
requests can be made.
Args:
method: The HTTP request method as a string ('GET', 'POST', etc.).
url: The URL to make the call to.
body: The body of the request.
params: A dictionary of parameters to be used in the http call.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
headers = { "User-Agent": "GoogleCodeIssueExporter/1.0" }
query = params.copy() if params else {}
query["access_token"] = self._github_oauth_token
request_url = "%s%s?%s" % (GITHUB_API_URL, url, urllib.urlencode(query))
requests = 0
while requests < MAX_HTTP_REQUESTS:
requests += 1
response, content = self._http.request(request_url, method,
headers=headers, body=body)
if _CheckSuccessful(response):
return response, json.loads(content)
elif self._RequestLimitReached():
requests -= 1
self._WaitForApiThrottlingToEnd()
return response, json.loads(content)
def PerformGetRequest(self, url, params=None):
"""Makes a GET request.
Args:
url: The URL to make the call to.
params: A dictionary of parameters to be used in the http call.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
return self._PerformHttpRequest("GET", url, params=params)
def PerformPostRequest(self, url, body):
"""Makes a POST request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
if self._rate_limit and self._rate_limit in ["True", "true"]:
# Add a delay to all outgoing requests to GitHub, so as not to trigger their
# anti-abuse mechanism. This is separate from your typical rate limit, and
# only applies to certain API calls (like creating issues). And, alas, the
# exact quota is undocumented. So the value below is simply a guess. See:
# https://developer.github.com/v3/#abuse-rate-limits
req_min = 15
time.sleep(60 / req_min)
return self._PerformHttpRequest("POST", url, body)
def PerformPatchRequest(self, url, body):
"""Makes a PATCH request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
return self._PerformHttpRequest("PATCH", url, body)
def _GetRemainingRequests(self):
"""Gets the number of remaining requests the user has this hour.
Makes GET request to GitHub to get the number of remaining requests before
the hourly request limit is reached.
Returns:
The number of remaining requests.
"""
url = ("%s/rate_limit?access_token=%s" %
(GITHUB_API_URL, self._github_oauth_token))
_, content = self._http.request(url, "GET")
content = json.loads(content)
if "rate" in content and "remaining" in content["rate"]:
return int(content["rate"]["remaining"])
return 0
def _RequestLimitReached(self):
"""Returns true if the request limit has been reached."""
return self._GetRemainingRequests() == 0
def _WaitForApiThrottlingToEnd(self):
"""Waits until the user is allowed to make more requests."""
sys.stdout.write("Hourly request limit reached. Waiting for new limit, "
"checking every %d minutes" % (REQUEST_CHECK_TIME/60))
while True:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(REQUEST_CHECK_TIME)
if not self._RequestLimitReached():
return
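# A minimal wiring sketch (mirroring what ExportIssues() below does); the
# owner, repo and token values are placeholders taken from the module
# docstring:
#   github_service = GitHubService("TheDoctor", "SonicScrewdriver",
#                                  "<oauth-token>", rate_limit="True")
#   issue_service = IssueService(github_service)
#   user_service = UserService(github_service)
#   print user_service.IsUser("TheDoctor")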
class UserService(issues.UserService):
"""GitHub user operations.
Handles user operations on the GitHub API.
"""
GITHUB_USERS_URL = "/users"
def __init__(self, github_service):
"""Initialize the UserService.
Args:
github_service: The GitHub service.
"""
self._github_service = github_service
def _GetUser(self, username):
"""Gets a GitHub user.
Args:
username: The GitHub username to get.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
user_url = "%s/%s" % (self.GITHUB_USERS_URL, username)
return self._github_service.PerformGetRequest(user_url)
def IsUser(self, username):
"""Checks if the GitHub user exists.
Args:
username: The GitHub username to check.
Returns:
True if the username exists.
"""
response, _ = self._GetUser(username)
return _CheckSuccessful(response)
class IssueService(issues.IssueService):
"""GitHub issue operations.
Handles creating and updating issues and comments on the GitHub API.
"""
def __init__(self, github_service, comment_delay=COMMENT_DELAY):
"""Initialize the IssueService.
Args:
github_service: The GitHub service.
"""
self._github_service = github_service
self._comment_delay = comment_delay
# If the repo is of the form "login/reponame" then don't inject the
# username as it (or the organization) is already embedded.
if '/' in self._github_service.github_repo_name:
self._github_issues_url = "/repos/%s/issues" % \
self._github_service.github_repo_name
else:
self._github_issues_url = ("/repos/%s/%s/issues" %
(self._github_service.github_owner_username,
self._github_service.github_repo_name))
def GetIssues(self, state="open"):
"""Gets all of the issue for the GitHub repository.
Args:
state: The state of the repository can be either 'open' or 'closed'.
Returns:
The list of all of the issues for the given repository.
Raises:
IOError: An error occurred accessing previously created issues.
"""
github_issues = []
params = {"state": state, "per_page": 100, "page": 0}
while True:
params["page"] += 1
response, content = self._github_service.PerformGetRequest(
self._github_issues_url, params=params)
if not _CheckSuccessful(response):
raise IOError("Failed to retrieve previous issues.\n\n%s" % content)
if not content:
return github_issues
else:
github_issues += content
return github_issues
def CreateIssue(self, googlecode_issue):
"""Creates a GitHub issue.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number of the new issue.
Raises:
issues.ServiceError: An error occurred creating the issue.
"""
issue_title = googlecode_issue.GetTitle()
# It is not possible to create a Google Code issue without a title, but you
# can edit an issue to remove its title afterwards.
if issue_title.isspace():
issue_title = "<empty title>"
issue = {
"title": issue_title,
"body": googlecode_issue.GetDescription(),
"assignee": googlecode_issue.GetOwner(),
"labels": googlecode_issue.GetLabels(),
}
response, content = self._github_service.PerformPostRequest(
self._github_issues_url, json.dumps(issue))
if not _CheckSuccessful(response):
# Newline character at the beginning of the line allows for in-place
# updating of the counts of the issues and comments.
raise issues.ServiceError(
"\nFailed to create issue #%d '%s'.\n\n\n"
"Response:\n%s\n\n\nContent:\n%s" % (
googlecode_issue.GetId(), issue_title, response, content))
return self._GetIssueNumber(content)
def CloseIssue(self, issue_number):
"""Closes a GitHub issue.
Args:
issue_number: The issue number.
Raises:
issues.ServiceError: An error occurred closing the issue.
"""
issue_url = "%s/%d" % (self._github_issues_url, issue_number)
json_state = json.dumps({"state": "closed"})
response, content = self._github_service.PerformPatchRequest(
issue_url, json_state)
if not _CheckSuccessful(response):
raise issues.ServiceError("\nFailed to close issue #%s.\n%s" % (
issue_number, content))
def CreateComment(self, issue_number, source_issue_id,
googlecode_comment, project_name):
"""Creates a comment on a GitHub issue.
Args:
issue_number: The issue number.
source_issue_id: The Google Code issue id.
googlecode_comment: A GoogleCodeComment instance.
project_name: The Google Code project name.
Raises:
issues.ServiceError: An error occurred creating the comment.
"""
comment_url = "%s/%d/comments" % (self._github_issues_url, issue_number)
comment = googlecode_comment.GetDescription()
json_body = json.dumps({"body": comment})
response, content = self._github_service.PerformPostRequest(
comment_url, json_body)
if not _CheckSuccessful(response):
raise issues.ServiceError(
"\nFailed to create issue comment for issue #%d\n\n"
"Response:\n%s\n\nContent:\n%s\n\n" %
(issue_number, response, content))
time.sleep(self._comment_delay)
def _GetIssueNumber(self, content):
"""Get the issue number from a newly created GitHub issue.
Args:
content: The content from an HTTP response.
Returns:
The GitHub issue number.
"""
assert "number" in content, "Getting issue number from: %s" % content
return content["number"]
def ExportIssues(github_owner_username, github_repo_name, github_oauth_token,
issue_file_path, project_name, user_file_path, rate_limit):
"""Exports all issues for a given project.
"""
github_service = GitHubService(
github_owner_username, github_repo_name, github_oauth_token,
rate_limit)
issue_service = IssueService(github_service)
user_service = UserService(github_service)
issue_data = issues.LoadIssueData(issue_file_path, project_name)
user_map = issues.LoadUserData(user_file_path, user_service)
# Add a special "user_requesting_export" user, which comes in handy.
user_map["user_requesting_export"] = github_owner_username
issue_exporter = issues.IssueExporter(
issue_service, user_service, issue_data, project_name, user_map)
try:
issue_exporter.Init()
issue_exporter.Start()
print "\nDone!\n"
except IOError, e:
print "[IOError] ERROR: %s" % e
except issues.InvalidUserError, e:
print "[InvalidUserError] ERROR: %s" % e
def main(args):
"""The main function.
Args:
args: The command line arguments.
Raises:
ProjectNotFoundError: The user passed in an invalid project name.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--github_oauth_token", required=True,
help="You can generate an oauth token here: "
"https://github.com/settings/applications")
parser.add_argument("--github_owner_username", required=True,
help="The project ownsers GitHub username")
parser.add_argument("--github_repo_name", required=True,
help="The GitHub repository you wish to add the issues"
"to.")
parser.add_argument("--issue_file_path", required=True,
help="The path to the file containing the issues from"
"Google Code.")
parser.add_argument("--project_name", required=True,
help="The name of the Google Code project you wish to"
"export")
parser.add_argument("--user_file_path", required=False,
help="The path to the file containing a mapping from"
"email address to github username.")
parser.add_argument("--rate_limit", required=False, default="True",
help="Rate limit GitHub requests to not run into"
"anti-abuse limits.")
parsed_args, _ = parser.parse_known_args(args)
ExportIssues(
parsed_args.github_owner_username, parsed_args.github_repo_name,
parsed_args.github_oauth_token, parsed_args.issue_file_path,
parsed_args.project_name, parsed_args.user_file_path,
parsed_args.rate_limit)
if __name__ == "__main__":
main(sys.argv)
|
Echelon85/volatility | refs/heads/master | volatility/plugins/linux/psxview.py | 44 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2010, 2011, 2012 Michael Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.obj as obj
import volatility.plugins.linux.pslist as linux_pslist
import volatility.plugins.linux.pidhashtable as linux_pidhashtable
import volatility.plugins.linux.pslist_cache as linux_pslist_cache
import volatility.plugins.linux.common as linux_common
#based off the windows version from mhl
#
#INFO:
# 'pslist' does not get threads
# 'pid_hash' does
# 'kmem_cache' does
# 'runqueue' does
class linux_psxview(linux_common.AbstractLinuxCommand):
"Find hidden processes with various process listings"
def get_pslist(self):
return [x.obj_offset for x in linux_pslist.linux_pslist(self._config).calculate()]
def get_pid_hash(self):
return [x.obj_offset for x in linux_pidhashtable.linux_pidhashtable(self._config).calculate()]
def get_kmem_cache(self):
return [x.obj_offset for x in linux_pslist_cache.linux_pslist_cache(self._config).calculate()]
def calculate(self):
linux_common.set_plugin_members(self)
ps_sources = {}
# The keys are names of process sources
# The values are the virtual offset of the task_struct
ps_sources['pslist'] = self.get_pslist()
ps_sources['pid_hash'] = self.get_pid_hash()
ps_sources['kmem_cache'] = self.get_kmem_cache()
# FUTURE
# ps_sources['run_queue'] =
# Build a list of offsets from all sources
seen_offsets = []
for source in ps_sources:
tasks = ps_sources[source]
for offset in tasks:
if offset not in seen_offsets:
seen_offsets.append(offset)
yield offset, obj.Object("task_struct", offset = offset, vm = self.addr_space), ps_sources
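# Interpreting the cross-reference: a task_struct that was unlinked from the
# task list (e.g. by a DKOM-style rootkit) would typically show pslist=False
# while pid_hash and/or kmem_cache still report True, which is the
# discrepancy the columns rendered below are meant to expose.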
def render_text(self, outfd, data):
self.table_header(outfd, [('Offset(V)', '[addrpad]'),
('Name', '<20'),
('PID', '>6'),
('pslist', '5'),
('pid_hash', '5'),
('kmem_cache', '5'),
])
for offset, process, ps_sources in data:
self.table_row(outfd,
offset,
process.comm,
process.pid,
str(offset in ps_sources['pslist']),
str(offset in ps_sources['pid_hash']),
str(offset in ps_sources['kmem_cache'])
)
|
tectronics/cortex-vfx | refs/heads/master | test/IECore/CachedReader.py | 5 | ##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
from IECore import *
import os
class CachedReaderTest( unittest.TestCase ) :
def test( self ) :
r = CachedReader( SearchPath( "./", ":" ), 100 * 1024 * 1024 )
o = r.read( "test/IECore/data/cobFiles/compoundData.cob" )
self.assertEqual( o.typeName(), "CompoundData" )
self.assertEqual( r.memoryUsage(), o.memoryUsage() )
self.failUnless( r.cached( "test/IECore/data/cobFiles/compoundData.cob" ) )
oo = r.read( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( r.memoryUsage(), o.memoryUsage() + oo.memoryUsage() )
self.failUnless( r.cached( "test/IECore/data/cobFiles/compoundData.cob" ) )
self.failUnless( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
oo = r.read( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( r.memoryUsage(), o.memoryUsage() + oo.memoryUsage() )
self.failUnless( r.cached( "test/IECore/data/cobFiles/compoundData.cob" ) )
self.failUnless( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
self.assertRaises( RuntimeError, r.read, "doesNotExist" )
self.assertRaises( RuntimeError, r.read, "doesNotExist" )
# Here, the cache should throw away "o" (because there isn't enough room for it, and it was the least
# recently used) leaving us with just "oo"
r.maxMemory = oo.memoryUsage() + ( o.memoryUsage() / 2 )
self.assertEqual( r.memoryUsage(), oo.memoryUsage() )
self.failIf( r.cached( "test/IECore/data/cobFiles/compoundData.cob" ) )
self.failUnless( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
# Here, the cache should throw away "oo" (because there isn't enough room for it, and it was the least
# recently used) leaving us empty
r.maxMemory = oo.memoryUsage() / 2
self.assertEqual( r.memoryUsage(), 0 )
self.failIf( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
r.maxMemory = oo.memoryUsage() * 2
oo = r.read( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
r.clear()
self.assertEqual( r.memoryUsage(), 0 )
self.failIf( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
oo = r.read( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( r.memoryUsage(), oo.memoryUsage() )
self.failUnless( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
r.clear( "I don't exist" )
self.failUnless( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
self.assertEqual( r.memoryUsage(), oo.memoryUsage() )
r.clear( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( r.memoryUsage(), 0 )
self.failIf( r.cached( "test/IECore/data/pdcFiles/particleShape1.250.pdc" ) )
# testing insert.
r.insert( "test/IECore/data/pdcFiles/particleShape1.250.pdc", oo )
self.assertEqual( r.memoryUsage(), oo.memoryUsage() )
o2 = r.read( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( oo, o2 )
# testing overwriting an existing item with insert
r.insert( "test/IECore/data/pdcFiles/particleShape1.250.pdc", o )
self.assertEqual( r.memoryUsage(), o.memoryUsage() )
o2 = r.read( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( o, o2 )
# test clear after failed load
self.assertRaises( RuntimeError, r.read, "/i/dont/exist" )
r.clear( "/i/dont/exist" )
def testRepeatedFailures( self ) :
def check( fileName ) :
r = CachedReader( SearchPath( "./", ":" ), 100 * 1024 * 1024 )
firstException = None
try :
r.read( fileName )
except Exception, e :
firstException = str( e )
self.assert_( firstException )
secondException = None
try :
r.read( fileName )
except Exception, e :
secondException = str( e )
self.assert_( secondException )
# we want the second exception to be different, as the CachedReader
# shouldn't be wasting time attempting to load files again when
# it failed the first time
self.assertNotEqual( firstException, secondException )
check( "iDontExist" )
check( "include/IECore/IECore.h" )
def testChangeSearchPaths( self ) :
# read a file from one path
r = CachedReader( SearchPath( "./test/IECore/data/cachedReaderPath1", ":" ), 100 * 1024 * 1024 )
o1 = r.read( "file.cob" )
self.assertEqual( o1.value, 1 )
self.failUnless( r.cached( "file.cob" ) )
# read a file of the same name from a different path
r.searchPath = SearchPath( "./test/IECore/data/cachedReaderPath2", ":" )
self.failIf( r.cached( "file.cob" ) )
o2 = r.read( "file.cob" )
self.assertEqual( o2.value, 2 )
self.failUnless( r.cached( "file.cob" ) )
# set the paths to the same as before and check we didn't obliterate the cache unnecessarily
r.searchPath = SearchPath( "./test/IECore/data/cachedReaderPath2", ":" )
self.failUnless( r.cached( "file.cob" ) )
def testDefault( self ) :
os.environ["IECORE_CACHEDREADER_PATHS"] = "a:test:path"
os.environ["IECORE_CACHEDREADER_MEMORY"] = "200"
r = CachedReader.defaultCachedReader()
r2 = CachedReader.defaultCachedReader()
self.assert_( r.isSame( r2 ) )
self.assertEqual( r.maxMemory, 1024 * 1024 * 200 )
self.assertEqual( r.searchPath, SearchPath( "a:test:path", ":" ) )
def testPostProcessing( self ) :
r = CachedReader( SearchPath( "./test/IECore/data/cobFiles", ":" ), 100 * 1024 * 1024 )
m = r.read( "polySphereQuads.cob" )
self.failUnless( 4 in m.verticesPerFace )
r = CachedReader( SearchPath( "./test/IECore/data/cobFiles", ":" ), 100 * 1024 * 1024, TriangulateOp() )
m = r.read( "polySphereQuads.cob" )
for v in m.verticesPerFace :
self.assertEqual( v, 3 )
def testPostProcessingFailureMode( self ) :
class PostProcessor( ModifyOp ) :
def __init__( self ) :
ModifyOp.__init__( self, "", Parameter( "result", "", NullObject() ), Parameter( "input", "", NullObject() ) )
def modify( self, obj, args ) :
raise Exception( "I am a very naughty op" )
r = CachedReader( SearchPath( "./test/IECore/data/cobFiles", ":" ), 100 * 1024 * 1024, PostProcessor() )
firstException = None
try :
m = r.read( "polySphereQuads.cob" )
except Exception, e :
firstException = str( e )
self.failUnless( firstException is not None )
secondException = None
try :
m = r.read( "polySphereQuads.cob" )
except Exception, e :
secondException = str( e )
self.failUnless( secondException is not None )
# we want the second exception to be different, as the CachedReader
# shouldn't be wasting time attempting to load files again when
# it failed the first time
self.assertNotEqual( firstException, secondException )
def testThreadingAndClear( self ) :
# this tests a fix to the clear() method in the LRU cache,
# which was causing a crash (or at least a spurious "Previous attempt
# to get item failed" exception)
def func1( r, files ):
for i in range( 0,10000 ):
r.read( files[ i % len( files ) ] )
def func2( r ):
for i in range( 0,10000 ):
r.clear()
files = [
"test/IECore/data/cobFiles/compoundData.cob",
"test/IECore/data/pdcFiles/particleShape1.250.pdc",
"test/IECore/data/cobFiles/polySphereQuads.cob",
"test/IECore/data/cachedReaderPath2/file.cob",
]
r = CachedReader( SearchPath( "./", ":" ), 100 * 1024 * 1024 )
t1 = threading.Thread( target=func1, args = [ r, files ] )
t2 = threading.Thread( target=func1, args = [ r, files ] )
t3 = threading.Thread( target=func2, args = [ r ] )
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
if __name__ == "__main__":
unittest.main()
|
macs03/demo-cms | refs/heads/master | cms/lib/python2.7/site-packages/cms/tests/frontend.py | 12 | # -*- coding: utf-8 -*-
import sys
import datetime
import os
import time
from django.contrib.auth.models import Permission
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.urlresolvers import clear_url_caches
from django.test import LiveServerTestCase
from django.utils import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
from cms.api import create_page, create_title, add_plugin
from cms.appresolver import clear_app_resolvers
from cms.apphook_pool import apphook_pool
from cms.exceptions import AppAlreadyRegistered
from cms.models import Page, CMSPlugin
from cms.test_utils.project.placeholderapp.cms_app import Example1App
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.testcases import CMSTestCase
from cms.utils.compat.dj import get_user_model
from cms.utils.conf import get_cms_setting
class CMSLiveTests(LiveServerTestCase, CMSTestCase):
@classmethod
def setUpClass(cls):
super(CMSLiveTests, cls).setUpClass()
cache.clear()
if os.environ.get('SELENIUM', '') != '':
#skip selenium tests
raise unittest.SkipTest("Selenium env is set to 0")
if os.environ.get("TRAVIS_BUILD_NUMBER"):
capabilities = webdriver.DesiredCapabilities.CHROME
capabilities['version'] = '31'
capabilities['platform'] = 'OS X 10.9'
capabilities['name'] = 'django CMS'
capabilities['build'] = os.environ.get("TRAVIS_BUILD_NUMBER")
capabilities['tags'] = [os.environ.get("TRAVIS_PYTHON_VERSION"), "CI"]
username = os.environ.get("SAUCE_USERNAME")
access_key = os.environ.get("SAUCE_ACCESS_KEY")
capabilities["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER")
hub_url = "http://%s:%s@ondemand.saucelabs.com/wd/hub" % (username, access_key)
cls.driver = webdriver.Remote(desired_capabilities=capabilities, command_executor=hub_url)
cls.driver.implicitly_wait(30)
else:
cls.driver = webdriver.Firefox()
cls.driver.implicitly_wait(5)
cls.accept_next_alert = True
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super(CMSLiveTests, cls).tearDownClass()
def tearDown(self):
super(CMSLiveTests, self).tearDown()
Page.objects.all().delete() # somehow the sqlite transaction got lost.
cache.clear()
def wait_until(self, callback, timeout=10):
"""
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
"""
WebDriverWait(self.driver, timeout).until(callback)
def wait_loaded_tag(self, tag_name, timeout=10):
"""
Helper function that blocks until the element with the given tag name
is found on the page.
"""
self.wait_until(
lambda driver: driver.find_element_by_tag_name(tag_name),
timeout
)
def wait_page_loaded(self):
"""
Block until page has started to load.
"""
from selenium.common.exceptions import TimeoutException
try:
# Wait for the next page to be loaded
self.wait_loaded_tag('body')
except TimeoutException:
# IE7 occasionally returns an error "Internet Explorer cannot
# display the webpage" and doesn't load the next page. We just
# ignore it.
pass
def is_element_present(self, how, what):
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException:
return False
return True
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def reload_urls(self):
"""
Code borrowed from ApphooksTestCase
"""
from django.conf import settings
url_modules = [
'cms.urls',
# TODO: Add here intermediary modules which may
# include() the 'cms.urls' if it isn't included
# directly in the root urlconf.
# '...',
'cms.test_utils.project.second_cms_urls_for_apphook_tests',
'cms.test_utils.project.urls_for_apphook_tests',
settings.ROOT_URLCONF,
]
clear_app_resolvers()
clear_url_caches()
for module in url_modules:
if module in sys.modules:
del sys.modules[module]
class ToolbarBasicTests(CMSLiveTests):
def setUp(self):
self.user = self.get_superuser()
Site.objects.create(domain='example.org', name='example.org')
self.base_url = self.live_server_url
self.driver.implicitly_wait(2)
super(ToolbarBasicTests, self).setUp()
def test_toolbar_login(self):
User = get_user_model()
create_page('Home', 'simple.html', 'en', published=True)
url = '%s/?%s' % (self.live_server_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertTrue(User.objects.all().count(), 1)
self.driver.get(url)
self.assertRaises(NoSuchElementException, self.driver.find_element_by_class_name, 'cms_toolbar-item_logout')
username_input = self.driver.find_element_by_id("id_cms-username")
username_input.send_keys(getattr(self.user, User.USERNAME_FIELD))
password_input = self.driver.find_element_by_id("id_cms-password")
password_input.send_keys(getattr(self.user, User.USERNAME_FIELD))
password_input.submit()
self.wait_page_loaded()
self.assertTrue(self.driver.find_element_by_class_name('cms_toolbar-item-navigation'))
def test_toolbar_login_view(self):
User = get_user_model()
create_page('Home', 'simple.html', 'en', published=True)
ex1 = Example1.objects.create(
char_1='char_1', char_2='char_1', char_3='char_3', char_4='char_4',
date_field=datetime.datetime.now()
)
try:
apphook_pool.register(Example1App)
except AppAlreadyRegistered:
pass
self.reload_urls()
create_page('apphook', 'simple.html', 'en', published=True,
apphook=Example1App)
url = '%s/%s/?%s' % (self.live_server_url, 'apphook/detail/%s' % ex1.pk, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.driver.get(url)
username_input = self.driver.find_element_by_id("id_cms-username")
username_input.send_keys(getattr(self.user, User.USERNAME_FIELD))
password_input = self.driver.find_element_by_id("id_cms-password")
password_input.send_keys("what")
password_input.submit()
self.wait_page_loaded()
self.assertTrue(self.driver.find_element_by_class_name('cms_error'))
def test_toolbar_login_cbv(self):
User = get_user_model()
try:
apphook_pool.register(Example1App)
except AppAlreadyRegistered:
pass
self.reload_urls()
create_page('Home', 'simple.html', 'en', published=True)
ex1 = Example1.objects.create(
char_1='char_1', char_2='char_1', char_3='char_3', char_4='char_4',
date_field=datetime.datetime.now()
)
create_page('apphook', 'simple.html', 'en', published=True,
apphook=Example1App)
url = '%s/%s/?%s' % (self.live_server_url, 'apphook/detail/class/%s' % ex1.pk, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.driver.get(url)
username_input = self.driver.find_element_by_id("id_cms-username")
username_input.send_keys(getattr(self.user, User.USERNAME_FIELD))
password_input = self.driver.find_element_by_id("id_cms-password")
password_input.send_keys("what")
password_input.submit()
self.wait_page_loaded()
self.assertTrue(self.driver.find_element_by_class_name('cms_error'))
def test_basic_add_pages(self):
with SettingsOverride(DEBUG=True):
User = get_user_model()
self.assertEqual(Page.objects.all().count(), 0)
self.assertTrue(User.objects.all().count(), 1)
driver = self.driver
driver.get(self.base_url + "/de/")
driver.find_element_by_id("add-page").click()
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(getattr(self.user, User.USERNAME_FIELD))
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(getattr(self.user, User.USERNAME_FIELD))
driver.find_element_by_css_selector("input[type=\"submit\"]").click()
driver.find_element_by_name("_save").click()
driver.find_element_by_link_text(u"Seite hinzufügen").click()
driver.find_element_by_id("id_title").clear()
driver.find_element_by_id("id_title").send_keys("SubPage")
driver.find_element_by_name("_save").click()
class PlaceholderBasicTests(CMSLiveTests, SettingsOverrideTestCase):
settings_overrides = {
'LANGUAGE_CODE': 'en',
'LANGUAGES': (('en', 'English'),
('it', 'Italian')),
'CMS_LANGUAGES': {
1: [ {'code' : 'en',
'name': 'English',
'public': True},
{'code': 'it',
'name': 'Italian',
'public': True},
],
'default': {
'public': True,
'hide_untranslated': False,
}
},
'SITE_ID': 1,
}
def setUp(self):
Site.objects.create(domain='example.org', name='example.org')
self.page = create_page('Home', 'simple.html', 'en', published=True)
self.italian_title = create_title('it', 'Home italian', self.page)
self.placeholder = self.page.placeholders.all()[0]
add_plugin(self.placeholder, 'TextPlugin', 'en', body='test')
self.base_url = self.live_server_url
self.user = self._create_user('admin', True, True, True)
self.driver.implicitly_wait(5)
super(PlaceholderBasicTests, self).setUp()
def _login(self):
url = '%s/?%s' % (self.live_server_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.driver.get(url)
self.assertRaises(NoSuchElementException, self.driver.find_element_by_class_name, 'cms_toolbar-item_logout')
username_input = self.driver.find_element_by_id("id_cms-username")
username_input.send_keys(getattr(self.user, get_user_model().USERNAME_FIELD))
password_input = self.driver.find_element_by_id("id_cms-password")
password_input.send_keys(getattr(self.user, get_user_model().USERNAME_FIELD))
password_input.submit()
self.wait_page_loaded()
self.assertTrue(self.driver.find_element_by_class_name('cms_toolbar-item-navigation'))
def test_copy_from_language(self):
self._login()
self.driver.get('%s/it/?%s' % (self.live_server_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
# check if there are no plugins in italian version of the page
italian_plugins = self.page.placeholders.all()[0].get_plugins_list('it')
self.assertEqual(len(italian_plugins), 0)
build_button = self.driver.find_element_by_css_selector('.cms_toolbar-item-cms-mode-switcher a[href="?%s"]' % get_cms_setting('CMS_TOOLBAR_URL__BUILD'))
build_button.click()
submenu = self.driver.find_element_by_css_selector('.cms_dragbar .cms_submenu')
hov = ActionChains(self.driver).move_to_element(submenu)
hov.perform()
submenu_link_selector = '.cms_submenu-item a[data-rel="copy-lang"][data-language="en"]'
WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, submenu_link_selector)))
copy_from_english = self.driver.find_element_by_css_selector(submenu_link_selector)
copy_from_english.click()
# Done, check if the text plugin was copied and it is only one
WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.cms_draggable:nth-child(1)')))
italian_plugins = self.page.placeholders.all()[0].get_plugins_list('it')
self.assertEqual(len(italian_plugins), 1)
plugin_instance = italian_plugins[0].get_plugin_instance()[0]
self.assertEqual(plugin_instance.body, 'test')
def test_copy_to_from_clipboard(self):
self.assertEqual(CMSPlugin.objects.count(), 1)
self._login()
build_button = self.driver.find_element_by_css_selector('.cms_toolbar-item-cms-mode-switcher a[href="?%s"]' % get_cms_setting('CMS_TOOLBAR_URL__BUILD'))
build_button.click()
cms_draggable = self.driver.find_element_by_css_selector('.cms_draggable:nth-child(1)')
hov = ActionChains(self.driver).move_to_element(cms_draggable)
hov.perform()
submenu = cms_draggable.find_element_by_css_selector('.cms_submenu')
hov = ActionChains(self.driver).move_to_element(submenu)
hov.perform()
copy = submenu.find_element_by_css_selector('a[data-rel="copy"]')
copy.click()
time.sleep(0.2)
clipboard = self.driver.find_element_by_css_selector('.cms_clipboard')
WebDriverWait(self.driver, 10).until(lambda driver: clipboard.is_displayed())
hov = ActionChains(self.driver).move_to_element(clipboard)
hov.perform()
# necessary sleeps for making a "real" drag and drop, that works with the clipboard
time.sleep(0.1)
self.assertEqual(CMSPlugin.objects.count(), 2)
drag = ActionChains(self.driver).click_and_hold(
clipboard.find_element_by_css_selector('.cms_draggable:nth-child(1)')
)
drag.perform()
time.sleep(0.1)
drag = ActionChains(self.driver).move_to_element(
self.driver.find_element_by_css_selector('.cms_dragarea-1')
)
drag.perform()
time.sleep(0.2)
drag = ActionChains(self.driver).move_by_offset(
0, 10
).release()
drag.perform()
time.sleep(0.5)
self.assertEqual(CMSPlugin.objects.count(), 3)
plugins = self.page.placeholders.all()[0].get_plugins_list('en')
self.assertEqual(len(plugins), 2)
class StaticPlaceholderPermissionTests(CMSLiveTests, SettingsOverrideTestCase):
settings_overrides = {
'SITE_ID': 1,
'CMS_PERMISSION': False,
}
def setUp(self):
Site.objects.create(domain='example.org', name='example.org')
self.page = create_page('Home', 'static.html', 'en', published=True)
self.base_url = self.live_server_url
self.placeholder_name = 'cms_placeholder-5'
self.user = self._create_user("testuser", is_staff=True)
self.user.user_permissions = Permission.objects.exclude(codename="edit_static_placeholder")
self.driver.implicitly_wait(2)
super(StaticPlaceholderPermissionTests, self).setUp()
def test_static_placeholders_permissions(self):
# login
url = '%s/?%s' % (self.live_server_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.driver.get(url)
self.assertRaises(NoSuchElementException, self.driver.find_element_by_class_name, 'cms_toolbar-item_logout')
username_input = self.driver.find_element_by_id("id_cms-username")
username_input.send_keys(getattr(self.user, get_user_model().USERNAME_FIELD))
password_input = self.driver.find_element_by_id("id_cms-password")
password_input.send_keys(getattr(self.user, get_user_model().USERNAME_FIELD))
password_input.submit()
self.wait_page_loaded()
self.assertTrue(self.driver.find_element_by_class_name('cms_toolbar-item-navigation'))
# test static placeholder permission (content of static placeholders is NOT editable)
self.driver.get('%s/en/?%s' % (self.live_server_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
self.assertRaises(NoSuchElementException, self.driver.find_element_by_class_name, self.placeholder_name)
# update userpermission
edit_permission = Permission.objects.get(codename="edit_static_placeholder")
self.user.user_permissions.add( edit_permission )
# test static placeholder permission (content of static placeholders is editable)
self.driver.get('%s/en/?%s' % (self.live_server_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
self.assertTrue(self.driver.find_element_by_class_name(self.placeholder_name))
|
kevclarx/ansible | refs/heads/devel | lib/ansible/modules/monitoring/logicmonitor_facts.py | 9 | #!/usr/bin/python
# LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
# Copyright (C) 2015 LogicMonitor
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: logicmonitor_facts
short_description: Collect facts about LogicMonitor objects
description:
- LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
- This module collects facts about hosts and host groups within your LogicMonitor account.
version_added: "2.2"
author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)]
notes:
- You must have an existing LogicMonitor account for this module to function.
requirements: ["An existing LogicMonitor account", "Linux"]
options:
target:
description:
- The LogicMonitor object you wish to manage.
required: true
default: null
choices: ['host', 'hostgroup']
company:
description:
- The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes".
required: true
default: null
user:
description:
- A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user.
required: true
default: null
password:
description:
- The password for the chosen LogicMonitor User.
- If an md5 hash is used, the digest flag must be set to true.
required: true
default: null
collector:
description:
- The fully qualified domain name of a collector in your LogicMonitor account.
- This is optional for querying a LogicMonitor host when a displayname is specified.
- This is required for querying a LogicMonitor host when a displayname is not specified.
required: false
default: null
hostname:
description:
- The hostname of a host in your LogicMonitor account, or the desired hostname of a device to add into monitoring.
- Required for managing hosts (target=host).
required: false
default: 'hostname -f'
displayname:
description:
- The display name of a host in your LogicMonitor account or the desired display name of a device to add into monitoring.
required: false
default: 'hostname -f'
fullpath:
description:
- The fullpath of the hostgroup object you would like to manage.
- Recommend running on a single ansible host.
- Required for management of LogicMonitor host groups (target=hostgroup).
required: false
default: null
...
'''
EXAMPLES = '''
# Always run those modules on localhost using delegate_to:localhost, or localaction
- name: query a list of hosts
logicmonitor_facts:
target: host
company: yourcompany
user: Luigi
password: ImaLuigi,number1!
delegate_to: localhost
- name: query a host group
logicmonitor_facts:
target: hostgroup
fullpath: /servers/production
company: yourcompany
user: mario
password: itsame.Mario!
delegate_to: localhost
'''
RETURN = '''
---
ansible_facts:
description: LogicMonitor properties set for the specified object
returned: success
type: list
example: >
{
"name": "dc",
"value": "1"
},
{
"name": "type",
"value": "prod"
},
{
"name": "system.categories",
"value": ""
},
{
"name": "snmp.community",
"value": "********"
}
...
'''
import socket
import types
import urllib
HAS_LIB_JSON = True
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if (
not isinstance(json.loads, types.FunctionType) or
not isinstance(json.dumps, types.FunctionType)
):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
HAS_LIB_JSON = False
except SyntaxError:
HAS_LIB_JSON = False
class LogicMonitor(object):
def __init__(self, module, **params):
self.__version__ = "1.0-python"
self.module = module
self.module.debug("Instantiating LogicMonitor object")
self.check_mode = False
self.company = params["company"]
self.user = params["user"]
self.password = params["password"]
self.fqdn = socket.getfqdn()
self.lm_url = "logicmonitor.com/santaba"
self.__version__ = self.__version__ + "-ansible-module"
def rpc(self, action, params):
"""Make a call to the LogicMonitor RPC library
and return the response"""
self.module.debug("Running LogicMonitor.rpc")
param_str = urllib.urlencode(params)
creds = urllib.urlencode(
{"c": self.company,
"u": self.user,
"p": self.password})
if param_str:
param_str = param_str + "&"
param_str = param_str + creds
try:
url = ("https://" + self.company + "." + self.lm_url +
"/rpc/" + action + "?" + param_str)
# Set custom LogicMonitor header with version
headers = {"X-LM-User-Agent": self.__version__}
# Set headers
f = open_url(url, headers=headers)
raw = f.read()
resp = json.loads(raw)
if resp["status"] == 403:
self.module.debug("Authentication failed.")
self.fail(msg="Error: " + resp["errmsg"])
else:
return raw
except IOError:
ioe = get_exception()
self.fail(msg="Error: Exception making RPC call to " +
"https://" + self.company + "." + self.lm_url +
"/rpc/" + action + "\nException" + str(ioe))
def get_collectors(self):
"""Returns a JSON object containing a list of
LogicMonitor collectors"""
self.module.debug("Running LogicMonitor.get_collectors...")
self.module.debug("Making RPC call to 'getAgents'")
resp = self.rpc("getAgents", {})
resp_json = json.loads(resp)
if resp_json["status"] is 200:
self.module.debug("RPC call succeeded")
return resp_json["data"]
else:
self.fail(msg=resp)
def get_host_by_hostname(self, hostname, collector):
"""Returns a host object for the host matching the
specified hostname"""
self.module.debug("Running LogicMonitor.get_host_by_hostname...")
self.module.debug("Looking for hostname " + hostname)
self.module.debug("Making RPC call to 'getHosts'")
hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
if collector:
if hostlist_json["status"] == 200:
self.module.debug("RPC call succeeded")
hosts = hostlist_json["data"]["hosts"]
self.module.debug(
"Looking for host matching: hostname " + hostname +
" and collector " + str(collector["id"]))
for host in hosts:
if (host["hostName"] == hostname and
host["agentId"] == collector["id"]):
self.module.debug("Host match found")
return host
self.module.debug("No host match found")
return None
else:
self.module.debug("RPC call failed")
self.module.debug(hostlist_json)
else:
self.module.debug("No collector specified")
return None
def get_host_by_displayname(self, displayname):
"""Returns a host object for the host matching the
specified display name"""
self.module.debug("Running LogicMonitor.get_host_by_displayname...")
self.module.debug("Looking for displayname " + displayname)
self.module.debug("Making RPC call to 'getHost'")
host_json = (json.loads(self.rpc("getHost",
{"displayName": displayname})))
if host_json["status"] == 200:
self.module.debug("RPC call succeeded")
return host_json["data"]
else:
self.module.debug("RPC call failed")
self.module.debug(host_json)
return None
def get_collector_by_description(self, description):
"""Returns a JSON collector object for the collector
matching the specified FQDN (description)"""
self.module.debug(
"Running LogicMonitor.get_collector_by_description..."
)
collector_list = self.get_collectors()
if collector_list is not None:
self.module.debug("Looking for collector with description " +
description)
for collector in collector_list:
if collector["description"] == description:
self.module.debug("Collector match found")
return collector
self.module.debug("No collector match found")
return None
def get_group(self, fullpath):
"""Returns a JSON group object for the group matching the
specified path"""
self.module.debug("Running LogicMonitor.get_group...")
self.module.debug("Making RPC call to getHostGroups")
resp = json.loads(self.rpc("getHostGroups", {}))
if resp["status"] == 200:
self.module.debug("RPC called succeeded")
groups = resp["data"]
self.module.debug("Looking for group matching " + fullpath)
for group in groups:
if group["fullPath"] == fullpath.lstrip('/'):
self.module.debug("Group match found")
return group
self.module.debug("No group match found")
return None
else:
self.module.debug("RPC call failed")
self.module.debug(resp)
return None
def create_group(self, fullpath):
"""Recursively create a path of host groups.
Returns the id of the newly created hostgroup"""
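        # If the group already exists return its id; otherwise create the parent path first (recursively) and add this group under it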
self.module.debug("Running LogicMonitor.create_group...")
res = self.get_group(fullpath)
if res:
self.module.debug("Group " + fullpath + " exists.")
return res["id"]
if fullpath == "/":
self.module.debug("Specified group is root. Doing nothing.")
return 1
else:
self.module.debug("Creating group named " + fullpath)
self.module.debug("System changed")
self.change = True
if self.check_mode:
self.exit(changed=True)
parentpath, name = fullpath.rsplit('/', 1)
parentgroup = self.get_group(parentpath)
parentid = 1
if parentpath == "":
parentid = 1
elif parentgroup:
parentid = parentgroup["id"]
else:
parentid = self.create_group(parentpath)
h = None
# Determine if we're creating a group from host or hostgroup class
if hasattr(self, '_build_host_group_hash'):
h = self._build_host_group_hash(
fullpath,
self.description,
self.properties,
self.alertenable)
h["name"] = name
h["parentId"] = parentid
else:
h = {"name": name,
"parentId": parentid,
"alertEnable": True,
"description": ""}
self.module.debug("Making RPC call to 'addHostGroup'")
resp = json.loads(
self.rpc("addHostGroup", h))
if resp["status"] == 200:
self.module.debug("RPC call succeeded")
return resp["data"]["id"]
elif resp["errmsg"] == "The record already exists":
self.module.debug("The hostgroup already exists")
group = self.get_group(fullpath)
return group["id"]
else:
self.module.debug("RPC call failed")
self.fail(
msg="Error: unable to create new hostgroup \"" + name +
"\".\n" + resp["errmsg"])
def fail(self, msg):
self.module.fail_json(msg=msg, changed=self.change)
def exit(self, changed):
self.module.debug("Changed: " + changed)
self.module.exit_json(changed=changed)
def output_info(self, info):
self.module.debug("Registering properties as Ansible facts")
self.module.exit_json(changed=False, ansible_facts=info)
class Host(LogicMonitor):
def __init__(self, params, module=None):
"""Initializor for the LogicMonitor host object"""
self.change = False
self.params = params
self.collector = None
LogicMonitor.__init__(self, module, **self.params)
self.module.debug("Instantiating Host object")
if self.params["hostname"]:
self.module.debug("Hostname is " + self.params["hostname"])
self.hostname = self.params['hostname']
else:
self.module.debug("No hostname specified. Using " + self.fqdn)
self.hostname = self.fqdn
if self.params["displayname"]:
self.module.debug("Display name is " + self.params["displayname"])
self.displayname = self.params['displayname']
else:
self.module.debug("No display name specified. Using " + self.fqdn)
self.displayname = self.fqdn
        # Attempt to find host information via display name or host name
self.module.debug("Attempting to find host by displayname " +
self.displayname)
info = self.get_host_by_displayname(self.displayname)
if info is not None:
self.module.debug("Host found by displayname")
            # Use the host information to grab the collector description
# if not provided
            if (not self.params.get("collector") and
"agentDescription" in info):
self.module.debug("Setting collector from host response. " +
"Collector " + info["agentDescription"])
self.params["collector"] = info["agentDescription"]
else:
self.module.debug("Host not found by displayname")
# At this point, a valid collector description is required for success
# Check that the description exists or fail
if self.params["collector"]:
self.module.debug("Collector specified is " +
self.params["collector"])
self.collector = (self.get_collector_by_description(
self.params["collector"]))
else:
self.fail(msg="No collector specified.")
# If the host wasn't found via displayname, attempt by hostname
if info is None:
self.module.debug("Attempting to find host by hostname " +
self.hostname)
info = self.get_host_by_hostname(self.hostname, self.collector)
self.info = info
def get_properties(self):
"""Returns a hash of the properties
associated with this LogicMonitor host"""
self.module.debug("Running Host.get_properties...")
if self.info:
self.module.debug("Making RPC call to 'getHostProperties'")
properties_json = (json.loads(self.rpc("getHostProperties",
{'hostId': self.info["id"],
"filterSystemProperties": True})))
if properties_json["status"] == 200:
self.module.debug("RPC call succeeded")
return properties_json["data"]
else:
self.module.debug("Error: there was an issue retrieving the " +
"host properties")
self.module.debug(properties_json["errmsg"])
self.fail(msg=properties_json["status"])
else:
self.module.debug(
"Unable to find LogicMonitor host which matches " +
self.displayname + " (" + self.hostname + ")"
)
return None
def site_facts(self):
"""Output current properties information for the Host"""
self.module.debug("Running Host.site_facts...")
if self.info:
self.module.debug("Host exists")
props = self.get_properties()
self.output_info(props)
else:
self.fail(msg="Error: Host doesn't exit.")
class Hostgroup(LogicMonitor):
def __init__(self, params, module=None):
"""Initializor for the LogicMonitor host object"""
self.change = False
self.params = params
LogicMonitor.__init__(self, module, **self.params)
self.module.debug("Instantiating Hostgroup object")
self.fullpath = self.params["fullpath"]
self.info = self.get_group(self.fullpath)
def get_properties(self, final=False):
"""Returns a hash of the properties
associated with this LogicMonitor host"""
self.module.debug("Running Hostgroup.get_properties...")
if self.info:
self.module.debug("Group found")
self.module.debug("Making RPC call to 'getHostGroupProperties'")
properties_json = json.loads(self.rpc(
"getHostGroupProperties",
{'hostGroupId': self.info["id"],
"finalResult": final}))
if properties_json["status"] == 200:
self.module.debug("RPC call succeeded")
return properties_json["data"]
else:
self.module.debug("RPC call failed")
self.fail(msg=properties_json["status"])
else:
self.module.debug("Group not found")
return None
def site_facts(self):
"""Output current properties information for the Hostgroup"""
self.module.debug("Running Hostgroup.site_facts...")
if self.info:
self.module.debug("Group exists")
props = self.get_properties(True)
self.output_info(props)
else:
self.fail(msg="Error: Group doesn't exit.")
def selector(module):
"""Figure out which object and which actions
to take given the right parameters"""
if module.params["target"] == "host":
target = Host(module.params, module)
target.site_facts()
elif module.params["target"] == "hostgroup":
# Validate target specific required parameters
if module.params["fullpath"] is not None:
target = Hostgroup(module.params, module)
target.site_facts()
else:
module.fail_json(
msg="Parameter 'fullpath' required for target 'hostgroup'")
else:
module.fail_json(
msg="Error: Unexpected target \"" + module.params["target"] +
"\" was specified.")
def main():
TARGETS = [
"host",
"hostgroup"]
module = AnsibleModule(
argument_spec=dict(
target=dict(required=True, default=None, choices=TARGETS),
company=dict(required=True, default=None),
user=dict(required=True, default=None),
password=dict(required=True, default=None, no_log=True),
            collector=dict(required=False, default=None),
hostname=dict(required=False, default=None),
displayname=dict(required=False, default=None),
fullpath=dict(required=False, default=None)
),
supports_check_mode=True
)
if HAS_LIB_JSON is not True:
module.fail_json(msg="Unable to load JSON library")
selector(module)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.urls import open_url
if __name__ == "__main__":
main()
|
luwei0917/awsemmd_script | refs/heads/master | archive/tensorFlow_next_gen.py | 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import argparse
import pandas as pd
import tensorflow as tf
# parser = argparse.ArgumentParser(description="This is my TensorFlow program")
# parser.add_argument("protein", help="The name of the protein")
# args = parser.parse_args()
tf.logging.set_verbosity(tf.logging.INFO)
# tf.logging.set_verbosity(tf.logging.ERROR)
# COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
# "dis", "tax", "ptratio", "medv"]
FEATURES = ["Rama", "Chain", "Chi", "DSSP", "P_AP", "Water", "Burial",
"Helix", "Frag_Mem", "QGO", "VTotal", "Rw", "GDT"]
# FEATURES = ["VTotal", "Rw"]
LABEL = "Qw"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values, shape=[data_set[k].size, 1]) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values, shape=[data_set[LABEL].size, 1])
return feature_cols, labels
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("~/Research/data/tensorFlow/train.csv", skipinitialspace=True)
test_set = pd.read_csv("~/Research/data/tensorFlow/test.csv", skipinitialspace=True)
    # Prediction sets for individual proteins are loaded per name in the loop below
# name = args.protein
# prediction_set = pd.read_csv("~/Research/data/tensorFlow/test.csv", skipinitialspace=True)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
    # Build a fully connected DNN with a single hidden layer of 100 units.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
hidden_units=[100],
model_dir="/tmp/awsem")
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=50000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
# y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
name_list = ["1MBA", "T251", "T0766", "T0784", "T0792", "T0803", "T0815", "T0833"]
for name in name_list:
prediction_set = pd.read_csv("~/Research/data/tensorFlow/{}.csv".format(name), skipinitialspace=True)
y = regressor.predict_scores(input_fn=lambda: input_fn(prediction_set))
predictions = list(y)
with open("/Users/weilu/Research/data/tensorFlow/{}_results.csv".format(name), "w") as f:
f.write("Result\n")
for i in predictions:
f.write(str(i) + "\n")
if __name__ == "__main__":
tf.app.run()
|
gitprouser/appengine-bottle-skeleton | refs/heads/master | lib/bs4/builder/_htmlparser.py | 41 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import HTMLParser
try:
from HTMLParser import HTMLParseError
except ImportError, e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
picklable = True
NAME = HTMLPARSER
features = [NAME, HTML, STRICT]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
kwargs['strict'] = False
if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
kwargs['convert_charrefs'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True,
exclude_encodings=exclude_encodings)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
|
omco/geode | refs/heads/windows_vs2015 | geode/geometry/test_offset.py | 2 | #!/usr/bin/env python
from __future__ import division
from geode.geometry.platonic import *
from geode.vector import *
def test_offset():
random.seed(8123171)
offset = .1
alpha = 1/4
small = 1e-8
def single():
return TriangleSoup([(0,1,2)]),[(0,0,0),(1,0,0),(0,1,0)]
for name,(m,X) in ('single',single()),('tet',tetrahedron_mesh()),('cube',cube_mesh()),('ico',icosahedron_mesh()):
for i in xrange(10):
# Generate a random affine transform, and make it rather singular
if i:
A = random.randn(3,3)
for _ in xrange(2):
A = dot(A,A)
A *= linalg.det(A)**(-1/3)
AX = Matrix(A)*X
else:
AX = asarray(X,dtype=float)
for shell in 0,1:
top = TriangleTopology(m)
if not shell and top.has_boundary():
continue
print('%s : %s'%(name,('volume','shell')[shell]))
# Perform offset
om,oX = (rough_offset_shell if shell else rough_offset_mesh)(top,AX,offset)
assert om.is_manifold()
# Verify that random points on the surface have nice distances
ns = 100
w = random.rand(ns,3)
w /= w.sum(axis=-1)[:,None]
sX = (w[:,:,None]*oX[om.elements()[random.randint(0,om.n_faces,size=ns)]]).sum(axis=1)
phi,_,_,_ = surface_levelset(ParticleTree(sX),SimplexTree(m,AX),inf,not shell)
assert all(alpha*offset <= phi+small)
assert all(phi <= offset+small)
if __name__=='__main__':
test_offset()
|
MarvinCorro/linux-cmps107 | refs/heads/master | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
novic/AniDroid-JB-N7000-Kernel | refs/heads/master | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
rossnomann/playlog | refs/heads/master | tests/src/tests/library/test_overview.py | 1 | from datetime import date, timedelta
from tests.client import get
from tests.fixtures import fixture, refresh_db
def get_data():
return get('overview')
AVATAR_SRC = 'http://www.gravatar.com/avatar/9a22d09f92d50fa3d2a16766d0ba52f8?s=64'
USER_NAME = 'Fabien Potencier'
def test_without_data():
refresh_db()
today = date.today().isoformat()
month_ago = (date.today() - timedelta(days=30)).isoformat()
data = get_data()
current_streak = data.pop('current_streak')
assert today in current_streak.pop('start_date')
assert today in current_streak.pop('end_date')
assert current_streak == {
'days': 0.0,
'plays': 0
}
assert data.pop('longest_streak') is None
assert data.pop('biggest_day') is None
recently_added = data.pop('recently_added')
assert month_ago in recently_added.pop('start_date')
assert today in recently_added.pop('end_date')
assert recently_added == {
'artists': 0,
'albums': 0,
'tracks': 0,
}
assert data.pop('user') == {
'avatar_src': AVATAR_SRC,
'name': USER_NAME,
'listening_since': None
}
assert data.pop('nowplay') is None
assert data.pop('counters') == {
'artists': 0,
'albums': 0,
'tracks': 0,
'plays': 0
}
assert data.pop('recent_tracks') == []
assert not data
@fixture('overview')
def test_with_data():
today = date.today().isoformat()
month_ago = (date.today() - timedelta(days=30)).isoformat()
data = get_data()
current_streak = data.pop('current_streak')
assert today in current_streak.pop('start_date')
assert today in current_streak.pop('end_date')
assert current_streak == {
'days': 0.0,
'plays': 0
}
assert data.pop('longest_streak') == {
'start_date': '2017-01-02T00:00:00',
'end_date': '2017-01-04T00:00:00',
'days': 2,
'plays': 46
}
assert data.pop('biggest_day') == {
'day': '2017-01-04T00:00:00',
'plays': 36
}
recently_added = data.pop('recently_added')
assert month_ago in recently_added.pop('start_date')
assert today in recently_added.pop('end_date')
assert recently_added == {
'artists': 0,
'albums': 0,
'tracks': 0,
}
assert data.pop('user') == {
'avatar_src': AVATAR_SRC,
'name': USER_NAME,
'listening_since': 2017
}
assert data.pop('nowplay') is None
assert data.pop('counters') == {
'artists': 1,
'albums': 1,
'tracks': 10,
'plays': 46
}
recent_tracks = data.pop('recent_tracks')
assert len(recent_tracks) == 15
assert recent_tracks[0] == {
'artist': 'Analepsy',
'artist_id': 1,
'album': 'Atrocities From Beyond',
'album_id': 1,
'track': 'Omen Of Return (Instrumental)',
'track_id': 10,
'date': '2017-01-04T11:29:13.497261'
}
assert not data
def test_with_nowplay(set_current_track):
refresh_db()
track = {
'artist': 'Analepsy',
'album': 'Atrocities From Beyond',
'title': 'Eons In Vacuum'
}
set_current_track(**track)
data = get_data()
assert data['nowplay'] == track
|
ArcherCraftStore/ArcherVMPeridot | refs/heads/master | Python/Lib/test/test_smtpnet.py | 12 | import unittest
from test import support
import smtplib
import socket
ssl = support.import_module("ssl")
support.requires("network")
def check_ssl_verifiy(host, port):
context = ssl.create_default_context()
with socket.create_connection((host, port)) as sock:
try:
sock = context.wrap_socket(sock, server_hostname=host)
except Exception:
return False
else:
sock.close()
return True
class SmtpTest(unittest.TestCase):
testServer = 'smtp.gmail.com'
remotePort = 25
def test_connect_starttls(self):
support.get_attribute(smtplib, 'SMTP_SSL')
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with support.transient_internet(self.testServer):
server = smtplib.SMTP(self.testServer, self.remotePort)
try:
server.starttls(context=context)
except smtplib.SMTPException as e:
if e.args[0] == 'STARTTLS extension not supported by server.':
unittest.skip(e.args[0])
else:
raise
server.ehlo()
server.quit()
class SmtpSSLTest(unittest.TestCase):
testServer = 'smtp.gmail.com'
remotePort = 465
can_verify = check_ssl_verifiy(testServer, remotePort)
def test_connect(self):
support.get_attribute(smtplib, 'SMTP_SSL')
with support.transient_internet(self.testServer):
server = smtplib.SMTP_SSL(self.testServer, self.remotePort)
server.ehlo()
server.quit()
def test_connect_default_port(self):
support.get_attribute(smtplib, 'SMTP_SSL')
with support.transient_internet(self.testServer):
server = smtplib.SMTP_SSL(self.testServer)
server.ehlo()
server.quit()
def test_connect_using_sslcontext(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
support.get_attribute(smtplib, 'SMTP_SSL')
with support.transient_internet(self.testServer):
server = smtplib.SMTP_SSL(self.testServer, self.remotePort, context=context)
server.ehlo()
server.quit()
@unittest.skipUnless(can_verify, "SSL certificate can't be verified")
def test_connect_using_sslcontext_verified(self):
support.get_attribute(smtplib, 'SMTP_SSL')
context = ssl.create_default_context()
with support.transient_internet(self.testServer):
server = smtplib.SMTP_SSL(self.testServer, self.remotePort, context=context)
server.ehlo()
server.quit()
def test_main():
support.run_unittest(SmtpTest, SmtpSSLTest)
if __name__ == "__main__":
test_main()
|
ortoo/schooldata | refs/heads/master | dfe_data.py | 1 | import requests
import csv
import io
from datetime import datetime
DFE_DATA_URL = 'https://www.compare-school-performance.service.gov.uk/download-school-data'
def get_raw_dfe_data(school):
payload = {
'urn': school.urn
}
req = requests.get(DFE_DATA_URL, params=payload)
return req.text
def parse_dfe_data(data):
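    # The DfE CSV has one row per variable; pick out the Ofsted inspection date and outcome rows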
out = {}
csvreader = csv.DictReader(io.StringIO(data))
for row in csvreader:
if row['Variable'] == 'OFSTED_INSPDATE':
out['lastInspection'] = datetime.strptime(row['Value'], '%d/%m/%Y')
elif row['Variable'] == 'OFSTED_INSPOUTCOME':
out['outcome'] = int(row['Value'])
return out
def update_dfe_data(school):
rawdata = get_raw_dfe_data(school)
data = parse_dfe_data(rawdata)
data['reportUrl'] = 'http://reports.ofsted.gov.uk/inspection-reports/find-inspection-report/provider/ELS/' + school.urn
setattr(school, 'ofsted', data)
return school
|
xzturn/tensorflow | refs/heads/master | tensorflow/python/autograph/pyct/static_analysis/activity_py3_test.py | 8 | # python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module, that only run in Python 3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct.static_analysis import activity_test
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
NodeAnno = annos.NodeAnno
class ActivityAnalyzerTest(activity_test.ActivityAnalyzerTestBase):
"""Tests which can only run in Python 3."""
def test_nonlocal_symbol(self):
nonlocal_a = 3
nonlocal_b = 13
def test_fn(c):
nonlocal nonlocal_a
nonlocal nonlocal_b
nonlocal_a = nonlocal_b + c
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('nonlocal_b', 'c'), ('nonlocal_a',))
def test_annotated_assign(self):
b = int
def test_fn(c):
a: b = c
return a
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('b', 'c', 'a'), ('a',))
ann_assign_scope = anno.getanno(fn_node.body[0], anno.Static.SCOPE)
self.assertScopeIs(ann_assign_scope, ('b', 'c'), ('a',))
if __name__ == '__main__':
test.main()
|
harshilasu/GraphicMelon | refs/heads/master | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/sts/test_cert_verification.py | 126 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.sts
class STSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
sts = True
regions = boto.sts.regions()
def sample_service_call(self, conn):
conn.get_session_token()
|
rh-lab-q/rpg | refs/heads/master | rpg/__init__.py | 3 | import logging
from pathlib import Path
from rpg.plugin_engine import PluginEngine
from rpg.plugins.misc.files_to_pkgs import FilesToPkgsPlugin
from rpg.project_builder import ProjectBuilder
from copr.client import CoprClient
from rpg.package_builder import PackageBuilder, BuildException
from rpg.spec import Spec
from rpg.command import Command
from rpg.conf import Conf
from rpg.utils import path_to_str
from os.path import isdir, isfile
from os import makedirs, geteuid
import shutil
from shutil import rmtree
from tempfile import gettempdir, mkdtemp
class Base(object):
"""Base class that is controlled by RPM GUI
:Example:
>>> from rpg import Base
>>> base = Base()
>>> base.sack = base.load_dnf_sack()
>>> base.load_plugins()
>>> base.load_project_from_url("https://github.com/example/ex_repo")
>>> base.spec.Name = "Example"
>>> base.spec.Version = "0.6.11"
>>> base.spec.Release = "1%{?snapshot}%{?dist}"
>>> base.spec.License = "GPLv2"
>>> base.spec.Summary = "Example ..."
>>> base.spec.description = ("Example ...")
>>> base.spec.URL = "https://github.com/example/ex_repo"
>>> base.target_arch = "x86_64"
>>> base.target_distro = "fedora-22"
>>> base.fetch_repos(base.target_distro, base.target_arch)
>>> base.run_extracted_source_analysis()
>>> base.run_patched_source_analysis()
>>> base.build_project()
>>> base.run_compiled_source_analysis()
>>> base.install_project()
>>> base.run_installed_source_analysis()
>>> base.build_srpm()
>>> base.build_rpm_recover(self.base.target_distro, self.base.target_arch)
"""
def __init__(self):
self.conf = Conf()
self._setup_logging()
self._project_builder = ProjectBuilder()
self.spec = Spec()
self.sack = None
self._package_builder = PackageBuilder()
def load_dnf_sack(self):
logging.info('DNF sack is loading')
import dnf
with dnf.Base() as self._dnf_base:
self._dnf_base.conf.releasever = dnf.rpm.detect_releasever(
self._dnf_base.conf.installroot)
self._dnf_base.read_all_repos()
self._dnf_base.fill_sack()
return self._dnf_base.sack
def _setup_logging(self):
if geteuid() == 0:
log_dir = "/var/log/rpg/"
else:
log_dir = "/var/tmp/rpg/"
if not isdir(log_dir):
makedirs(log_dir)
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s] {%(pathname)s:%(lineno)d} '
'%(levelname)s - %(message)s',
handlers=[logging.FileHandler(log_dir + "rpg.log"),
logging.StreamHandler()],
datefmt='%H:%M:%S')
def load_plugins(self):
""" This method sets up plugin engine and loads them """
self._plugin_engine = PluginEngine(self.spec, self.sack)
self._plugin_engine.load_plugins(
Path('rpg/plugins'),
self.conf.exclude)
for directory in self.conf.directories:
self._plugin_engine.load_plugins(
Path(directory),
self.conf.exclude)
def create_archive(self):
""" Creates archive (archvie_path) from Source folder """
self.spec.Source = self.spec.Name + "-" + self.spec.Version + ".tar.gz"
_tar = Command("tar zcf " + path_to_str(self.archive_path) +
" -C " + path_to_str(self.extracted_dir) +
" . --transform='s/^\./" +
self.spec.Name + "-" + self.spec.Version + "/g'")
_tar.execute()
logging.debug(str(_tar))
@property
def base_dir(self):
""" Returns path where compiled, extracted, installed
directories are """
try:
return Path("/tmp/rpg-%s" % self._hash)
except AttributeError:
msg = "`load_project_from_url` method needs to be called first"
raise RuntimeError(msg)
@property
def extracted_dir(self):
return self.base_dir / "extracted"
@property
def compiled_dir(self):
return self.base_dir / "compiled"
@property
def installed_dir(self):
return self.base_dir / "installed"
@property
def project_name(self):
return self.spec.Name
@property
def spec_path(self):
return self.base_dir / (self.project_name + ".spec")
@property
def archive_path(self):
return self.base_dir / self.spec.Source
@property
def srpm_path(self):
""" Returns path to SRPM only, if it is created.
You have to build srpm first. """
try:
return next(self.base_dir.glob(self.project_name + "*.src.rpm"))
except StopIteration:
raise RuntimeError(
"Can't find '{}'! You need to call build_srpm first."
.format(str(self.base_dir /
(self.project_name + "*.src.rpm"))))
@property
def rpm_path(self):
""" This is the same as it is in srpm_path. But this returns
            list of rpms - there may be several rpm packages like
debuginfo, binary rpm and so on. """
try:
_ret = [
_path
for _path in self.base_dir.glob(self.project_name + "*.rpm")
if not str(_path).endswith(".src.rpm")
]
if not _ret:
raise StopIteration
return _ret
except StopIteration:
raise RuntimeError(
"Can't find '{}'! You need to call build_rpm first."
.format(str(self.base_dir / (self.project_name + "*.rpm"))))
def load_project_from_url(self, path):
"""executed in background after dir/tarball/SRPM selection"""
if not isdir(str(path)) and not isfile(str(path)):
temp = Path(gettempdir()) / "rpg-download"
self._plugin_engine.execute_download(path, temp)
path = temp
self.source_path = path = Path(path)
self._hash = self._compute_checksum(path)
self._setup_workspace()
if isdir(str(path)):
Command("cp -pr " + str(path) + " " + str(self.extracted_dir))\
.execute()
else:
self._plugin_engine.execute_extraction(path, self.extracted_dir)
direc = [str(f) for f in self.extracted_dir.iterdir()]
if len(direc) == 1 and isdir(direc[0]):
direc = direc[0]
temp = mkdtemp()
Command('mv ' + direc + '/* ' + temp +
' && rm -rf ' + direc +
' && mv ' + temp + '/* ' + str(self.extracted_dir))\
.execute()
rmtree(temp)
logging.debug(str(direc))
self.spec.prep = Command("%autosetup")
def run_extracted_source_analysis(self):
"""executed in background after dir/tarball/SRPM selection"""
self._plugin_engine.execute_phase(PluginEngine.phases[0],
self.extracted_dir)
def run_patched_source_analysis(self):
"""executed in background after patches are applied"""
self._plugin_engine.execute_phase(PluginEngine.phases[1],
self.extracted_dir)
def run_compiled_source_analysis(self):
"""executed in background after patches are applied"""
self._plugin_engine.execute_phase(PluginEngine.phases[2],
self.compiled_dir)
def install_project(self):
"""executed in background after filled requires screen"""
self._project_builder.install(self.compiled_dir,
self.installed_dir,
self.spec.install)
def run_installed_source_analysis(self):
"""executed in background after successful project build"""
self._plugin_engine.execute_phase(PluginEngine.phases[3],
self.installed_dir)
def write_spec(self):
""" Creates spec file or rewrites old one. """
with open(path_to_str(self.spec_path), 'w') as spec_file:
spec_file.write(str(self.spec))
def build_srpm(self):
""" Builds srpm into base directory. """
if not self.spec.Source or not self.archive_path.exists():
self.create_archive()
self.write_spec()
self._package_builder.build_srpm(
self.spec_path, self.archive_path, self.base_dir)
def build_rpm(self, target_distro, target_arch):
""" Build rpm from srpm. If srpm does not exists,
it will be created. """
try:
self.srpm_path
except RuntimeError:
self.build_srpm()
self._package_builder.build_rpm(
str(self.srpm_path), target_distro, target_arch, self.base_dir)
def build_rpm_recover(self, distro, arch):
""" Repeatedly build rpm with mock and finds all build errors.
May raise RuntimeError on failed recover. """
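        # Rebuild in mock repeatedly, letting the plugin engine react to the captured build errors; stop once a build succeeds or nothing can be recovered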
def build():
self.build_srpm()
self.build_rpm(distro, arch)
def analyse():
_files_to_pkgs.installed(self.base_dir, self.spec, self.sack)
self.write_spec()
_files_to_pkgs = FilesToPkgsPlugin()
analyse()
while True:
try:
build()
except BuildException as be:
if not self._plugin_engine.execute_mock_recover(be.errors):
if be.return_code:
raise RuntimeError(
"Build failed! See logs in '{}'"
.format(self._package_builder.mock_logs))
break
Command("rm -rf {}".format(path_to_str(self.spec_path))).execute()
analyse()
def fetch_repos(self, dist, arch):
""" Initialize mock - should be called before build_rpm_recover """
self._package_builder.fetch_repos(dist, arch)
def build_project(self):
""" Executed in background after filled requires screen """
self._project_builder.build(self.extracted_dir,
self.compiled_dir,
self.spec.build)
def copr_set_config(self, username, login, token):
""" Logs into copr with username, login and token.
This has to be called before copr_create_project and copr_build
To sign up on copr go here: http://copr.fedoraproject.org """
self.cl = CoprClient(
username, login, token, copr_url="http://copr.fedoraproject.org")
def copr_create_project(self, name, chroots, desc, intro):
""" Creates metadata about project - won't build until
            copr_build is called """
self.cl.create_project(
name, chroots=chroots, description=desc, instructions=intro)
def copr_build(self, name, url):
""" Builds project on fedora copr server """
self.cl.create_new_build(name, pkgs=[url, ])
@staticmethod
def _compute_checksum(sources):
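        # For directories, sha1-sum every file (sorted, NUL-separated) and hash the combined digest list; for a single file, hash its contents. Only the first 7 hex chars are kept as the workspace id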
if sources.is_dir():
cmd = "find %s -type f -print0 | sort -z | xargs " \
"-0 sha1sum | sha1sum" % path_to_str(sources.resolve())
else:
cmd = "cat %s | sha1sum" % path_to_str(sources.resolve())
logging.error(str(cmd))
return Command([cmd]).execute()[:7]
@property
def all_dirs(self):
""" Returns Extracted, Compiled and Installed direcotry paths. """
return [
self.extracted_dir,
self.compiled_dir,
self.installed_dir
]
def _setup_workspace(self):
"""make sure all directories used later will exist"""
shutil.rmtree(str(self.base_dir), True)
for d in self.all_dirs:
d.mkdir(parents=True)
# predictor methods are used for autocompletion of the field,
# every guess_* method return list of strings matched ordered
# by their rank
def guess_name(self):
""" Returns guessed name from source path """
return ""
def guess_provide(self):
""" returns list of all known provides """
provides = set()
for pkg in self.sack.query():
provides.update(pkg.provides)
return sorted(provides)
def guess_changelog_data(self):
""" returns list of tuples (author, email) from git """
pass
def guess_dependency(self):
""" returns guess_provide() + all package names from repos """
names = map(lambda pkg: pkg.name, self.sack.query())
return sorted(set(names).union(set(self.guess_provide())))
def guess_license(self):
""" returns list of all known licenses """
licenses = set()
for pkg in self.sack.query():
licenses.update(pkg.license)
return sorted(licenses)
|
jfrank1500/minicurso_2015 | refs/heads/master | bin/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
tneumann/splocs | refs/heads/master | import_ply.py | 1 | import argparse
from inout import convert_sequence_to_hdf5, load_ply
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Import a sequence of PLY files and convert to HDF5 file format')
parser.add_argument('input_filename_pattern',
help='Input filename pattern, e.g. "/tmp/file_*.ply", '
'notice that you may have to escape the pattern with " to prevent shell expansion')
parser.add_argument('output_filename',
help='Output file path, e.g. /tmp/face.hdf5')
args = parser.parse_args()
convert_sequence_to_hdf5(
args.input_filename_pattern,
load_ply,
args.output_filename)
|
ychen820/microblog | refs/heads/master | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/utils/timezone.py | 81 | """Timezone helper functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
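        # Round-trip the datetime through mktime()/localtime() and read tm_isdst to decide whether DST is in effect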
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
# If you change settings.TIME_ZONE in tests, reset _localtime to None.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
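# Illustrative usage of ``override`` (a sketch, not part of the original
# module; the zone name is an arbitrary example and the import path assumes
# this module lives at django.utils.timezone, with pytz installed):
#
#     from django.utils import timezone
#     with timezone.override("Europe/Paris"):
#         pass  # code here sees Europe/Paris as the current time zone
#     # on exit, the previously active time zone (or the default) is restored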
# Templates
def localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
if (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True)):
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
# Utilities
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
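# Illustrative round trip with make_aware/make_naive (a sketch, not part of
# the original module; the zone name is an arbitrary example and assumes pytz
# is installed):
#
#     tz = pytz.timezone("Europe/Riga")
#     aware = make_aware(datetime(2013, 1, 1, 12, 0), tz)  # naive -> aware
#     naive = make_naive(aware, tz)                        # back to naive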
|
michaelgallacher/intellij-community | refs/heads/master | python/testData/findUsages/ClassUsages.py | 83 | class C<caret>ow:
def __init__(self):
pass
c = Cow()
|
SimonSapin/pycairo | refs/heads/master | examples/gtk/cairo-demo.py | 14 | #!/usr/bin/env python
"""Based on cairo-demo/X11/cairo-demo.c
"""
import cairo
import gtk
SIZE = 30
def triangle(ctx):
ctx.move_to(SIZE, 0)
ctx.rel_line_to(SIZE, 2*SIZE)
ctx.rel_line_to(-2*SIZE, 0)
ctx.close_path()
def square(ctx):
ctx.move_to(0, 0)
ctx.rel_line_to(2*SIZE, 0)
ctx.rel_line_to(0, 2*SIZE)
ctx.rel_line_to(-2*SIZE, 0)
ctx.close_path()
def bowtie(ctx):
ctx.move_to(0, 0)
ctx.rel_line_to(2*SIZE, 2*SIZE)
ctx.rel_line_to(-2*SIZE, 0)
ctx.rel_line_to(2*SIZE, -2*SIZE)
ctx.close_path()
def inf(ctx):
ctx.move_to(0, SIZE)
ctx.rel_curve_to(0,SIZE, SIZE,SIZE, 2*SIZE,0)
ctx.rel_curve_to(SIZE,-SIZE, 2*SIZE,-SIZE, 2*SIZE,0)
ctx.rel_curve_to(0,SIZE, -SIZE,SIZE, -2*SIZE,0)
ctx.rel_curve_to(-SIZE,-SIZE, -2*SIZE,-SIZE, -2*SIZE,0)
ctx.close_path()
def draw_shapes(ctx, x, y, fill):
ctx.save()
ctx.new_path()
ctx.translate(x+SIZE, y+SIZE)
bowtie(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.new_path()
ctx.translate(3*SIZE, 0)
square(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.new_path()
ctx.translate(3*SIZE, 0)
triangle(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.new_path()
ctx.translate(3*SIZE, 0)
inf(ctx)
if fill:
ctx.fill()
else:
ctx.stroke()
ctx.restore()
def fill_shapes(ctx, x, y):
draw_shapes(ctx, x, y, True)
def stroke_shapes(ctx, x, y):
draw_shapes(ctx, x, y, False)
def expose (da, event):
ctx = da.window.cairo_create()
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(SIZE / 4)
ctx.set_tolerance(0.1)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
ctx.set_dash([SIZE/4.0, SIZE/4.0], 0)
stroke_shapes(ctx, 0, 0)
ctx.set_dash([], 0)
stroke_shapes(ctx, 0, 3*SIZE)
ctx.set_line_join(cairo.LINE_JOIN_BEVEL)
stroke_shapes(ctx, 0, 6*SIZE)
ctx.set_line_join(cairo.LINE_JOIN_MITER)
stroke_shapes(ctx, 0, 9*SIZE)
fill_shapes(ctx, 0, 12*SIZE)
ctx.set_line_join(cairo.LINE_JOIN_BEVEL)
fill_shapes(ctx, 0, 15*SIZE)
ctx.set_source_rgb(1,0,0)
stroke_shapes(ctx, 0, 15*SIZE)
def main():
win = gtk.Window()
win.connect('destroy', gtk.main_quit)
win.set_default_size(450, 550)
drawingarea = gtk.DrawingArea()
win.add(drawingarea)
drawingarea.connect('expose_event', expose)
win.show_all()
gtk.main()
if __name__ == '__main__':
main()
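# Usage sketch (assumes Python 2 with PyGTK and pycairo available):
#     python cairo-demo.py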
|
eayunstack/fuel-ostf | refs/heads/master | fuel_health/cleanup.py | 1 | #!/usr/bin/env python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import time
path = os.getcwd()
sys.path.append(path)
import logging
import requests
import traceback
from fuel_health import exceptions
import fuel_health.nmanager
LOG = logging.getLogger(__name__)
class CleanUpClientManager(fuel_health.nmanager.OfficialClientManager):
"""Manager that provides access to the official python clients for
calling various OpenStack APIs.
"""
def wait_for_server_termination(self, server, ignore_error=False):
"""Waits for server to reach termination."""
start_time = int(time.time())
while True:
try:
self._get_compute_client().servers.get(server)
except exceptions.NotFound:
return
server_status = server.status
if server_status == 'ERROR' and not ignore_error:
raise exceptions.BuildErrorException(server_id=server.id)
if int(time.time()) - start_time >= self.build_timeout:
raise exceptions.TimeoutException
time.sleep(self.build_interval)
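    # Illustrative usage (a sketch mirroring what cleanup() below does):
    #     manager = CleanUpClientManager()
    #     manager._get_compute_client().servers.delete(server.id)
    #     manager.wait_for_server_termination(server)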
def cleanup(cluster_deployment_info):
"""Function performs cleaning up for current cluster.
Because clusters can be deployed in different way
function uses cluster_deployment_info argument which
contains list of deployment tags of needed cluster.
This approach that consists in using one cleanup function
for all possible testsets is not so good because of
constant checking of component presence in deployment info.
More better way is to create separate functions for each
set of tests so refactoring of this chunk of code is higly
appreciated.
"""
manager = CleanUpClientManager()
if 'sahara' in cluster_deployment_info:
try:
sahara_client = manager._get_sahara_client()
if sahara_client is not None:
_delete_it(client=sahara_client.clusters,
log_message='Start sahara cluster deletion',
name='ostf-test-', delete_type='id')
_delete_it(client=sahara_client.cluster_templates,
log_message='Start sahara cluster'
' template deletion',
delete_type='id')
_delete_it(client=sahara_client.node_group_templates,
log_message='Start sahara node'
' group template deletion',
delete_type='id')
except Exception:
LOG.warning(traceback.format_exc())
if 'murano' in cluster_deployment_info:
try:
murano_client = manager._get_murano_client()
compute_client = manager._get_compute_client()
if murano_client is not None:
endpoint = manager.config.murano.api_url + '/v1/'
headers = {'X-Auth-Token': murano_client.auth_token,
'content-type': 'application/json'}
environments = requests.get(endpoint + 'environments',
headers=headers).json()
for e in environments["environments"]:
if e['name'].startswith('ostf_test-'):
try:
LOG.info('Start environment deletion.')
requests.delete('{0}environments/{1}'.format(
endpoint, e['id']), headers=headers)
except Exception:
LOG.warning('Failed to delete murano environment')
LOG.debug(traceback.format_exc())
if compute_client is not None:
flavors = compute_client.flavors.list()
for flavor in flavors:
if 'ostf_test_Murano' in flavor.name:
try:
LOG.info('Start flavor deletion.')
compute_client.flavors.delete(flavor.id)
except Exception:
LOG.warning('Failed to delete flavor')
LOG.debug(traceback.format_exc())
except Exception:
LOG.warning(traceback.format_exc())
if 'ceilometer' in cluster_deployment_info:
try:
ceilometer_client = manager._get_ceilometer_client()
if ceilometer_client is not None:
alarms = ceilometer_client.alarms.list()
for a in alarms:
if a.name.startswith('ost1_test-'):
try:
LOG.info('Start alarms deletion.')
ceilometer_client.alarms.delete(a.id)
except Exception as exc:
LOG.debug(exc)
except Exception as exc:
LOG.warning('Something wrong with ceilometer client. '
'Exception: {0}'.format(exc))
if 'heat' in cluster_deployment_info:
try:
heat_client = manager._get_heat_client()
if heat_client is not None:
stacks = heat_client.stacks.list()
for s in stacks:
if s.stack_name.startswith('ost1_test-'):
try:
LOG.info('Start stacks deletion.')
heat_client.stacks.delete(s.id)
except Exception:
LOG.debug(traceback.format_exc())
except Exception:
LOG.warning(traceback.format_exc())
if 'ironic' in cluster_deployment_info:
try:
ironic_client = manager._get_ironic_client()
if ironic_client is not None:
nodes = ironic_client.node.list()
for n in nodes:
if "NodeTest" in ironic_client.node.extra.items():
try:
LOG.info('Start nodes deletion.')
ironic_client.node.delete(n.uuid)
except Exception as exc:
LOG.debug(exc)
except Exception as exc:
LOG.warning('Something wrong with ironic client. '
'Exception: {0}'.format(exc))
instances_id = []
servers = manager._get_compute_client().servers.list()
floating_ips = manager._get_compute_client().floating_ips.list()
if servers:
for s in servers:
if s.name.startswith('ost1_test-'):
instances_id.append(s.id)
for f in floating_ips:
if f.instance_id in instances_id:
try:
LOG.info('Delete floating ip {0}'.format(f.ip))
manager._get_compute_client().floating_ips.delete(
f.id)
except Exception:
LOG.debug(traceback.format_exc())
try:
LOG.info('Delete server with name {0}'.format(s.name))
manager._get_compute_client().servers.delete(s.id)
except Exception:
LOG.debug(traceback.format_exc())
else:
LOG.info('No servers found')
for s in servers:
try:
LOG.info('Wait for server terminations')
manager.wait_for_server_termination(s)
except Exception:
LOG.debug(traceback.format_exc())
_delete_it(manager._get_compute_client().keypairs,
'Start keypair deletion')
_delete_it(manager._get_identity_client().users, 'Start deletion of users')
_delete_it(manager._get_identity_client().tenants, 'Start tenant deletion')
roles = manager._get_identity_client().roles.list()
if roles:
_delete_it(manager._get_identity_client().roles,
'Start roles deletion')
else:
LOG.info('no roles')
_delete_it(manager._get_compute_client().images, 'Start images deletion')
_delete_it(manager._get_volume_client().volumes, 'Start volumes deletion')
_delete_it(manager._get_compute_client().flavors, 'start flavors deletion')
_delete_it(manager._get_volume_client().volume_types,
'start deletion of volume types')
_delete_it(manager._get_compute_client().security_groups,
'Start deletion of security groups', delete_type='id')
def _delete_it(client, log_message, name='ost1_test-', delete_type='name'):
try:
for item in client.list():
try:
if item.name.startswith(name):
try:
LOG.info(log_message)
if delete_type == 'name':
client.delete(item)
else:
client.delete(item.id)
except Exception:
LOG.debug(traceback.format_exc())
except AttributeError:
if item.display_name.startswith(name):
client.delete(item)
except Exception:
LOG.warning(traceback.format_exc())
if __name__ == "__main__":
cleanup()
|
danielbruns-wf/rockstar | refs/heads/master | examples/dart_rockstar.py | 22 | from rockstar import RockStar
dart_code = "main() { print('Hello Word'); }"
rock_it_bro = RockStar(days=400, file_name='helloWorld.dart', code=dart_code)
rock_it_bro.make_me_a_rockstar()
|
mne-tools/mne-tools.github.io | refs/heads/main | 0.15/_downloads/plot_mne_inverse_psi_visual.py | 1 | """
=====================================================================
Compute Phase Slope Index (PSI) in source space for a visual stimulus
=====================================================================
This example demonstrates how the Phase Slope Index (PSI) [1]_ can be computed
in source space based on single trial dSPM source estimates. In addition,
the example shows advanced usage of the connectivity estimation routines
by first extracting a label time course for each epoch and then combining
the label time course with the single trial source estimates to compute the
connectivity.
The result clearly shows how the activity in the visual label precedes more
widespread activity (a positive PSI means the label time course is leading).
References
----------
.. [1] Nolte et al. "Robustly Estimating the Flow Direction of Information in
Complex Physical Systems", Physical Review Letters, vol. 100, no. 23,
pp. 1-4, Jun. 2008.
"""
# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs
from mne.connectivity import seed_target_indices, phase_slope_index
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_label = data_path + '/MEG/sample/labels/Vis-lh.label'
event_id, tmin, tmax = 4, -0.2, 0.3
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute the inverse solution for each epoch. The single-trial estimates are
# consumed twice (once by extract_label_time_course and once by
# phase_slope_index), and a generator can only be iterated once, so
# apply_inverse_epochs is simply called a second time below with
# "return_generator=True" instead of keeping all estimates in memory.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Now, we generate seed time series by averaging the activity in the left
# visual cortex
label = mne.read_label(fname_label)
src = inverse_operator['src'] # the source space used
seed_ts = mne.extract_label_time_course(stcs, label, src, mode='mean_flip',
verbose='error')
# Combine the seed time course with the source estimates. There will be a total
# of 7500 signals:
# index 0: time course extracted from label
# index 1..7499: dSPM source space time courses
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
comb_ts = list(zip(seed_ts, stcs))
# Construct indices to estimate connectivity between the label time course
# and all source space time courses
vertices = [src[i]['vertno'] for i in range(2)]
n_signals_tot = 1 + len(vertices[0]) + len(vertices[1])
indices = seed_target_indices([0], np.arange(1, n_signals_tot))
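# For illustration (a small assumed example, not part of the original script):
# seed_target_indices([0], [1, 2, 3]) returns (array([0, 0, 0]),
# array([1, 2, 3])), i.e. one (seed, target) index pair per target signal.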
# Compute the PSI in the frequency range 8Hz..30Hz. We exclude the baseline
# period from the connectivity estimation
fmin = 8.
fmax = 30.
tmin_con = 0.
sfreq = raw.info['sfreq'] # the sampling frequency
psi, freqs, times, n_epochs, _ = phase_slope_index(
comb_ts, mode='multitaper', indices=indices, sfreq=sfreq,
fmin=fmin, fmax=fmax, tmin=tmin_con)
# Generate a SourceEstimate with the PSI. This is simple since we used a single
# seed (inspect the indices variable to see how the PSI scores are arranged in
# the output)
psi_stc = mne.SourceEstimate(psi, vertices=vertices, tmin=0, tstep=1,
subject='sample')
# Now we can visualize the PSI using the plot method. We use a custom colormap
# to show signed values
v_max = np.max(np.abs(psi))
brain = psi_stc.plot(surface='inflated', hemi='lh',
time_label='Phase Slope Index (PSI)',
subjects_dir=subjects_dir,
clim=dict(kind='percent', pos_lims=(95, 97.5, 100)))
brain.show_view('medial')
brain.add_label(fname_label, color='green', alpha=0.7)
|
embest-tech/rowboat-external-wpa_supplicant_8 | refs/heads/TIOP-rowboat-ics | wpa_supplicant/examples/wpas-dbus-new-wps.py | 114 | #!/usr/bin/python
import dbus
import sys, os
import time
import gobject
from dbus.mainloop.glib import DBusGMainLoop
WPAS_DBUS_SERVICE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_INTERFACE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_OPATH = "/fi/w1/wpa_supplicant1"
WPAS_DBUS_INTERFACES_INTERFACE = "fi.w1.wpa_supplicant1.Interface"
WPAS_DBUS_WPS_INTERFACE = "fi.w1.wpa_supplicant1.Interface.WPS"
def propertiesChanged(properties):
if properties.has_key("State"):
print "PropertiesChanged: State: %s" % (properties["State"])
def scanDone(success):
print "Scan done: success=%s" % success
def bssAdded(bss, properties):
print "BSS added: %s" % (bss)
def bssRemoved(bss):
print "BSS removed: %s" % (bss)
def wpsEvent(name, args):
print "WPS event: %s" % (name)
print args
def credentials(cred):
print "WPS credentials: %s" % (cred)
def main():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
global bus
bus = dbus.SystemBus()
wpas_obj = bus.get_object(WPAS_DBUS_SERVICE, WPAS_DBUS_OPATH)
if len(sys.argv) != 2:
print "Missing ifname argument"
os._exit(1)
wpas = dbus.Interface(wpas_obj, WPAS_DBUS_INTERFACE)
bus.add_signal_receiver(scanDone,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="ScanDone")
bus.add_signal_receiver(bssAdded,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSAdded")
bus.add_signal_receiver(bssRemoved,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSRemoved")
bus.add_signal_receiver(propertiesChanged,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="PropertiesChanged")
bus.add_signal_receiver(wpsEvent,
dbus_interface=WPAS_DBUS_WPS_INTERFACE,
signal_name="Event")
bus.add_signal_receiver(credentials,
dbus_interface=WPAS_DBUS_WPS_INTERFACE,
signal_name="Credentials")
ifname = sys.argv[1]
path = wpas.GetInterface(ifname)
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
if_obj.Set(WPAS_DBUS_WPS_INTERFACE, 'ProcessCredentials',
dbus.Boolean(1),
dbus_interface=dbus.PROPERTIES_IFACE)
wps = dbus.Interface(if_obj, WPAS_DBUS_WPS_INTERFACE)
wps.Start({'Role': 'enrollee', 'Type': 'pbc'})
gobject.MainLoop().run()
if __name__ == "__main__":
main()
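# Usage sketch (the interface name below is an arbitrary example):
#     python wpas-dbus-new-wps.py wlan0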
|
daGrevis/daGrevis.lv | refs/heads/master | dagrevis_lv/core/context_processors.py | 1 | from django.conf import settings as project_settings
def settings(request):
return {"settings": project_settings}
|
neharejanjeva/techstitution | refs/heads/master | app/logs/venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.py | 516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests.certs
~~~~~~~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
try:
from certifi import where
except ImportError:
def where():
"""Return the preferred certificate bundle."""
# vendored bundle inside Requests
return os.path.join(os.path.dirname(__file__), 'cacert.pem')
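# Illustrative use (a sketch): the path returned by where() can be passed to
# the ``verify`` argument of requests, e.g. requests.get(url, verify=where()).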
if __name__ == '__main__':
print(where())
|
nightjean/Deep-Learning | refs/heads/master | tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py | 20 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rnn.python.ops import rnn_cell as contrib_rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class RNNCellTest(test.TestCase):
def testCoupledInputForgetGateLSTMCell(self):
with self.test_session() as sess:
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
expected_output = np.array(
[[0.121753, 0.121753],
[0.103349, 0.103349],
[0.100178, 0.100178]],
dtype=np.float32)
expected_state = np.array(
[[0.137523, 0.137523, 0.121753, 0.121753],
[0.105450, 0.105450, 0.103349, 0.103349],
[0.100742, 0.100742, 0.100178, 0.100178]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
output, state = contrib_rnn_cell.CoupledInputForgetGateLSTMCell(
num_units=num_units, forget_bias=1.0, state_is_tuple=False)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, state_size))
})
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testTimeFreqLSTMCell(self):
with self.test_session() as sess:
num_units = 8
state_size = num_units * 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = (input_size - feature_size) // frequency_skip + 1
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size * num_shifts])
output, state = contrib_rnn_cell.TimeFreqLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, int(state_size * (num_shifts))))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts))
self.assertEqual(res[1].shape, (batch_size, state_size * num_shifts))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testGridLSTMCell(self):
with self.test_session() as sess:
num_units = 8
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(
np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
.state_f00_b00_c[i, :]))) > 1e-6)
def testGridLSTMCellWithFrequencyBlocks(self):
with self.test_session() as sess:
num_units = 8
batch_size = 3
feature_size = 2
frequency_skip = 1
num_frequency_blocks = [1, 1]
total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1]
start_freqindex_list = [0, 2]
end_freqindex_list = [2, 4]
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=num_frequency_blocks,
start_freqindex_list=start_freqindex_list,
end_freqindex_list=end_freqindex_list,
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * total_blocks))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape,
(batch_size, num_units * total_blocks * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(
np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
.state_f00_b00_c[i, :]))) > 1e-6)
def testGridLstmCellWithCoupledInputForgetGates(self):
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020,
0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699],
[0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342,
0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171],
[0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533,
0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250]],
dtype=np.float32)
expected_state = np.array(
[[0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134,
0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865],
[0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432,
0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245],
[0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522,
0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390]],
dtype=np.float32)
for state_is_tuple in [False, True]:
with self.test_session() as sess:
with variable_scope.variable_scope(
"state_is_tuple" + str(state_is_tuple),
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=state_is_tuple)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
if state_is_tuple:
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts))
else:
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units * num_shifts * 2), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
        # This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
if not state_is_tuple:
self.assertAllClose(res[1], expected_state)
else:
# There should be num_shifts * 2 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 2)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCell(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341,
0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218],
[0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517,
0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840],
[0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552,
0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731]],
dtype=np.float32)
expected_state = np.array(
[[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836,
0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773],
[0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288,
0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984],
[1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101,
0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.BidirectionalGridLSTMCell(
num_units=num_units,
feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts])
inputs = constant_op.constant(
np.array([[1.0, 1.1, 1.2, 1.3],
[2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCellWithSliceOffset(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654,
0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071],
[0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828,
0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958],
[0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345,
0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858]],
dtype=np.float32)
expected_state = np.array(
[[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628,
0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446],
[0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488,
0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429],
[1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978,
0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.BidirectionalGridLSTMCell(
num_units=num_units,
feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
backward_slice_offset=1)
inputs = constant_op.constant(
np.array([[1.0, 1.1, 1.2, 1.3],
[2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testAttentionCellWrapperFailures(self):
with self.assertRaisesRegexp(TypeError,
"The parameter cell is not RNNCell."):
contrib_rnn_cell.AttentionCellWrapper(None, 0)
num_units = 8
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got 0"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, 0, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got -1"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, -1, state_is_tuple=state_is_tuple)
with ops.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(num_units, state_is_tuple=True)
with self.assertRaisesRegexp(
ValueError, "Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: *"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, 4, state_is_tuple=False)
def testAttentionCellWrapperZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.test_session() as sess:
with variable_scope.variable_scope("state_is_tuple_" + str(
state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = array_ops.zeros([batch_size, num_units], dtype=np.float32)
attn_state_zeros = array_ops.zeros(
[batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = array_ops.zeros(
[
batch_size,
num_units * 2 + attn_length * num_units + num_units
],
dtype=np.float32)
inputs = array_ops.zeros(
[batch_size, input_size], dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
self.assertEquals(output.get_shape(), [batch_size, num_units])
if state_is_tuple:
self.assertEquals(len(state), 3)
self.assertEquals(len(state[0]), 2)
self.assertEquals(state[0][0].get_shape(),
[batch_size, num_units])
self.assertEquals(state[0][1].get_shape(),
[batch_size, num_units])
self.assertEquals(state[1].get_shape(), [batch_size, num_units])
self.assertEquals(state[2].get_shape(),
[batch_size, attn_length * num_units])
tensors = [output] + list(state)
else:
self.assertEquals(state.get_shape(), [
batch_size,
num_units * 2 + num_units + attn_length * num_units
])
tensors = [output, state]
zero_result = sum(
[math_ops.reduce_sum(math_ops.abs(x)) for x in tensors])
sess.run(variables.global_variables_initializer())
self.assertTrue(sess.run(zero_result) < 1e-6)
def testAttentionCellWrapperValues(self):
num_units = 8
attn_length = 16
batch_size = 3
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.test_session() as sess:
with variable_scope.variable_scope("state_is_tuple_" + str(
state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = constant_op.constant(
0.1 * np.ones(
[batch_size, num_units], dtype=np.float32),
dtype=dtypes.float32)
attn_state_zeros = constant_op.constant(
0.1 * np.ones(
[batch_size, attn_length * num_units], dtype=np.float32),
dtype=dtypes.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = constant_op.constant(
0.1 * np.ones(
[
batch_size,
num_units * 2 + num_units + attn_length * num_units
],
dtype=np.float32),
dtype=dtypes.float32)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
if state_is_tuple:
concat_state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
else:
concat_state = state
sess.run(variables.global_variables_initializer())
output, state = sess.run([output, concat_state])
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
def _testAttentionCellWrapperCorrectResult(self):
num_units = 4
attn_length = 6
batch_size = 2
expected_output = np.array(
[[1.068372, 0.45496, -0.678277, 0.340538],
[1.018088, 0.378983, -0.572179, 0.268591]],
dtype=np.float32)
expected_state = np.array(
[[0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962,
0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077,
0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536,
0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152,
0.51843399],
[0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637,
0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857,
0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689,
0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457,
0.70582712]],
dtype=np.float32)
seed = 12345
random_seed.set_random_seed(seed)
rnn_scope = None
for state_is_tuple in [False, True]:
with session.Session() as sess:
with variable_scope.variable_scope(
"state_is_tuple", reuse=state_is_tuple,
initializer=init_ops.glorot_uniform_initializer()):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we restore the scope of the
# RNNCells after the first use below.
if rnn_scope is not None:
(cell._scope, lstm_cell._scope) = rnn_scope # pylint: disable=protected-access,unpacking-non-sequence
zeros1 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 1)
zeros2 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 2)
zeros3 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 3)
attn_state_zeros = random_ops.random_uniform(
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
if not state_is_tuple:
zero_state = array_ops.concat([
zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
], 1)
inputs = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 5)
output, state = cell(inputs, zero_state)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we store the scope of the
# first RNNCell for reuse above.
if rnn_scope is None:
rnn_scope = (cell._scope, lstm_cell._scope) # pylint: disable=protected-access
if state_is_tuple:
state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
sess.run(variables.global_variables_initializer())
self.assertAllClose(sess.run(output), expected_output)
self.assertAllClose(sess.run(state), expected_state)
def testNASCell(self):
num_units = 6
batch_size = 3
expected_output = np.array([[0.576751, 0.576751, 0.576751, 0.576751,
0.576751, 0.576751],
[0.618936, 0.618936, 0.618936, 0.618936,
0.618936, 0.618936],
[0.627393, 0.627393, 0.627393, 0.627393,
0.627393, 0.627393]])
expected_state = np.array([[0.71579772, 0.71579772, 0.71579772, 0.71579772,
0.71579772, 0.71579772, 0.57675087, 0.57675087,
0.57675087, 0.57675087, 0.57675087, 0.57675087],
[0.78041625, 0.78041625, 0.78041625, 0.78041625,
0.78041625, 0.78041625, 0.6189357, 0.6189357,
0.61893570, 0.6189357, 0.6189357, 0.6189357],
[0.79457647, 0.79457647, 0.79457647, 0.79457647,
0.79457653, 0.79457653, 0.62739348, 0.62739348,
0.62739348, 0.62739348, 0.62739348, 0.62739348]
])
with self.test_session() as sess:
with variable_scope.variable_scope(
"nas_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.NASCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = rnn_cell.LSTMStateTuple(state_value, state_value)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
      # This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
# Checking the shape of each state to be batch_size * num_units
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testNASCellProj(self):
num_units = 6
batch_size = 3
num_proj = 5
expected_output = np.array([[1.697418, 1.697418, 1.697418, 1.697418,
1.697418],
[1.840037, 1.840037, 1.840037, 1.840037,
1.840037],
[1.873985, 1.873985, 1.873985, 1.873985,
1.873985]])
expected_state = np.array([[0.69855207, 0.69855207, 0.69855207, 0.69855207,
0.69855207, 0.69855207, 1.69741797, 1.69741797,
1.69741797, 1.69741797, 1.69741797],
[0.77073824, 0.77073824, 0.77073824, 0.77073824,
0.77073824, 0.77073824, 1.84003687, 1.84003687,
1.84003687, 1.84003687, 1.84003687],
[0.78973997, 0.78973997, 0.78973997, 0.78973997,
0.78973997, 0.78973997, 1.87398517, 1.87398517,
1.87398517, 1.87398517, 1.87398517]])
with self.test_session() as sess:
with variable_scope.variable_scope(
"nas_proj_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.NASCell(num_units=num_units, num_proj=num_proj)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value_c = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
state_value_h = constant_op.constant(
0.1 * np.ones(
(batch_size, num_proj), dtype=np.float32),
dtype=dtypes.float32)
init_state = rnn_cell.LSTMStateTuple(state_value_c, state_value_h)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
      # This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
# Checking the shape of each state to be batch_size * num_units
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_proj)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testUGRNNCell(self):
num_units = 2
batch_size = 3
expected_state_and_output = np.array(
[[0.13752282, 0.13752282],
[0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
with self.test_session() as sess:
with variable_scope.variable_scope(
"ugrnn_cell_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.UGRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_state_and_output)
self.assertAllClose(res[1], expected_state_and_output)
def testIntersectionRNNCell(self):
num_units = 2
batch_size = 3
expected_state = np.array(
[[0.13752282, 0.13752282],
[0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
expected_output = np.array(
[[2.00431061, 2.00431061],
[4.00060606, 4.00060606],
[6.00008249, 6.00008249]],
dtype=np.float32)
with self.test_session() as sess:
with variable_scope.variable_scope(
"intersection_rnn_cell_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.IntersectionRNNCell(
num_units=num_units, num_in_proj=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testIntersectionRNNCellFailure(self):
num_units = 2
batch_size = 3
cell = contrib_rnn_cell.IntersectionRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
with self.assertRaisesRegexp(
ValueError, "Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init."):
cell(inputs, init_state)
def testPhasedLSTMCell(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
expected_state_c = np.array(
[[0.00072015, 0.00036633], [0.00083481, 0.00047266],
[0.00085111, 0.00053054]],
dtype=np.float32)
expected_state_h = np.array(
[[0.0005159, 0.00026243], [0.00062958, 0.00035646],
[0.00064732, 0.00040351]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
t = array_ops.zeros([batch_size, 1], dtype=dtypes.float64)
x = array_ops.zeros([batch_size, input_size])
c0 = array_ops.zeros([batch_size, 2])
h0 = array_ops.zeros([batch_size, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
output, state = contrib_rnn_cell.PhasedLSTMCell(num_units=num_units)(
(t, x), state0)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
t.name:
np.array([[1.], [2.], [3.]]),
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
})
# This is a smoke test, making sure expected values are unchanged.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], res[1].h)
self.assertAllClose(res[1].c, expected_state_c)
self.assertAllClose(res[1].h, expected_state_h)
def testHighwayWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"base_cell", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell.GRUCell(3)
g, m_new = base_cell(x, m)
with variable_scope.variable_scope(
"hw_cell", initializer=init_ops.constant_initializer(0.5)):
hw_cell = contrib_rnn_cell.HighwayWrapper(
rnn_cell.GRUCell(3), carry_bias_init=-100.0)
g_res, m_new_res = hw_cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# As carry_bias_init is very negative, the carry gate is 'open' and the
# transform gate is 'closed'. This means the output equals the input.
self.assertAllClose(res[1], res[0])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testGLSTMCell(self):
# Ensure that G-LSTM matches LSTM when number_of_groups = 1
batch_size = 2
num_units = 4
number_of_groups = 1
with self.test_session() as sess:
with variable_scope.variable_scope(
"root1", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.ones([batch_size, num_units])
# When number_of_groups = 1, G-LSTM is equivalent to regular LSTM
gcell = contrib_rnn_cell.GLSTMCell(
num_units=num_units, number_of_groups=number_of_groups)
cell = rnn_cell.LSTMCell(num_units=num_units)
self.assertTrue(isinstance(gcell.state_size, tuple))
zero_state = gcell.zero_state(batch_size=batch_size,
dtype=dtypes.float32)
gh, gs = gcell(x, zero_state)
h, g = cell(x, zero_state)
sess.run([variables.global_variables_initializer()])
glstm_result = sess.run([gh, gs])
lstm_result = sess.run([h, g])
self.assertAllClose(glstm_result[0], lstm_result[0], 1e-5)
self.assertAllClose(glstm_result[1], lstm_result[1], 1e-5)
    # Test that G-LSTM subgroups act like the corresponding sub-LSTMs
batch_size = 2
num_units = 4
number_of_groups = 2
with self.test_session() as sess:
with variable_scope.variable_scope(
"root2", initializer=init_ops.constant_initializer(0.5)):
# input for G-LSTM with 2 groups
glstm_input = array_ops.ones([batch_size, num_units])
gcell = contrib_rnn_cell.GLSTMCell(
num_units=num_units, number_of_groups=number_of_groups)
gcell_zero_state = gcell.zero_state(batch_size=batch_size,
dtype=dtypes.float32)
gh, gs = gcell(glstm_input, gcell_zero_state)
# input for LSTM cell simulating single G-LSTM group
lstm_input = array_ops.ones([batch_size, num_units / number_of_groups])
        # note the division by number_of_groups; this cell simulates a
        # single G-LSTM group
cell = rnn_cell.LSTMCell(num_units=int(num_units / number_of_groups))
cell_zero_state = cell.zero_state(batch_size=batch_size,
dtype=dtypes.float32)
h, g = cell(lstm_input, cell_zero_state)
sess.run([variables.global_variables_initializer()])
[gh_res, h_res] = sess.run([gh, h])
self.assertAllClose(gh_res[:, 0:int(num_units / number_of_groups)],
h_res, 1e-5)
self.assertAllClose(gh_res[:, int(num_units / number_of_groups):],
h_res, 1e-5)
class LayerNormBasicLSTMCellTest(test.TestCase):
# NOTE: all the values in the current test case have been calculated.
def testBasicLSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2)
cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)])
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_state0_c = np.array([[-1.0, 1.0]])
expected_state0_h = np.array([[-0.38079708, 0.38079708]])
expected_state1_c = np.array([[-1.0, 1.0]])
expected_state1_h = np.array([[-0.38079708, 0.38079708]])
actual_h = res[0]
actual_state0_c = res[1][0].c
actual_state0_h = res[1][0].h
actual_state1_c = res[1][1].c
actual_state1_h = res[1][1].h
self.assertAllClose(actual_h, expected_h, 1e-5)
self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
c = array_ops.zeros([1, 2])
h = array_ops.zeros([1, 2])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2)
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1., 1.]]),
c.name: 0.1 * np.asarray([[0, 1]]),
h.name: 0.1 * np.asarray([[2, 3]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_c = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c, 1e-5)
self.assertAllClose(res[1].h, expected_h, 1e-5)
def testBasicLSTMCellWithoutNorm(self):
"""Tests that BasicLSTMCell with layer_norm=False."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False)
cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)])
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[ 0.70230919, 0.72581059]])
expected_state0_c = np.array([[ 0.8020075, 0.89599884]])
expected_state0_h = np.array([[ 0.56668288, 0.60858738]])
expected_state1_c = np.array([[ 1.17500675, 1.26892781]])
expected_state1_h = np.array([[ 0.70230919, 0.72581059]])
actual_h = res[0]
actual_state0_c = res[1][0].c
actual_state0_h = res[1][0].h
actual_state1_c = res[1][1].c
actual_state1_h = res[1][1].h
self.assertAllClose(actual_h, expected_h, 1e-5)
self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)) as vs:
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
c = array_ops.zeros([1, 2])
h = array_ops.zeros([1, 2])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False)
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1., 1.]]),
c.name: 0.1 * np.asarray([[0, 1]]),
h.name: 0.1 * np.asarray([[2, 3]]),
})
expected_h = np.array([[ 0.64121795, 0.68166804]])
expected_c = np.array([[ 0.88477188, 0.98103917]])
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c, 1e-5)
self.assertAllClose(res[1].h, expected_h, 1e-5)
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
cell = rnn_cell.MultiRNNCell(
[contrib_rnn_cell.LayerNormBasicLSTMCell(2) for _ in range(2)])
h, (s0, s1) = cell(x, (state0, state1))
sess.run([variables.global_variables_initializer()])
res = sess.run([h, s0, s1], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_h0 = np.array([[-0.38079708, 0.38079708]])
expected_c0 = np.array([[-1.0, 1.0]])
expected_h1 = np.array([[-0.38079708, 0.38079708]])
expected_c1 = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 3)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c0, 1e-5)
self.assertAllClose(res[1].h, expected_h0, 1e-5)
self.assertAllClose(res[2].c, expected_c1, 1e-5)
self.assertAllClose(res[2].h, expected_h1, 1e-5)
def testBasicLSTMCellWithDropout(self):
def _is_close(x, y, digits=4):
delta = x - y
return delta < 10**(-digits)
def _is_close_in(x, items, digits=4):
for i in items:
if _is_close(x, i, digits):
return True
return False
keep_prob = 0.5
c_high = 2.9998924946
c_low = 0.999983298578
h_low = 0.761552567265
h_high = 0.995008519604
num_units = 5
allowed_low = [2, 3]
with self.test_session() as sess:
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(1)):
x = array_ops.zeros([1, 5])
c = array_ops.zeros([1, 5])
h = array_ops.zeros([1, 5])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(
num_units, layer_norm=False, dropout_keep_prob=keep_prob)
g, s = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x.name: np.ones([1, 5]),
c.name: np.ones([1, 5]),
h.name: np.ones([1, 5]),
})
# Since the returned tensors are of size [1,n]
# get the first component right now.
actual_h = res[0][0]
actual_state_c = res[1].c[0]
actual_state_h = res[1].h[0]
# For each item in `c` (the cell inner state) check that
# it is equal to one of the allowed values `c_high` (not
# dropped out) or `c_low` (dropped out) and verify that the
# corresponding item in `h` (the cell activation) is coherent.
# Count the dropped activations and check that their number is
# coherent with the dropout probability.
dropped_count = 0
self.assertTrue((actual_h == actual_state_h).all())
for citem, hitem in zip(actual_state_c, actual_state_h):
self.assertTrue(_is_close_in(citem, [c_low, c_high]))
if _is_close(citem, c_low):
self.assertTrue(_is_close(hitem, h_low))
dropped_count += 1
elif _is_close(citem, c_high):
self.assertTrue(_is_close(hitem, h_high))
self.assertIn(dropped_count, allowed_low)
def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth,
num_layers, max_time, compiled):
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable(
"inputs", initializer=random_ops.random_uniform(
(max_time, batch_size, input_depth), seed=1))
maybe_xla = lambda c: contrib_rnn_cell.CompiledWrapper(c) if compiled else c
cell = rnn_cell.MultiRNNCell(
[maybe_xla(rnn_cell.LSTMCell(num_units)) for _ in range(num_layers)])
initial_state = cell.zero_state(
batch_size=batch_size, dtype=dtypes.float32)
outputs, final_state = rnn.dynamic_rnn(
cell=cell, inputs=inputs, initial_state=initial_state,
time_major=True)
flat_final_state = nest.flatten(final_state)
trainable_variables = variables.trainable_variables()
outputs_grad = gradients_impl.gradients(
[outputs],
trainable_variables + [inputs] + nest.flatten(initial_state))
final_state_grad = gradients_impl.gradients(
flat_final_state,
trainable_variables + [inputs] + nest.flatten(initial_state))
return {"outputs": outputs,
"final_state": flat_final_state,
"outputs_grad": outputs_grad,
"final_state_grad": final_state_grad}
class CompiledWrapperTest(test.TestCase):
def testMultiRNNCellWithLSTMCellAndXLA(self):
# TODO(b/34735319): Don't run this test if XLA is not available.
batch_size = 16
num_units = 32
input_depth = 12
num_layers = 2
max_time = 20
atol = 1e-5
random_seed.set_random_seed(1234)
with self.test_session(graph=ops.Graph()) as sess:
xla_ops = _create_multi_lstm_cell_ops(
batch_size=batch_size, num_units=num_units,
input_depth=input_depth, num_layers=num_layers,
max_time=max_time,
compiled=True)
sess.run([variables.global_variables_initializer()])
xla_results = sess.run(xla_ops)
random_seed.set_random_seed(1234)
with self.test_session(graph=ops.Graph()) as sess:
non_xla_ops = _create_multi_lstm_cell_ops(
batch_size=batch_size, num_units=num_units,
input_depth=input_depth, num_layers=num_layers,
max_time=max_time,
compiled=False)
sess.run([variables.global_variables_initializer()])
non_xla_results = sess.run(non_xla_ops)
self.assertAllClose(
non_xla_results["outputs"], xla_results["outputs"], atol=atol)
for xla_value, non_xla_value in zip(
xla_results["final_state"], non_xla_results["final_state"]):
self.assertAllClose(xla_value, non_xla_value, atol=atol)
for xla_g, non_xla_g in zip(
xla_results["outputs_grad"], non_xla_results["outputs_grad"]):
self.assertAllClose(xla_g, non_xla_g, atol=atol)
for xla_g, non_xla_g in zip(
xla_results["final_state_grad"], non_xla_results["final_state_grad"]):
self.assertAllClose(xla_g, non_xla_g, atol=atol)
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_good)
sess.run([variables.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class BenchmarkLSTMCellXLA(test.Benchmark):
def benchmarkDynamicRNNWithMultiLSTMCell(self):
num_layers = 3
max_time = 50
print("benchmarkDynamicRNNWithMultiLSTMCell")
print("\t" +
"\t".join(["inter_th", "intra_th",
"batch_size", "num_units", "input_depth", "device",
"compiled", "wall_time"]))
warmup_run = True
for (threads,
device,
num_units,
batch_size,
input_depth,
compiled) in itertools.product(
[{"inter": 0, "intra": 0}, {"inter": 1, "intra": 4}],
["cpu", "gpu"],
[32, 512],
[1, 32, 256],
[32, 512],
[False, True]):
if threads["inter"] != 0:
# We only care about testing inter/intra op limitations on
# CPU with small batch size, to mimic embedded devices.
if device != "cpu" or batch_size != 1:
continue
if device == "cpu" and batch_size > 32:
continue
random_seed.set_random_seed(1234)
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=threads["inter"],
intra_op_parallelism_threads=threads["intra"],
allow_soft_placement=False)
with session.Session(config=config, graph=ops.Graph()) as sess:
with ops.device("/%s:0" % device):
ops_dict = _create_multi_lstm_cell_ops(
batch_size=batch_size, num_units=num_units,
input_depth=input_depth, num_layers=num_layers,
max_time=max_time,
compiled=compiled)
sess.run([variables.global_variables_initializer()])
all_ops = nest.flatten(ops_dict.values())
all_ops_group = control_flow_ops.group(*all_ops)
name_suffix = (
"inter_th_%d_intra_th_%d_bs_%d_units_%d_inputdepth_%d"
"_device_%s_xla_%s" % (
threads["inter"], threads["intra"],
batch_size, num_units, input_depth, device, compiled))
if warmup_run:
self.run_op_benchmark(
sess, all_ops_group, min_iters=30, name="ignore_warmup")
warmup_run = False
benchmark_results = self.run_op_benchmark(
sess, all_ops_group, min_iters=50,
name="benchmarkDynamicRNNWithMultiLSTMCell_%s" % name_suffix)
print("\t" +
"\t".join(["%s" % x for x in [
threads["inter"], threads["intra"],
batch_size, num_units, input_depth, device, compiled,
benchmark_results["wall_time"]]]))
if __name__ == "__main__":
test.main()
|
Cnidarias/al-go-rithms | refs/heads/master | greedy/dijkstra_shortest_path/dijkstra_shortest_path.py | 3 |
def dijkstra(graph, source):
vertices, edges = graph
dist = dict()
previous = dict()
for vertex in vertices:
dist[vertex] = float("inf")
previous[vertex] = None
dist[source] = 0
Q = set(vertices)
while len(Q) > 0:
u = minimum_distance(dist, Q)
print('Currently considering', u, 'with a distance of', dist[u])
Q.remove(u)
if dist[u] == float('inf'):
break
n = get_neighbours(graph, u)
for vertex in n:
alt = dist[u] + dist_between(graph, u, vertex)
if alt < dist[vertex]:
dist[vertex] = alt
previous[vertex] = u
return previous
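# --- Added sketch (not part of the original snippet) -------------------------
# The function above relies on three helpers that are not defined in this file.
# Below is one minimal, assumed implementation: `graph` is taken to be a
# (vertices, edges) pair where `edges` maps each vertex to a dict of
# {neighbour: weight}. Treat these as illustrative, not the original author's
# code.
def minimum_distance(dist, Q):
    # Unvisited vertex with the smallest tentative distance.
    return min(Q, key=lambda vertex: dist[vertex])

def get_neighbours(graph, u):
    # Neighbours of u under the assumed adjacency-dict representation.
    _, edges = graph
    return edges.get(u, {}).keys()

def dist_between(graph, u, v):
    # Weight of the edge u -> v under the assumed representation.
    _, edges = graph
    return edges[u][v]

if __name__ == '__main__':
    # Small worked example on the assumed representation: the shortest-path
    # tree from 'a' is returned as a {vertex: predecessor} dict.
    vertices = ['a', 'b', 'c', 'd']
    edges = {
        'a': {'b': 1, 'c': 4},
        'b': {'c': 2, 'd': 6},
        'c': {'d': 3},
        'd': {},
    }
    print(dijkstra((vertices, edges), 'a'))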
|
rizumu/django | refs/heads/master | django/contrib/admin/models.py | 184 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.translation import ugettext, ugettext_lazy as _
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
use_in_migrations = True
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
self.model.objects.create(
user_id=user_id,
content_type_id=content_type_id,
object_id=smart_text(object_id),
object_repr=object_repr[:200],
action_flag=action_flag,
change_message=change_message,
)
@python_2_unicode_compatible
class LogEntry(models.Model):
action_time = models.DateTimeField(
_('action time'),
default=timezone.now,
editable=False,
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
verbose_name=_('user'),
)
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
verbose_name=_('content type'),
blank=True, null=True,
)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'django_admin_log'
ordering = ('-action_time',)
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.is_addition():
return ugettext('Added "%(object)s".') % {'object': self.object_repr}
elif self.is_change():
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.change_message,
}
elif self.is_deletion():
return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
return ugettext('LogEntry Object')
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
"""
if self.content_type and self.object_id:
url_name = 'admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model)
try:
return reverse(url_name, args=(quote(self.object_id),))
except NoReverseMatch:
pass
return None
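# Usage sketch (added for illustration, not part of the original module):
# recording a change from application code. `obj` and `request` are assumed
# to exist in the calling context.
#
#     from django.contrib.admin.models import LogEntry, CHANGE
#     LogEntry.objects.log_action(
#         user_id=request.user.pk,
#         content_type_id=ContentType.objects.get_for_model(obj).pk,
#         object_id=obj.pk,
#         object_repr=str(obj),
#         action_flag=CHANGE,
#         change_message='Changed the title.',
#     )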
|
khkaminska/bokeh | refs/heads/master | examples/interactions/us_marriages_divorces/us_marriages_divorces_interactive.py | 26 | # coding: utf-8
# Plotting U.S. marriage and divorce statistics
#
# Example code by Randal S. Olson (http://www.randalolson.com)
from bokeh.plotting import figure, show, output_file, ColumnDataSource
from bokeh.models import HoverTool, NumeralTickFormatter
from bokeh.models import SingleIntervalTicker, LinearAxis
import pandas as pd
# Since the data set is loaded in the bokeh data repository, we can do this:
from bokeh.sampledata.us_marriages_divorces import data
md_data = data.copy()
# Fill in missing data with a simple linear interpolation
md_data = md_data.interpolate(method='linear', axis=0).ffill().bfill()
# Tell Bokeh where to save the interactive chart
output_file('us_marriages_divorces_per_capita.html',
# Tell Bokeh to use its minified JavaScript hosted on a
# cdn instead of putting the Bokeh JS in the output file
# Warning: This makes it so people can only view the
# chart with an internet connection
mode='cdn',
title='144 years of marriage and divorce in the U.S.A.')
# Set up the data sources for the lines we'll be plotting.
# We need separate data sources for each line because we're
# displaying different data in the hover tool.
source_marriages = ColumnDataSource(
data=dict(
# x-axis (Years) for the chart
x=md_data.Year.values,
# y-axis (Marriages per capita) for the chart
y=md_data.Marriages_per_1000.values,
# The string version of the y-value that is displayed in the hover box
y_text=md_data.Marriages_per_1000.apply(
lambda x: '{}'.format(round(x, 1))),
# Extra descriptive text that is displayed in the hover box
desc=['marriages per 1,000 people'] * len(md_data),
)
)
source_divorces = ColumnDataSource(
data=dict(
# x-axis (Years) for the chart
x=md_data.Year.values,
# y-axis (Divorces per capita) for the chart
y=md_data.Divorces_per_1000.values,
# The string version of the y-value that is displayed in the hover box
y_text=md_data.Divorces_per_1000.apply(
lambda x: '{}'.format(round(x, 1))),
# Extra descriptive text that is displayed in the hover box
desc=['divorces and annulments per 1,000 people'] * len(md_data),
)
)
# Use HTML to mark up the tooltip that displays over the chart
# Note that the variables in the data sources (above) are referenced with a @
hover = HoverTool(
tooltips='<font face="Arial" size="3">@y_text @desc in @x</font>')
# Select the tools that will be available to the chart
TOOLS = ['pan,wheel_zoom,box_zoom,reset,save,resize'] + [hover]
bplot = figure(tools=TOOLS, width=800, height=500, x_axis_type=None)
# Create a custom x-axis with 10-year intervals
ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
xaxis = LinearAxis(ticker=ticker)
bplot.add_layout(xaxis, 'below')
# Customize the y-axis
bplot.yaxis.formatter = NumeralTickFormatter(format='0.0a')
bplot.yaxis.axis_label = '# per 1,000 people'
# Provide a descriptive title for the chart
bplot.title = '144 years of marriage and divorce in the U.S.'
# Finally, plot the data!
# Note that the data source determines what is plotted and what shows in
# the tooltips
bplot.line('x', 'y', color='#1f77b4', line_width=3, source=source_marriages)
bplot.line('x', 'y', color='#ff7f0e', line_width=3, source=source_divorces)
show(bplot)
|
Just-D/chromium-1 | refs/heads/master | tools/telemetry/third_party/gsutilz/third_party/boto/boto/s3/key.py | 72 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
all metadata and ACL info while changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
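# Usage sketch (illustrative comment, not part of the original source):
# copying this key into another bucket while keeping its ACL. The bucket
# and destination key names here are hypothetical.
#
#     new_key = key.copy('backup-bucket', 'archive/%s' % key.name,
#                        preserve_acl=True, reduced_redundancy=True)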
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
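# Usage sketch (illustrative comment): a simple round trip with the methods
# above. `bucket` is an existing Bucket; set_contents_from_string is a Key
# method not shown in this excerpt.
#
#     key = bucket.new_key('notes.txt')
#     key.set_contents_from_string('hello')
#     assert key.exists()
#     key.delete()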
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
:param query_auth:
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
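# Usage sketch (illustrative comment): producing a time-limited download
# link for this key. The one-hour expiry is an arbitrary example value.
#
#     url = key.generate_url(expires_in=3600, method='GET')
#
# For objects that are already public, query_auth=False yields a plain,
# unsigned URL.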
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
size parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
# the auth mechanism (because closures). Detect if it's SigV4 & embellish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
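# Usage sketch (illustrative comment): computing the checksum once up front
# and reusing it for the upload so the stream is not hashed twice. The file
# name is a placeholder.
#
#     with open('data.bin', 'rb') as fp:
#         md5_tuple = key.compute_md5(fp)
#         key.set_contents_from_file(fp, md5=md5_tuple, rewind=True)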
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Length and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
                # Defer the MD5 calculation so it happens on the fly;
                # we don't know anything about the size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
                    # compute_md5() also sets self.size to the actual
                    # number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                    # If md5 is provided, we still need the size, so
                    # calculate it based on the bytes to the end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
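    # Illustrative usage sketch (not part of the original module; the
    # connection, bucket and object names below are hypothetical):
    #
    #   import boto
    #   conn = boto.connect_s3()
    #   bucket = conn.get_bucket('example-bucket')
    #   key = bucket.new_key('reports/2014/summary.csv')
    #   with open('/tmp/summary.csv', 'rb') as fp:
    #       bytes_written = key.set_contents_from_file(
    #           fp, reduced_redundancy=True, rewind=True)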
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
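    # Illustrative sketch of the filename variant with a progress callback;
    # not part of the original module, and the path is hypothetical:
    #
    #   def report_progress(transmitted, total):
    #       print '%d of %d bytes sent' % (transmitted, total)
    #
    #   key.set_contents_from_filename('/tmp/backup.tar.gz',
    #                                  cb=report_progress, num_cb=20,
    #                                  encrypt_key=True)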
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
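    # Illustrative sketch (not part of the original module). Unicode input is
    # encoded to UTF-8 before upload, so both calls below store the same bytes:
    #
    #   key.set_contents_from_string(u'hello world')
    #   key.set_contents_from_string('hello world')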
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
        :type fp: file-like object
        :param fp: the file pointer to which the object's contents
            will be written
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
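    # Illustrative download sketch (not part of the original module; the key
    # name and local path are hypothetical):
    #
    #   key = bucket.get_key('reports/2014/summary.csv')
    #   with open('/tmp/summary.csv', 'wb') as fp:
    #       key.get_contents_to_file(
    #           fp, response_headers={'response-content-type': 'text/csv'})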
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
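    # Illustrative sketch (not part of the original module). Without an
    # encoding the raw bytes are returned; passing one decodes them:
    #
    #   raw = key.get_contents_as_string()
    #   text = key.get_contents_as_string(encoding='utf-8')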
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
            account you are granting the permission to.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.
:type display_name: string
        :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
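    # Illustrative ACL sketch (not part of the original module; the email
    # address and canonical user id are hypothetical):
    #
    #   key.add_email_grant('READ', 'user@example.com')
    #   key.add_user_grant('FULL_CONTROL',
    #                      '0123456789abcdef' * 4, display_name='Example User')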
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
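    # Illustrative sketch (not part of the original module): restoring an
    # archived object, named hypothetically, for ten days:
    #
    #   key = bucket.get_key('archives/2013/logs.tar.gz')
    #   key.restore(days=10)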
|
krieger-od/nwjs_chromium.src | refs/heads/master | tools/telemetry/telemetry/util/statistics.py | 35 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A collection of statistical utility functions to be used by metrics."""
import math
def Clamp(value, low=0.0, high=1.0):
"""Clamp a value between some low and high value."""
return min(max(value, low), high)
def NormalizeSamples(samples):
"""Sorts the samples, and map them linearly to the range [0,1].
They're mapped such that for the N samples, the first sample is 0.5/N and the
last sample is (N-0.5)/N.
Background: The discrepancy of the sample set i/(N-1); i=0, ..., N-1 is 2/N,
twice the discrepancy of the sample set (i+1/2)/N; i=0, ..., N-1. In our case
we don't want to distinguish between these two cases, as our original domain
is not bounded (it is for Monte Carlo integration, where discrepancy was
first used).
"""
if not samples:
return samples, 1.0
samples = sorted(samples)
low = min(samples)
high = max(samples)
new_low = 0.5 / len(samples)
new_high = (len(samples)-0.5) / len(samples)
if high-low == 0.0:
return [0.5] * len(samples), 1.0
scale = (new_high - new_low) / (high - low)
for i in xrange(0, len(samples)):
samples[i] = float(samples[i] - low) * scale + new_low
return samples, scale
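# Worked example (illustrative, not part of the original module): for the
# samples [10, 20, 40] the first value maps to 0.5/3 and the last to 2.5/3,
# with the middle value interpolated linearly:
#
#   NormalizeSamples([10, 20, 40])
#   # -> ([0.1666..., 0.3888..., 0.8333...], 0.0222...)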
def Discrepancy(samples, location_count=None):
"""Computes the discrepancy of a set of 1D samples from the interval [0,1].
The samples must be sorted. We define the discrepancy of an empty set
of samples to be zero.
http://en.wikipedia.org/wiki/Low-discrepancy_sequence
http://mathworld.wolfram.com/Discrepancy.html
"""
if not samples:
return 0.0
max_local_discrepancy = 0
inv_sample_count = 1.0 / len(samples)
locations = []
# For each location, stores the number of samples less than that location.
count_less = []
# For each location, stores the number of samples less than or equal to that
# location.
count_less_equal = []
if location_count:
# Generate list of equally spaced locations.
sample_index = 0
for i in xrange(0, int(location_count)):
location = float(i) / (location_count-1)
locations.append(location)
while sample_index < len(samples) and samples[sample_index] < location:
sample_index += 1
count_less.append(sample_index)
while sample_index < len(samples) and samples[sample_index] <= location:
sample_index += 1
count_less_equal.append(sample_index)
else:
# Populate locations with sample positions. Append 0 and 1 if necessary.
if samples[0] > 0.0:
locations.append(0.0)
count_less.append(0)
count_less_equal.append(0)
for i in xrange(0, len(samples)):
locations.append(samples[i])
count_less.append(i)
count_less_equal.append(i+1)
if samples[-1] < 1.0:
locations.append(1.0)
count_less.append(len(samples))
count_less_equal.append(len(samples))
# Iterate over the intervals defined by any pair of locations.
for i in xrange(0, len(locations)):
for j in xrange(i+1, len(locations)):
# Length of interval
length = locations[j] - locations[i]
# Local discrepancy for closed interval
count_closed = count_less_equal[j] - count_less[i]
local_discrepancy_closed = abs(float(count_closed) *
inv_sample_count - length)
max_local_discrepancy = max(local_discrepancy_closed,
max_local_discrepancy)
# Local discrepancy for open interval
count_open = count_less[j] - count_less_equal[i]
local_discrepancy_open = abs(float(count_open) *
inv_sample_count - length)
max_local_discrepancy = max(local_discrepancy_open,
max_local_discrepancy)
return max_local_discrepancy
def TimestampsDiscrepancy(timestamps, absolute=True,
location_count=None):
"""A discrepancy based metric for measuring timestamp jank.
TimestampsDiscrepancy quantifies the largest area of jank observed in a series
of timestamps. Note that this is different from metrics based on the
max_time_interval. For example, the time stamp series A = [0,1,2,3,5,6] and
B = [0,1,2,3,5,7] have the same max_time_interval = 2, but
Discrepancy(B) > Discrepancy(A).
Two variants of discrepancy can be computed:
  Relative discrepancy follows the original definition of
  discrepancy. It characterizes the largest area of jank, relative to the
duration of the entire time stamp series. We normalize the raw results,
because the best case discrepancy for a set of N samples is 1/N (for
equally spaced samples), and we want our metric to report 0.0 in that
case.
Absolute discrepancy also characterizes the largest area of jank, but its
value wouldn't change (except for imprecisions due to a low
|interval_multiplier|) if additional 'good' intervals were added to an
  existing list of time stamps. Its range is [0,inf] and the unit is
milliseconds.
The time stamp series C = [0,2,3,4] and D = [0,2,3,4,5] have the same
absolute discrepancy, but D has lower relative discrepancy than C.
|timestamps| may be a list of lists S = [S_1, S_2, ..., S_N], where each
S_i is a time stamp series. In that case, the discrepancy D(S) is:
D(S) = max(D(S_1), D(S_2), ..., D(S_N))
"""
if not timestamps:
return 0.0
if isinstance(timestamps[0], list):
range_discrepancies = [TimestampsDiscrepancy(r) for r in timestamps]
return max(range_discrepancies)
samples, sample_scale = NormalizeSamples(timestamps)
discrepancy = Discrepancy(samples, location_count)
inv_sample_count = 1.0 / len(samples)
if absolute:
# Compute absolute discrepancy
discrepancy /= sample_scale
else:
# Compute relative discrepancy
discrepancy = Clamp((discrepancy-inv_sample_count) / (1.0-inv_sample_count))
return discrepancy
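# Illustrative comparison (not part of the original module), reusing the two
# series from the docstring above; both have max_time_interval == 2, but the
# second scores a higher discrepancy because its largest gap-area is bigger:
#
#   TimestampsDiscrepancy([0, 1, 2, 3, 5, 6], absolute=True)
#   TimestampsDiscrepancy([0, 1, 2, 3, 5, 7], absolute=True)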
def DurationsDiscrepancy(durations, absolute=True,
location_count=None):
"""A discrepancy based metric for measuring duration jank.
DurationsDiscrepancy computes a jank metric which measures how irregular a
given sequence of intervals is. In order to minimize jank, each duration
should be equally long. This is similar to how timestamp jank works,
and we therefore reuse the timestamp discrepancy function above to compute a
similar duration discrepancy number.
Because timestamp discrepancy is defined in terms of timestamps, we first
convert the list of durations to monotonically increasing timestamps.
Args:
durations: List of interval lengths in milliseconds.
absolute: See TimestampsDiscrepancy.
    location_count: See TimestampsDiscrepancy.
"""
if not durations:
return 0.0
timestamps = reduce(lambda x, y: x + [x[-1] + y], durations, [0])
return TimestampsDiscrepancy(timestamps, absolute, location_count)
def ArithmeticMean(data):
"""Calculates arithmetic mean.
Args:
data: A list of samples.
Returns:
The arithmetic mean value, or 0 if the list is empty.
"""
numerator_total = Total(data)
denominator_total = Total(len(data))
return DivideIfPossibleOrZero(numerator_total, denominator_total)
def StandardDeviation(data):
"""Calculates the standard deviation.
Args:
data: A list of samples.
Returns:
The standard deviation of the samples provided.
"""
if len(data) == 1:
return 0.0
mean = ArithmeticMean(data)
variances = [float(x) - mean for x in data]
variances = [x * x for x in variances]
std_dev = math.sqrt(ArithmeticMean(variances))
return std_dev
def TrapezoidalRule(data, dx):
""" Calculate the integral according to the trapezoidal rule
TrapezoidalRule approximates the definite integral of f from a to b by
the composite trapezoidal rule, using n subintervals.
http://en.wikipedia.org/wiki/Trapezoidal_rule#Uniform_grid
Args:
data: A list of samples
dx: The uniform distance along the x axis between any two samples
Returns:
The area under the curve defined by the samples and the uniform distance
according to the trapezoidal rule.
"""
  n = len(data) - 1
  # Guard against empty or single-sample input before indexing into data.
  if n <= 0:
    return 0.0
  s = data[0] + data[n]
for i in range(1, n):
s += 2 * data[i]
return s * dx / 2.0
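# Worked example (illustrative, not part of the original module): sampling
# f(x) = x**2 at x = 0, 1, 2, 3 gives data = [0, 1, 4, 9] with dx = 1, so
#
#   TrapezoidalRule([0, 1, 4, 9], 1)
#   # -> (0 + 9 + 2*(1 + 4)) * 1 / 2.0 = 9.5, close to the exact integral 9.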
def Total(data):
"""Returns the float value of a number or the sum of a list."""
if type(data) == float:
total = data
elif type(data) == int:
total = float(data)
elif type(data) == list:
total = float(sum(data))
else:
raise TypeError
return total
def DivideIfPossibleOrZero(numerator, denominator):
"""Returns the quotient, or zero if the denominator is zero."""
return (float(numerator) / float(denominator)) if denominator else 0
def GeneralizedMean(values, exponent):
"""See http://en.wikipedia.org/wiki/Generalized_mean"""
if not values:
return 0.0
sum_of_powers = 0.0
for v in values:
sum_of_powers += v ** exponent
return (sum_of_powers / len(values)) ** (1.0/exponent)
def Median(values):
"""Gets the median of a list of values."""
return Percentile(values, 50)
def Percentile(values, percentile):
"""Calculates the value below which a given percentage of values fall.
For example, if 17% of the values are less than 5.0, then 5.0 is the 17th
percentile for this set of values. When the percentage doesn't exactly
match a rank in the list of values, the percentile is computed using linear
interpolation between closest ranks.
Args:
values: A list of numerical values.
percentile: A number between 0 and 100.
Returns:
The Nth percentile for the list of values, where N is the given percentage.
"""
if not values:
return 0.0
sorted_values = sorted(values)
n = len(values)
percentile /= 100.0
if percentile <= 0.5 / n:
return sorted_values[0]
elif percentile >= (n - 0.5) / n:
return sorted_values[-1]
else:
floor_index = int(math.floor(n * percentile - 0.5))
floor_value = sorted_values[floor_index]
ceil_value = sorted_values[floor_index+1]
alpha = n * percentile - 0.5 - floor_index
return floor_value + alpha * (ceil_value - floor_value)
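# Worked example (illustrative, not part of the original module): the median
# of [10, 20, 30, 40] falls between the 2nd and 3rd sorted values, so linear
# interpolation gives
#
#   Percentile([10, 20, 30, 40], 50)
#   # -> 25.0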
def GeometricMean(values):
"""Compute a rounded geometric mean from an array of values."""
if not values:
return None
# To avoid infinite value errors, make sure no value is less than 0.001.
new_values = []
for value in values:
if value > 0.001:
new_values.append(value)
else:
new_values.append(0.001)
# Compute the sum of the log of the values.
log_sum = sum(map(math.log, new_values))
# Raise e to that sum over the number of values.
mean = math.pow(math.e, (log_sum / len(new_values)))
# Return the rounded mean.
return int(round(mean))
|
Nu3001/external_chromium_org | refs/heads/master | tools/mac/symbolicate_crash.py | 178 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script can take an Apple-style CrashReporter log and symbolicate it. This
is useful for when a user's reports aren't being uploaded, for example.
Only versions 6, 7, 8, and 9 reports are supported. For more information on the
file format, reference this document:
TN2123 <http://developer.apple.com/library/mac/#technotes/tn2004/tn2123.html>
Information on symbolication was gleaned from:
<http://developer.apple.com/tools/xcode/symbolizingcrashdumps.html>
"""
import optparse
import os.path
import re
import subprocess
import sys
# Maps binary image identifiers to binary names (minus the .dSYM portion) found
# in the archive. These are the only objects that will be looked up.
SYMBOL_IMAGE_MAP = {
'com.google.Chrome': 'Google Chrome.app',
'com.google.Chrome.framework': 'Google Chrome Framework.framework',
'com.google.Chrome.helper': 'Google Chrome Helper.app'
}
class CrashReport(object):
"""A parsed representation of an Apple CrashReport text file."""
def __init__(self, file_name):
super(CrashReport, self).__init__()
self.report_info = {}
self.threads = []
self._binary_images = {}
fd = open(file_name, 'r')
self._ParseHeader(fd)
# Try and get the report version. If it's not a version we handle, abort.
self.report_version = int(self.report_info['Report Version'])
# Version 6: 10.5 and 10.6 crash report
# Version 7: 10.6 spindump report
# Version 8: 10.7 spindump report
# Version 9: 10.7 crash report
valid_versions = (6, 7, 8, 9)
if self.report_version not in valid_versions:
raise Exception("Only crash reports of versions %s are accepted." %
str(valid_versions))
# If this is a spindump (version 7 or 8 report), use a special parser. The
# format is undocumented, but is similar to version 6. However, the spindump
# report contains user and kernel stacks for every process on the system.
if self.report_version == 7 or self.report_version == 8:
self._ParseSpindumpStack(fd)
else:
self._ParseStack(fd)
self._ParseBinaryImages(fd)
fd.close()
def Symbolicate(self, symbol_path):
"""Symbolicates a crash report stack trace."""
# In order to be efficient, collect all the offsets that will be passed to
# atos by the image name.
offsets_by_image = self._CollectAddressesForImages(SYMBOL_IMAGE_MAP.keys())
# For each image, run atos with the list of addresses.
for image_name, addresses in offsets_by_image.items():
# If this image was not loaded or is in no stacks, skip.
if image_name not in self._binary_images or not len(addresses):
continue
# Combine the |image_name| and |symbol_path| into the path of the dSYM.
dsym_file = self._GetDSymPath(symbol_path, image_name)
# From the list of 2-Tuples of (frame, address), create a list of just
# addresses.
address_list = map(lambda x: x[1], addresses)
# Look up the load address of the image.
binary_base = self._binary_images[image_name][0]
# This returns a list of just symbols. The indices will match up with the
# list of |addresses|.
symbol_names = self._RunAtos(binary_base, dsym_file, address_list)
if not symbol_names:
print 'Error loading symbols for ' + image_name
continue
# Attaches a list of symbol names to stack frames. This assumes that the
# order of |addresses| has stayed the same as |symbol_names|.
self._AddSymbolsToFrames(symbol_names, addresses)
def _ParseHeader(self, fd):
"""Parses the header section of a crash report, which contains the OS and
application version information."""
# The header is made up of different sections, depending on the type of
# report and the report version. Almost all have a format of a key and
# value separated by a colon. Accumulate all of these artifacts into a
# dictionary until the first thread stack is reached.
thread_re = re.compile('^[ \t]*Thread ([a-f0-9]+)')
line = ''
while not thread_re.match(line):
# Skip blank lines. There are typically three or four sections separated
# by newlines in the header.
line = line.strip()
if line:
parts = line.split(':', 1)
# Certain lines in different report versions don't follow the key-value
# format, so skip them.
if len(parts) == 2:
# There's a varying amount of space padding after the ':' to align all
# the values; strip that.
self.report_info[parts[0]] = parts[1].lstrip()
line = fd.readline()
# When this loop exits, the header has been read in full. However, the first
# thread stack heading has been read past. Seek backwards from the current
# position by the length of the line so that it is re-read when
# _ParseStack() is entered.
fd.seek(-len(line), os.SEEK_CUR)
def _ParseStack(self, fd):
"""Parses the stack dump of a crash report and creates a list of threads
and their stack traces."""
# Compile a regex that matches the start of a thread stack. Note that this
# must be specific to not include the thread state section, which comes
# right after all the stack traces.
line_re = re.compile('^Thread ([0-9]+)( Crashed)?:(.*)')
# On entry into this function, the fd has been walked up to the "Thread 0"
# line.
line = fd.readline().rstrip()
in_stack = False
thread = None
while line_re.match(line) or in_stack:
# Check for start of the thread stack.
matches = line_re.match(line)
if not line.strip():
# A blank line indicates a break in the thread stack.
in_stack = False
elif matches:
# If this is the start of a thread stack, create the CrashThread.
in_stack = True
thread = CrashThread(matches.group(1))
thread.name = matches.group(3)
thread.did_crash = matches.group(2) != None
self.threads.append(thread)
else:
# All other lines are stack frames.
thread.stack.append(self._ParseStackFrame(line))
# Read the next line.
line = fd.readline()
def _ParseStackFrame(self, line):
"""Takes in a single line of text and transforms it into a StackFrame."""
frame = StackFrame(line)
# A stack frame is in the format of:
# |<frame-number> <binary-image> 0x<address> <symbol> <offset>|.
regex = '^([0-9]+) +(.+)[ \t]+(0x[0-9a-f]+) (.*) \+ ([0-9]+)$'
matches = re.match(regex, line)
if matches is None:
return frame
# Create a stack frame with the information extracted from the regex.
frame.frame_id = matches.group(1)
frame.image = matches.group(2)
frame.address = int(matches.group(3), 0) # Convert HEX to an int.
frame.original_symbol = matches.group(4)
frame.offset = matches.group(5)
frame.line = None
return frame
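  # Illustrative input (not part of the original script; the address and
  # symbol below are made up). A crash-report line such as
  #
  #   4   com.google.Chrome.framework   0x0000000100a1b2c3 ChromeMain + 42
  #
  # is split by _ParseStackFrame above into the frame id, binary image,
  # hex address, symbol name and offset.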
def _ParseSpindumpStack(self, fd):
"""Parses a spindump stack report. In this format, each thread stack has
both a user and kernel trace. Only the user traces are symbolicated."""
# The stack trace begins with the thread header, which is identified by a
# HEX number. The thread names appear to be incorrect in spindumps.
user_thread_re = re.compile('^ Thread ([0-9a-fx]+)')
# When this method is called, the fd has been walked right up to the first
# line.
line = fd.readline()
in_user_stack = False
in_kernel_stack = False
thread = None
frame_id = 0
while user_thread_re.match(line) or in_user_stack or in_kernel_stack:
# Check for the start of a thread.
matches = user_thread_re.match(line)
if not line.strip():
# A blank line indicates the start of a new thread. The blank line comes
# after the kernel stack before a new thread header.
in_kernel_stack = False
elif matches:
# This is the start of a thread header. The next line is the heading for
# the user stack, followed by the actual trace.
thread = CrashThread(matches.group(1))
frame_id = 0
self.threads.append(thread)
in_user_stack = True
line = fd.readline() # Read past the 'User stack:' header.
elif line.startswith(' Kernel stack:'):
# The kernel stack header comes immediately after the last frame (really
# the top frame) in the user stack, without a blank line.
in_user_stack = False
in_kernel_stack = True
elif in_user_stack:
# If this is a line while in the user stack, parse it as a stack frame.
thread.stack.append(self._ParseSpindumpStackFrame(line))
# Loop with the next line.
line = fd.readline()
# When the loop exits, the file has been read through the 'Binary images:'
# header. Seek backwards so that _ParseBinaryImages() does the right thing.
fd.seek(-len(line), os.SEEK_CUR)
def _ParseSpindumpStackFrame(self, line):
"""Parses a spindump-style stackframe."""
frame = StackFrame(line)
# The format of the frame is either:
# A: |<space><steps> <symbol> + <offset> (in <image-name>) [<address>]|
# B: |<space><steps> ??? (in <image-name> + <offset>) [<address>]|
regex_a = '^([ ]+[0-9]+) (.*) \+ ([0-9]+) \(in (.*)\) \[(0x[0-9a-f]+)\]'
regex_b = '^([ ]+[0-9]+) \?\?\?( \(in (.*) \+ ([0-9]+)\))? \[(0x[0-9a-f]+)\]'
# Create the stack frame with the information extracted from the regex.
matches = re.match(regex_a, line)
if matches:
frame.frame_id = matches.group(1)[4:] # Remove some leading spaces.
frame.original_symbol = matches.group(2)
frame.offset = matches.group(3)
frame.image = matches.group(4)
frame.address = int(matches.group(5), 0)
frame.line = None
return frame
# If pattern A didn't match (which it will most of the time), try B.
matches = re.match(regex_b, line)
if matches:
frame.frame_id = matches.group(1)[4:] # Remove some leading spaces.
frame.image = matches.group(3)
frame.offset = matches.group(4)
frame.address = int(matches.group(5), 0)
frame.line = None
return frame
# Otherwise, this frame could not be matched and just use the raw input.
frame.line = frame.line.strip()
return frame
def _ParseBinaryImages(self, fd):
"""Parses out the binary images section in order to get the load offset."""
# The parser skips some sections, so advance until the "Binary Images"
# header is reached.
while not fd.readline().lstrip().startswith("Binary Images:"): pass
# Create a regex to match the lines of format:
# |0x<start> - 0x<end> <binary-image> <version> (<version>) <<UUID>> <path>|
image_re = re.compile(
'[ ]*(0x[0-9a-f]+) -[ \t]+(0x[0-9a-f]+) [+ ]([a-zA-Z0-9._\-]+)')
# This section is in this format:
# |<start address> - <end address> <image name>|.
while True:
line = fd.readline()
if not line.strip():
# End when a blank line is hit.
return
# Match the line to the regex.
match = image_re.match(line)
if match:
# Store the offsets by image name so it can be referenced during
# symbolication. These are hex numbers with leading '0x', so int() can
# convert them to decimal if base=0.
address_range = (int(match.group(1), 0), int(match.group(2), 0))
self._binary_images[match.group(3)] = address_range
def _CollectAddressesForImages(self, images):
"""Iterates all the threads and stack frames and all the stack frames that
are in a list of binary |images|. The result is a dictionary, keyed by the
image name that maps to a list of tuples. Each is a 2-Tuple of
(stack_frame, address)"""
# Create the collection and initialize it with empty lists for each image.
collection = {}
for image in images:
collection[image] = []
# Perform the iteration.
for thread in self.threads:
for frame in thread.stack:
image_name = self._ImageForAddress(frame.address)
if image_name in images:
# Replace the image name in the frame in case it was elided.
frame.image = image_name
collection[frame.image].append((frame, frame.address))
# Return the result.
return collection
def _ImageForAddress(self, address):
"""Given a PC address, returns the bundle identifier of the image in which
the address resides."""
for image_name, address_range in self._binary_images.items():
if address >= address_range[0] and address <= address_range[1]:
return image_name
return None
def _GetDSymPath(self, base_path, image_name):
"""Takes a base path for the symbols and an image name. It looks the name up
in SYMBOL_IMAGE_MAP and creates a full path to the dSYM in the bundle."""
image_file = SYMBOL_IMAGE_MAP[image_name]
return os.path.join(base_path, image_file + '.dSYM', 'Contents',
'Resources', 'DWARF',
os.path.splitext(image_file)[0]) # Chop off the extension.
def _RunAtos(self, load_address, dsym_file, addresses):
"""Runs the atos with the provided arguments. |addresses| is used as stdin.
Returns a list of symbol information in the same order as |addresses|."""
args = ['atos', '-l', str(load_address), '-o', dsym_file]
# Get the arch type. This is of the format |X86 (Native)|.
if 'Code Type' in self.report_info:
arch = self.report_info['Code Type'].lower().split(' ')
if len(arch) == 2:
arch = arch[0]
if arch == 'x86':
# The crash report refers to i386 as x86, but atos doesn't know what
# that is.
arch = 'i386'
args.extend(['-arch', arch])
proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
addresses = map(hex, addresses)
(stdout, stderr) = proc.communicate(' '.join(addresses))
if proc.returncode:
return None
return stdout.rstrip().split('\n')
def _AddSymbolsToFrames(self, symbols, address_tuples):
"""Takes a single value (the list) from _CollectAddressesForImages and does
a smart-zip with the data returned by atos in |symbols|. Note that the
indices must match for this to succeed."""
if len(symbols) != len(address_tuples):
print 'symbols do not match'
# Each line of output from atos is in this format:
# |<symbol> (in <image>) (<file>:<line>)|.
line_regex = re.compile('(.+) \(in (.+)\) (\((.+):([0-9]+)\))?')
# Zip the two data sets together.
for i in range(len(symbols)):
symbol_parts = line_regex.match(symbols[i])
if not symbol_parts:
continue # Error.
frame = address_tuples[i][0]
frame.symbol = symbol_parts.group(1)
frame.image = symbol_parts.group(2)
frame.file_name = symbol_parts.group(4)
frame.line_number = symbol_parts.group(5)
class CrashThread(object):
"""A CrashThread represents a stacktrace of a single thread """
def __init__(self, thread_id):
super(CrashThread, self).__init__()
self.thread_id = thread_id
self.name = None
self.did_crash = False
self.stack = []
def __repr__(self):
name = ''
if self.name:
name = ': ' + self.name
return 'Thread ' + self.thread_id + name + '\n' + \
'\n'.join(map(str, self.stack))
class StackFrame(object):
"""A StackFrame is owned by a CrashThread."""
def __init__(self, line):
super(StackFrame, self).__init__()
# The original line. This will be set to None if symbolication was
    # successful.
self.line = line
self.frame_id = 0
self.image = None
self.address = 0x0
self.original_symbol = None
self.offset = 0x0
# The following members are set after symbolication.
self.symbol = None
self.file_name = None
self.line_number = 0
def __repr__(self):
# If symbolication failed, just use the original line.
if self.line:
return ' %s' % self.line
# Use different location information depending on symbolicated data.
location = None
if self.file_name:
location = ' - %s:%s' % (self.file_name, self.line_number)
else:
location = ' + %s' % self.offset
# Same with the symbol information.
symbol = self.original_symbol
if self.symbol:
symbol = self.symbol
return ' %s\t0x%x\t[%s\t%s]\t%s' % (self.frame_id, self.address,
self.image, location, symbol)
def PrettyPrintReport(report):
"""Takes a crash report and prints it like the crash server would."""
print 'Process : ' + report.report_info['Process']
print 'Version : ' + report.report_info['Version']
print 'Date : ' + report.report_info['Date/Time']
print 'OS Version : ' + report.report_info['OS Version']
print
if 'Crashed Thread' in report.report_info:
print 'Crashed Thread : ' + report.report_info['Crashed Thread']
print
if 'Event' in report.report_info:
print 'Event : ' + report.report_info['Event']
print
for thread in report.threads:
print
if thread.did_crash:
exc_type = report.report_info['Exception Type'].split(' ')[0]
exc_code = report.report_info['Exception Codes'].replace('at', '@')
print '*CRASHED* ( ' + exc_type + ' / ' + exc_code + ' )'
# Version 7 reports have spindump-style output (with a stepped stack trace),
# so remove the first tab to get better alignment.
if report.report_version == 7:
for line in repr(thread).split('\n'):
print line.replace('\t', ' ', 1)
else:
print thread
def Main(args):
"""Program main."""
parser = optparse.OptionParser(
usage='%prog [options] symbol_path crash_report',
description='This will parse and symbolicate an Apple CrashReporter v6-9 '
'file.')
parser.add_option('-s', '--std-path', action='store_true', dest='std_path',
help='With this flag, the symbol_path is a containing '
                         'directory, in which dSYM files are stored in a '
'directory named by the version. Example: '
'[symbolicate_crash.py -s ./symbols/ report.crash] will '
'look for dSYMs in ./symbols/15.0.666.0/ if the report is '
                         'from that version.')
(options, args) = parser.parse_args(args[1:])
# Check that we have something to symbolicate.
if len(args) != 2:
parser.print_usage()
return 1
report = CrashReport(args[1])
symbol_path = None
# If not using the standard layout, this is a full path to the symbols.
if not options.std_path:
symbol_path = args[0]
# Otherwise, use the report version to locate symbols in a directory.
else:
# This is in the format of |M.N.B.P (B.P)|. Get just the part before the
# space.
chrome_version = report.report_info['Version'].split(' ')[0]
symbol_path = os.path.join(args[0], chrome_version)
# Check that the symbols exist.
if not os.path.isdir(symbol_path):
print >>sys.stderr, 'Symbol path %s is not a directory' % symbol_path
return 2
print >>sys.stderr, 'Using symbols from ' + symbol_path
print >>sys.stderr, '=' * 80
report.Symbolicate(symbol_path)
PrettyPrintReport(report)
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))
|
rmehta/erpnext | refs/heads/develop | erpnext/stock/report/stock_balance/stock_balance.py | 6 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cint, getdate
def execute(filters=None):
if not filters: filters = {}
validate_filters(filters)
columns = get_columns()
item_map = get_item_details(filters)
iwb_map = get_item_warehouse_map(filters)
data = []
for (company, item, warehouse) in sorted(iwb_map):
qty_dict = iwb_map[(company, item, warehouse)]
data.append([item, item_map[item]["item_name"],
item_map[item]["item_group"],
item_map[item]["brand"],
item_map[item]["description"], warehouse,
item_map[item]["stock_uom"], qty_dict.opening_qty,
qty_dict.opening_val, qty_dict.in_qty,
qty_dict.in_val, qty_dict.out_qty,
qty_dict.out_val, qty_dict.bal_qty,
qty_dict.bal_val, qty_dict.val_rate,
company
])
return columns, data
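# Illustrative call (not part of the original report module; the dates, item
# code and warehouse below are hypothetical):
#
#   columns, data = execute({
#       "from_date": "2016-01-01",
#       "to_date": "2016-01-31",
#       "item_code": "ITEM-0001",
#       "warehouse": "Stores - C"
#   })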
def get_columns():
"""return columns"""
columns = [
_("Item")+":Link/Item:100",
_("Item Name")+"::150",
_("Item Group")+"::100",
_("Brand")+"::90",
_("Description")+"::140",
_("Warehouse")+":Link/Warehouse:100",
_("Stock UOM")+":Link/UOM:90",
_("Opening Qty")+":Float:100",
_("Opening Value")+":Float:110",
_("In Qty")+":Float:80",
_("In Value")+":Float:80",
_("Out Qty")+":Float:80",
_("Out Value")+":Float:80",
_("Balance Qty")+":Float:100",
_("Balance Value")+":Float:100",
_("Valuation Rate")+":Float:90",
_("Company")+":Link/Company:100"
]
return columns
def get_conditions(filters):
conditions = ""
if not filters.get("from_date"):
frappe.throw(_("'From Date' is required"))
if filters.get("to_date"):
conditions += " and sle.posting_date <= '%s'" % frappe.db.escape(filters.get("to_date"))
else:
frappe.throw(_("'To Date' is required"))
if filters.get("item_group"):
ig_details = frappe.db.get_value("Item Group", filters.get("item_group"),
["lft", "rgt"], as_dict=1)
if ig_details:
conditions += """
and exists (select name from `tabItem Group` ig
where ig.lft >= %s and ig.rgt <= %s and item.item_group = ig.name)
""" % (ig_details.lft, ig_details.rgt)
if filters.get("item_code"):
conditions += " and sle.item_code = '%s'" % frappe.db.escape(filters.get("item_code"), percent=False)
if filters.get("warehouse"):
warehouse_details = frappe.db.get_value("Warehouse", filters.get("warehouse"), ["lft", "rgt"], as_dict=1)
if warehouse_details:
conditions += " and exists (select name from `tabWarehouse` wh \
where wh.lft >= %s and wh.rgt <= %s and sle.warehouse = wh.name)"%(warehouse_details.lft,
warehouse_details.rgt)
return conditions
def get_stock_ledger_entries(filters):
conditions = get_conditions(filters)
join_table_query = ""
if filters.get("item_group"):
join_table_query = "inner join `tabItem` item on item.name = sle.item_code"
return frappe.db.sql("""
select
sle.item_code, warehouse, sle.posting_date, sle.actual_qty, sle.valuation_rate,
sle.company, sle.voucher_type, sle.qty_after_transaction, sle.stock_value_difference
from
`tabStock Ledger Entry` sle force index (posting_sort_index) %s
where sle.docstatus < 2 %s
order by sle.posting_date, sle.posting_time, sle.name""" %
(join_table_query, conditions), as_dict=1)
def get_item_warehouse_map(filters):
iwb_map = {}
from_date = getdate(filters.get("from_date"))
to_date = getdate(filters.get("to_date"))
sle = get_stock_ledger_entries(filters)
for d in sle:
key = (d.company, d.item_code, d.warehouse)
if key not in iwb_map:
iwb_map[key] = frappe._dict({
"opening_qty": 0.0, "opening_val": 0.0,
"in_qty": 0.0, "in_val": 0.0,
"out_qty": 0.0, "out_val": 0.0,
"bal_qty": 0.0, "bal_val": 0.0,
"val_rate": 0.0
})
qty_dict = iwb_map[(d.company, d.item_code, d.warehouse)]
if d.voucher_type == "Stock Reconciliation":
qty_diff = flt(d.qty_after_transaction) - qty_dict.bal_qty
else:
qty_diff = flt(d.actual_qty)
value_diff = flt(d.stock_value_difference)
if d.posting_date < from_date:
qty_dict.opening_qty += qty_diff
qty_dict.opening_val += value_diff
elif d.posting_date >= from_date and d.posting_date <= to_date:
if qty_diff > 0:
qty_dict.in_qty += qty_diff
qty_dict.in_val += value_diff
else:
qty_dict.out_qty += abs(qty_diff)
qty_dict.out_val += abs(value_diff)
qty_dict.val_rate = d.valuation_rate
qty_dict.bal_qty += qty_diff
qty_dict.bal_val += value_diff
iwb_map = filter_items_with_no_transactions(iwb_map)
return iwb_map
def filter_items_with_no_transactions(iwb_map):
for (company, item, warehouse) in sorted(iwb_map):
qty_dict = iwb_map[(company, item, warehouse)]
no_transactions = True
float_precision = cint(frappe.db.get_default("float_precision")) or 3
for key, val in qty_dict.items():
val = flt(val, float_precision)
qty_dict[key] = val
if key != "val_rate" and val:
no_transactions = False
if no_transactions:
iwb_map.pop((company, item, warehouse))
return iwb_map
def get_item_details(filters):
condition = ''
value = ()
if filters.get("item_code"):
condition = "where item_code=%s"
value = (filters.get("item_code"),)
items = frappe.db.sql("""select name, item_name, stock_uom, item_group, brand, description
from tabItem {condition}""".format(condition=condition), value, as_dict=1)
return dict((d.name, d) for d in items)
def validate_filters(filters):
if not (filters.get("item_code") or filters.get("warehouse")):
sle_count = flt(frappe.db.sql("""select count(name) from `tabStock Ledger Entry`""")[0][0])
if sle_count > 500000:
frappe.throw(_("Please set filter based on Item or Warehouse"))
|
consulo/consulo-python | refs/heads/master | plugin/src/main/dist/helpers/py3only/docutils/transforms/peps.py | 44 | # $Id: peps.py 6433 2010-09-28 08:21:25Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import os
import re
import time
from docutils import DataError
from docutils import nodes, utils, languages
from docutils.transforms import Transform
from docutils.transforms import parts, references, misc
class Headers(Transform):
"""
Process fields in a PEP's initial RFC-2822 header.
"""
default_priority = 360
pep_url = 'pep-%04d'
pep_cvs_url = ('http://svn.python.org/view/*checkout*'
'/peps/trunk/pep-%04d.txt')
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
def apply(self):
if not len(self.document):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
for field in header:
if field[0].astext().lower() == 'pep': # should be the first field
value = field[1].astext()
try:
pep = int(value)
cvs_url = self.pep_cvs_url % pep
except ValueError:
pep = value
cvs_url = None
msg = self.document.reporter.warning(
'"PEP" header must contain an integer; "%s" is an '
'invalid value.' % pep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if pep is None:
raise DataError('Document does not contain an RFC-2822 "PEP" '
'header.')
if pep == 0:
# Special processing for PEP 0.
pending = nodes.pending(PEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
if len(header) < 2 or header[1][0].astext().lower() != 'title':
raise DataError('No title!')
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('PEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('PEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node, pep))
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
                for refpep in re.split(r',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
refpep, refpep,
refuri=(self.document.settings.pep_base_url
+ self.pep_url % pepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
pep_type = para.astext()
uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):
"""
Insert an empty table of contents topic and a transform placeholder into
the document after the RFC 2822 header.
"""
default_priority = 380
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
name = language.labels['contents']
title = nodes.title('', name)
topic = nodes.topic('', title, classes=['contents'])
name = nodes.fully_normalize_name(name)
if not self.document.has_name(name):
topic['names'].append(name)
self.document.note_implicit_target(topic)
pending = nodes.pending(parts.Contents)
topic += pending
self.document.insert(1, topic)
self.document.note_pending(pending)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and schedule the
transform to run immediately.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
doc.set_id(refsect)
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
pending = nodes.pending(misc.CallBack,
details={'callback': self.cleanup_callback})
refsect.append(pending)
self.document.note_pending(pending, 1)
def cleanup_callback(self, pending):
"""
Remove an empty "References" section.
Called after the `references.TargetNotes` transform is complete.
"""
if len(pending.parent) == 2: # <title> and <pending>
pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):
"""
Special processing for PEP 0.
"""
    default_priority = 760
def apply(self):
visitor = PEPZeroSpecial(self.document)
self.document.walk(visitor)
self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):
"""
Perform the special processing needed by PEP 0:
- Mask email addresses.
- Link PEP numbers in the second column of 4-column tables to the PEPs
themselves.
"""
pep_url = Headers.pep_url
def unknown_visit(self, node):
pass
def visit_reference(self, node):
node.replace_self(mask_email(node))
def visit_field_list(self, node):
if 'rfc2822' in node['classes']:
raise nodes.SkipNode
def visit_tgroup(self, node):
self.pep_table = node['cols'] == 4
self.entry = 0
def visit_colspec(self, node):
self.entry += 1
if self.pep_table and self.entry == 2:
node['classes'].append('num')
def visit_row(self, node):
self.entry = 0
def visit_entry(self, node):
self.entry += 1
if self.pep_table and self.entry == 2 and len(node) == 1:
node['classes'].append('num')
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
pep = int(text)
ref = (self.document.settings.pep_base_url
+ self.pep_url % pep)
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
non_masked_addresses = ('peps@python.org',
'python-list@python.org',
'python-dev@python.org')
def mask_email(ref, pepno=None):
"""
Mask the email address in `ref` and return a replacement node.
`ref` is returned unchanged if it contains no email address.
For email addresses such as "user@host", mask the address as "user at
host" (text) to thwart simple email address harvesters (except for those
listed in `non_masked_addresses`). If a PEP number (`pepno`) is given,
return a reference including a default email subject.
"""
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        if ref['refuri'][7:] in non_masked_addresses:  # len('mailto:') == 7
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.raw('', replacement_text, format='html')
if pepno is None:
return replacement
else:
ref['refuri'] += '?subject=PEP%%20%s' % pepno
ref[:] = [replacement]
return ref
else:
return ref
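# Illustrative behaviour (hypothetical address): for a reference whose refuri is
# "mailto:user@example.org", the text is rendered as "user at example.org"; addresses
# listed in `non_masked_addresses` keep their original form, and when `pepno` is given
# the mailto URI gains a "?subject=PEP%20<pepno>" suffix.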
|
BBELAOUCHA/dMRIParcellation | refs/heads/master | MNNparcellation/inc/__init__.py | 1 |
"""include files for MNNParcellation
"""
from .CSParcellation import Parcellation
from .Prepare_tractogram import Parcellation_data
from .Python2Vtk import ReadVtk2Python, WritePython2Vtk
from .Region_preparation import Mesh, Regions_processing
from .Similarity_Measures import Correlation_SM, Ruzicka_SM, Cosine_SM
from .Similarity_Measures import Tanimoto_SM, Motyka_SM, Roberts_SM
from .util import cond2mat_index, mat2cond_index, vec2symmetric_mat
|
leture/Misago | refs/heads/master | misago/users/views/auth.py | 4 | from django.conf import settings
from django.contrib import auth
from django.shortcuts import redirect
from django.utils.http import is_safe_url
from django.utils.six.moves.urllib.parse import urlparse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from misago.core.embercli import is_ember_cli_request, get_embercli_host
@sensitive_post_parameters()
@never_cache
@csrf_protect
def login(request):
if request.method == 'POST':
redirect_to = request.POST.get('redirect_to')
if redirect_to:
is_redirect_safe = is_safe_url(
url=redirect_to, host=request.get_host())
if not is_redirect_safe and is_ember_cli_request(request):
is_redirect_safe = is_safe_url(
url=redirect_to, host=get_embercli_host())
if is_redirect_safe:
redirect_to_path = urlparse(redirect_to).path
return redirect(redirect_to_path)
return redirect(settings.LOGIN_REDIRECT_URL)
@never_cache
@csrf_protect
def logout(request):
if request.method == 'POST' and request.user.is_authenticated():
auth.logout(request)
return redirect(settings.LOGIN_REDIRECT_URL)
|
DarioIriberri/MyriadSwitcher | refs/heads/master | src/difficulty/sources/P2pool.py | 1 | __author__ = 'Dario'
from DataSource import DataSource
URL_DIFFICULTIES = "http://myriad.p2pool.geek.nz/api/network.php"
class P2pool(DataSource):
def __init__(self, parent, queue, timeout):
DataSource.__init__(self, parent, queue, timeout)
self.objDiff = None
def fetchDifficulties(self):
self.objDiff = self.fetchDataURL(URL_DIFFICULTIES)
return self.objDiff
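    # The getters below assume the ordering of the MYR "Algorithms" array returned by
    # the p2pool API: index 1 = Scrypt, 2 = Groestl, 3 = Skein, 4 = Qubit (implied by
    # the method names; adjust if the API response changes).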
def getScryptDifficulty(self):
return self.objDiff["MYR"]["Algorithms"][1]["Difficulty"]
def getGroestlDifficulty(self):
return self.objDiff["MYR"]["Algorithms"][2]["Difficulty"]
def getSkeinDifficulty(self):
return self.objDiff["MYR"]["Algorithms"][3]["Difficulty"]
def getQubitDifficulty(self):
return self.objDiff["MYR"]["Algorithms"][4]["Difficulty"]
|
trunca/enigma2 | refs/heads/6.5 | lib/python/Components/Renderer/SingleEpgList.py | 49 | from Components.VariableText import VariableText
from enigma import eLabel, eEPGCache
from Renderer import Renderer
from time import localtime
class SingleEpgList(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
self.epgcache = eEPGCache.getInstance()
GUI_WIDGET = eLabel
def changed(self, what):
event = self.source.event
if event is None:
self.text = ""
return
service = self.source.service
text = ""
evt = None
if self.epgcache is not None:
evt = self.epgcache.lookupEvent(['IBDCT', (service.toString(), 0, -1, -1)])
if evt:
maxx = 0
for x in evt:
if maxx > 0:
if x[4]:
t = localtime(x[1])
text = text + "%02d:%02d %s\n" % (t[3], t[4], x[4])
else:
text = text + "n/a\n"
maxx += 1
if maxx > 4:
break
self.text = text
|
msdx321/android_kernel_samsung_heroXqltechn | refs/heads/tw-6.0 | lazy-prebuilt/aarch64-linux-android-4.9/share/gdb/python/gdb/__init__.py | 83 | # Copyright (C) 2010-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import traceback
import os
import sys
import _gdb
if sys.version_info[0] > 2:
# Python 3 moved "reload"
from imp import reload
from _gdb import *
class _GdbFile (object):
# These two are needed in Python 3
encoding = "UTF-8"
errors = "strict"
def close(self):
# Do nothing.
return None
def isatty(self):
return False
def writelines(self, iterable):
for line in iterable:
self.write(line)
def flush(self):
flush()
class GdbOutputFile (_GdbFile):
def write(self, s):
write(s, stream=STDOUT)
sys.stdout = GdbOutputFile()
class GdbOutputErrorFile (_GdbFile):
def write(self, s):
write(s, stream=STDERR)
sys.stderr = GdbOutputErrorFile()
# Default prompt hook does nothing.
prompt_hook = None
# Ensure that sys.argv is set to something.
# We do not use PySys_SetArgvEx because it did not appear until 2.6.6.
sys.argv = ['']
# Initial pretty printers.
pretty_printers = []
# Initial type printers.
type_printers = []
# Initial frame filters.
frame_filters = {}
# Convenience variable to GDB's python directory
PYTHONDIR = os.path.dirname(os.path.dirname(__file__))
# Auto-load all functions/commands.
# Packages to auto-load.
packages = [
'function',
'command'
]
# pkgutil.iter_modules is not available prior to Python 2.6. Instead,
# manually iterate the list, collating the Python files in each module
# path. Construct the module name, and import.
def auto_load_packages():
for package in packages:
location = os.path.join(os.path.dirname(__file__), package)
if os.path.exists(location):
py_files = filter(lambda x: x.endswith('.py')
and x != '__init__.py',
os.listdir(location))
for py_file in py_files:
# Construct from foo.py, gdb.module.foo
modname = "%s.%s.%s" % ( __name__, package, py_file[:-3] )
try:
if modname in sys.modules:
# reload modules with duplicate names
reload(__import__(modname))
else:
__import__(modname)
except:
sys.stderr.write (traceback.format_exc() + "\n")
auto_load_packages()
def GdbSetPythonDirectory(dir):
"""Update sys.path, reload gdb and auto-load packages."""
global PYTHONDIR
try:
sys.path.remove(PYTHONDIR)
except ValueError:
pass
sys.path.insert(0, dir)
PYTHONDIR = dir
# note that reload overwrites the gdb module without deleting existing
# attributes
reload(__import__(__name__))
auto_load_packages()
|
tallypokemap/PokeAlarm | refs/heads/local | PokeAlarm/Alarm.py | 1 | # Standard Library Imports
import time
import traceback
# 3rd Party Imports
# Local Imports
##################################################### ATTENTION! #####################################################
# You DO NOT NEED to edit this file to customize messages for services! Please see the Wiki on the correct way to
# customize services. In fact, doing so will likely NOT work correctly with many features included in PokeAlarm.
# PLEASE ONLY EDIT IF YOU KNOW WHAT YOU ARE DOING!
##################################################### ATTENTION! #####################################################
# This is a basic interface for the Alarms Modules to implement
class Alarm(object):
_defaults = { # These are the default values for the 'Alerts'
"pokemon": {},
"lures": {},
"gyms": {}
}
# Gather settings and create alarm
def __init__(self):
raise NotImplementedError("This is an abstract method.")
# (Re)establishes Service connection
def connect(self):
raise NotImplementedError("This is an abstract method.")
    # Sends a startup message via the Service
def startup_message(self):
raise NotImplementedError("This is an abstract method.")
# Set the appropriate settings for each alert
def create_alert_settings(self, settings, default):
raise NotImplementedError("This is an abstract method.")
# Send Alert to the Service
def send_alert(self, alert_settings, info):
raise NotImplementedError("This is an abstract method.")
# Trigger an alert based on Pokemon info
def pokemon_alert(self, pokemon_info):
raise NotImplementedError("This is an abstract method.")
# Trigger an alert based on PokeLure info
def pokestop_alert(self, pokelure_info):
raise NotImplementedError("This is an abstract method.")
# Trigger an alert based on PokeGym info
def gym_alert(self, pokegym_info):
raise NotImplementedError("This is an abstract method.")
# Trigger an alert when a raid egg has spawned (UPCOMING raid event)
def raid_egg_alert(self, pokeraid_info):
raise NotImplementedError("Raid Egg is not implemented!")
# Trigger an alert when a raid egg is HATCHED (active raid)
def raid_alert(self, pokeraid_info):
raise NotImplementedError('Raid Alert is not implemented.')
# Return a version of the string with the correct substitutions made
@staticmethod
def replace(string, pkinfo):
if string is None:
return None
s = string.encode('utf-8')
for key in pkinfo:
s = s.replace("<{}>".format(key), str(pkinfo[key]))
return s
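    # Illustrative example (hypothetical values): replace("A wild <pkmn> appeared!",
    # {'pkmn': 'Pikachu'}) returns "A wild Pikachu appeared!"; any <key> with no
    # matching entry in the info dict is left untouched.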
    # Attempts to send the alert with the specified args, reconnecting if necessary
@staticmethod
def try_sending(log, reconnect, name, send_alert, args, max_attempts=3):
for i in range(max_attempts):
try:
send_alert(**args)
return # message sent successfully
except Exception as e:
log.error("Encountered error while sending notification ({}: {})".format(type(e).__name__, e))
log.debug("Stack trace: \n {}".format(traceback.format_exc()))
log.info("{} is having connection issues. {} attempt of {}.".format(name, i+1, max_attempts))
time.sleep(3)
reconnect()
log.error("Could not send notification... Giving up.")
|
dkubiak789/odoo | refs/heads/8.0 | addons/product/report/product_pricelist.py | 341 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class product_pricelist(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(product_pricelist, self).__init__(cr, uid, name, context=context)
self.pricelist=False
self.quantity=[]
self.localcontext.update({
'time': time,
'get_pricelist': self._get_pricelist,
'get_currency': self._get_currency,
'get_categories': self._get_categories,
'get_price': self._get_price,
'get_titles': self._get_titles,
})
def _get_titles(self, form):
lst = []
vals = {}
qtys = 1
for i in range(1,6):
if form['qty'+str(i)]!=0:
vals['qty'+str(qtys)] = str(form['qty'+str(i)]) + ' units'
qtys += 1
lst.append(vals)
return lst
def _set_quantity(self, form):
for i in range(1,6):
q = 'qty%d'%i
if form[q] >0 and form[q] not in self.quantity:
self.quantity.append(form[q])
else:
self.quantity.append(0)
return True
def _get_pricelist(self, pricelist_id):
pricelist = self.pool.get('product.pricelist').read(self.cr, self.uid, [pricelist_id], ['name'], context=self.localcontext)[0]
return pricelist['name']
def _get_currency(self, pricelist_id):
pricelist = self.pool.get('product.pricelist').read(self.cr, self.uid, [pricelist_id], ['currency_id'], context=self.localcontext)[0]
return pricelist['currency_id'][1]
def _get_categories(self, products, form):
cat_ids=[]
res=[]
self.pricelist = form['price_list']
self._set_quantity(form)
pro_ids=[]
for product in products:
pro_ids.append(product.id)
if product.categ_id.id not in cat_ids:
cat_ids.append(product.categ_id.id)
cats = self.pool.get('product.category').name_get(self.cr, self.uid, cat_ids, context=self.localcontext)
if not cats:
return res
for cat in cats:
product_ids=self.pool.get('product.product').search(self.cr, self.uid, [('id', 'in', pro_ids), ('categ_id', '=', cat[0])], context=self.localcontext)
products = []
for product in self.pool.get('product.product').read(self.cr, self.uid, product_ids, ['name', 'code'], context=self.localcontext):
val = {
'id':product['id'],
'name':product['name'],
'code':product['code']
}
i = 1
for qty in self.quantity:
if qty == 0:
val['qty'+str(i)] = 0.0
else:
val['qty'+str(i)]=self._get_price(self.pricelist, product['id'], qty)
i += 1
products.append(val)
res.append({'name':cat[1],'products': products})
return res
def _get_price(self, pricelist_id, product_id, qty):
sale_price_digits = self.get_digits(dp='Product Price')
pricelist = self.pool.get('product.pricelist').browse(self.cr, self.uid, [pricelist_id], context=self.localcontext)[0]
price_dict = self.pool.get('product.pricelist').price_get(self.cr, self.uid, [pricelist_id], product_id, qty, context=self.localcontext)
if price_dict[pricelist_id]:
price = self.formatLang(price_dict[pricelist_id], digits=sale_price_digits, currency_obj=pricelist.currency_id)
else:
res = self.pool.get('product.product').read(self.cr, self.uid, [product_id])
price = self.formatLang(res[0]['list_price'], digits=sale_price_digits, currency_obj=pricelist.currency_id)
return price
class report_product_pricelist(osv.AbstractModel):
_name = 'report.product.report_pricelist'
_inherit = 'report.abstract_report'
_template = 'product.report_pricelist'
_wrapped_report_class = product_pricelist
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
acemgaming/iotvision_final | refs/heads/master | Webserver/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
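# Worked example of the surrogate formulae above: for U+1D306 (codePoint 0x1D306),
# highSurrogate = (0x1D306 - 0x10000) // 0x400 + 0xD800 = 0xD834 and
# lowSurrogate = (0x1D306 - 0x10000) % 0x400 + 0xDC00 = 0xDF06, so the symbol is
# returned as the surrogate pair u"\uD834\uDF06".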
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
funtoo/portage-funtoo | refs/heads/2013-06-07 | pym/portage/util/_eventloop/PollSelectAdapter.py | 4 | # Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from .PollConstants import PollConstants
import select
class PollSelectAdapter(object):
"""
Use select to emulate a poll object, for
systems that don't support poll().
"""
def __init__(self):
self._registered = {}
self._select_args = [[], [], []]
def register(self, fd, *args):
"""
Only POLLIN is currently supported!
"""
if len(args) > 1:
raise TypeError(
"register expected at most 2 arguments, got " + \
repr(1 + len(args)))
eventmask = PollConstants.POLLIN | \
PollConstants.POLLPRI | PollConstants.POLLOUT
if args:
eventmask = args[0]
self._registered[fd] = eventmask
self._select_args = None
def unregister(self, fd):
self._select_args = None
del self._registered[fd]
def poll(self, *args):
if len(args) > 1:
raise TypeError(
"poll expected at most 2 arguments, got " + \
repr(1 + len(args)))
timeout = None
if args:
timeout = args[0]
select_args = self._select_args
if select_args is None:
select_args = [list(self._registered), [], []]
if timeout is not None:
select_args = select_args[:]
# Translate poll() timeout args to select() timeout args:
#
# | units | value(s) for indefinite block
# ---------|--------------|------------------------------
# poll | milliseconds | omitted, negative, or None
# ---------|--------------|------------------------------
# select | seconds | omitted
# ---------|--------------|------------------------------
if timeout is not None and timeout < 0:
timeout = None
if timeout is not None:
select_args.append(float(timeout) / 1000)
select_events = select.select(*select_args)
poll_events = []
for fd in select_events[0]:
poll_events.append((fd, PollConstants.POLLIN))
return poll_events
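# Minimal usage sketch (illustrative only; assumes a POSIX pipe is available):
#
#     import os
#     adapter = PollSelectAdapter()
#     read_fd, write_fd = os.pipe()
#     adapter.register(read_fd, PollConstants.POLLIN)
#     os.write(write_fd, b"x")
#     adapter.poll(1000)   # -> [(read_fd, PollConstants.POLLIN)]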
|
ArcEye/MK-Qt5 | refs/heads/master | nosetests/test_netcmd.py | 11 | #!/usr/bin/env python
from nose import with_setup
from machinekit.nosetests.realtime import setup_module,teardown_module
from machinekit.nosetests.support import fnear
from machinekit.nosetests.rtapilog import Log
from machinekit import hal,rtapi
import os
l = Log(level=rtapi.MSG_INFO,tag="nosetest")
def test_component_creation():
l.log()
global c1,c2
c1 = hal.Component("c1")
c1.newpin("s32out", hal.HAL_S32, hal.HAL_OUT, init=42)
c1.newpin("s32in", hal.HAL_S32, hal.HAL_IN)
c1.newpin("s32io", hal.HAL_S32, hal.HAL_IO)
c1.newpin("floatout", hal.HAL_FLOAT, hal.HAL_OUT, init=42)
c1.newpin("floatin", hal.HAL_FLOAT, hal.HAL_IN)
c1.newpin("floatio", hal.HAL_FLOAT, hal.HAL_IO)
c1.ready()
c2 = hal.Component("c2")
c2.newpin("s32out", hal.HAL_S32, hal.HAL_OUT, init=4711)
c2.newpin("s32in", hal.HAL_S32, hal.HAL_IN)
c2.newpin("s32io", hal.HAL_S32, hal.HAL_IO)
c2.newpin("floatout", hal.HAL_FLOAT, hal.HAL_OUT, init=4711)
c2.newpin("floatin", hal.HAL_FLOAT, hal.HAL_IN)
c2.newpin("floatio", hal.HAL_FLOAT, hal.HAL_IO)
c2.ready()
def test_net_existing_signal_with_bad_type():
l.log()
hal.newsig("f", hal.HAL_FLOAT)
try:
hal.net("f", "c1.s32out")
        raise AssertionError("should not happen")
except TypeError:
pass
del hal.signals["f"]
assert 'f' not in hal.signals
def test_net_match_nonexistant_signals():
l.log()
try:
hal.net("nosuchsig", "c1.s32out", "c2.s32out")
        raise AssertionError("should not happen")
except TypeError:
pass
def test_net_pin2pin():
l.log()
# out to in is okay
hal.net("c1.s32out", "c2.s32in")
assert hal.pins["c1.s32out"].linked is True
assert hal.pins["c2.s32in"].linked is True
assert 'c1-s32out' in hal.signals
# cleanup
hal.pins["c1.s32out"].unlink()
hal.pins["c2.s32in"].unlink()
del hal.signals['c1-s32out']
assert 'c1-s32out' not in hal.signals
try:
hal.net("c2.s32out", "c1.s32out")
        # TypeError: net: signal 'c2-s32out' can not add writer pin 'c1.s32out', it already has HAL_OUT pin 'c2.s32out'
except TypeError:
pass
# cleanup
hal.pins["c2.s32out"].unlink()
del hal.signals['c2-s32out']
assert 'c2-s32out' not in hal.signals
def test_net_existing_signal():
l.log()
hal.newsig("s32", hal.HAL_S32)
assert hal.pins["c1.s32out"].linked is False
hal.net("s32", "c1.s32out")
assert hal.pins["c1.s32out"].linked is True
hal.newsig("s32too", hal.HAL_S32)
try:
hal.net("s32too", "c1.s32out")
        raise AssertionError("should not happen")
except RuntimeError:
pass
del hal.signals["s32"]
assert 's32' not in hal.signals
def test_newsig():
l.log()
hal.newsig("floatsig1", hal.HAL_FLOAT)
try:
hal.newsig("floatsig1", hal.HAL_FLOAT)
# RuntimeError: Failed to create signal floatsig1: HAL: ERROR: duplicate signal 'floatsig1'
        raise AssertionError("should not happen")
except RuntimeError:
pass
try:
hal.newsig(32423 * 32432, hal.HAL_FLOAT)
        raise AssertionError("should not happen")
except TypeError:
pass
try:
hal.newsig(None, hal.HAL_FLOAT)
        raise AssertionError("should not happen")
except TypeError:
pass
try:
hal.newsig("badtype", 1234)
        raise AssertionError("should not happen")
except TypeError:
pass
def test_check_net_args():
l.log()
try:
hal.net()
except TypeError:
pass
assert 'c1-s32out' not in hal.signals
try:
hal.net(None, "c1.s32out")
except TypeError:
pass
assert 'c1-s32out' not in hal.signals
# single pin argument
assert hal.pins["c1.s32out"].linked is False
try:
hal.net("c1.s32out")
# RuntimeError: net: at least one pin name expected
except RuntimeError:
pass
    # XXX both of the above go wrong:
    # the pin gets linked and the signal created despite the runtime error:
    # apparently hal_net.pyx:39 runs before the check in hal_net.pyx:60
assert hal.pins["c1.s32out"].linked is False
assert 'c1-s32out' not in hal.signals
# single signal argument
try:
hal.net("noexiste")
# RuntimeError: net: at least one pin name expected
except RuntimeError:
pass
# two signals
hal.newsig('sig1', hal.HAL_FLOAT)
hal.newsig('sig2', hal.HAL_FLOAT)
try:
hal.net('sig1', 'sig2')
# NameError: no such pin: sig2
except NameError:
pass
assert "noexiste" not in hal.signals
hal.net("noexiste", "c1.s32out")
assert "noexiste" in hal.signals
ne = hal.signals["noexiste"]
assert ne.writers == 1
assert ne.readers == 0
assert ne.bidirs == 0
try:
hal.net("floatsig1", "c1.s32out")
        raise AssertionError("should not happen")
except RuntimeError:
pass
(lambda s=__import__('signal'):
s.signal(s.SIGTERM, s.SIG_IGN))()
|
h2educ/scikit-learn | refs/heads/master | sklearn/utils/metaestimators.py | 283 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
|
rickerc/neutron_audit | refs/heads/cis-havana-staging | neutron/agent/rpc.py | 16 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from neutron.common import topics
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
def create_consumers(dispatcher, prefix, topic_details):
"""Create agent RPC consumers.
:param dispatcher: The dispatcher to process the incoming messages.
:param prefix: Common prefix for the plugin/agent message queues.
:param topic_details: A list of topics. Each topic has a name, an
operation, and an optional host param keying the
subscription to topic.host for plugin calls.
:returns: A common Connection.
"""
connection = rpc.create_connection(new=True)
for details in topic_details:
topic, operation, node_name = itertools.islice(
itertools.chain(details, [None]), 3)
topic_name = topics.get_topic_name(prefix, topic, operation)
connection.create_consumer(topic_name, dispatcher, fanout=True)
if node_name:
node_topic_name = '%s.%s' % (topic_name, node_name)
connection.create_consumer(node_topic_name,
dispatcher,
fanout=False)
connection.consume_in_thread()
return connection
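# Illustrative call (hypothetical values): an L2 agent typically passes something like
#     create_consumers(dispatcher, topics.AGENT,
#                      [[topics.PORT, topics.UPDATE],
#                       [topics.NETWORK, topics.DELETE, cfg.CONF.host]])
# i.e. each topic entry is a 2- or 3-item sequence of (name, operation[, node_name]).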
class PluginReportStateAPI(proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic):
super(PluginReportStateAPI, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
def report_state(self, context, agent_state, use_call=False):
msg = self.make_msg('report_state',
agent_state={'agent_state':
agent_state},
time=timeutils.strtime())
if use_call:
return self.call(context, msg, topic=self.topic)
else:
return self.cast(context, msg, topic=self.topic)
class PluginApi(proxy.RpcProxy):
'''Agent side of the rpc API.
API version history:
1.0 - Initial version.
'''
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(PluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
def get_device_details(self, context, device, agent_id):
return self.call(context,
self.make_msg('get_device_details', device=device,
agent_id=agent_id),
topic=self.topic)
def update_device_down(self, context, device, agent_id, host=None):
return self.call(context,
self.make_msg('update_device_down', device=device,
agent_id=agent_id, host=host),
topic=self.topic)
def update_device_up(self, context, device, agent_id, host=None):
return self.call(context,
self.make_msg('update_device_up', device=device,
agent_id=agent_id, host=host),
topic=self.topic)
def tunnel_sync(self, context, tunnel_ip, tunnel_type=None):
return self.call(context,
self.make_msg('tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type),
topic=self.topic)
|
Bachaco-ve/odoo | refs/heads/8.0 | addons/hr_contract/hr_contract.py | 302 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
def _contracts_count(self, cr, uid, ids, field_name, arg, context=None):
Contract = self.pool['hr.contract']
return {
employee_id: Contract.search_count(cr, SUPERUSER_ID, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth'),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle'),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id': fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
'contracts_count': fields.function(_contracts_count, type='integer', string='Contracts'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', required=True),
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', required=False, readonly=False),
'visa_no': fields.char('Visa No', required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
return {'value': {'job_id': job_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
YOTOV-LIMITED/kuma | refs/heads/master | vendor/packages/translate/lang/es.py | 26 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Spanish language.
.. note:: As it only has special case code for initial inverted punctuation,
it could also be used for Asturian, Galician, or Catalan.
"""
from translate.lang import common
class es(common.Common):
"""This class represents Spanish."""
@classmethod
def punctranslate(cls, text):
"""Implement some extra features for inverted punctuation.
"""
text = super(cls, cls).punctranslate(text)
# If the first sentence ends with ? or !, prepend inverted ¿ or ¡
firstmatch = cls.sentencere.match(text)
if firstmatch is None:
# only one sentence (if any) - use entire string
first = text
else:
first = firstmatch.group()
# remove trailing whitespace
first = first.strip()
# protect against incorrectly handling an empty string
if not first:
return text
if first[-1] == '?':
text = u"¿" + text
elif first[-1] == '!':
text = u"¡" + text
return text
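# Illustrative behaviour (hypothetical strings): after the base-class punctuation
# handling, es.punctranslate(u"Is this a question?") yields u"¿Is this a question?",
# an exclamation gains a leading u"¡", and sentences ending in other punctuation are
# left unchanged by this extra step.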
|
jmartinm/invenio | refs/heads/master | modules/webjournal/lib/webjournal_web_tests.py | 27 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebJournal module web tests."""
from invenio.config import CFG_SITE_SECURE_URL
from invenio.testutils import make_test_suite, \
run_test_suite, \
InvenioWebTestCase
class InvenioWebJournalWebTest(InvenioWebTestCase):
"""WebJournal web tests."""
def test_admin_restrictions(self):
"""webjournal - web test admin restrictions"""
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py?ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/administrate?journal_name=AtlantisTimes&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/configure?action=add&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/index?journal_name=AtlantisTimes&action=askDelete&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/index?journal_name=AtlantisTimes&action=delete&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/configure?action=edit&journal_name=AtlantisTimes&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?journal_name=AtlantisTimes&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?journal_name=AtlantisTimes&action=askremove&recid=7&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?ln=en')
self.page_source_test(expected_text='Authorization failure')
# Access WebJournal URLs in read-only mode
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py?ln=en')
self.page_source_test(expected_text='Authorization failure')
# login as juliet
self.login(username="juliet", password="j123uliet")
self.page_source_test(unexpected_text='delete')
self.find_element_by_link_text_with_timeout("AtlantisTimes")
self.browser.find_element_by_link_text("AtlantisTimes").click()
self.page_source_test(expected_text='regenerate',
unexpected_text=['Feature a Record', 'Edit Configuration', 'release now', 'announce now'])
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/configure?action=add&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/index?journal_name=AtlantisTimes&action=askDelete&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/index?journal_name=AtlantisTimes&action=delete&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/configure?action=edit&journal_name=AtlantisTimes&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?journal_name=AtlantisTimes&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?journal_name=AtlantisTimes&action=askremove&recid=7&ln=en')
self.page_source_test(expected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?ln=en')
self.page_source_test(expected_text='Authorization failure')
self.logout()
# Access WebJournal URLs as fully authorized editor
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py?ln=en')
# login as balthasar
self.login(username="balthasar", password="b123althasar")
self.page_source_test(expected_text='delete')
self.find_element_by_link_text_with_timeout("AtlantisTimes")
self.browser.find_element_by_link_text("AtlantisTimes").click()
self.page_source_test(expected_text=['Feature a Record', 'Edit Configuration',
'regenerate', 'release now', 'announce now'])
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/configure?action=add&ln=en')
self.page_source_test(unexpected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/index?journal_name=AtlantisTimes&action=askDelete&ln=en')
self.page_source_test(unexpected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/configure?action=edit&journal_name=AtlantisTimes&ln=en')
self.page_source_test(unexpected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?journal_name=AtlantisTimes&ln=en')
self.page_source_test(unexpected_text='Authorization failure')
self.browser.get(CFG_SITE_SECURE_URL + '/admin/webjournal/webjournaladmin.py/feature_record?ln=en')
self.page_source_test(unexpected_text='Authorization failure')
self.logout()
TEST_SUITE = make_test_suite(InvenioWebJournalWebTest, )
if __name__ == '__main__':
run_test_suite(TEST_SUITE, warn_user=True)
|
colinsullivan/AuditionManager | refs/heads/master | auditionapp/auditions/tests.py | 1940 | """
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.failUnlessEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
|
scorphus/django | refs/heads/master | tests/template_tests/filter_tests/test_truncatechars.py | 428 | from django.test import SimpleTestCase
from ..utils import setup
class TruncatecharsTests(SimpleTestCase):
@setup({'truncatechars01': '{{ a|truncatechars:5 }}'})
def test_truncatechars01(self):
output = self.engine.render_to_string('truncatechars01', {'a': 'Testing, testing'})
self.assertEqual(output, 'Te...')
@setup({'truncatechars02': '{{ a|truncatechars:7 }}'})
def test_truncatechars02(self):
output = self.engine.render_to_string('truncatechars02', {'a': 'Testing'})
self.assertEqual(output, 'Testing')
|
ScreamingUdder/mantid | refs/heads/master | scripts/Inelastic/IndirectCommon.py | 1 | # pylint: disable=invalid-name,redefined-builtin
from __future__ import (absolute_import, division, print_function)
from six.moves import range
import mantid.simpleapi as s_api
from mantid import config, logger
import os.path
import math
import datetime
import re
import numpy as np
def StartTime(prog):
logger.notice('----------')
message = 'Program ' + prog + ' started @ ' + str(datetime.datetime.now())
logger.notice(message)
def EndTime(prog):
message = 'Program ' + prog + ' ended @ ' + str(datetime.datetime.now())
logger.notice(message)
logger.notice('----------')
def get_run_number(ws_name):
"""
Gets the run number for a given workspace.
Attempts to get from logs and falls back to parsing the workspace name for
something that looks like a run number.
@param ws_name Name of workspace
@return Parsed run number
"""
workspace = s_api.mtd[ws_name]
run_number = str(workspace.getRunNumber())
if run_number == '0':
# Attempt to parse run number off of name
match = re.match(r'([a-zA-Z]+)([0-9]+)', ws_name)
if match:
run_number = match.group(2)
else:
raise RuntimeError("Could not find run number associated with workspace.")
return run_number
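# Illustrative example (hypothetical workspace name): if the logs hold no run number,
# a name such as 'irs26176_graphite002_red' is matched by the regex above and '26176'
# is returned.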
def getInstrRun(ws_name):
"""
Get the instrument name and run number from a workspace.
@param ws_name - name of the workspace
@return tuple of form (instrument, run number)
"""
run_number = get_run_number(ws_name)
instrument = s_api.mtd[ws_name].getInstrument().getName()
if instrument != '':
for facility in config.getFacilities():
try:
instrument = facility.instrument(instrument).filePrefix(int(run_number))
instrument = instrument.lower()
break
except RuntimeError:
continue
return instrument, run_number
def getWSprefix(wsname):
"""
Returns a string of the form '<ins><run>_<analyser><refl>_' on which
all of our other naming conventions are built. The workspace is used to get the
instrument parameters.
"""
if wsname == '':
return ''
workspace = s_api.mtd[wsname]
facility = config['default.facility']
ws_run = workspace.getRun()
if 'facility' in ws_run:
facility = ws_run.getLogData('facility').value
(instrument, run_number) = getInstrRun(wsname)
if facility == 'ILL':
run_name = instrument + '_' + run_number
else:
run_name = instrument + run_number
try:
analyser = workspace.getInstrument().getStringParameter('analyser')[0]
reflection = workspace.getInstrument().getStringParameter('reflection')[0]
except IndexError:
analyser = ''
reflection = ''
prefix = run_name + '_' + analyser + reflection
if len(analyser + reflection) > 0:
prefix += '_'
return prefix
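# Illustrative example (hypothetical values): for an IRIS run 26176 reduced with the
# graphite 002 analyser/reflection, the prefix built above would be
# 'irs26176_graphite002_' (instrument file prefix plus run number, followed by the
# analyser and reflection and a trailing underscore).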
def getEfixed(workspace):
if isinstance(workspace, str):
inst = s_api.mtd[workspace].getInstrument()
else:
inst = workspace.getInstrument()
if inst.hasParameter('Efixed'):
return inst.getNumberParameter('EFixed')[0]
if inst.hasParameter('analyser'):
analyser_name = inst.getStringParameter('analyser')[0]
analyser_comp = inst.getComponentByName(analyser_name)
if analyser_comp is not None and analyser_comp.hasParameter('Efixed'):
return analyser_comp.getNumberParameter('EFixed')[0]
raise ValueError('No Efixed parameter found')
def checkUnitIs(ws, unit_id, axis_index=0):
"""
Check that the workspace has the correct units by comparing
against the UnitID.
"""
axis = s_api.mtd[ws].getAxis(axis_index)
unit = axis.getUnit()
return unit.unitID() == unit_id
def getDefaultWorkingDirectory():
"""
Get the default save directory and check it's valid.
"""
workdir = config['defaultsave.directory']
if not os.path.isdir(workdir):
raise IOError("Default save directory is not a valid path!")
return workdir
def createQaxis(inputWS):
result = []
workspace = s_api.mtd[inputWS]
num_hist = workspace.getNumberHistograms()
if workspace.getAxis(1).isSpectra():
inst = workspace.getInstrument()
sample_pos = inst.getSample().getPos()
beam_pos = sample_pos - inst.getSource().getPos()
for i in range(0, num_hist):
efixed = getEfixed(inputWS)
detector = workspace.getDetector(i)
theta = detector.getTwoTheta(sample_pos, beam_pos) / 2
lamda = math.sqrt(81.787 / efixed)
q = 4 * math.pi * math.sin(theta) / lamda
result.append(q)
else:
axis = workspace.getAxis(1)
msg = 'Creating Axis based on Detector Q value: '
if not axis.isNumeric():
msg += 'Input workspace must have either spectra or numeric axis.'
raise ValueError(msg)
if axis.getUnit().unitID() != 'MomentumTransfer':
msg += 'Input must have axis values of Q'
raise ValueError(msg)
for i in range(0, num_hist):
result.append(float(axis.label(i)))
return result
def GetWSangles(inWS):
num_hist = s_api.mtd[inWS].getNumberHistograms() # get no. of histograms/groups
source_pos = s_api.mtd[inWS].getInstrument().getSource().getPos()
sample_pos = s_api.mtd[inWS].getInstrument().getSample().getPos()
beam_pos = sample_pos - source_pos
angles = [] # will be list of angles
for index in range(0, num_hist):
detector = s_api.mtd[inWS].getDetector(index) # get index
two_theta = detector.getTwoTheta(sample_pos, beam_pos) * 180.0 / math.pi # calc angle
angles.append(two_theta) # add angle
return angles
def GetThetaQ(ws):
"""
Returns the theta and elastic Q for each spectrum in a given workspace.
@param ws Workspace to get theta and Q for
@returns A tuple containing a list of theta values and a list of Q values
"""
e_fixed = getEfixed(ws)
wavelas = math.sqrt(81.787 / e_fixed) # Elastic wavelength
k0 = 4.0 * math.pi / wavelas
axis = s_api.mtd[ws].getAxis(1)
# If axis is in spec number need to retrieve angles and calculate Q
if axis.isSpectra():
theta = np.array(GetWSangles(ws))
q = k0 * np.sin(0.5 * np.radians(theta))
# If axis is in Q need to calculate back to angles and just return axis values
elif axis.isNumeric() and axis.getUnit().unitID() == 'MomentumTransfer':
q_bin_edge = axis.extractValues()
q = list()
for i in range(1, len(q_bin_edge)):
q_centre = ((q_bin_edge[i] - q_bin_edge[i - 1]) / 2) + q_bin_edge[i - 1]
q.append(q_centre)
np_q = np.array(q)
theta = 2.0 * np.degrees(np.arcsin(np_q / k0))
# Out of options here
else:
raise RuntimeError('Cannot get theta and Q for workspace %s' % ws)
return theta, q
def ExtractFloat(data_string):
"""
Extract float values from an ASCII string
"""
values = data_string.split()
values = [float(v) for v in values]
return values
def ExtractInt(data_string):
"""
Extract int values from an ASCII string
"""
values = data_string.split()
values = [int(v) for v in values]
return values
def PadArray(inarray, nfixed):
"""
Pad a list to specified size.
"""
npt = len(inarray)
padding = nfixed - npt
outarray = []
outarray.extend(inarray)
outarray += [0] * padding
return outarray
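# For example, PadArray([1, 2, 3], 5) returns [1, 2, 3, 0, 0].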
def CheckAnalysers(in1WS, in2WS):
"""
Check workspaces have identical analysers and reflections
Args:
@param in1WS - first 2D workspace
@param in2WS - second 2D workspace
Returns:
@return None
Raises:
        @exception ValueError - workspaces have different analysers
        @exception ValueError - workspaces have different reflections
"""
ws1 = s_api.mtd[in1WS]
try:
analyser_1 = ws1.getInstrument().getStringParameter('analyser')[0]
reflection_1 = ws1.getInstrument().getStringParameter('reflection')[0]
except IndexError:
raise RuntimeError('Could not find analyser or reflection for workspace %s' % in1WS)
ws2 = s_api.mtd[in2WS]
try:
analyser_2 = ws2.getInstrument().getStringParameter('analyser')[0]
reflection_2 = ws2.getInstrument().getStringParameter('reflection')[0]
except:
raise RuntimeError('Could not find analyser or reflection for workspace %s' % in2WS)
if analyser_1 != analyser_2:
raise ValueError('Workspace %s and %s have different analysers' % (ws1, ws2))
elif reflection_1 != reflection_2:
raise ValueError('Workspace %s and %s have different reflections' % (ws1, ws2))
else:
logger.information('Analyser is %s, reflection %s' % (analyser_1, reflection_1))
def CheckHistZero(inWS):
"""
    Retrieves basic info on a workspace
Checks the workspace is not empty, then returns the number of histogram and
the number of X-points, which is the number of bin boundaries minus one
Args:
@param inWS 2D workspace
Returns:
@return num_hist - number of histograms in the workspace
@return ntc - number of X-points in the first histogram, which is the number of bin
boundaries minus one. It is assumed all histograms have the same
number of X-points.
Raises:
      @exception ValueError - Workspace has no histograms
"""
num_hist = s_api.mtd[inWS].getNumberHistograms() # no. of hist/groups in WS
if num_hist == 0:
raise ValueError('Workspace ' + inWS + ' has NO histograms')
x_in = s_api.mtd[inWS].readX(0)
ntc = len(x_in) - 1 # no. points from length of x array
if ntc == 0:
raise ValueError('Workspace ' + inWS + ' has NO points')
return num_hist, ntc
def CheckHistSame(in1WS, name1, in2WS, name2):
"""
Check workspaces have same number of histograms and bin boundaries
Args:
@param in1WS - first 2D workspace
@param name1 - single-word descriptor of first 2D workspace
@param in2WS - second 2D workspace
@param name2 - single-word descriptor of second 2D workspace
Returns:
@return None
Raises:
      @exception ValueError - number of histograms is different
      @exception ValueError - number of bin boundaries in the histograms is different
"""
num_hist_1 = s_api.mtd[in1WS].getNumberHistograms() # no. of hist/groups in WS1
x_1 = s_api.mtd[in1WS].readX(0)
x_len_1 = len(x_1)
num_hist_2 = s_api.mtd[in2WS].getNumberHistograms() # no. of hist/groups in WS2
x_2 = s_api.mtd[in2WS].readX(0)
x_len_2 = len(x_2)
if num_hist_1 != num_hist_2: # Check that no. groups are the same
error_1 = '%s (%s) histograms (%d)' % (name1, in1WS, num_hist_1)
error_2 = '%s (%s) histograms (%d)' % (name2, in2WS, num_hist_2)
error = error_1 + ' not = ' + error_2
raise ValueError(error)
elif x_len_1 != x_len_2:
error_1 = '%s (%s) array length (%d)' % (name1, in1WS, x_len_1)
error_2 = '%s (%s) array length (%d)' % (name2, in2WS, x_len_2)
error = error_1 + ' not = ' + error_2
raise ValueError(error)
def CheckXrange(x_range, range_type):
if not ((len(x_range) == 2) or (len(x_range) == 4)):
raise ValueError(range_type + ' - Range must contain either 2 or 4 numbers')
for lower, upper in zip(x_range[::2], x_range[1::2]):
if math.fabs(lower) < 1e-5:
raise ValueError('%s - input minimum (%f) is zero' % (range_type, lower))
if math.fabs(upper) < 1e-5:
raise ValueError('%s - input maximum (%f) is zero' % (range_type, upper))
if upper < lower:
raise ValueError('%s - input maximum (%f) < minimum (%f)' % (range_type, upper, lower))
def CheckElimits(erange, Xin):
len_x = len(Xin) - 1
if math.fabs(erange[0]) < 1e-5:
raise ValueError('Elimits - input emin (%f) is Zero' % (erange[0]))
if erange[0] < Xin[0]:
raise ValueError('Elimits - input emin (%f) < data emin (%f)' % (erange[0], Xin[0]))
if math.fabs(erange[1]) < 1e-5:
raise ValueError('Elimits - input emax (%f) is Zero' % (erange[1]))
if erange[1] > Xin[len_x]:
raise ValueError('Elimits - input emax (%f) > data emax (%f)' % (erange[1], Xin[len_x]))
if erange[1] < erange[0]:
raise ValueError('Elimits - input emax (%f) < emin (%f)' % (erange[1], erange[0]))
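# Illustrative calls for the two range validators above (the numbers and the
# 'ws_red' workspace name are examples only):
#
#     CheckXrange([-0.5, 0.5], 'ERange')   # passes: non-zero limits, max > min
#     CheckXrange([0.5, -0.5], 'ERange')   # raises ValueError: maximum < minimum
#     CheckElimits([-0.4, 0.4], s_api.mtd['ws_red'].readX(0))   # assumes 'ws_red' exists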
def getInstrumentParameter(ws, param_name):
"""Get an named instrument parameter from a workspace.
Args:
@param ws The workspace to get the instrument from.
@param param_name The name of the parameter to look up.
"""
inst = s_api.mtd[ws].getInstrument()
# Create a map of type parameters to functions. This is so we avoid writing lots of
    # if statements because there's no way to dynamically get the type.
func_map = {'double': inst.getNumberParameter, 'string': inst.getStringParameter,
'int': inst.getIntParameter, 'bool': inst.getBoolParameter}
if inst.hasParameter(param_name):
param_type = inst.getParameterType(param_name)
if param_type != '':
param = func_map[param_type](param_name)[0]
else:
            raise ValueError('Found %s in the Instrument Parameter file but could not determine its type.' % param_name)
else:
raise ValueError('Unable to retrieve %s from Instrument Parameter file.' % param_name)
return param
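# Illustrative use of getInstrumentParameter, assuming the loaded workspace's
# instrument definition actually declares the named parameter (both names below
# are examples only):
#
#     analyser = getInstrumentParameter('irs26176_graphite002_red', 'analyser')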
def convertToElasticQ(input_ws, output_ws=None):
"""
Helper function to convert the spectrum axis of a sample to ElasticQ.
@param input_ws - the name of the workspace to convert from
@param output_ws - the name to call the converted workspace
"""
if output_ws is None:
output_ws = input_ws
axis = s_api.mtd[input_ws].getAxis(1)
if axis.isSpectra():
e_fixed = getEfixed(input_ws)
s_api.ConvertSpectrumAxis(input_ws, Target='ElasticQ', EMode='Indirect', EFixed=e_fixed,
OutputWorkspace=output_ws)
elif axis.isNumeric():
# Check that units are Momentum Transfer
if axis.getUnit().unitID() != 'MomentumTransfer':
raise RuntimeError('Input must have axis values of Q')
s_api.CloneWorkspace(input_ws, OutputWorkspace=output_ws)
else:
raise RuntimeError('Input workspace must have either spectra or numeric axis.')
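# Illustrative use of convertToElasticQ, assuming 'sample_red' is a reduced
# workspace with a spectrum axis (both workspace names are examples only):
#
#     convertToElasticQ('sample_red', output_ws='sample_red_q')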
def transposeFitParametersTable(params_table, output_table=None):
"""
Transpose the parameter table created from a multi domain Fit.
This function will make the output consistent with PlotPeakByLogValue.
@param params_table - the parameter table output from Fit.
@param output_table - name to call the transposed table. If omitted,
the output_table will be the same as the params_table
"""
params_table = s_api.mtd[params_table]
table_ws = '__tmp_table_ws'
table_ws = s_api.CreateEmptyTableWorkspace(OutputWorkspace=table_ws)
param_names = params_table.column(0)[:-1] # -1 to remove cost function
param_values = params_table.column(1)[:-1]
param_errors = params_table.column(2)[:-1]
# Find the number of parameters per function
func_index = param_names[0].split('.')[0]
num_params = 0
for i, name in enumerate(param_names):
if name.split('.')[0] != func_index:
num_params = i
break
# Create columns with parameter names for headers
column_names = ['.'.join(name.split('.')[1:]) for name in param_names[:num_params]]
column_error_names = [name + '_Err' for name in column_names]
column_names = list(zip(column_names, column_error_names))
table_ws.addColumn('double', 'axis-1')
for name, error_name in column_names:
table_ws.addColumn('double', name)
table_ws.addColumn('double', error_name)
# Output parameter values to table row
for i in range(0, params_table.rowCount() - 1, num_params):
row_values = param_values[i:i + num_params]
row_errors = param_errors[i:i + num_params]
row = [value for pair in zip(row_values, row_errors) for value in pair]
row = [i / num_params] + row
table_ws.addRow(row)
if output_table is None:
output_table = params_table.name()
s_api.RenameWorkspace(table_ws.name(), OutputWorkspace=output_table)
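# Illustrative use of transposeFitParametersTable, assuming a multi-domain Fit
# has produced a parameter table named 'fit_Parameters' (names are examples
# only). Parameters such as 'f0.Height', 'f1.Height', ... become one row per
# domain with columns 'Height' and 'Height_Err':
#
#     transposeFitParametersTable('fit_Parameters', 'fit_Parameters_transposed')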
def IndentifyDataBoundaries(sample_ws):
"""
    Identifies and returns the first and last non-zero data points in a workspace.
    For a workspace with multiple spectra, the data points that are closest to the
    centre out of all the spectra in the workspace are returned.
"""
sample_ws = s_api.mtd[sample_ws]
nhists = sample_ws.getNumberHistograms()
start_data_idx, end_data_idx = 0, 0
# For all spectra in the workspace
for spectra in range(0, nhists):
# Obtain first and last non zero values
y_data = sample_ws.readY(spectra)
spectra_start_data = firstNonZero(y_data)
spectra_end_data = firstNonZero(list(reversed(y_data)))
# Replace workspace start and end if data is closer to the center
if spectra_start_data > start_data_idx:
start_data_idx = spectra_start_data
if spectra_end_data > end_data_idx:
end_data_idx = spectra_end_data
# Convert Bin index to data value
x_data = sample_ws.readX(0)
first_data_point = x_data[start_data_idx]
last_data_point = x_data[len(x_data) - end_data_idx - 2]
return first_data_point, last_data_point
def firstNonZero(data):
"""
    Returns the index of the first non-zero value in the list (None if all values are zero)
"""
for i in range(len(data)):
if data[i] != 0:
return i
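# Worked example for firstNonZero (plain Python):
#
#     firstNonZero([0.0, 0.0, 1.2, 3.4])   # -> 2
#     firstNonZero([0.0, 0.0])             # -> None (falls through the loop)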
def formatRuns(runs, instrument_name):
"""
:return: A list of runs prefixed with the given instrument name
"""
run_list = []
for run in runs:
if '-' in run:
a, b = run.split('-')
run_list.extend(range(int(a), int(b) + 1))
else:
try:
run_list.append(int(run))
            except ValueError:
                # run already carries the instrument prefix, e.g. 'osi1000'
run_list.append(run)
for idx, run in enumerate(run_list):
        if isinstance(run, int):
run_list[idx] = instrument_name + str(run)
return run_list
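# Worked example for formatRuns; ranges are expanded and bare run numbers get
# the instrument prefix, while runs that already carry a prefix are kept as-is:
#
#     formatRuns(['1-3', 'osi89757'], 'irs')   # -> ['irs1', 'irs2', 'irs3', 'osi89757']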
|
paolodedios/tensorflow | refs/heads/master | tensorflow/python/data/kernel_tests/range_test.py | 5 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.range()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class RangeTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testStop(self, output_type):
stop = 5
dataset = dataset_ops.Dataset.range(stop, output_type=output_type)
expected_output = np.arange(stop, dtype=output_type.as_numpy_dtype)
self.assertDatasetProduces(dataset, expected_output=expected_output)
self.assertEqual(output_type, dataset_ops.get_legacy_output_types(dataset))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testStartStop(self, output_type):
start, stop = 2, 5
dataset = dataset_ops.Dataset.range(start, stop, output_type=output_type)
expected_output = np.arange(start, stop, dtype=output_type.as_numpy_dtype)
self.assertDatasetProduces(dataset, expected_output=expected_output)
self.assertEqual(output_type, dataset_ops.get_legacy_output_types(dataset))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testStartStopStep(self, output_type):
start, stop, step = 2, 10, 2
dataset = dataset_ops.Dataset.range(
start, stop, step, output_type=output_type)
expected_output = np.arange(
start, stop, step, dtype=output_type.as_numpy_dtype)
self.assertDatasetProduces(dataset, expected_output=expected_output)
self.assertEqual(output_type, dataset_ops.get_legacy_output_types(dataset))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testZeroStep(self, output_type):
start, stop, step = 2, 10, 0
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(
start, stop, step, output_type=output_type)
self.evaluate(dataset._variant_tensor)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testNegativeStep(self, output_type):
start, stop, step = 2, 10, -1
dataset = dataset_ops.Dataset.range(
start, stop, step, output_type=output_type)
expected_output = np.arange(
start, stop, step, dtype=output_type.as_numpy_dtype)
self.assertDatasetProduces(dataset, expected_output=expected_output)
self.assertEqual(output_type, dataset_ops.get_legacy_output_types(dataset))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testStopLessThanStart(self, output_type):
start, stop = 10, 2
dataset = dataset_ops.Dataset.range(start, stop, output_type=output_type)
expected_output = np.arange(start, stop, dtype=output_type.as_numpy_dtype)
self.assertDatasetProduces(dataset, expected_output=expected_output)
self.assertEqual(output_type, dataset_ops.get_legacy_output_types(dataset))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testStopLessThanStartWithPositiveStep(self, output_type):
start, stop, step = 10, 2, 2
dataset = dataset_ops.Dataset.range(
start, stop, step, output_type=output_type)
expected_output = np.arange(
start, stop, step, dtype=output_type.as_numpy_dtype)
self.assertDatasetProduces(dataset, expected_output=expected_output)
self.assertEqual(output_type, dataset_ops.get_legacy_output_types(dataset))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(output_type=[
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
])))
def testStopLessThanStartWithNegativeStep(self, output_type):
start, stop, step = 10, 2, -1
dataset = dataset_ops.Dataset.range(
start, stop, step, output_type=output_type)
expected_output = np.arange(
start, stop, step, dtype=output_type.as_numpy_dtype)
self.assertDatasetProduces(dataset, expected_output=expected_output)
self.assertEqual(output_type, dataset_ops.get_legacy_output_types(dataset))
class RangeCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_range_dataset(self, start, stop):
return dataset_ops.Dataset.range(start, stop)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
start = 2
stop = 10
verify_fn(self, lambda: self._build_range_dataset(start, stop),
stop - start)
if __name__ == "__main__":
test.main()
|
martinrusev/amonone | refs/heads/master | amon/apps/healthchecks/tests/models_test.py | 2 | import unittest
import hashlib
from datetime import datetime
from amon.apps.healthchecks.models import (
health_checks_model,
health_checks_results_model,
health_checks_api_model
)
from amon.apps.servers.models import server_model
from amon.utils.dates import unix_utc_now
class HealthChecksResultsModelTest(unittest.TestCase):
def setUp(self):
server_model.collection.insert({"name": "test"})
self.server = server_model.collection.find_one()
def tearDown(self):
self._cleanup()
def _cleanup(self):
health_checks_model.collection.remove()
health_checks_results_model.collection.remove()
server_model.collection.remove()
    def test_save(self):
self._cleanup()
data = [
{u'output': u'CheckDisk WARNING: / 83.35% bytes usage (29 GiB/35 GiB)\n', u'command': u'check-disk-usage.rb -w 80 -c 90', u'exit_code': 1}
]
formated_data = health_checks_results_model.save(data=data, server=self.server)
for d in formated_data:
assert set(d.keys()) == set(['output', 'command', 'exit_code', 'health_checks_data_id'])
assert health_checks_results_model.collection.find().count() == 1
assert health_checks_model.collection.find().count() == 1
result = health_checks_model.collection.find_one()
assert result['command'] == "check-disk-usage.rb"
assert result['params'] == "-w 80 -c 90"
assert result['unique_id'] == hashlib.md5("check-disk-usage.rb -w 80 -c 90".encode()).hexdigest()
assert result['last_check']['status'] == 'warning'
self._cleanup()
for i in range(50):
health_checks_results_model.save(data=data, server=self.server)
assert health_checks_results_model.collection.find().count() == 50
assert health_checks_model.collection.find().count() == 1
result = health_checks_model.collection.find_one()
class HealthChecksModelTest(unittest.TestCase):
def _cleanup(self):
health_checks_model.collection.remove()
health_checks_results_model.collection.remove()
server_model.collection.remove()
def test_sort_and_filter(self):
self._cleanup()
server_model.collection.insert({"name": "check_sort_and_filter_default"})
server = server_model.collection.find_one()
for i in range(0, 10):
data = [{
'command': "check_sort_and_filter_default.rb",
'exit_code': 1,
'output': 'CheckDisk WARNING: / 83.35% bytes usage (29 GiB/35 GiB)'
}]
health_checks_results_model.save(data=data, server=server)
result = health_checks_model.sort_and_filter()
assert len(result.all_checks) == 1
assert result.all_checks[0]['last_check']
assert result.all_checks[0]['last_check']['status'] == 'warning'
self._cleanup()
for i in range(0, 10):
server_id = server_model.collection.insert({"name": "{0}_server_check_sort_and_filter_by_host".format(i)})
server = server_model.get_by_id(server_id)
            # exit_codes = {0: "ok", 1: "warning", 2: "critical"}
            exit_code = 2 if i <= 5 else 1
for j in range(0, 100):
data = [{
'command': '{0}_check_sort_and_filter_by_host.rb'.format(i),
'exit_code': exit_code,
'output': 'CheckBanner OK: port 22 open'
}]
health_checks_results_model.save(data=data, server=server)
result = health_checks_model.sort_and_filter(sort_by='status')
assert len(result.sorted_result) == 10
for i in range(0, 10):
status = 'critical' if i <= 5 else 'ok'
status = 'warning' if i > 5 else status
assert result.sorted_result[i]['last_check']['status'] == status
result = health_checks_model.sort_and_filter(sort_by='host')
assert len(result.sorted_result) == 10
for i in range(0, 10):
assert result.sorted_result[i]['server']['name'] == "{0}_server_check_sort_and_filter_by_host".format(i)
result = health_checks_model.sort_and_filter(sort_by='host', filter_by='critical')
assert len(result.sorted_result) == 6
result = health_checks_model.sort_and_filter(sort_by='host', filter_by='warning')
assert len(result.sorted_result) == 4
def test_save(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
command = "testmehere"
for i in range(0, 10):
health_checks_model.save(command=command, server=server)
assert health_checks_model.collection.find().count() == 1
def test_delete(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
command = "testmehere"
for i in range(0, 5):
health_checks_model.save(command=command, server=server)
result = health_checks_model.collection.count()
check = health_checks_model.collection.find_one()
assert result == 1
health_checks_model.delete(check_id=check['_id'])
result = health_checks_model.collection.count()
assert result == 0
def tearDown(self):
health_checks_model.collection.remove()
class HealthChecksAPIModelTest(unittest.TestCase):
def _cleanup(self):
health_checks_model.collection.remove()
health_checks_results_model.collection.remove()
server_model.collection.remove()
def test_get_commands_for_server(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
command = "testmehere -w 10"
for i in range(0, 10):
health_checks_model.save(command=command, server=server)
second_command = "testmeagain -c 10"
for i in range(0, 5):
health_checks_model.save(command=second_command, server=server)
result = health_checks_api_model.get_commands_for_server(server_id=server['_id'])
assert result.count() == 2
def test_get_unique_commands(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
for i in range(0, 10):
command = "testcommand{0} -w {0} -c {0}".format(i)
health_checks_model.save(command=command, server=server)
result = health_checks_api_model.get_unique_commands()
assert len(result) == 10
def test_get_params_for_command(self):
self._cleanup()
server_id = server_model.collection.insert({"name": "server_check_sort_and_filter_by_host"})
server = server_model.get_by_id(server_id)
for i in range(0, 10):
command = "testcommand -w {0} -c {0}".format(i)
health_checks_model.save(command=command, server=server)
# Duplicate - still has to return only 10 unique params
for i in range(0, 10):
command = "testcommand -w {0} -c {0}".format(i)
health_checks_model.save(command=command, server=server)
result = health_checks_api_model.get_params_for_command(command_string="testcommand")
assert len(result) == 10
|
colinnewell/odoo | refs/heads/8.0 | addons/website_project/__init__.py | 409 | import models
import controllers |
lunyang/Data-Science-45min-Intros | refs/heads/master | python-unittest-101/mathy.py | 30 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
class Calcs(object):
"""
Objects of type Calcs have two methods for numerical calculations, and
an attribute of 'zero' which is set to 0
"""
def __init__(self):
self.zero = 0
def add_one(self, x):
"""Return the argument incremented by 1"""
try:
return x + 1
except TypeError: # be gentle
return "And what, exactly would it mean to add one to {}?".format(x)
def square(self, x):
"""Return the square of the argument"""
return x**2 # no exception handling
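# Illustrative usage sketch for Calcs (values shown are what the methods above
# return):
#
#     c = Calcs()
#     c.add_one(1)       # -> 2
#     c.add_one('a')     # -> "And what, exactly would it mean to add one to a?"
#     c.square(3)        # -> 9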
|