text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
import sys
from .compat import no_limit_int # NOQA
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = ["ScalarFloat", "ExponentialFloat", "ExponentialCapsFloat"]
class ScalarFloat(float):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
width = kw.pop('width', None) # type: ignore
prec = kw.pop('prec', None) # type: ignore
m_sign = kw.pop('m_sign', None) # type: ignore
m_lead0 = kw.pop('m_lead0', 0) # type: ignore
exp = kw.pop('exp', None) # type: ignore
e_width = kw.pop('e_width', None) # type: ignore
e_sign = kw.pop('e_sign', None) # type: ignore
underscore = kw.pop('underscore', None) # type: ignore
v = float.__new__(cls, *args, **kw) # type: ignore
v._width = width
v._prec = prec
v._m_sign = m_sign
v._m_lead0 = m_lead0
v._exp = exp
v._e_width = e_width
v._e_sign = e_sign
v._underscore = underscore
return v
def __iadd__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self + a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __ifloordiv__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self // a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __imul__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self * a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __ipow__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self ** a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __isub__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self - a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def dump(self, out=sys.stdout):
# type: (Any) -> Any
print('ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}|{}, w:{}, s:{})'.format(
self, self._width, self._prec, self._m_sign, self._m_lead0, # type: ignore
self._exp, self._e_width, self._e_sign), file=out) # type: ignore
class ExponentialFloat(ScalarFloat):
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
class ExponentialCapsFloat(ScalarFloat):
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
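

# --- Hedged usage sketch (not part of the original module) ------------------
# Shows that the round-trip formatting hints set in __new__ survive in-place
# arithmetic; the attribute names are the ones assigned above.
if __name__ == '__main__':
    f = ScalarFloat(3.14159, width=10, prec=5)
    f += 1.0                     # __iadd__ copies only _width and _underscore
    assert isinstance(f, ScalarFloat) and f._width == 10
    f.dump()                     # the other fields are reset to their defaults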
| Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/ruamel/yaml/scalarfloat.py | Python | apache-2.0 | 3,378 | 0.00148 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.rdf
~~~~~~~~~~~~~~~~~~~
Lexers for semantic web and RDF query languages and markup.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default
from pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \
Whitespace, Name, Literal, Comment, Text
__all__ = ['SparqlLexer', 'TurtleLexer']
class SparqlLexer(RegexLexer):
"""
Lexer for `SPARQL <http://www.w3.org/TR/rdf-sparql-query/>`_ query language.
.. versionadded:: 2.0
"""
name = 'SPARQL'
aliases = ['sparql']
filenames = ['*.rq', '*.sparql']
mimetypes = ['application/sparql-query']
# character group definitions ::
PN_CHARS_BASE_GRP = (u'a-zA-Z'
u'\u00c0-\u00d6'
u'\u00d8-\u00f6'
u'\u00f8-\u02ff'
u'\u0370-\u037d'
u'\u037f-\u1fff'
u'\u200c-\u200d'
u'\u2070-\u218f'
u'\u2c00-\u2fef'
u'\u3001-\ud7ff'
u'\uf900-\ufdcf'
u'\ufdf0-\ufffd')
PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
PN_CHARS_GRP = (PN_CHARS_U_GRP +
r'\-' +
r'0-9' +
u'\u00b7' +
u'\u0300-\u036f' +
u'\u203f-\u2040')
HEX_GRP = '0-9A-Fa-f'
PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
# terminal productions ::
PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
PN_CHARS = '[' + PN_CHARS_GRP + ']'
HEX = '[' + HEX_GRP + ']'
PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
'.]*' + PN_CHARS + ')?'
PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
VARNAME = u'[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
u'0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
PERCENT = '%' + HEX + HEX
PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
'(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
PN_CHARS_GRP + ':]|' + PLX + '))?')
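    # For example, the assembled PN_LOCAL pattern accepts local names such as
    # "foo.bar" or the percent-encoded "%41x", but rejects a name ending in '.'
    # (the optional tail group must finish on a non-dot character).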
EXPONENT = r'[eE][+-]?\d+'
# Lexer token definitions ::
tokens = {
'root': [
(r'\s+', Text),
# keywords ::
(r'((?i)select|construct|describe|ask|where|filter|group\s+by|minus|'
r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
r'offset|bindings|load|clear|drop|create|add|move|copy|'
r'insert\s+data|delete\s+data|delete\s+where|delete|insert|'
r'using\s+named|using|graph|default|named|all|optional|service|'
r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
(r'(a)\b', Keyword),
# IRIs ::
('(' + IRIREF + ')', Name.Label),
# blank nodes ::
('(' + BLANK_NODE_LABEL + ')', Name.Label),
# # variables ::
('[?$]' + VARNAME, Name.Variable),
# prefixed names ::
(r'(' + PN_PREFIX + ')?(\:)(' + PN_LOCAL + ')?',
bygroups(Name.Namespace, Punctuation, Name.Tag)),
# function names ::
(r'((?i)str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|'
r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
r'count|sum|min|max|avg|sample|group_concat|separator)\b',
Name.Function),
# boolean literals ::
(r'(true|false)', Keyword.Constant),
# double literals ::
(r'[+\-]?(\d+\.\d*' + EXPONENT + '|\.?\d+' + EXPONENT + ')', Number.Float),
# decimal literals ::
(r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
# integer literals ::
(r'[+\-]?\d+', Number.Integer),
# operators ::
(r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator),
# punctuation characters ::
(r'[(){}.;,:^\[\]]', Punctuation),
# line comments ::
(r'#[^\n]*', Comment),
# strings ::
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'triple-double-quoted-string': [
(r'"""', String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-double-quoted-string': [
(r'"', String, 'end-of-string'),
(r'[^"\\\n]+', String),
(r'\\', String, 'string-escape'),
],
'triple-single-quoted-string': [
(r"'''", String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String.Escape, 'string-escape'),
],
'single-single-quoted-string': [
(r"'", String, 'end-of-string'),
(r"[^'\\\n]+", String),
(r'\\', String, 'string-escape'),
],
'string-escape': [
(r'u' + HEX + '{4}', String.Escape, '#pop'),
(r'U' + HEX + '{8}', String.Escape, '#pop'),
(r'.', String.Escape, '#pop'),
],
'end-of-string': [
(r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
bygroups(Operator, Name.Function), '#pop:2'),
(r'\^\^', Operator, '#pop:2'),
default('#pop:2'),
],
}
class TurtleLexer(RegexLexer):
"""
Lexer for `Turtle <http://www.w3.org/TR/turtle/>`_ data language.
.. versionadded:: 2.1
"""
name = 'Turtle'
aliases = ['turtle']
filenames = ['*.ttl']
mimetypes = ['text/turtle', 'application/x-turtle']
flags = re.IGNORECASE
patterns = {
'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range
'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
}
# PNAME_NS PN_LOCAL (with simplified character range)
patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns
tokens = {
'root': [
(r'\s+', Whitespace),
# Base / prefix
(r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation)),
(r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
Name.Variable, Whitespace, Punctuation)),
# The shorthand predicate 'a'
(r'(?<=\s)a(?=\s)', Keyword.Type),
# IRIREF
(r'%(IRIREF)s' % patterns, Name.Variable),
# PrefixedName
(r'%(PrefixedName)s' % patterns,
bygroups(Name.Namespace, Name.Tag)),
# Comment
(r'#[^\n]+', Comment),
(r'\b(true|false)\b', Literal),
(r'[+\-]?\d*\.\d+', Number.Float),
(r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
(r'[+\-]?\d+', Number.Integer),
(r'[\[\](){}.;,:^]', Punctuation),
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'triple-double-quoted-string': [
(r'"""', String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-double-quoted-string': [
(r'"', String, 'end-of-string'),
(r'[^"\\\n]+', String),
(r'\\', String, 'string-escape'),
],
'triple-single-quoted-string': [
(r"'''", String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-single-quoted-string': [
(r"'", String, 'end-of-string'),
(r"[^'\\\n]+", String),
(r'\\', String, 'string-escape'),
],
'string-escape': [
(r'.', String, '#pop'),
],
'end-of-string': [
(r'(@)([a-z]+(:?-[a-z0-9]+)*)',
bygroups(Operator, Generic.Emph), '#pop:2'),
(r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'),
(r'(\^\^)%(PrefixedName)s' % patterns,
bygroups(Operator, Generic.Emph, Generic.Emph), '#pop:2'),
default('#pop:2'),
],
}
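

# --- Hedged usage sketch (not part of this module) ---------------------------
# Standard Pygments invocation; HtmlFormatter is just one of the available
# formatters.
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     from pygments.lexers.rdf import SparqlLexer
#
#     html = highlight('SELECT ?s WHERE { ?s a ?type . }',
#                      SparqlLexer(), HtmlFormatter())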
| wandb/client | wandb/vendor/pygments/lexers/rdf.py | Python | mit | 9,398 | 0.00117 |
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given a sorted linked list, delete all nodes that have duplicate numbers,
# leaving only distinct numbers from the original list.
#
# For example,
# Given 1->2->3->3->4->4->5, return 1->2->5.
# Given 1->1->1->2->3, return 2->3.
#
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
if self is None:
return "Nil"
else:
return "{} -> {}".format(self.val, repr(self.next))
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
dummy = ListNode(0)
pre, cur = dummy, head
while cur:
if cur.next and cur.next.val == cur.val:
                val = cur.val
while cur and cur.val == val:
cur = cur.next
pre.next = cur
else:
pre.next = cur
pre = cur
cur = cur.next
return dummy.next
if __name__ == "__main__":
head, head.next, head.next.next = ListNode(1), ListNode(2), ListNode(3)
head.next.next.next, head.next.next.next.next = ListNode(3), ListNode(4)
head.next.next.next.next.next, head.next.next.next.next.next.next = ListNode(4), ListNode(5)
print(Solution().deleteDuplicates(head))
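    # Hedged extra check (not in the original file): the second example from
    # the problem statement above, 1->1->1->2->3, should reduce to 2->3.
    head2 = ListNode(1)
    head2.next, head2.next.next = ListNode(1), ListNode(1)
    head2.next.next.next, head2.next.next.next.next = ListNode(2), ListNode(3)
    print(Solution().deleteDuplicates(head2))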
| tudennis/LeetCode---kamyu104-11-24-2015 | Python/remove-duplicates-from-sorted-list-ii.py | Python | mit | 1,452 | 0.004132 |
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing the wiki service (WikiService class).
This service is used to convert some text to HTML. The converting
process, its rules, are described in the class documentation.
"""
import re
from service import Service
class WikiService(Service):
"""Class describing the wiki service.
This service is made to manipulate text and convert it to HTML format.
The converting rules are highly customizable by inheriting from this
class. Most of the constants used to convert text to HTML
are created in the constructor. Therefore, others may be added
very simply.
Here is a short example that shows how to add a new markup
to the wiki syntax. You can add a new 'wiki.py' file in your
'services' package placed in your bundle, and then paste the following
code into the file:
>>> from ext.aboard.service.default import WikiService
    >>> class wiki(WikiService):  # It's important to name that class Wiki
    ...     def __init__(self):
    ...         WikiService.__init__(self)
    ...         self.add_markup("bold", "b", "<strong>{string}</strong>")
You can also change the delimiters for the markup, add new regular
expressions, delete markups, etc. Have a look at the class methods.
"""
name = "wiki"
def __init__(self, start="<", end=">", close="/"):
"""Service constructor."""
Service.__init__(self)
self.markup_delimiter_start = start
self.markup_delimiter_end = end
self.markup_delimiter_close = close
self.expressions = []
self.exceptions = []
# Add the exceptions
self.add_except_expression("@", "@")
self.add_except_markup("pre")
# Add the expressions and markups
self.add_expression("italic", "/(.*?)/", "<em>\\1</em>")
self.add_expression("bold", r"\*(.*?)\*", "<strong>\\1</strong>")
self.add_expression(
"header1",
r"^(\s*)h1\.\s+(.*?)\s*$",
r"\1<h1>\2</h1>",
re.MULTILINE
)
        # Test (to delete)
text = """
h1. A test
This is some text with *that* in bold,
But @*this part* h1. should@ not be interpreted at all.
<pre>
This one is a *long*
non /interpreted/ text, somehow.</pre>
and, finally, /this should be in italic/ and *bold*.
Well, @that *one* again@.
"""
def add_expression(self, name, regexp, replacement, options=0):
"""Add a new regular expression.
This methods automatically compiles the given regular expression and
adds the result to the self.expressions list.
Expected arguments:
name -- the name of the expression (see below)
regexp -- the regular expression which will be compiled
        options [optional] -- the regular expression options.
        An expression name should be a unique identifier. It's mostly used
        to replace an expression (if a developer decides to change the
        rule to create bold text, for instance, he will use this identifier).
"""
name = name.lower()
names = [line[0] for line in self.expressions]
if name in names:
raise ValueError("the identifier {} already exists in the " \
"expression list. Use the 'replace_expression' " \
"method to replace it".format(repr(name)))
compiled = re.compile(regexp, options)
self.expressions.append((name, compiled, replacement))
    def replace_expression(self, name, regexp, replacement, options=0):
"""Replace an existing expression using its identifier.
The expected arguments are the same as the 'add_expression' method.
        Instead of simply adding a new expression, though, it first deletes
        the expression with that name. This is very useful for defining a new
        rule for certain formatting.
"""
name = name.lower()
names = [line[0] for line in self.expressions]
if name not in names:
            raise ValueError("the identifier {} doesn't exist in the " \
"expression list. Use the 'add_expression' " \
"method to add it".format(repr(name)))
compiled = re.compile(regexp, options)
        exp_pos = names.index(name)
del self.expressions[exp_pos]
self.expressions.insert(exp_pos, (name, compiled, replacement))
def remove_expression(self, name):
"""Remove the expression identified by its name."""
name = name.lower()
names = [line[0] for line in self.expressions]
if name not in names:
            raise ValueError("the identifier {} doesn't exist in the " \
"expression list.".format(repr(name)))
        exp_pos = names.index(name)
del self.expressions[exp_pos]
def add_except_expression(self, start, end, options=0):
"""Add an expression for a Wiki exception.
        Exceptions are not interpreted. If this expression is found, it is
        deleted and its content (the captured group) is copied into a
        temporary field, then pasted back into the original text, unchanged,
        at the end of the process.
"""
self.exceptions.append((start, end, options))
def add_markup(self, name, markup, html):
"""Add a new markup.
        A wiki markup is by default close to an HTML markup. It begins
        with the start delimiter ('<' by default) and ends with the end
        delimiter ('>'). To close the markup after the selected text,
        another start delimiter is used, followed by the close symbol
        ('/'), the markup name and the end delimiter.
        These three symbols (markup_delimiter_start, markup_delimiter_end
        and markup_delimiter_close) are instance attributes and can be
        set in the constructor of a subclass. This allows setting
        different markup symbols, brackets for instance.
Note: the 'html' parameter should contain the '{string}'
sub-string to identify a replacement. For instance:
>>> wiki.add_markup("italic", "i", "<em>{string}</em>")
        That code will convert text like:
        We <i>made</i> it!
        into:
        We <em>made</em> it!
"""
start = self.markup_delimiter_start
end = self.markup_delimiter_end
close = self.markup_delimiter_close
regexp = start + markup + end + "(.*?)" + start + close + markup + end
replacement = html.format(string="\\1")
self.add_expression(name, regexp, replacement)
def replace_markup(self, name, markup, html):
"""Replace the identified by markup.
The expected arguments are the same ones as the 'add_markup' method.
The markup name has to exist, though.
"""
start = self.markup_delimiter_start
end = self.markup_delimiter_end
close = self.markup_delimiter_close
regexp = start + markup + end + "(.*?)" + start + close + markup + end
replacement = html.format(string="\\1")
self.replace_expression(name, regexp, replacement)
def remove_markup(self, name):
"""Remove the markup."""
self.remove_expression(name)
def add_except_markup(self, markup):
"""Add a markup exception."""
start = self.markup_delimiter_start
end = self.markup_delimiter_end
close = self.markup_delimiter_close
markup_start = start + markup + end
markup_end = start + close + markup + end
self.add_except_expression(markup_start, markup_end, re.DOTALL)
def convert_text(self, text):
"""Return the HTML text converted from the text argument."""
raw_text = self.get_raw_text(text)
raw_text = raw_text.replace("{", "{{").replace("}", "}}")
# First remove the exceptions
raw_exceptions = {}
tmp_exceptions = []
def replace(match):
name = "exp_" + str(i) + "_" + str(len(tmp_exceptions))
tmp_exceptions.append(None)
return "{" + name + "}"
for i, (start, end, opts) in enumerate(self.exceptions):
tmp_exceptions = []
s_regexp = start + "(.*?)" + end
r_regexp = "(" + start + ".*?" + end + ")"
for j, content in enumerate(re.findall(s_regexp, raw_text, opts)):
name = "exp_" + str(i) + "_" + str(j)
raw_exceptions[name] = content
raw_text = re.sub(r_regexp, replace, raw_text, flags=opts)
for name, regexp, replacement in self.expressions:
raw_text = regexp.sub(replacement, raw_text)
return raw_text.format(**raw_exceptions)
@staticmethod
def get_raw_text(text):
"""Escape the HTML characters."""
to_esc = {
"<": "<",
">": ">",
}
for car, repl in to_esc.items():
text = text.replace(car, repl)
return text
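

# --- Hedged usage sketch (not part of the original module) ------------------
# Assuming the surrounding 'service' framework allows the class to be
# instantiated directly, a conversion would look roughly like this:
#
#     wiki = WikiService()
#     wiki.convert_text("some *bold* text and @*not this one*@")
#     # -> 'some <strong>bold</strong> text and *not this one*'
#
# The @...@ span is registered as an exception in __init__, so its content is
# copied back verbatim at the end instead of being interpreted as markup.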
| v-legoff/pa-poc3 | src/service/default/wiki.py | Python | bsd-3-clause | 10,806 | 0.005552 |
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17.
Copyright (c) 2015 Robert Pooley
Copyright (c) 2007, 2008 Johannes Berg
Copyright (c) 2007 Andy Lutomirski
Copyright (c) 2007 Mike Kershaw
Copyright (c) 2008-2009 Luis R. Rodriguez
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from libnl.attr import nla_policy, NLA_U16, NLA_U32, NLA_U64, NLA_U8
from libnl.misc import c_int8, c_uint8, SIZEOF_S8, SIZEOF_U8
from libnl.nl80211 import nl80211
from libnl.nl80211.iw_util import ampdu_space, get_ht_capability, get_ht_mcs, get_ssid
WLAN_CAPABILITY_ESS = 1 << 0
WLAN_CAPABILITY_IBSS = 1 << 1
WLAN_CAPABILITY_CF_POLLABLE = 1 << 2
WLAN_CAPABILITY_CF_POLL_REQUEST = 1 << 3
WLAN_CAPABILITY_PRIVACY = 1 << 4
WLAN_CAPABILITY_SHORT_PREAMBLE = 1 << 5
WLAN_CAPABILITY_PBCC = 1 << 6
WLAN_CAPABILITY_CHANNEL_AGILITY = 1 << 7
WLAN_CAPABILITY_SPECTRUM_MGMT = 1 << 8
WLAN_CAPABILITY_QOS = 1 << 9
WLAN_CAPABILITY_SHORT_SLOT_TIME = 1 << 10
WLAN_CAPABILITY_APSD = 1 << 11
WLAN_CAPABILITY_RADIO_MEASURE = 1 << 12
WLAN_CAPABILITY_DSSS_OFDM = 1 << 13
WLAN_CAPABILITY_DEL_BACK = 1 << 14
WLAN_CAPABILITY_IMM_BACK = 1 << 15
# DMG (60gHz) 802.11ad
WLAN_CAPABILITY_DMG_TYPE_MASK = 3 << 0
WLAN_CAPABILITY_DMG_TYPE_IBSS = 1 << 0 # Tx by: STA
WLAN_CAPABILITY_DMG_TYPE_PBSS = 2 << 0 # Tx by: PCP
WLAN_CAPABILITY_DMG_TYPE_AP = 3 << 0 # Tx by: AP
WLAN_CAPABILITY_DMG_CBAP_ONLY = 1 << 2
WLAN_CAPABILITY_DMG_CBAP_SOURCE = 1 << 3
WLAN_CAPABILITY_DMG_PRIVACY = 1 << 4
WLAN_CAPABILITY_DMG_ECPAC = 1 << 5
WLAN_CAPABILITY_DMG_SPECTRUM_MGMT = 1 << 8
WLAN_CAPABILITY_DMG_RADIO_MEASURE = 1 << 12
IEEE80211_COUNTRY_EXTENSION_ID = 201
BSS_MEMBERSHIP_SELECTOR_VHT_PHY = 126
BSS_MEMBERSHIP_SELECTOR_HT_PHY = 127
ms_oui = b'\x00\x50\xf2'
ieee80211_oui = b'\x00\x0f\xac'
wfa_oui = b'\x50\x6f\x9a'
country_env_str = lambda e: {'I': 'Indoor only', 'O': 'Outdoor only', ' ': 'Indoor/Outdoor'}.get(e, 'bogus')
wifi_wps_dev_passwd_id = lambda e: {0: 'Default (PIN)', 1: 'User-specified', 2: 'Machine-specified', 3: 'Rekey',
4: 'PushButton', 5: 'Registrar-specified'}.get(e, '??')
ht_secondary_offset = ('no secondary', 'above', '[reserved!]', 'below')
class ieee80211_country_ie_triplet(object):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n60."""
def __init__(self, data):
"""Constructor."""
self.first_channel = c_uint8.from_buffer(data[:SIZEOF_U8]).value
self.reg_extension_id = self.first_channel
data = data[SIZEOF_U8:]
self.num_channels = c_uint8.from_buffer(data[:SIZEOF_U8]).value
self.reg_class = self.num_channels
data = data[SIZEOF_U8:]
self.max_power = c_int8.from_buffer(data[:SIZEOF_S8]).value
self.coverage_class = c_uint8.from_buffer(data[:SIZEOF_U8]).value
self.chans = self.ext = self
def get_supprates(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n227.
Positional arguments:
data -- bytearray data to read.
"""
answer = list()
for i in range(len(data)):
r = data[i] & 0x7f
if r == BSS_MEMBERSHIP_SELECTOR_VHT_PHY and data[i] & 0x80:
value = 'VHT'
elif r == BSS_MEMBERSHIP_SELECTOR_HT_PHY and data[i] & 0x80:
value = 'HT'
else:
value = '{0}.{1}'.format(int(r / 2), int(5 * (r & 1)))
answer.append('{0}{1}'.format(value, '*' if data[i] & 0x80 else ''))
return answer
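# Hedged illustration (not in the original module): a Supported Rates octet of
# 0x82 encodes rate index 2 (1.0 Mbit/s) with the "basic rate" bit set, so
# get_supprates(None, bytearray(b'\x82')) returns ['1.0*'].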
def get_country(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n267.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = {'Environment': country_env_str(chr(data[2]))}
data = data[3:]
while len(data) >= 3:
triplet = ieee80211_country_ie_triplet(data)
if triplet.ext.reg_extension_id >= IEEE80211_COUNTRY_EXTENSION_ID:
answers['Extension ID'] = triplet.ext.reg_extension_id
answers['Regulatory Class'] = triplet.ext.reg_class
answers['Coverage class'] = triplet.ext.coverage_class
answers['up to dm'] = triplet.ext.coverage_class * 450
data = data[3:]
continue
if triplet.chans.first_channel <= 14: # 2 GHz.
end_channel = triplet.chans.first_channel + (triplet.chans.num_channels - 1)
else:
end_channel = triplet.chans.first_channel + (4 * (triplet.chans.num_channels - 1))
answers['Channels dBm'] = triplet.chans.max_power
answers['Channels'] = (triplet.chans.first_channel, end_channel)
data = data[3:]
return answers
def get_erp(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n323.
Positional arguments:
data -- bytearray data to read.
Returns:
String.
"""
if data[0] == 0x00:
return '<no flags>'
if data[0] & 0x01:
return 'NonERP_Present'
if data[0] & 0x02:
return 'Use_Protection'
if data[0] & 0x04:
return 'Barker_Preamble_Mode'
return ''
def get_cipher(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n336.
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi stream cipher used by the access point (string).
"""
legend = {0: 'Use group cipher suite', 1: 'WEP-40', 2: 'TKIP', 4: 'CCMP', 5: 'WEP-104', }
key = data[3]
if ieee80211_oui == bytes(data[:3]):
legend.update({6: 'AES-128-CMAC', 8: 'GCMP', })
elif ms_oui != bytes(data[:3]):
key = None
return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3]))
def get_auth(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n393.
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi authentication method used by the access point (string).
"""
    legend = {1: 'IEEE 802.1X', 2: 'PSK', }
key = data[3]
if ieee80211_oui == bytes(data[:3]):
legend.update({3: 'FT/IEEE 802.1X', 4: 'FT/PSK', 5: 'IEEE 802.1X/SHA-256', 6: 'PSK/SHA-256', 7: 'TDLS/TPK', })
elif ms_oui != bytes(data[:3]):
key = None
return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3]))
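# Hedged illustration (not in the original module): suite selectors under the
# IEEE 802.11 OUI (00-0f-ac) decode through the tables above, e.g.
#   get_cipher(bytearray(b'\x00\x0f\xac\x04'))  -> 'CCMP'
#   get_auth(bytearray(b'\x00\x0f\xac\x02'))    -> 'PSK'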
def get_rsn_ie(defcipher, defauth, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n441.
Positional arguments:
defcipher -- default cipher if not in data (string).
defauth -- default authentication suites if not in data (string).
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
answers['version'] = data[0] + (data[1] << 8)
data = data[2:]
if len(data) < 4:
answers['group_cipher'] = answers['pairwise_ciphers'] = defcipher
return answers
answers['group_cipher'] = get_cipher(data)
data = data[4:]
if len(data) < 2:
answers['pairwise_ciphers'] = defcipher
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers['bogus tail data'] = data
return answers
answers['pairwise_ciphers'] = ' '.join(get_cipher(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) < 2:
answers['authentication_suites'] = defauth
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers['bogus tail data'] = data
return answers
answers['authentication_suites'] = ' '.join(get_auth(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) >= 2:
capa = data[0] | (data[1] << 8)
answers['rsn_ie_capabilities'] = list()
if capa & 0x0001:
answers['rsn_ie_capabilities'].append('PreAuth')
if capa & 0x0002:
answers['rsn_ie_capabilities'].append('NoPairwise')
case = {0: '1-PTKSA-RC', 1: '2-PTKSA-RC', 2: '4-PTKSA-RC', 3: '16-PTKSA-RC'}.get((capa & 0x000c) >> 2)
if case:
answers['rsn_ie_capabilities'].append(case)
case = {0: '1-GTKSA-RC', 1: '2-GTKSA-RC', 2: '4-GTKSA-RC', 3: '16-GTKSA-RC'}.get((capa & 0x0030) >> 4)
if case:
answers['rsn_ie_capabilities'].append(case)
if capa & 0x0040:
answers['rsn_ie_capabilities'].append('MFP-required')
if capa & 0x0080:
answers['rsn_ie_capabilities'].append('MFP-capable')
if capa & 0x0200:
answers['rsn_ie_capabilities'].append('Peerkey-enabled')
if capa & 0x0400:
answers['rsn_ie_capabilities'].append('SPP-AMSDU-capable')
if capa & 0x0800:
answers['rsn_ie_capabilities'].append('SPP-AMSDU-required')
answers['rsn_ie_capabilities'].append('(0x{0:04x})'.format(capa))
data = data[2:]
invalid = False
if len(data) >= 2:
pmkid_count = data[0] | (data[1] << 8)
if len(data) >= 2 + 16 * pmkid_count:
answers['PMKIDs'] = pmkid_count
data = data[2 + 16 * pmkid_count:]
else:
invalid = True
if len(data) >= 4 and not invalid:
answers['Group mgmt cipher suite'] = get_cipher(data)
data = data[4:]
if data:
answers['* bogus tail data ({0})'.format(len(data))] = ' '.join(format(x, '02x') for x in data)
return answers
def get_ht_capa(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n602.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = {
'Capabilities': get_ht_capability(data[0] | (data[1] << 8)),
'Minimum RX AMPDU time spacing': ampdu_space.get((data[2] >> 2) & 7, 'BUG (spacing more than 3 bits!)'),
'Maximum RX AMPDU length': {0: 8191, 1: 16383, 2: 32767, 3: 65535}.get(data[2] & 3, 0),
}
answers.update(get_ht_mcs(data[3:]))
return answers
def get_interworking(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n645.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
raise NotImplementedError
def get_11u_advert(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n676.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
idx = 0
while idx < len(data) - 1:
qri = data[idx]
proto_id = data[idx + 1]
answers['Query Response Info'] = qri
answers['Query Response Length Limit'] = qri & 0x7f
if qri & (1 << 7):
answers['PAME-BI'] = True
answers['proto_id'] = {0: 'ANQP', 1: 'MIH Information Service', 3: 'Emergency Alert System (EAS)',
2: 'MIH Command and Event Services Capability Discovery',
221: 'Vendor Specific'}.get(proto_id, 'Reserved: {0}'.format(proto_id))
idx += 2
return answers
def get_11u_rcon(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n708.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
raise NotImplementedError
def get_ht_op(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n766.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
protection = ('no', 'nonmember', 20, 'non-HT mixed')
sta_chan_width = (20, 'any')
answers = {
'primary channel': data[0],
'secondary channel offset': ht_secondary_offset[data[1] & 0x3],
'STA channel width': sta_chan_width[(data[1] & 0x4) >> 2],
'RIFS': (data[1] & 0x8) >> 3,
'HT protection': protection[data[2] & 0x3],
'non-GF present': (data[2] & 0x4) >> 2,
'OBSS non-GF present': (data[2] & 0x10) >> 4,
'dual beacon': (data[4] & 0x40) >> 6,
'dual CTS protection': (data[4] & 0x80) >> 7,
'STBC beacon': data[5] & 0x1,
'L-SIG TXOP Prot': (data[5] & 0x2) >> 1,
'PCO active': (data[5] & 0x4) >> 2,
'PCO phase': (data[5] & 0x8) >> 3,
}
return answers
CAPA = {
0: 'HT Information Exchange Supported',
1: 'reserved (On-demand Beacon)',
2: 'Extended Channel Switching',
3: 'reserved (Wave Indication)',
4: 'PSMP Capability',
5: 'reserved (Service Interval Granularity)',
6: 'S-PSMP Capability',
7: 'Event',
8: 'Diagnostics',
9: 'Multicast Diagnostics',
10: 'Location Tracking',
11: 'FMS',
12: 'Proxy ARP Service',
13: 'Collocated Interference Reporting',
14: 'Civic Location',
15: 'Geospatial Location',
16: 'TFS',
17: 'WNM-Sleep Mode',
18: 'TIM Broadcast',
19: 'BSS Transition',
20: 'QoS Traffic Capability',
21: 'AC Station Count',
22: 'Multiple BSSID',
23: 'Timing Measurement',
24: 'Channel Usage',
25: 'SSID List',
26: 'DMS',
27: 'UTC TSF Offset',
28: 'TDLS Peer U-APSD Buffer STA Support',
29: 'TDLS Peer PSM Support',
30: 'TDLS channel switching',
31: 'Interworking',
32: 'QoS Map',
33: 'EBR',
34: 'SSPN Interface',
35: 'Reserved',
36: 'MSGCF Capability',
37: 'TDLS Support',
38: 'TDLS Prohibited',
39: 'TDLS Channel Switching Prohibited',
40: 'Reject Unadmitted Frame',
44: 'Identifier Location',
45: 'U-APSD Coexistence',
46: 'WNM-Notification',
47: 'Reserved',
48: 'UTF-8 SSID',
}
def get_capabilities(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n796.
Positional arguments:
data -- bytearray data to read.
Returns:
List.
"""
answers = list()
for i in range(len(data)):
base = i * 8
for bit in range(8):
if not data[i] & (1 << bit):
continue
answers.append(CAPA.get(bit + base, bit))
return answers
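# Hedged illustration (not in the original module): a single capability octet
# of 0x05 has bits 0 and 2 set, so get_capabilities(None, bytearray(b'\x05'))
# returns ['HT Information Exchange Supported', 'Extended Channel Switching'].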
def get_tim(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n874.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = {
'DTIM Count': data[0],
'DTIM Period': data[1],
'Bitmap Control': data[2],
'Bitmap[0]': data[3],
}
if len(data) - 4:
answers['+ octets'] = len(data) - 4
return answers
def get_vht_capa(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n889.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
raise NotImplementedError
def get_vht_oper(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n897.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
raise NotImplementedError
def get_obss_scan_params(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n914.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = {
'passive dwell': (data[1] << 8) | data[0],
'active dwell': (data[3] << 8) | data[2],
'channel width trigger scan interval': (data[5] << 8) | data[4],
'scan passive total per channel': (data[7] << 8) | data[6],
'scan active total per channel': (data[9] << 8) | data[8],
'BSS width channel transition delay factor': (data[11] << 8) | data[10],
'OBSS Scan Activity Threshold': ((data[13] << 8) | data[12]) / 100.0
}
return answers
def get_secchan_offs(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n927.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
if data[0] < len(ht_secondary_offset):
return "{0} ({1})".format(ht_secondary_offset[data[0]], data[0])
return "{0}".format(data[0])
def get_bss_load(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n935.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = {
'station count': (data[1] << 8) | data[0],
'channel utilisation': data[2] / 255.0,
'available admission capacity': (data[4] << 8) | data[3],
}
return answers
def get_mesh_conf(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n943.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
raise NotImplementedError
class ie_print(object):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n974.
Instance variables:
name -- printer label (string).
print_ -- print function to call. Has arguments type_ (c_uint8) and data (bytearray).
minlen -- used for validation (c_uint8).
maxlen -- used for validation (c_uint8).
flags -- type of printer (c_uint8).
"""
def __init__(self, name, print_, minlen, maxlen, flags):
"""Constructor."""
self.name = name
self.print_ = print_
self.minlen = minlen
self.maxlen = maxlen
self.flags = flags
def get_ie(instance, key, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n981.
Positional arguments:
instance -- `ie_print` class instance.
key -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
Returns:
Dictionary of parsed data with string keys.
"""
if not instance.print_:
return dict()
if len(data) < instance.minlen or len(data) > instance.maxlen:
if data:
return {'<invalid: {0} byte(s)>'.format(len(data)): ' '.join(format(x, '02x') for x in data)}
return {'<invalid: no data>': data}
return {instance.name: instance.print_(key, data)}
ieprinters = { # http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1013
0: ie_print('SSID', get_ssid, 0, 32, 3),
1: ie_print('Supported rates', get_supprates, 0, 255, 1),
3: ie_print('DS Parameter set', lambda _, d: d[0], 1, 1, 1),
5: ie_print('TIM', get_tim, 4, 255, 1),
6: ie_print('IBSS ATIM window', lambda _, d: '{0} TUs'.format((d[1] << 8) + d[0]), 2, 2, 1),
7: ie_print('Country', get_country, 3, 255, 1),
11: ie_print('BSS Load', get_bss_load, 5, 5, 1),
32: ie_print('Power constraint', lambda _, d: '{0} dB'.format(d[0]), 1, 1, 1),
35: ie_print('TPC report', lambda _, d: 'TX power: {0} dBm'.format(d[0]), 2, 2, 1),
42: ie_print('ERP', get_erp, 1, 255, 1),
45: ie_print('HT capabilities', get_ht_capa, 26, 26, 1),
47: ie_print('ERP D4.0', get_erp, 1, 255, 1),
48: ie_print('RSN', lambda _, d: get_rsn_ie('CCMP', 'IEEE 802.1x', d), 2, 255, 1),
50: ie_print('Extended supported rates', get_supprates, 0, 255, 1),
61: ie_print('HT operation', get_ht_op, 22, 22, 1),
62: ie_print('Secondary Channel Offset', get_secchan_offs, 1, 1, 1),
74: ie_print('Overlapping BSS scan params', get_obss_scan_params, 14, 255, 1),
107: ie_print('802.11u Interworking', get_interworking, 0, 255, 1),
108: ie_print('802.11u Advertisement', get_11u_advert, 0, 255, 1),
111: ie_print('802.11u Roaming Consortium', get_11u_rcon, 0, 255, 1),
113: ie_print('MESH Configuration', get_mesh_conf, 7, 7, 1),
114: ie_print('MESH ID', get_ssid, 0, 32, 3),
127: ie_print('Extended capabilities', get_capabilities, 0, 255, 1),
191: ie_print('VHT capabilities', get_vht_capa, 12, 255, 1),
192: ie_print('VHT operation', get_vht_oper, 5, 255, 1),
}
def get_wifi_wmm_param(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1046.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
aci_tbl = ('BE', 'BK', 'VI', 'VO')
if data[0] & 0x80:
answers['u-APSD'] = True
data = data[2:]
for i in range(4):
key = aci_tbl[(data[0] >> 5) & 3]
value = dict()
if data[0] & 0x10:
value['acm'] = True
value['CW'] = ((1 << (data[1] & 0xf)) - 1, (1 << (data[1] >> 4)) - 1)
value['AIFSN'] = data[0] & 0xf
if data[2] | data[3]:
value['TXOP'] = (data[2] + (data[3] << 8)) * 32
answers[key] = value
data = data[4:]
return {'Parameter version 1': answers}
def get_wifi_wmm(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1088.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
if data[0] == 0x01:
if len(data) < 20:
key = 'invalid'
elif data[1] != 1:
key = 'Parameter: not version 1'
else:
answers.update(get_wifi_wmm_param(data[2:]))
return answers
elif data[0] == 0x00:
key = 'information'
else:
key = 'type {0}'.format(data[0])
answers[key] = ' '.join(format(x, '02x') for x in data)
return answers
def get_wifi_wps(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1130.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
while len(data) >= 4:
subtype = (data[0] << 8) + data[1]
sublen = (data[2] << 8) + data[3]
if sublen > len(data):
break
elif subtype == 0x104a:
answers['Version'] = data[4] >> 4, data[4] & 0xF
elif subtype == 0x1011:
answers['Device name'] = data[4:sublen + 4]
elif subtype == 0x1012:
if sublen != 2:
                answers['Device Password ID'] = 'invalid length {0}'.format(sublen)
else:
id_ = data[4] << 8 | data[5]
answers['Device Password ID'] = (id_, wifi_wps_dev_passwd_id(id_))
elif subtype == 0x1021:
answers['Manufacturer'] = data[4:sublen + 4]
elif subtype == 0x1023:
answers['Model'] = data[4:sublen + 4]
elif subtype == 0x1024:
answers['Model Number'] = data[4:sublen + 4]
elif subtype == 0x103b:
val = data[4]
answers['Response Type'] = (val, 'AP' if val == 3 else '')
elif subtype == 0x103c:
answers['RF Bands'] = data[4]
elif subtype == 0x1041:
answers['Selected Registrar'] = data[4]
elif subtype == 0x1042:
answers['Serial Number'] = data[4:sublen + 4]
elif subtype == 0x1044:
val = data[4]
answers['Wi-Fi Protected Setup State'] = (val, {1: 'Unconfigured', 2: 'Configured'}.get(val, ''))
elif subtype == 0x1047:
if sublen != 16:
answers['UUID'] = '(invalid, length={0})'.format(sublen)
else:
                answers['UUID'] = bytearray(data[4:20])
elif subtype == 0x1054:
if sublen != 8:
answers['Primary Device Type'] = '(invalid, length={0})'.format(sublen)
else:
answers['Primary Device Type'] = '{0}-{1}-{2}'.format(
data[4] << 8 | data[5],
''.join(format(x, '02x') for x in data[6:9]),
data[10] << 8 | data[11]
)
elif subtype == 0x1057:
answers['AP setup locked'] = data[4]
elif subtype == 0x1008 or subtype == 0x1053:
meth = (data[4] << 8) + data[5]
key = 'Selected Registrar Config methods' if subtype == 0x1053 else 'Config methods'
values = [s for i, s in enumerate(('USB', 'Ethernet', 'Label', 'Display', 'Ext. NFC', 'Int. NFC',
'NFC Intf.', 'PBC', 'Keypad')) if meth & (1 << i)]
answers[key] = values
else:
value = ' '.join(format(x, '02x') for x in data[4:])
answers['Unknown TLV ({0:04x}, {1} bytes)'.format(subtype, sublen)] = value
data = data[4:]
if data:
answers['bogus tail data'] = ' '.join(format(x, '02x') for x in data)
return answers
wifiprinters = { # http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1300
1: ie_print('WPA', lambda _, d: get_rsn_ie('TKIP', 'IEEE 802.1X', d), 2, 255, 1),
2: ie_print('WMM', get_wifi_wmm, 1, 255, 1),
4: ie_print('WPS', get_wifi_wps, 0, 255, 1),
}
def get_p2p(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1306.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
raise NotImplementedError
def get_hs20_ind(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1386.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
raise NotImplementedError
wfa_printers = { # http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1396
9: ie_print('P2P', get_p2p, 2, 255, 1),
16: ie_print('HotSpot 2.0 Indication', get_hs20_ind, 1, 255, 1),
}
def get_vendor(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1401.
Positional arguments:
data -- bytearray data to read.
Returns:
Dictionary of parsed data with string keys.
"""
if len(data) < 3:
        return {'Vendor specific: <too short> data': ' '.join(format(x, '02x') for x in data)}
key = data[3]
if bytes(data[:3]) == ms_oui:
if key in wifiprinters and wifiprinters[key].flags & 1:
return get_ie(wifiprinters[key], key, data[4:])
        return {'MS/WiFi {0:02x}, data'.format(key): ' '.join(format(x, '02x') for x in data[4:])}
if bytes(data[:3]) == wfa_oui:
if key in wfa_printers and wfa_printers[key].flags & 1:
return get_ie(wfa_printers[key], key, data[4:])
        return {'WFA {0:02x}, data'.format(key): ' '.join(format(x, '02x') for x in data[4:])}
unknown_key = 'Vendor specific: OUI {0:02x}:{1:02x}:{2:02x}, data'.format(data[0], data[1], data[2])
unknown_value = ' '.join(format(x, '02x') for x in data[3:])
return {unknown_key: unknown_value}
def get_ies(ie):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1456.
Positional arguments:
ie -- bytearray data to read.
Returns:
Dictionary of all parsed data. In the iw tool it prints everything to terminal. This function returns a dictionary
with string keys (being the "titles" of data printed by iw), and data values (integers/strings/etc).
"""
answers = dict()
while len(ie) >= 2 and len(ie) >= ie[1]:
key = ie[0] # Should be key in `ieprinters` dict.
len_ = ie[1] # Length of this information element.
data = ie[2:len_ + 2] # Data for this information element.
if key in ieprinters and ieprinters[key].flags & 1:
answers.update(get_ie(ieprinters[key], key, data))
elif key == 221:
answers.update(get_vendor(data))
else:
answers['Unknown IE ({0})'.format(key)] = ' '.join(format(x, '02x') for x in data)
ie = ie[len_ + 2:]
return answers
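# Hedged illustration (not in the original module): a raw SSID information
# element (ID 0, length 4, payload "test") is dispatched to the SSID printer
# above, giving roughly {'SSID': 'test'}; the exact value depends on
# get_ssid() from libnl.nl80211.iw_util.
#
#     get_ies(bytearray(b'\x00\x04test'))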
bss_policy = dict((i, None) for i in range(nl80211.NL80211_BSS_MAX + 1))
bss_policy.update({ # http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n1549
nl80211.NL80211_BSS_BSSID: nla_policy(),
nl80211.NL80211_BSS_FREQUENCY: nla_policy(type_=NLA_U32),
nl80211.NL80211_BSS_TSF: nla_policy(type_=NLA_U64),
nl80211.NL80211_BSS_BEACON_INTERVAL: nla_policy(type_=NLA_U16),
nl80211.NL80211_BSS_CAPABILITY: nla_policy(type_=NLA_U16),
nl80211.NL80211_BSS_INFORMATION_ELEMENTS: nla_policy(),
nl80211.NL80211_BSS_SIGNAL_MBM: nla_policy(type_=NLA_U32),
nl80211.NL80211_BSS_SIGNAL_UNSPEC: nla_policy(type_=NLA_U8),
nl80211.NL80211_BSS_STATUS: nla_policy(type_=NLA_U32),
nl80211.NL80211_BSS_SEEN_MS_AGO: nla_policy(type_=NLA_U32),
nl80211.NL80211_BSS_BEACON_IES: nla_policy(),
nl80211.NL80211_BSS_CHAN_WIDTH: nla_policy(),
nl80211.NL80211_BSS_BEACON_TSF: nla_policy(),
nl80211.NL80211_BSS_PRESP_DATA: nla_policy(),
})
| Robpol86/libnl | libnl/nl80211/iw_scan.py | Python | lgpl-2.1 | 29,804 | 0.001711 |
#
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
import logging
from gui.utils import QtImport
from gui.BaseComponents import BaseWidget
from gui.widgets.task_toolbox_widget import TaskToolBoxWidget
from HardwareRepository import HardwareRepository as HWR
__credits__ = ["MXCuBE collaboration"]
__license__ = "LGPLv3+"
__category__ = "General"
class TaskToolBoxBrick(BaseWidget):
request_tree_brick = QtImport.pyqtSignal()
def __init__(self, *args):
BaseWidget.__init__(self, *args)
# Internal values -----------------------------------------------------
self.ispyb_logged_in = False
self.tree_brick = None
# Properties ----------------------------------------------------------
self.add_property("useOscStartCbox", "boolean", False)
self.add_property("useCompression", "boolean", False)
#self.add_property("availableTasks", "string", "discrete char helical")
self.add_property("showDiscreetTask", "boolean", True)
self.add_property("showHelicalTask", "boolean", True)
self.add_property("showCharTask", "boolean", True)
self.add_property("showAdvancedTask", "boolean", True)
self.add_property("showStillScanTask", "boolean", False)
self.add_property("showCollectNowButton", "boolean", False)
# Signals -------------------------------------------------------------
self.define_signal("request_tree_brick", ())
# Slots ---------------------------------------------------------------
self.define_slot("logged_in", ())
self.define_slot("set_session", ())
self.define_slot("selection_changed", ())
self.define_slot("user_group_saved", ())
self.define_slot("set_tree_brick", ())
# Graphic elements ----------------------------------------------------
self.task_tool_box_widget = TaskToolBoxWidget(self)
# Layout --------------------------------------------------------------
self.main_layout = QtImport.QVBoxLayout(self)
self.main_layout.addWidget(self.task_tool_box_widget)
self.main_layout.setSpacing(0)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.main_layout)
# SizePolicies --------------------------------------------------------
# self.setSizePolicy(QtImport.QSizePolicy.MinimumExpanding,
# QtImport.QSizePolicy.MinimumExpanding)
# Other ---------------------------------------------------------------
HWR.beamline.sample_view.connect("pointSelected", self.point_selected)
def set_expert_mode(self, expert):
self.task_tool_box_widget.set_expert_mode(expert)
def run(self):
if HWR.beamline.session.session_id:
self.setEnabled(True)
#self.task_tool_box_widget.set_available_tasks(self["availableTasks"])
self.request_tree_brick.emit()
self.task_tool_box_widget.adjust_width(self.width())
def user_group_saved(self, new_user_group):
HWR.beamline.session.set_user_group(str(new_user_group))
self.task_tool_box_widget.update_data_path_model()
path = (
HWR.beamline.session.get_base_image_directory()
+ "/"
+ str(new_user_group)
)
msg = "Image path is: %s" % path
logging.getLogger("GUI").info(msg)
@QtImport.pyqtSlot(BaseWidget)
def set_tree_brick(self, brick):
self.tree_brick = brick
self.tree_brick.compression_state = self["useCompression"] == 1
self.task_tool_box_widget.set_tree_brick(brick)
@QtImport.pyqtSlot(int, str, str, int, str, str, bool)
def set_session(
self,
session_id,
t_prop_code=None,
prop_number=None,
prop_id=None,
start_date=None,
prop_code=None,
is_inhouse=None,
):
"""
Connected to the slot set_session and is called after a
request to get the current session from LIMS (ISPyB) is
made. The signal is normally emitted by the brick that
handles LIMS login, ie ProposalBrick.
The session_id is '' if no session could be retrieved.
"""
        if session_id == "":
self.logged_in(True)
@QtImport.pyqtSlot(bool)
def logged_in(self, logged_in):
"""
        Handles the signal logged_in from the brick that handles
        LIMS (ISPyB) login, i.e. ProposalBrick. The signal is
        emitted when a user has successfully logged in.
"""
logged_in = True
self.ispyb_logged_in = logged_in
if HWR.beamline.session is not None:
HWR.beamline.session.set_user_group("")
self.setEnabled(logged_in)
self.task_tool_box_widget.ispyb_logged_in(logged_in)
def property_changed(self, property_name, old_value, new_value):
if property_name == "useOscStartCbox":
self.task_tool_box_widget.use_osc_start_cbox(new_value)
elif property_name == "useCompression":
self.task_tool_box_widget.enable_compression(new_value)
elif property_name == "showCollectNowButton":
self.task_tool_box_widget.collect_now_button.setVisible(new_value)
elif property_name == "showDiscreetTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.discrete_page
)
elif property_name == "showHelicalTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.helical_page
)
elif property_name == "showCharTask":
if not new_value:
self.task_tool_box_widget.hide_task(self.task_tool_box_widget.char_page)
elif property_name == "showAdvancedTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.advanced_page
)
elif property_name == "showStillScanTask":
if not new_value:
self.task_tool_box_widget.hide_task(
self.task_tool_box_widget.still_scan_page
)
def selection_changed(self, items):
"""
Connected to the signal "selection_changed" of the TreeBrick.
Called when the selection in the tree changes.
"""
self.task_tool_box_widget.selection_changed(items)
def point_selected(self, selected_position):
self.task_tool_box_widget.helical_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.discrete_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.char_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.energy_scan_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.xrf_spectrum_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.discrete_page.refresh_current_item()
self.task_tool_box_widget.helical_page.refresh_current_item()
self.task_tool_box_widget.char_page.refresh_current_item()
self.task_tool_box_widget.energy_scan_page.refresh_current_item()
self.task_tool_box_widget.xrf_spectrum_page.refresh_current_item()
| IvarsKarpics/mxcube | gui/bricks/TaskToolBoxBrick.py | Python | lgpl-3.0 | 8,196 | 0.000488 |
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url(r'^$', views.customer_list, name='customers'),
url(r'^(?P<pk>[0-9]+)/$', views.customer_details, name='customer-details')
)
| hongquan/saleor | saleor/dashboard/customer/urls.py | Python | bsd-3-clause | 234 | 0 |
#!/usr/bin/python
import numpy as np
#a = np.linspace(0.,10.,100)
#b = np.sqrt(a)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv
def import_text(filename, separator):
for line in csv.reader(open(filename), delimiter=separator,
skipinitialspace=True):
if line:
yield line
def to_num(s):
try:
return int(s)
except ValueError:
return float(s)
def to_float(s):
try:
return float(s)
except ValueError:
return int(s)
def column(matrix, i):
return [row[i] for row in matrix]
def bandwidth(timings, sizes):
result = []
for i in range(0, len(timings)):
result.append((2*to_float(sizes[i]))/(to_float(timings[i])*1000000000.0))
return result
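# For example (assuming timings are in seconds and sizes in bytes): copying a
# 64 MiB buffer in 0.5 ms touches every byte twice (read + write), so the
# reported bandwidth is 2 * 67108864 / (0.0005 * 1e9) ~= 268 GB/s.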
#read data
table = []
for data in import_text('./0_cudamemcpy_offset1.dat', ' '):
table.append(data)
#print column(table, 0)[1:]
size = column(table, 1)[1:]
size_string = column(table, 0)[1:]
#print size_string
# data
char_t = column(table, 2)[1:]
#short_t = column(table, 3)[1:]
#float_t = column(table, 4)[1:]
#double_t = column(table, 5)[1:]
#float3_t = column(table, 6)[1:]
#float4_t = column(table, 7)[1:]
char_bw = bandwidth(char_t, size)
#short_bw = bandwidth(short_t, size)
#float_bw = bandwidth(float_t, size)
#double_bw = bandwidth(double_t, size)
#float3_bw = bandwidth(float3_t, size)
#float4_bw = bandwidth(float4_t, size)
# read other table
di_table = []
for di_data in import_text('./1_direct_offset1.dat', ' '):
di_table.append(di_data)
#print column(table, 0)[1:]
#size_string = column(table, 0)[1:]
#print size_string
# data
di_char_t = column(di_table, 2)[1:]
di_short_t = column(di_table, 3)[1:]
di_float_t = column(di_table, 4)[1:]
di_double_t = column(di_table, 5)[1:]
di_float3_t = column(di_table, 6)[1:]
di_float4_t = column(di_table, 7)[1:]
di_char_bw = bandwidth(di_char_t, size)
di_short_bw = bandwidth(di_short_t, size)
di_float_bw = bandwidth(di_float_t, size)
di_double_bw = bandwidth(di_double_t, size)
di_float3_bw = bandwidth(di_float3_t, size)
di_float4_bw = bandwidth(di_float4_t, size)
size_np = np.array(size)
# normalize the size
for i in range(0, len(size)):
size_np[i] = i+1
# size_np[len(size)-1-i] = to_num(to_num(size_np[len(size)-1-i])/to_num(size_np[0])) #to_float(size[i])/to_float(size[0])
#print to_float(size[11])
#print to_float(float4_t[11])
#print (to_float(2*sizes[i])/(to_float(timings[i])*1000000000.0))
#print char_bw
#print float_bw
#print float
# start drawing
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title("cuMemcpy v.s. d2d_direct_kernel (address not aligned)");
ax.set_xlabel(table[0][0])
ax.set_ylabel('Bandwidth (GB/sec)')
#print len(size_string)
#print len(char_bw)
fig.add_subplot(ax)
#ax.set_ylim([180,260])
print size_np
print size_string
#ax.set_xticklabels(size_np, range(len(size_np)))
ax.set_xticklabels(size_string)
#fig.xticks(size_np, size_string)
#ax.set_xticks(size_np, ('128K', '256K', '512K', '1M', '2M', '4M', '8M', '16M', '32M', '64M'))
#ax.set_autoscaley_on(False)
ax.plot(size_np, char_bw, linestyle = '-', color = 'blue', marker='o', linewidth = 1, label='cudaMemcpy')
#ax.plot(size, short_bw, linestyle = '-', color = 'red', linewidth = 1, label='cudaMemcpy_short')
#ax.plot(size, float_bw, linestyle = '-', color = 'c', linewidth = 1, label='cudaMemcpy_float')
#ax.plot(size, double_bw, linestyle = '-', color = 'm', linewidth = 1, label='cudaMemcpy_double')
#ax.plot(size, float3_bw, linestyle = '-', color = 'k', linewidth = 1, label='cudaMemcpy_float3')
#ax.plot(size, float4_bw, linestyle = '-', color = 'y', linewidth = 1, label='cudaMemcpy_float4')
ax.plot(size_np, di_char_bw, linestyle = ':', color = 'blue', marker='o', linewidth = 2, label='d2d_direct_char')
ax.plot(size_np, di_short_bw, linestyle = ':', color = 'red', marker='s', linewidth = 2, label='d2d_direct_short')
ax.plot(size_np, di_float_bw, linestyle = ':', color = 'c', marker='p', linewidth = 2, label='d2d_direct_float')
ax.plot(size_np, di_double_bw, linestyle = ':', color = 'm', marker='*', linewidth = 2, label='d2d_direct_double')
ax.plot(size_np, di_float3_bw, linestyle = ':', color = 'k', marker='h', linewidth = 2, label='d2d_direct_float3')
ax.plot(size_np, di_float4_bw, linestyle = ':', color = 'y', marker='x', linewidth = 2, label='d2d_direct_float4')
size_num=range(len(size))
#print size_num
print size_string
box = ax.get_position()
ax.set_position([box.x0, box.y0+box.height*0.1, box.width, box.height*0.9])
#ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol = 6, fancybox = True, shadow = True, prop={'size':9}, )
#ax.legend(loc='upper center', ncol = 3, fancybox = True, shadow = True, prop={'size':9}, )
#ax.legend(loc='upper left', ncol = 1, fancybox = True, shadow = True, prop={'size':9}, )
ax.legend(loc='upper center', ncol = 4, bbox_to_anchor=(0.5,-0.1), fancybox = True, shadow = True, prop={'size':9}, )
plt.show()
fig.savefig('cudaMemcpy_vs_d2d_offset1.pdf')
| ypzhang/jusha | book/scripts/d2d_kernel/cumemcpy_to_direct_offset1.py | Python | lgpl-3.0 | 5,058 | 0.028865 |
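The bandwidth() helper above converts each timing row into GB/s, counting every byte twice because a device-to-device copy performs one read and one write. A standalone check of that arithmetic, assuming the timing column in the .dat files is in seconds (the numbers below are made up):

# Made-up numbers run through the same formula as bandwidth() above.
size_bytes = 64 * 1024 * 1024                      # 64 MiB payload
elapsed_s = 0.00056                                # measured copy time in seconds
gb_per_s = (2 * size_bytes) / (elapsed_s * 1000000000.0)
print(gb_per_s)                                    # roughly 240 GB/s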
# A Gui interface allowing the binary illiterate to figure out the ip address the Arduino has been assigned.
import os
import re
from PySide.QtCore import QFile, QMetaObject, QSignalMapper, Slot, QRegExp
from PySide.QtGui import QDialog, QPushButton, QRegExpValidator
from PySide.QtUiTools import QUiLoader
class IPHelper(QDialog):
def __init__(self, parent=None):
super(IPHelper, self).__init__(parent)
f = QFile(os.path.join(os.path.split(__file__)[0], 'iphelper.ui'))
loadUi(f, self)
f.close()
self.ipAddress = None
# create validators
validator = QRegExpValidator(QRegExp('\d{,3}'))
self.uiFirstTetTXT.setValidator(validator)
self.uiSecondTetTXT.setValidator(validator)
self.uiThirdTetTXT.setValidator(validator)
self.uiFourthTetTXT.setValidator(validator)
# build a map of the buttons
self.buttons = [None]*16
self.signalMapper = QSignalMapper(self)
self.signalMapper.mapped.connect(self.tetMap)
for button in self.findChildren(QPushButton):
match = re.findall(r'^uiTrellis(\d{,2})BTN$', button.objectName())
if match:
i = int(match[0])
self.buttons[i] = button
if i >= 12:
self.signalMapper.setMapping(button, i)
button.clicked.connect(self.signalMapper.map)
self.tetMap(12)
@Slot()
def accept(self):
self.ipAddress = '{}.{}.{}.{}'.format(self.uiFirstTetTXT.text(), self.uiSecondTetTXT.text(), self.uiThirdTetTXT.text(), self.uiFourthTetTXT.text())
super(IPHelper, self).accept()
@Slot(int)
def tetMap(self, index):
button = self.buttons[index]
if not button.isChecked():
return
for i in range(12, 16):
b = self.buttons[i]
if b != button:
b.setChecked(False)
# update the buttons to match the current value of the text
for edit in (self.uiFirstTetTXT, self.uiSecondTetTXT, self.uiThirdTetTXT, self.uiFourthTetTXT):
edit.setProperty('active', False)
if index == 12:
val = int(self.uiFourthTetTXT.text())
self.uiFourthTetTXT.setProperty('active', True)
elif index == 13:
val = int(self.uiThirdTetTXT.text())
self.uiThirdTetTXT.setProperty('active', True)
elif index == 14:
val = int(self.uiSecondTetTXT.text())
self.uiSecondTetTXT.setProperty('active', True)
elif index == 15:
val = int(self.uiFirstTetTXT.text())
self.uiFirstTetTXT.setProperty('active', True)
for i in range(8):
b = self.buttons[i]
b.blockSignals(True)
b.setChecked(2**i & val)
b.blockSignals(False)
# force a refresh of the styleSheet
self.setStyleSheet(self.styleSheet())
@Slot()
def buttonPressed(self):
total = 0
for i in range(8):
if self.buttons[i].isChecked():
total += 2**i
total = unicode(total)
if self.uiTrellis12BTN.isChecked():
self.uiFourthTetTXT.setText(total)
elif self.uiTrellis13BTN.isChecked():
self.uiThirdTetTXT.setText(total)
elif self.uiTrellis14BTN.isChecked():
self.uiSecondTetTXT.setText(total)
elif self.uiTrellis15BTN.isChecked():
self.uiFirstTetTXT.setText(total)
# Code to load a ui file like using PyQt4
# https://www.mail-archive.com/pyside@lists.openbossa.org/msg01401.html
class MyQUiLoader(QUiLoader):
def __init__(self, baseinstance):
super(MyQUiLoader, self).__init__()
self.baseinstance = baseinstance
def createWidget(self, className, parent=None, name=""):
widget = super(MyQUiLoader, self).createWidget(className, parent, name)
if parent is None:
return self.baseinstance
else:
setattr(self.baseinstance, name, widget)
return widget
def loadUi(uifile, baseinstance=None):
loader = MyQUiLoader(baseinstance)
ui = loader.load(uifile)
QMetaObject.connectSlotsByName(ui)
return ui
| MHendricks/Motionbuilder-Remote | iphelper.py | Python | mit | 3,612 | 0.027409 |
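The eight Trellis toggle buttons above encode a single octet bit by bit, and buttonPressed() folds them back into a decimal value with 2**i weights. A minimal, GUI-free sketch of that round trip (the button states are invented):

# Hypothetical round trip between 8 toggle states and one address octet,
# mirroring the 2**i weighting used in tetMap()/buttonPressed() above.
bits = [False, True, False, True, True, False, False, True]   # buttons 0..7
octet = sum(2 ** i for i, pressed in enumerate(bits) if pressed)
print(octet)                                   # 2 + 8 + 16 + 128 = 154
assert [bool(octet & (2 ** i)) for i in range(8)] == bits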
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import os.path
import datetime
from sickbeard import db, common, helpers, logger
from sickbeard.providers.generic import GenericProvider
from sickbeard import encodingKludge as ek
from sickbeard.name_parser.parser import NameParser, InvalidNameException
MAX_DB_VERSION = 19
class MainSanityCheck(db.DBSanityCheck):
def check(self):
self.fix_duplicate_shows()
self.fix_duplicate_episodes()
self.fix_orphan_episodes()
def fix_duplicate_shows(self):
sqlResults = self.connection.select("SELECT show_id, tvdb_id, COUNT(tvdb_id) as count FROM tv_shows GROUP BY tvdb_id HAVING count > 1")
for cur_duplicate in sqlResults:
logger.log(u"Duplicate show detected! tvdb_id: " + str(cur_duplicate["tvdb_id"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG)
cur_dupe_results = self.connection.select("SELECT show_id, tvdb_id FROM tv_shows WHERE tvdb_id = ? LIMIT ?",
[cur_duplicate["tvdb_id"], int(cur_duplicate["count"]) - 1]
)
for cur_dupe_id in cur_dupe_results:
logger.log(u"Deleting duplicate show with tvdb_id: " + str(cur_dupe_id["tvdb_id"]) + u" show_id: " + str(cur_dupe_id["show_id"]))
self.connection.action("DELETE FROM tv_shows WHERE show_id = ?", [cur_dupe_id["show_id"]])
else:
logger.log(u"No duplicate show, check passed")
def fix_duplicate_episodes(self):
sqlResults = self.connection.select("SELECT showid, season, episode, COUNT(showid) as count FROM tv_episodes GROUP BY showid, season, episode HAVING count > 1")
for cur_duplicate in sqlResults:
logger.log(u"Duplicate episode detected! showid: " + str(cur_duplicate["showid"]) + u" season: " + str(cur_duplicate["season"]) + u" episode: " + str(cur_duplicate["episode"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG)
cur_dupe_results = self.connection.select("SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? and episode = ? ORDER BY episode_id DESC LIMIT ?",
[cur_duplicate["showid"], cur_duplicate["season"], cur_duplicate["episode"], int(cur_duplicate["count"]) - 1]
)
for cur_dupe_id in cur_dupe_results:
logger.log(u"Deleting duplicate episode with episode_id: " + str(cur_dupe_id["episode_id"]))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_dupe_id["episode_id"]])
else:
logger.log(u"No duplicate episode, check passed")
def fix_orphan_episodes(self):
sqlResults = self.connection.select("SELECT episode_id, showid, tv_shows.tvdb_id FROM tv_episodes LEFT JOIN tv_shows ON tv_episodes.showid=tv_shows.tvdb_id WHERE tv_shows.tvdb_id is NULL")
for cur_orphan in sqlResults:
logger.log(u"Orphan episode detected! episode_id: " + str(cur_orphan["episode_id"]) + " showid: " + str(cur_orphan["showid"]), logger.DEBUG)
logger.log(u"Deleting orphan episode with episode_id: " + str(cur_orphan["episode_id"]))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_orphan["episode_id"]])
else:
logger.log(u"No orphan episode, check passed")
def backupDatabase(version):
helpers.backupVersionedFile(db.dbFilename(), version)
# ======================
# = Main DB Migrations =
# ======================
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema (db.SchemaUpgrade):
def test(self):
return self.hasTable("tv_shows")
def execute(self):
queries = [
"CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, seasonfolders NUMERIC, paused NUMERIC, startyear NUMERIC);",
"CREATE TABLE tv_episodes (episode_id INTEGER PRIMARY KEY, showid NUMERIC, tvdbid NUMERIC, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT);",
"CREATE TABLE info (last_backlog NUMERIC, last_tvdb NUMERIC);",
"CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider NUMERIC);",
"CREATE TABLE episode_links (episode_id INTEGER, link TEXT);",
"CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC);",
"CREATE TABLE processed_files (episode_id INTEGER, filename TEXT, md5 TEXT);",
"CREATE TABLE frenchtorrentdb_history (date TEXT, link TEXT);"
]
for query in queries:
self.connection.action(query)
class AddTvrId (InitialSchema):
def test(self):
return self.hasColumn("tv_shows", "tvr_id")
def execute(self):
self.addColumn("tv_shows", "tvr_id")
class AddTvrName (AddTvrId):
def test(self):
return self.hasColumn("tv_shows", "tvr_name")
def execute(self):
self.addColumn("tv_shows", "tvr_name", "TEXT", "")
class AddImdbId (InitialSchema):
def test(self):
return self.hasColumn("tv_shows", "imdb_id")
def execute(self):
self.addColumn("tv_shows", "imdb_id", "TEXT", "")
class AddAirdateIndex (AddTvrName):
def test(self):
return self.hasTable("idx_tv_episodes_showid_airdate")
def execute(self):
self.connection.action("CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid,airdate);")
class NumericProviders (AddAirdateIndex):
def test(self):
return self.connection.tableInfo("history")['provider']['type'] == 'TEXT'
histMap = {-1: 'unknown',
1: 'newzbin',
2: 'tvbinz',
3: 'nzbs',
4: 'eztv',
5: 'nzbmatrix',
6: 'tvnzb',
7: 'ezrss',
8: 'thepiratebay',
9: 'kat'}
def execute(self):
self.connection.action("ALTER TABLE history RENAME TO history_old;")
self.connection.action("CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT);")
for x in self.histMap.keys():
self.upgradeHistory(x, self.histMap[x])
def upgradeHistory(self, number, name):
oldHistory = self.connection.action("SELECT * FROM history_old").fetchall()
for curResult in oldHistory:
sql = "INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)"
provider = 'unknown'
try:
provider = self.histMap[int(curResult["provider"])]
except ValueError:
provider = curResult["provider"]
args = [curResult["action"], curResult["date"], curResult["showid"], curResult["season"], curResult["episode"], curResult["quality"], curResult["resource"], provider]
self.connection.action(sql, args)
class NewQualitySettings (NumericProviders):
def test(self):
return self.hasTable("db_version")
def execute(self):
backupDatabase(0)
# old stuff that's been removed from common but we need it to upgrade
HD = 1
SD = 3
ANY = 2
BEST = 4
ACTION_SNATCHED = 1
ACTION_PRESNATCHED = 2
ACTION_DOWNLOADED = 3
PREDOWNLOADED = 3
MISSED = 6
BACKLOG = 7
DISCBACKLOG = 8
SNATCHED_BACKLOG = 10
### Update default quality
if sickbeard.QUALITY_DEFAULT == HD:
sickbeard.QUALITY_DEFAULT = common.HD
elif sickbeard.QUALITY_DEFAULT == SD:
sickbeard.QUALITY_DEFAULT = common.SD
elif sickbeard.QUALITY_DEFAULT == ANY:
sickbeard.QUALITY_DEFAULT = common.ANY
elif sickbeard.QUALITY_DEFAULT == BEST:
sickbeard.QUALITY_DEFAULT = common.BEST
### Update episode statuses
toUpdate = self.connection.select("SELECT episode_id, location, status FROM tv_episodes WHERE status IN (?, ?, ?, ?, ?, ?, ?)", [common.DOWNLOADED, common.SNATCHED, PREDOWNLOADED, MISSED, BACKLOG, DISCBACKLOG, SNATCHED_BACKLOG])
didUpdate = False
for curUpdate in toUpdate:
# remember that we changed something
didUpdate = True
newStatus = None
oldStatus = int(curUpdate["status"])
if oldStatus == common.SNATCHED:
newStatus = common.Quality.compositeStatus(common.SNATCHED, common.Quality.UNKNOWN)
elif oldStatus == PREDOWNLOADED:
newStatus = common.Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV)
elif oldStatus in (MISSED, BACKLOG, DISCBACKLOG):
newStatus = common.WANTED
elif oldStatus == SNATCHED_BACKLOG:
newStatus = common.Quality.compositeStatus(common.SNATCHED, common.Quality.UNKNOWN)
if newStatus != None:
self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ? ", [newStatus, curUpdate["episode_id"]])
continue
# if we get here status should be == DOWNLOADED
if not curUpdate["location"]:
continue
newQuality = common.Quality.nameQuality(curUpdate["location"])
if newQuality == common.Quality.UNKNOWN:
newQuality = common.Quality.assumeQuality(curUpdate["location"])
self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [common.Quality.compositeStatus(common.DOWNLOADED, newQuality), curUpdate["episode_id"]])
# if no updates were done then the backup is useless
if didUpdate:
os.remove(db.dbFilename(suffix='v0'))
### Update show qualities
toUpdate = self.connection.select("SELECT * FROM tv_shows")
for curUpdate in toUpdate:
if not curUpdate["quality"]:
continue
if int(curUpdate["quality"]) == HD:
newQuality = common.HD
elif int(curUpdate["quality"]) == SD:
newQuality = common.SD
elif int(curUpdate["quality"]) == ANY:
newQuality = common.ANY
elif int(curUpdate["quality"]) == BEST:
newQuality = common.BEST
else:
logger.log(u"Unknown show quality: " + str(curUpdate["quality"]), logger.WARNING)
newQuality = None
if newQuality:
self.connection.action("UPDATE tv_shows SET quality = ? WHERE show_id = ?", [newQuality, curUpdate["show_id"]])
### Update history
toUpdate = self.connection.select("SELECT * FROM history")
for curUpdate in toUpdate:
newAction = None
newStatus = None
if int(curUpdate["action"] == ACTION_SNATCHED):
newStatus = common.SNATCHED
            elif int(curUpdate["action"]) == ACTION_DOWNLOADED:
newStatus = common.DOWNLOADED
            elif int(curUpdate["action"]) == ACTION_PRESNATCHED:
newAction = common.Quality.compositeStatus(common.SNATCHED, common.Quality.SDTV)
if newAction == None and newStatus == None:
continue
if not newAction:
if int(curUpdate["quality"] == HD):
newAction = common.Quality.compositeStatus(newStatus, common.Quality.HDTV)
                elif int(curUpdate["quality"]) == SD:
newAction = common.Quality.compositeStatus(newStatus, common.Quality.SDTV)
else:
newAction = common.Quality.compositeStatus(newStatus, common.Quality.UNKNOWN)
self.connection.action("UPDATE history SET action = ? WHERE date = ? AND showid = ?", [newAction, curUpdate["date"], curUpdate["showid"]])
self.connection.action("CREATE TABLE db_version (db_version INTEGER);")
self.connection.action("INSERT INTO db_version (db_version) VALUES (?)", [1])
class DropOldHistoryTable(NewQualitySettings):
def test(self):
return self.checkDBVersion() >= 2
def execute(self):
self.connection.action("DROP TABLE history_old")
self.incDBVersion()
class UpgradeHistoryForGenericProviders(DropOldHistoryTable):
def test(self):
return self.checkDBVersion() >= 3
def execute(self):
providerMap = {'NZBs': 'NZBs.org',
'BinReq': 'Bin-Req',
'NZBsRUS': '''NZBs'R'US''',
'EZTV': 'EZTV@BT-Chat'}
for oldProvider in providerMap:
self.connection.action("UPDATE history SET provider = ? WHERE provider = ?", [providerMap[oldProvider], oldProvider])
self.incDBVersion()
class AddAirByDateOption(UpgradeHistoryForGenericProviders):
def test(self):
return self.checkDBVersion() >= 4
def execute(self):
self.connection.action("ALTER TABLE tv_shows ADD air_by_date NUMERIC")
self.incDBVersion()
class ChangeSabConfigFromIpToHost(AddAirByDateOption):
def test(self):
return self.checkDBVersion() >= 5
def execute(self):
sickbeard.SAB_HOST = 'http://' + sickbeard.SAB_HOST + '/sabnzbd/'
self.incDBVersion()
class FixSabHostURL(ChangeSabConfigFromIpToHost):
def test(self):
return self.checkDBVersion() >= 6
def execute(self):
if sickbeard.SAB_HOST.endswith('/sabnzbd/'):
sickbeard.SAB_HOST = sickbeard.SAB_HOST.replace('/sabnzbd/', '/')
sickbeard.save_config()
self.incDBVersion()
class AddLang (FixSabHostURL):
def test(self):
return self.hasColumn("tv_shows", "lang")
def execute(self):
self.addColumn("tv_shows", "lang", "TEXT", "fr")
class AddCustomSearchNames (AddLang):
def test(self):
return self.hasColumn("tv_shows", "custom_search_names")
def execute(self):
self.addColumn("tv_shows", "custom_search_names", "TEXT", "")
class PopulateRootDirs (AddCustomSearchNames):
def test(self):
return self.checkDBVersion() >= 7
def execute(self):
dir_results = self.connection.select("SELECT location FROM tv_shows")
dir_counts = {}
for cur_dir in dir_results:
cur_root_dir = ek.ek(os.path.dirname, ek.ek(os.path.normpath, cur_dir["location"]))
if cur_root_dir not in dir_counts:
dir_counts[cur_root_dir] = 1
else:
dir_counts[cur_root_dir] += 1
logger.log(u"Dir counts: " + str(dir_counts), logger.DEBUG)
if not dir_counts:
self.incDBVersion()
return
default_root_dir = dir_counts.values().index(max(dir_counts.values()))
new_root_dirs = str(default_root_dir) + '|' + '|'.join(dir_counts.keys())
logger.log(u"Setting ROOT_DIRS to: " + new_root_dirs, logger.DEBUG)
sickbeard.ROOT_DIRS = new_root_dirs
sickbeard.save_config()
self.incDBVersion()
class SetNzbTorrentSettings(PopulateRootDirs):
def test(self):
return self.checkDBVersion() >= 8
def execute(self):
use_torrents = False
use_nzbs = False
for cur_provider in sickbeard.providers.sortedProviderList():
if cur_provider.isEnabled():
if cur_provider.providerType == GenericProvider.NZB:
use_nzbs = True
logger.log(u"Provider " + cur_provider.name + " is enabled, enabling NZBs in the upgrade")
break
elif cur_provider.providerType == GenericProvider.TORRENT:
use_torrents = True
logger.log(u"Provider " + cur_provider.name + " is enabled, enabling Torrents in the upgrade")
break
sickbeard.USE_TORRENTS = use_torrents
sickbeard.USE_NZBS = use_nzbs
sickbeard.save_config()
self.incDBVersion()
class FixAirByDateSetting(SetNzbTorrentSettings):
def test(self):
return self.checkDBVersion() >= 9
def execute(self):
shows = self.connection.select("SELECT * FROM tv_shows")
for cur_show in shows:
if cur_show["genre"] and "talk show" in cur_show["genre"].lower():
self.connection.action("UPDATE tv_shows SET air_by_date = ? WHERE tvdb_id = ?", [1, cur_show["tvdb_id"]])
self.incDBVersion()
class AddAudioLang (FixAirByDateSetting):
def test(self):
return self.hasColumn("tv_shows", "audio_lang")
def execute(self):
self.addColumn("tv_shows", "audio_lang", "TEXT", "fr")
class AddShowLangsToEpisode (AddAudioLang):
def test(self):
return self.hasColumn("tv_episodes", "audio_langs")
def execute(self):
self.addColumn("tv_episodes", "audio_langs", "TEXT", "")
class AddSizeAndSceneNameFields(AddShowLangsToEpisode):
def test(self):
return self.checkDBVersion() >= 10
def execute(self):
backupDatabase(10)
if not self.hasColumn("tv_episodes", "file_size"):
self.addColumn("tv_episodes", "file_size")
if not self.hasColumn("tv_episodes", "release_name"):
self.addColumn("tv_episodes", "release_name", "TEXT", "")
ep_results = self.connection.select("SELECT episode_id, location, file_size FROM tv_episodes")
logger.log(u"Adding file size to all episodes in DB, please be patient")
for cur_ep in ep_results:
if not cur_ep["location"]:
continue
# if there is no size yet then populate it for us
if (not cur_ep["file_size"] or not int(cur_ep["file_size"])) and ek.ek(os.path.isfile, cur_ep["location"]):
cur_size = ek.ek(os.path.getsize, cur_ep["location"])
self.connection.action("UPDATE tv_episodes SET file_size = ? WHERE episode_id = ?", [cur_size, int(cur_ep["episode_id"])])
# check each snatch to see if we can use it to get a release name from
history_results = self.connection.select("SELECT * FROM history WHERE provider != -1 ORDER BY date ASC")
logger.log(u"Adding release name to all episodes still in history")
for cur_result in history_results:
# find the associated download, if there isn't one then ignore it
download_results = self.connection.select("SELECT resource FROM history WHERE provider = -1 AND showid = ? AND season = ? AND episode = ? AND date > ?",
[cur_result["showid"], cur_result["season"], cur_result["episode"], cur_result["date"]])
if not download_results:
logger.log(u"Found a snatch in the history for " + cur_result["resource"] + " but couldn't find the associated download, skipping it", logger.DEBUG)
continue
nzb_name = cur_result["resource"]
file_name = ek.ek(os.path.basename, download_results[0]["resource"])
# take the extension off the filename, it's not needed
if '.' in file_name:
file_name = file_name.rpartition('.')[0]
# find the associated episode on disk
ep_results = self.connection.select("SELECT episode_id, status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND location != ''",
[cur_result["showid"], cur_result["season"], cur_result["episode"]])
if not ep_results:
logger.log(u"The episode " + nzb_name + " was found in history but doesn't exist on disk anymore, skipping", logger.DEBUG)
continue
# get the status/quality of the existing ep and make sure it's what we expect
ep_status, ep_quality = common.Quality.splitCompositeStatus(int(ep_results[0]["status"]))
if ep_status != common.DOWNLOADED:
continue
if ep_quality != int(cur_result["quality"]):
continue
# make sure this is actually a real release name and not a season pack or something
for cur_name in (nzb_name, file_name):
logger.log(u"Checking if " + cur_name + " is actually a good release name", logger.DEBUG)
try:
np = NameParser(False)
parse_result = np.parse(cur_name)
except InvalidNameException:
continue
if parse_result.series_name and parse_result.season_number != None and parse_result.episode_numbers and parse_result.release_group:
# if all is well by this point we'll just put the release name into the database
self.connection.action("UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?", [cur_name, ep_results[0]["episode_id"]])
break
        # for episodes still missing a release name, see if the filename itself is an obvious scene name
empty_results = self.connection.select("SELECT episode_id, location FROM tv_episodes WHERE release_name = ''")
logger.log(u"Adding release name to all episodes with obvious scene filenames")
for cur_result in empty_results:
ep_file_name = ek.ek(os.path.basename, cur_result["location"])
ep_file_name = os.path.splitext(ep_file_name)[0]
# only want to find real scene names here so anything with a space in it is out
if ' ' in ep_file_name:
continue
try:
np = NameParser(False)
parse_result = np.parse(ep_file_name)
except InvalidNameException:
continue
if not parse_result.release_group:
continue
logger.log(u"Name " + ep_file_name + " gave release group of " + parse_result.release_group + ", seems valid", logger.DEBUG)
self.connection.action("UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?", [ep_file_name, cur_result["episode_id"]])
self.incDBVersion()
class RenameSeasonFolders(AddSizeAndSceneNameFields):
def test(self):
return self.checkDBVersion() >= 11
def execute(self):
# rename the column
self.connection.action("ALTER TABLE tv_shows RENAME TO tmp_tv_shows")
self.connection.action("CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, tvr_id NUMERIC, tvr_name TEXT, air_by_date NUMERIC, lang TEXT, custom_search_names TEXT, audio_lang TEXT)")
sql = "INSERT INTO tv_shows(show_id, location, show_name, tvdb_id, network, genre, runtime, quality, airs, status, flatten_folders, paused, startyear, tvr_id, tvr_name, air_by_date, lang, custom_search_names, audio_lang) SELECT show_id, location, show_name, tvdb_id, network, genre, runtime, quality, airs, status, seasonfolders, paused, startyear, tvr_id, tvr_name, air_by_date, lang, custom_search_names, audio_lang FROM tmp_tv_shows"
self.connection.action(sql)
# flip the values to be opposite of what they were before
self.connection.action("UPDATE tv_shows SET flatten_folders = 2 WHERE flatten_folders = 1")
self.connection.action("UPDATE tv_shows SET flatten_folders = 1 WHERE flatten_folders = 0")
self.connection.action("UPDATE tv_shows SET flatten_folders = 0 WHERE flatten_folders = 2")
self.connection.action("DROP TABLE tmp_tv_shows")
self.incDBVersion()
class Add1080pAndRawHDQualities(RenameSeasonFolders):
"""Add support for 1080p related qualities along with RawHD
Quick overview of what the upgrade needs to do:
quality | old | new
--------------------------
hdwebdl | 1<<3 | 1<<5
hdbluray | 1<<4 | 1<<7
fullhdbluray | 1<<5 | 1<<8
--------------------------
rawhdtv | | 1<<3
fullhdtv | | 1<<4
fullhdwebdl | | 1<<6
"""
def test(self):
return self.checkDBVersion() >= 12
def _update_status(self, old_status):
(status, quality) = common.Quality.splitCompositeStatus(old_status)
return common.Quality.compositeStatus(status, self._update_quality(quality))
def _update_quality(self, old_quality):
"""Update bitwise flags to reflect new quality values
Check flag bits (clear old then set their new locations) starting
        with the highest bits so we don't overwrite data we need later on
"""
result = old_quality
# move fullhdbluray from 1<<5 to 1<<8 if set
if(result & (1<<5)):
result = result & ~(1<<5)
result = result | (1<<8)
# move hdbluray from 1<<4 to 1<<7 if set
if(result & (1<<4)):
result = result & ~(1<<4)
result = result | (1<<7)
# move hdwebdl from 1<<3 to 1<<5 if set
if(result & (1<<3)):
result = result & ~(1<<3)
result = result | (1<<5)
return result
def _update_composite_qualities(self, status):
"""Unpack, Update, Return new quality values
Unpack the composite archive/initial values.
        Update either quality if needed.
        Then return the new composite quality value.
"""
best = (status & (0xffff << 16)) >> 16
initial = status & (0xffff)
best = self._update_quality(best)
initial = self._update_quality(initial)
result = ((best << 16) | initial)
return result
def execute(self):
backupDatabase(self.checkDBVersion())
        # update the default quality so we don't grab the wrong qualities after migration
sickbeard.QUALITY_DEFAULT = self._update_composite_qualities(sickbeard.QUALITY_DEFAULT)
sickbeard.save_config()
        # upgrade previous HD to HD720p -- shift previous qualities to their new place values
old_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3], [])
new_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL, common.Quality.HDBLURAY], [])
# update ANY -- shift existing qualities and add new 1080p qualities, note that rawHD was not added to the ANY template
old_any = common.Quality.combineQualities([common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3, common.Quality.UNKNOWN], [])
new_any = common.Quality.combineQualities([common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.FULLHDTV, common.Quality.HDWEBDL, common.Quality.FULLHDWEBDL, common.Quality.HDBLURAY, common.Quality.FULLHDBLURAY, common.Quality.UNKNOWN], [])
# update qualities (including templates)
logger.log(u"[1/4] Updating pre-defined templates and the quality for each show...", logger.MESSAGE)
ql = []
shows = self.connection.select("SELECT * FROM tv_shows")
for cur_show in shows:
if cur_show["quality"] == old_hd:
new_quality = new_hd
elif cur_show["quality"] == old_any:
new_quality = new_any
else:
new_quality = self._update_composite_qualities(cur_show["quality"])
ql.append(["UPDATE tv_shows SET quality = ? WHERE show_id = ?", [new_quality, cur_show["show_id"]]])
self.connection.mass_action(ql)
        # update statuses that are within the old hdwebdl (1<<3 which is 8) and better -- exclude unknown (1<<15 which is 32768)
logger.log(u"[2/4] Updating the status for the episodes within each show...", logger.MESSAGE)
ql = []
episodes = self.connection.select("SELECT * FROM tv_episodes WHERE status < 3276800 AND status >= 800")
for cur_episode in episodes:
ql.append(["UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [self._update_status(cur_episode["status"]), cur_episode["episode_id"]]])
self.connection.mass_action(ql)
        # make two separate passes through the history since snatched and downloaded (action & quality) may not always coordinate together
# update previous history so it shows the correct action
logger.log(u"[3/4] Updating history to reflect the correct action...", logger.MESSAGE)
ql = []
historyAction = self.connection.select("SELECT * FROM history WHERE action < 3276800 AND action >= 800")
for cur_entry in historyAction:
ql.append(["UPDATE history SET action = ? WHERE showid = ? AND date = ?", [self._update_status(cur_entry["action"]), cur_entry["showid"], cur_entry["date"]]])
self.connection.mass_action(ql)
# update previous history so it shows the correct quality
logger.log(u"[4/4] Updating history to reflect the correct quality...", logger.MESSAGE)
ql = []
historyQuality = self.connection.select("SELECT * FROM history WHERE quality < 32768 AND quality >= 8")
for cur_entry in historyQuality:
ql.append(["UPDATE history SET quality = ? WHERE showid = ? AND date = ?", [self._update_quality(cur_entry["quality"]), cur_entry["showid"], cur_entry["date"]]])
self.connection.mass_action(ql)
self.incDBVersion()
# cleanup and reduce db if any previous data was removed
logger.log(u"Performing a vacuum on the database.", logger.DEBUG)
self.connection.action("VACUUM")
class AddSubtitlesSupport(Add1080pAndRawHDQualities):
def test(self):
return self.checkDBVersion() >= 13
def execute(self):
self.addColumn("tv_shows", "subtitles")
self.addColumn("tv_episodes", "subtitles", "TEXT", "")
self.addColumn("tv_episodes", "subtitles_searchcount")
self.addColumn("tv_episodes", "subtitles_lastsearch", "TIMESTAMP", str(datetime.datetime.min))
self.incDBVersion()
class AddEpisodeLinkTable(AddSubtitlesSupport):
def test(self):
return self.checkDBVersion() >= 14
def execute(self):
if self.hasTable("episode_links") != True:
self.connection.action("CREATE TABLE episode_links (episode_id INTEGER, link TEXT)")
self.incDBVersion()
class AddIMDbInfo(AddEpisodeLinkTable):
def test(self):
return self.checkDBVersion() >= 15
def execute(self):
if self.hasTable("imdb_info") != True:
self.connection.action("CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)")
self.incDBVersion()
class AddProcessedFilesTable(AddIMDbInfo):
def test(self):
return self.checkDBVersion() >= 16
def execute(self):
if self.hasTable("processed_files") != True:
self.connection.action("CREATE TABLE processed_files (episode_id INTEGER, filename TEXT, md5 TEXT)")
self.incDBVersion()
class AddFrenchSearch (AddProcessedFilesTable):
def test(self):
return self.checkDBVersion() >= 17
def execute(self):
if self.hasColumn("tv_shows", "frenchsearch") != True:
self.addColumn("tv_shows", "frenchsearch", "NUMERIC", 0)
self.incDBVersion()
class AddSubtitlesIncrusted (AddFrenchSearch):
def test(self):
return self.hasColumn("tv_episodes", "embeded_subtitle")
def execute(self):
self.addColumn("tv_episodes", "embeded_subtitle", "TEXT")
class AddSceneNumbers(FixAirByDateSetting):
def test(self):
return self.checkDBVersion() >= 18
def execute(self):
self.addColumn("tv_episodes", "scene_episode", "NUMERIC", "NULL")
self.addColumn("tv_episodes", "scene_season", "NUMERIC", "NULL")
self.incDBVersion()
class AddFrenchTorrentDBHistoryTable(AddSceneNumbers):
def test(self):
return self.checkDBVersion() >= 19
def execute(self):
if self.hasTable("frenchtorrentdb_history") != True:
self.connection.action("CREATE TABLE frenchtorrentdb_history (date TEXT, link TEXT, is_sickbeard BOOLEAN)")
self.incDBVersion()
| Pakoach/Sick-Beard | sickbeard/databases/mainDB.py | Python | gpl-3.0 | 33,825 | 0.005026 |
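The Add1080pAndRawHDQualities migration above relocates three quality bits as documented in its docstring. A self-contained sketch of that bit shuffle on a made-up composite value; the HDTV bit position (1<<2) is assumed from the quality table of that era, and nothing here touches the sickbeard modules:

# Standalone illustration of the bit moves: hdwebdl 1<<3 -> 1<<5,
# hdbluray 1<<4 -> 1<<7, fullhdbluray 1<<5 -> 1<<8, highest bit handled first.
def update_quality(old):
    result = old
    if result & (1 << 5):
        result = (result & ~(1 << 5)) | (1 << 8)
    if result & (1 << 4):
        result = (result & ~(1 << 4)) | (1 << 7)
    if result & (1 << 3):
        result = (result & ~(1 << 3)) | (1 << 5)
    return result

old_hd = (1 << 2) | (1 << 3) | (1 << 4)    # hdtv + old hdwebdl + old hdbluray
print(bin(update_quality(old_hd)))         # 0b10100100: hdtv + new hdwebdl + new hdbluray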
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for attention functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.decoders.attention import AttentionLayerDot
from seq2seq.decoders.attention import AttentionLayerBahdanau
class AttentionLayerTest(tf.test.TestCase):
"""
Tests the AttentionLayer module.
"""
def setUp(self):
super(AttentionLayerTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 8
self.attention_dim = 128
self.input_dim = 16
self.seq_len = 10
self.state_dim = 32
def _create_layer(self):
"""Creates the attention layer. Should be implemented by child classes"""
raise NotImplementedError
def _test_layer(self):
"""Tests Attention layer with a given score type"""
inputs_pl = tf.placeholder(tf.float32, (None, None, self.input_dim))
inputs_length_pl = tf.placeholder(tf.int32, [None])
state_pl = tf.placeholder(tf.float32, (None, self.state_dim))
attention_fn = self._create_layer()
scores, context = attention_fn(
query=state_pl,
keys=inputs_pl,
values=inputs_pl,
values_length=inputs_length_pl)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {}
feed_dict[inputs_pl] = np.random.randn(self.batch_size, self.seq_len,
self.input_dim)
feed_dict[state_pl] = np.random.randn(self.batch_size, self.state_dim)
feed_dict[inputs_length_pl] = np.arange(self.batch_size) + 1
scores_, context_ = sess.run([scores, context], feed_dict)
np.testing.assert_array_equal(scores_.shape,
[self.batch_size, self.seq_len])
np.testing.assert_array_equal(context_.shape,
[self.batch_size, self.input_dim])
for idx, batch in enumerate(scores_, 1):
# All scores that are padded should be zero
np.testing.assert_array_equal(batch[idx:], np.zeros_like(batch[idx:]))
# Scores should sum to 1
scores_sum = np.sum(scores_, axis=1)
np.testing.assert_array_almost_equal(scores_sum, np.ones([self.batch_size]))
class AttentionLayerDotTest(AttentionLayerTest):
"""Tests the AttentionLayerDot class"""
def _create_layer(self):
return AttentionLayerDot(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
class AttentionLayerBahdanauTest(AttentionLayerTest):
"""Tests the AttentionLayerBahdanau class"""
def _create_layer(self):
return AttentionLayerBahdanau(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
if __name__ == "__main__":
tf.test.main()
| shashankrajput/seq2seq | seq2seq/test/attention_test.py | Python | apache-2.0 | 3,532 | 0.005663 |
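The assertions above pin down two properties of the attention scores: entries past each sequence's values_length receive zero weight, and every row sums to one. A NumPy-only sketch of a length-masked softmax with those properties, independent of the seq2seq implementation (shapes and names are invented):

import numpy as np

def masked_softmax(scores, lengths):
    # scores: [batch, time]; positions at or beyond each length get zero weight.
    mask = np.arange(scores.shape[1])[None, :] < lengths[:, None]
    exp = np.exp(scores - scores.max(axis=1, keepdims=True)) * mask
    return exp / exp.sum(axis=1, keepdims=True)

probs = masked_softmax(np.random.randn(3, 5), np.array([1, 3, 5]))
print(probs.round(2))    # masked columns are exactly 0; each row sums to 1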
# -*- coding: utf-8 -*-
from converters.circle import circle
from converters.currency import currency
from converters.electric import electric
from converters.force import force
from converters.pressure import pressure
from converters.speed import speed
from converters.temperature import temperature
class UnitsManager(object):
'''
Class responsible to manage the unit converters
of this application.
'''
_units = [
circle,
currency,
electric,
force,
pressure,
speed,
temperature,
]
def __iter__(self):
return (x for x in self._units)
def register(self, converter):
"""
Method that receives a new converter and adds it to
this manager.
        Useful for adding new custom converters without needing to edit
        the core of this application.
"""
if converter is not None and callable(converter):
self._units.append(converter)
UNITS = UnitsManager()
| mattgd/UnitConverter | units/__init__.py | Python | mit | 1,001 | 0 |
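UnitsManager.register is the extension hook described in its docstring. A hedged sketch of plugging in a custom converter; the distance() callable and its signature are invented for illustration and are not part of the package:

# Hypothetical custom converter registered with the shared UNITS manager.
from units import UNITS

def distance(value, from_unit, to_unit):
    # invented signature, for illustration only
    factors = {('km', 'mi'): 0.621371, ('mi', 'km'): 1.609344}
    return value * factors[(from_unit, to_unit)]

UNITS.register(distance)
print(any(converter is distance for converter in UNITS))   # True: the manager is iterable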
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A very very simple mock object harness."""
from types import ModuleType
DONT_CARE = ''
class MockFunctionCall(object):
def __init__(self, name):
self.name = name
self.args = tuple()
self.return_value = None
self.when_called_handlers = []
def WithArgs(self, *args):
self.args = args
return self
def WillReturn(self, value):
self.return_value = value
return self
def WhenCalled(self, handler):
self.when_called_handlers.append(handler)
def VerifyEquals(self, got):
if self.name != got.name:
raise Exception('Self %s, got %s' % (repr(self), repr(got)))
if len(self.args) != len(got.args):
raise Exception('Self %s, got %s' % (repr(self), repr(got)))
for i in range(len(self.args)):
self_a = self.args[i]
got_a = got.args[i]
if self_a == DONT_CARE:
continue
if self_a != got_a:
raise Exception('Self %s, got %s' % (repr(self), repr(got)))
def __repr__(self):
def arg_to_text(a):
if a == DONT_CARE:
return '_'
return repr(a)
args_text = ', '.join([arg_to_text(a) for a in self.args])
if self.return_value in (None, DONT_CARE):
return '%s(%s)' % (self.name, args_text)
return '%s(%s)->%s' % (self.name, args_text, repr(self.return_value))
class MockTrace(object):
def __init__(self):
self.expected_calls = []
self.next_call_index = 0
class MockObject(object):
def __init__(self, parent_mock=None):
if parent_mock:
self._trace = parent_mock._trace # pylint: disable=protected-access
else:
self._trace = MockTrace()
def __setattr__(self, name, value):
if (not hasattr(self, '_trace') or
hasattr(value, 'is_hook')):
object.__setattr__(self, name, value)
return
assert isinstance(value, MockObject)
object.__setattr__(self, name, value)
def SetAttribute(self, name, value):
setattr(self, name, value)
def ExpectCall(self, func_name, *args):
assert self._trace.next_call_index == 0
if not hasattr(self, func_name):
self._install_hook(func_name)
call = MockFunctionCall(func_name)
self._trace.expected_calls.append(call)
call.WithArgs(*args)
return call
def _install_hook(self, func_name):
def handler(*args, **_):
got_call = MockFunctionCall(
func_name).WithArgs(*args).WillReturn(DONT_CARE)
if self._trace.next_call_index >= len(self._trace.expected_calls):
raise Exception(
'Call to %s was not expected, at end of programmed trace.' %
repr(got_call))
expected_call = self._trace.expected_calls[
self._trace.next_call_index]
expected_call.VerifyEquals(got_call)
self._trace.next_call_index += 1
for h in expected_call.when_called_handlers:
h(*args)
return expected_call.return_value
handler.is_hook = True
setattr(self, func_name, handler)
class MockTimer(object):
""" A mock timer to fake out the timing for a module.
Args:
module: module to fake out the time
"""
def __init__(self, module=None):
self._elapsed_time = 0
self._module = module
self._actual_time = None
if module:
assert isinstance(module, ModuleType)
self._actual_time = module.time
self._module.time = self
def sleep(self, time):
self._elapsed_time += time
def time(self):
return self._elapsed_time
def SetTime(self, time):
self._elapsed_time = time
def __del__(self):
self.Restore()
def Restore(self):
if self._module:
self._module.time = self._actual_time
self._module = None
self._actual_time = None
| hujiajie/chromium-crosswalk | tools/telemetry/telemetry/testing/simple_mock.py | Python | bsd-3-clause | 3,810 | 0.013386 |
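A brief, hypothetical walkthrough of the harness above: program one expected call on a MockObject, make the call, then use MockTimer without a module argument so nothing gets monkey-patched (the Fetch name and its arguments are invented):

# Hypothetical usage of the mock harness defined above.
mock = MockObject()
mock.ExpectCall('Fetch', 'http://example.com', DONT_CARE).WillReturn(200)
assert mock.Fetch('http://example.com', {'timeout': 5}) == 200   # 2nd arg is a don't-care

clock = MockTimer()        # no module passed, so no attribute is patched
clock.sleep(2.5)
assert clock.time() == 2.5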
# Natural Language Toolkit: Chunked Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A reader for corpora that contain chunked (and optionally tagged)
documents.
"""
import os.path, codecs
import nltk
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk import compat
from nltk.tree import Tree
from nltk.tokenize import *
from nltk.chunk import tagstr2tree
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class ChunkedCorpusReader(CorpusReader):
"""
Reader for chunked (and optionally tagged) corpora. Paragraphs
are split using a block reader. They are then tokenized into
sentences using a sentence tokenizer. Finally, these sentences
are parsed into chunk trees using a string-to-chunktree conversion
function. Each of these steps can be performed using a default
function or a custom function. By default, paragraphs are split
on blank lines; sentences are listed one per line; and sentences
are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
"""
def __init__(self, root, fileids, extension='',
str2chunktree=tagstr2tree,
sent_tokenizer=RegexpTokenizer('\n', gaps=True),
para_block_reader=read_blankline_block,
encoding='utf8'):
"""
:param root: The root directory for this corpus.
:param fileids: A list or regexp specifying the fileids in this corpus.
"""
CorpusReader.__init__(self, root, fileids, encoding)
self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader)
"""Arguments for corpus views generated by this corpus: a tuple
(str2chunktree, sent_tokenizer, para_block_tokenizer)"""
def raw(self, fileids=None):
"""
:return: the given file(s) as a single string.
:rtype: str
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def words(self, fileids=None):
"""
:return: the given file(s) as a list of words
and punctuation symbols.
:rtype: list(str)
"""
return concat([ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences or utterances, each encoded as a list of word
strings.
:rtype: list(list(str))
"""
return concat([ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of word strings.
:rtype: list(list(list(str)))
"""
return concat([ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_words(self, fileids=None):
"""
:return: the given file(s) as a list of tagged
words and punctuation symbols, encoded as tuples
``(word,tag)``.
:rtype: list(tuple(str,str))
"""
return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences, each encoded as a list of ``(word,tag)`` tuples.
:rtype: list(list(tuple(str,str)))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of ``(word,tag)`` tuples.
:rtype: list(list(list(tuple(str,str))))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_words(self, fileids=None):
"""
:return: the given file(s) as a list of tagged
words and chunks. Words are encoded as ``(word, tag)``
tuples (if the corpus has tags) or word strings (if the
corpus has no tags). Chunks are encoded as depth-one
trees over ``(word,tag)`` tuples or word strings.
:rtype: list(tuple(str,str) and Tree)
"""
return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences, each encoded as a shallow Tree. The leaves
of these trees are encoded as ``(word, tag)`` tuples (if
the corpus has tags) or word strings (if the corpus has no
tags).
:rtype: list(Tree)
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as a shallow Tree. The leaves of these
trees are encoded as ``(word, tag)`` tuples (if the corpus
has tags) or word strings (if the corpus has no tags).
:rtype: list(list(Tree))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def _read_block(self, stream):
return [tagstr2tree(t) for t in read_blankline_block(stream)]
class ChunkedCorpusView(StreamBackedCorpusView):
def __init__(self, fileid, encoding, tagged, group_by_sent,
group_by_para, chunked, str2chunktree, sent_tokenizer,
para_block_reader):
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
self._tagged = tagged
self._group_by_sent = group_by_sent
self._group_by_para = group_by_para
self._chunked = chunked
self._str2chunktree = str2chunktree
self._sent_tokenizer = sent_tokenizer
self._para_block_reader = para_block_reader
def read_block(self, stream):
block = []
for para_str in self._para_block_reader(stream):
para = []
for sent_str in self._sent_tokenizer.tokenize(para_str):
sent = self._str2chunktree(sent_str)
# If requested, throw away the tags.
if not self._tagged:
sent = self._untag(sent)
# If requested, throw away the chunks.
if not self._chunked:
sent = sent.leaves()
# Add the sentence to `para`.
if self._group_by_sent:
para.append(sent)
else:
para.extend(sent)
# Add the paragraph to `block`.
if self._group_by_para:
block.append(para)
else:
block.extend(para)
# Return the block
return block
def _untag(self, tree):
for i, child in enumerate(tree):
if isinstance(child, Tree):
self._untag(child)
elif isinstance(child, tuple):
tree[i] = child[0]
else:
raise ValueError('expected child to be Tree or tuple')
return tree
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/corpus/reader/chunked.py | Python | gpl-2.0 | 8,206 | 0.000731 |
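A hedged sketch of pointing the reader above at a local directory, assuming the files follow the default conventions (blank-line-separated paragraphs, one tagstr2tree-parsable sentence per line); the path and file pattern are made up:

# Hypothetical instantiation of ChunkedCorpusReader over local files.
from nltk.corpus.reader.chunked import ChunkedCorpusReader

reader = ChunkedCorpusReader('/tmp/my_chunked_corpus', r'.*\.chunk')
for tree in reader.chunked_sents()[:2]:
    tree.pprint()                        # shallow Trees with (word, tag) leaves
print(reader.tagged_words()[:5])         # [(word, tag), ...]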
import datetime
from dateutil import parser
from .numbers import is_number
from .strings import STRING_TYPES
DATE_TYPES = (datetime.date, datetime.datetime)
def parse_dates(d, default='today'):
""" Parses one or more dates from d """
if default == 'today':
default = datetime.datetime.today()
if d is None:
return default
elif isinstance(d, DATE_TYPES):
return d
elif is_number(d):
        # Treat as seconds since the epoch (Unix timestamp)
d = d if isinstance(d, float) else float(d)
return datetime.datetime.utcfromtimestamp(d)
elif not isinstance(d, STRING_TYPES):
if hasattr(d, '__iter__'):
return [parse_dates(s, default) for s in d]
else:
return default
elif len(d) == 0:
# Behaves like dateutil.parser < version 2.5
return default
else:
try:
return parser.parse(d)
except (AttributeError, ValueError):
return default
| consbio/parserutils | parserutils/dates.py | Python | bsd-3-clause | 989 | 0 |
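A few hedged examples of the branches implemented above, assuming is_number accepts plain integers; the comments show what each path should yield, and the fallback datetime is arbitrary:

# Hypothetical calls exercising parse_dates' fallback behaviour.
import datetime
from parserutils.dates import parse_dates

fallback = datetime.datetime(2000, 1, 1)
print(parse_dates('2014-07-18', fallback))    # parsed: 2014-07-18 00:00:00
print(parse_dates(0, fallback))               # epoch timestamp: 1970-01-01 00:00:00
print(parse_dates(None, fallback))            # the fallback itself
print(parse_dates(['2014-07-18', '2015-01-02 12:30'], fallback))   # list in, list of datetimes out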
#!/usr/bin/python
import argparse
import requests,json
from requests.auth import HTTPBasicAuth
from subprocess import call
import time
import sys
import os
from vas_config_sw1 import *
DEFAULT_PORT='8181'
USERNAME='admin'
PASSWORD='admin'
OPER_OVSDB_TOPO='/restconf/operational/network-topology:network-topology/topology/ovsdb:1'
def get(host, port, uri):
url = 'http://' + host + ":" + port + uri
#print url
r = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD))
jsondata=json.loads(r.text)
return jsondata
def put(host, port, uri, data, debug=False):
'''Perform a PUT rest operation, using the URL and data provided'''
url='http://'+host+":"+port+uri
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
if debug == True:
print "PUT %s" % url
print json.dumps(data, indent=4, sort_keys=True)
r = requests.put(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
if debug == True:
print r.text
r.raise_for_status()
def post(host, port, uri, data, debug=False):
'''Perform a POST rest operation, using the URL and data provided'''
url='http://'+host+":"+port+uri
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
if debug == True:
print "POST %s" % url
print json.dumps(data, indent=4, sort_keys=True)
r = requests.post(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
if debug == True:
print r.text
r.raise_for_status()
# Main definition - constants
# =======================
# MENUS FUNCTIONS
# =======================
# Main menu
# =======================
# MAIN PROGRAM
# =======================
# Main Program
NODE_ID_OVSDB = ''
SUBNET_2_LSW = {"10.0.35.1":"vswitch-1", "10.0.36.1":"vswitch-1"}
PORTIDX_OF_LSW = {"vswitch-1":1, "vswitch-2":1}
def rpc_create_logic_switch_uri():
return "/restconf/operations/fabric-service:create-logical-switch"
def rpc_create_logic_switch_data(name):
return {
"input" : {
"fabric-id": "fabric:1",
"name":name
}
}
def rpc_create_logic_router_uri():
return "/restconf/operations/fabric-service:create-logical-router"
def rpc_create_logic_router_data(name):
return {
"input" : {
"fabric-id": "fabric:1",
"name":name
}
}
def rpc_create_logic_port_uri():
return "/restconf/operations/fabric-service:create-logical-port"
def rpc_create_logic_port_data(deviceName, portName):
return {
"input" : {
"fabric-id": "fabric:1",
"name":portName,
"logical-device":deviceName
}
}
def rpc_register_endpoint_uri():
return "/restconf/operations/fabric-endpoint:register-endpoint"
BRIDGE_REF_P="/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='%s']"
TP_REF_P="/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='%s']/network-topology:termination-point[network-topology:tp-id='%s']"
def rpc_register_endpoint_data(host, nodeid):
mac = host["mac"]
ip = host["ip"].split("/")[0]
gw = host["gw"]
lsw = SUBNET_2_LSW[gw]
lport = lsw + "-p-" + str(PORTIDX_OF_LSW[lsw])
PORTIDX_OF_LSW[lsw] += 1
#physical location
bridge = host["switch"]
port = host["switch"] + "-eth" + str(host["ofport"])
noderef = BRIDGE_REF_P % (nodeid)
tpref = TP_REF_P % (nodeid, port)
return {
"input" : {
"fabric-id":"fabric:1",
"mac-address":mac,
"ip-address":ip,
"gateway":gw,
"logical-location" : {
"node-id": lsw,
"tp-id": lport
},
"location" : {
"node-ref": noderef,
"tp-ref": tpref,
"access-type":"vlan",
"access-segment":"111"
}
}
}
def rpc_create_gateway_uri():
return "/restconf/operations/fabric-service:create-gateway"
def rpc_create_gateway_data(ipaddr, network, switchName):
return {
"input" : {
"fabric-id": "fabric:1",
"ip-address":ipaddr,
"network":network,
"logical-router":"vrouter-1",
"logical-switch":switchName
}
}
def pause():
print "press Enter key to continue..."
raw_input()
if __name__ == "__main__":
# Launch main menu
# Some sensible defaults
controller = os.environ.get('ODL')
if controller == None:
sys.exit("No controller set.")
print "get ovsdb node-id"
ovsdb_topo = get(controller, DEFAULT_PORT,OPER_OVSDB_TOPO)["topology"]
for topo_item in ovsdb_topo:
if topo_item["node"] is not None:
for ovsdb_node in topo_item["node"]:
#if ovsdb_node.has_key("ovsdb:ovs-version"):
if ovsdb_node.has_key("ovsdb:bridge-name") and ovsdb_node["ovsdb:bridge-name"] == "sw1":
#uuid_ovsdb = ovsdb_node["node-id"][13:]
#NODE_ID_OVSDB = ovsdb_node["node-id"]
node_sw1 = ovsdb_node["node-id"]
print "sw1=", node_sw1
if ovsdb_node.has_key("ovsdb:bridge-name") and ovsdb_node["ovsdb:bridge-name"] == "sw2":
node_sw2 = ovsdb_node["node-id"]
print "sw2=", node_sw2
print "create_logic_switch ..."
pause()
post(controller, DEFAULT_PORT, rpc_create_logic_switch_uri(), rpc_create_logic_switch_data("vswitch-1"), True)
print "create_logic_port ..."
pause()
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-1"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-2"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-3"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-4"), True)
post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-5"), True)
print "registering endpoints ..."
pause()
for host in hosts:
if host["switch"] == "sw1":
post(controller, DEFAULT_PORT, rpc_register_endpoint_uri(), rpc_register_endpoint_data(host, node_sw1), True)
if host["switch"] == "sw2":
post(controller, DEFAULT_PORT, rpc_register_endpoint_uri(), rpc_register_endpoint_data(host, node_sw2), True)
| opendaylight/faas | demos/env_mininet/lsw1Demo.py | Python | epl-1.0 | 6,912 | 0.012297 |
from examples.isomorph import (
get_all_canonicals,
get_canonical,
get_translation_dict,
)
from pokertools import cards_from_str as flop
def test_isomorph():
assert len(get_all_canonicals()) == 1755
assert get_canonical(flop('6s 8d 7c')) == flop('6c 7d 8h')
assert get_translation_dict(flop('6s 8d 7c')) == {'c': 'd', 'd': 'h', 'h': 's', 's': 'c'}
assert get_canonical(flop('Qs Qd 4d')) == flop('4c Qc Qd')
assert get_translation_dict(flop('Qs Qd 4d')) == {'c': 'h', 'd': 'c', 'h': 's', 's': 'd'}
| mjwestcott/PyPokertools | tests/test_isomorph.py | Python | mit | 533 | 0.003752 |
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import rejviz.tests.utils as tutils
from rejviz import utils
class UtilsTest(tutils.TestCase):
def test_parse_keyvals(self):
expected = {'a': 'b', 'c': 'd'}
self.assertEqual(expected, utils.parse_keyvals("a=b,c=d"))
self.assertEqual(expected, utils.parse_keyvals("a:b/c:d", '/', ':'))
def test_extract_domain_or_image_args(self):
args1 = ['--something', '-d', 'domain', 'somethingelse']
args2 = ['-b', '--something', '-a', 'image', 'somethingelse']
args3 = ['-b', '-c', '--something']
self.assertEqual(['-d', 'domain'],
utils.extract_domain_or_image_args(args1))
self.assertEqual(['-a', 'image'],
utils.extract_domain_or_image_args(args2))
self.assertRaises(ValueError,
utils.extract_domain_or_image_args, args3)
def test_extract_image_args_from_disks(self):
args1 = ['--disk', '/path/to/image,opt1=val1,opt2=val2']
args2 = ['--disk', 'opt1=val1,path=/path/to/image,opt2=val2']
args3 = ['-b', '-c', '--something']
self.assertEqual(['-a', '/path/to/image'],
utils.extract_image_args_from_disks(args1))
self.assertEqual(['-a', '/path/to/image'],
utils.extract_image_args_from_disks(args2))
self.assertRaises(ValueError,
utils.extract_domain_or_image_args, args3)
| jistr/rejviz | rejviz/tests/test_utils.py | Python | apache-2.0 | 1,998 | 0 |
"""Support for HomematicIP Cloud weather devices."""
import logging
from homematicip.aio.device import (
AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
from homematicip.aio.home import AsyncHome
from homeassistant.components.weather import WeatherEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the HomematicIP Cloud weather sensor."""
pass
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,
async_add_entities) -> None:
"""Set up the HomematicIP weather sensor from a config entry."""
home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
devices = []
for device in home.devices:
if isinstance(device, AsyncWeatherSensorPro):
devices.append(HomematicipWeatherSensorPro(home, device))
elif isinstance(device, (AsyncWeatherSensor, AsyncWeatherSensorPlus)):
devices.append(HomematicipWeatherSensor(home, device))
if devices:
async_add_entities(devices)
class HomematicipWeatherSensor(HomematicipGenericDevice, WeatherEntity):
"""representation of a HomematicIP Cloud weather sensor plus & basic."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the weather sensor."""
super().__init__(home, device)
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._device.label
@property
def temperature(self) -> float:
"""Return the platform temperature."""
return self._device.actualTemperature
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self) -> int:
"""Return the humidity."""
return self._device.humidity
@property
def wind_speed(self) -> float:
"""Return the wind speed."""
return self._device.windSpeed
@property
def attribution(self) -> str:
"""Return the attribution."""
return "Powered by Homematic IP"
@property
def condition(self) -> str:
"""Return the current condition."""
if hasattr(self._device, "raining") and self._device.raining:
return 'rainy'
if self._device.storm:
return 'windy'
if self._device.sunshine:
return 'sunny'
return ''
class HomematicipWeatherSensorPro(HomematicipWeatherSensor):
"""representation of a HomematicIP weather sensor pro."""
@property
def wind_bearing(self) -> float:
"""Return the wind bearing."""
return self._device.windDirection
| jnewland/home-assistant | homeassistant/components/homematicip_cloud/weather.py | Python | apache-2.0 | 2,991 | 0 |
from bears.yml.RAMLLintBear import RAMLLintBear
from tests.LocalBearTestHelper import verify_local_bear
good_file = """
#%RAML 0.8
title: World Music API
baseUri: http://example.api.com/{version}
version: v1
"""
bad_file = """#%RAML 0.8
title: Failing RAML
version: 1
baseUri: http://example.com
/resource:
description: hello
post:
"""
RAMLLintBearTest = verify_local_bear(RAMLLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,),
tempfile_kwargs={"suffix": ".raml"})
| SanketDG/coala-bears | tests/yml/RAMLLintBearTest.py | Python | agpl-3.0 | 602 | 0 |
import sys
import six
import logging
import ooxml
from ooxml import parse, serialize, importer
logging.basicConfig(filename='ooxml.log', level=logging.INFO)
if len(sys.argv) > 1:
file_name = sys.argv[1]
dfile = ooxml.read_from_file(file_name)
six.print_("\n-[HTML]-----------------------------\n")
six.print_(serialize.serialize(dfile.document))
six.print_("\n-[CSS STYLE]------------------------\n")
six.print_(serialize.serialize_styles(dfile.document))
six.print_("\n-[USED STYLES]----------------------\n")
six.print_(dfile.document.used_styles)
six.print_("\n-[USED FONT SIZES]------------------\n")
six.print_(dfile.document.used_font_size)
| LuoZijun/uOffice | temp/ooxmlx/samples/01_basic/parse.py | Python | gpl-3.0 | 697 | 0.001435 |
from bucket.local import LocalProvider
import config
import statestore
import logging
import os
import threading
import traceback
import messages
from send2trash import send2trash
from worker import BaseWorker
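# In short, the synchronisation policy implemented below is:
#  * a remote object that is missing locally is downloaded, unless the sync
#    state says it was already synced - in that case the local copy must
#    have been deleted, so the remote object is moved to the trash instead;
#  * when both copies exist, modification dates and hashes are compared to
#    decide whether the remote version should replace the local one;
#  * conflicting changes (local and remote hashes both differing from the
#    last synced hash) are only logged and left untouched.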
class Download(BaseWorker):
def __init__(self, objectStore, outputQueue):
BaseWorker.__init__(self)
self.objectStore = objectStore
self.outputQueue = outputQueue
self.localStore = LocalProvider()
c = config.Config()
self.localSyncPath = c.get_home_folder()
self.tempDownloadFolder = c.get_temporary_folder()
self.state = statestore.StateStore(c.username)
self.lock = threading.Lock()
self.running = True
self.trashFolder = c.get_trash_folder()
def stop(self):
logging.info('Download::stop')
self.objectStore.stop()
self.running = False
def _get_working_message(self):
return messages.Status('Looking for files to download')
def perform(self):
# get the current directory
#logging.debug('Download::perform')
self.outputQueue.put(self._get_working_message())
files = self.objectStore.list_dir(None)
for f in files:
if not self.running:
break
#logging.debug('f.path = %r' % f.path)
if f.isFolder:
if f.name == self.trashFolder:
# we don't download the trash folder
continue
else:
skipChildren = self.download_folder(f)
                    # if we deleted a bunch of stuff - it might
                    # mean our files list is out of whack
                    # so let's rather just break out - and restart
                    # next time round
if skipChildren:
logging.info('break')
break
else:
self.download_file(f)
self.outputQueue.put(messages.Status('Local files up to date'))
def download_file(self, f):
localPath = self.get_local_path(f.path)
if not os.path.exists(localPath):
self._set_hadWorkToDo(True)
#logging.debug('does not exist: %s' % localPath)
if self.already_synced_file(f.path):
# if we've already downloaded this file,
# it means we have to delete it remotely!
logging.info('delete remote version of %s' % localPath)
self.delete_remote_file(f.path)
else:
# lets get the file
head, tail = os.path.split(localPath)
self.outputQueue.put(messages.Status('Downloading %s' % tail))
tmpFile = self.get_tmp_filename()
if os.path.exists(tmpFile):
# if a temporary file with the same name
# exists, delete it
os.remove(tmpFile)
self.objectStore.download_object(f.path, tmpFile)
os.rename(tmpFile, localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path, f.hash, localMD)
self.outputQueue.put(self._get_working_message())
else:
# the file already exists - do we overwrite it?
syncInfo = self.state.getObjectSyncInfo(f.path)
if syncInfo:
localMD = self.localStore.get_last_modified_date(localPath)
if syncInfo.dateModified != localMD:
# the dates differ! we need to calculate the hash!
localFileInfo = self.localStore.get_file_info(localPath)
if localFileInfo.hash != f.hash:
# hmm - ok, if the online one, has the same hash
# as I synced, then it means the local file
# has changed!
if syncInfo.hash == f.hash:
# online and synced have the same version!
# that means the local one has changed
# so we're not downloading anything
# the upload process should handle this
pass
else:
logging.warn('TODO: the files differ - which '
'one do I use?')
else:
# all good - the files are the same
# we can update our local sync info
self.state.markObjectAsSynced(f.path,
localFileInfo.hash,
localMD)
else:
# dates are the same, so we can assume the hash
# hasn't changed
if syncInfo.hash != f.hash:
# if the sync info is the same as the local file
# then it must mean the remote file has changed!
get_file_info = self.localStore.get_file_info
localFileInfo = get_file_info(localPath)
if localFileInfo.hash == syncInfo.hash:
self.replace_file(f, localPath)
else:
logging.info('remote hash: %r' % f.hash)
logging.info('local hash: %r' % localFileInfo.hash)
logging.info('sync hash: %r' % syncInfo.hash)
logging.warn('sync hash differs from local hash!')
else:
# sync hash is same as remote hash, and the file date
# hasn't changed. we assume this to mean, there have
# been no changes
pass
else:
# TODO: we need to do something here!
# the file exists locally, and remotely - but we don't have any
# record of having downloaded it
localFileInfo = self.localStore.get_file_info(localPath)
if localFileInfo.hash == f.hash:
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
localFileInfo.hash,
localMD)
else:
# we don't have any history of this file - and the hash
# from local differs from remote! WHAT DO WE DO!
logging.error('TODO: HASH differs! Which is which????: %r'
% f.path)
pass
def replace_file(self, f, localPath):
self._set_hadWorkToDo(True)
head, tail = os.path.split(localPath)
self.outputQueue.put(messages.Status('Downloading %s' % tail))
tmpFile = self.get_tmp_filename()
if os.path.exists(tmpFile):
# if a temporary file with the same name exists, remove it
os.remove(tmpFile)
self.objectStore.download_object(f.path, tmpFile)
send2trash(localPath)
os.rename(tmpFile, localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
f.hash,
localMD)
self.outputQueue.put(self._get_working_message())
def get_tmp_filename(self):
return os.path.join(self.tempDownloadFolder, 'tmpfile')
def download_folder(self, folder):
if not self.running:
# return true, to indicate that children can be skipped
return True
# does the folder exist locally?
#logging.debug('download_folder(%s)' % folder.path)
localPath = self.get_local_path(folder.path)
downloadFolderContents = True
skipChildren = False
if not os.path.exists(localPath):
self._set_hadWorkToDo(True)
# the path exists online, but NOT locally
# we do one of two things, we either
# a) delete it remotely
# if we know for a fact we've already downloaded this folder,
# then it not being here, can only mean we've deleted it
# b) download it
# if we haven't marked this folder as being downloaded,
# then we get it now
if self.already_downloaded_folder(folder.path):
logging.info('we need to delete %r!' % folder.path)
self.delete_remote_folder(folder.path)
downloadFolderContents = False
skipChildren = True
logging.info('done deleting remote folder')
else:
#logging.info('creating: %r' % localPath)
os.makedirs(localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(folder.path,
None,
localMD)
#logging.info('done creating %r' % localPath)
if downloadFolderContents:
try:
                #logging.debug('downloading folder '
                #              'contents for %s' % folder.path)
files = self.objectStore.list_dir(folder.path)
#logging.debug('got %r files' % len(files))
for f in files:
if folder.path.strip('/') != f.path.strip('/'):
if f.isFolder:
skipChildren = self.download_folder(f)
if skipChildren:
break
else:
self.download_file(f)
except:
logging.error('failed to download %s' % folder.path)
logging.error(traceback.format_exc())
return skipChildren
def get_local_path(self, remote_path):
return os.path.join(self.localSyncPath, remote_path)
def already_downloaded_folder(self, path):
""" Establish if this folder was downloaded before
        Typical use: the folder doesn't exist locally, but it
        does exist remotely - that would imply that, if we'd already
        downloaded it, it can only be missing because it was deleted, and
        thus we delete it remotely.
"""
alreadySynced = False
syncInfo = self.state.getObjectSyncInfo(path)
if syncInfo:
            # if we have sync info for this path - it means we've
            # already downloaded or uploaded it
logging.info('we have sync info for %s' % path)
alreadySynced = True
else:
# if we don't have sync info for this path
# - it means we haven't downloaded it yet
#logging.info('no sync info for %s' % path)
pass
return alreadySynced
def already_synced_file(self, path):
""" See: already_downloaded_folder
"""
syncInfo = self.state.getObjectSyncInfo(path)
if syncInfo:
remoteFileInfo = self.objectStore.get_file_info(path)
if remoteFileInfo.hash == syncInfo.hash:
# the hash of the file we synced, is the
# same as the one online.
# this means, we've already synced this file!
return True
return False
else:
return False
def delete_remote_folder(self, path):
logging.info('delete_remote_folder(path = %r)' % path)
# a folder has children - and we need to remove those!
self._set_hadWorkToDo(True)
children = self.objectStore.list_dir(path)
#for child in children:
# logging.info('%s [child] %s' % (path, child.path))
for child in children:
if child.isFolder:
# remove this child folder
self.delete_remote_folder(child.path)
else:
# remove this child file
self.delete_remote_file(child.path)
logging.info('going to attempt to delete: %r' % path)
self.delete_remote_file(path)
def delete_remote_file(self, path):
self._set_hadWorkToDo(True)
logging.info('delete remote file: %s' % path)
head, tail = os.path.split(path)
self.outputQueue.put(messages.Status('Deleting %s' % tail))
self.objectStore.delete_object(path, moveToTrash=True)
self.state.removeObjectSyncRecord(path)
self.outputQueue.put(self._get_working_message())
| Sybrand/digital-panda | panda-tray/download.py | Python | mit | 12,881 | 0.000854 |
# Nessus results viewing tools
#
# Developed by Felix Ingram, f.ingram@gmail.com, @lllamaboy
# http://www.github.com/nccgroup/lapith
#
# Released under AGPL. See LICENSE for more information
import wx
import os
from model.Nessus import NessusFile, NessusTreeItem, MergedNessusReport, NessusReport, NessusItem
import difflib
from drop_target import MyFileDropTarget
from view import (
ViewerView,
SaveDialog,
ID_Load_Files,
ID_Merge_Files,
ID_Generate_CSV,
ID_Generate_VulnXML,
ID_Generate_RST,
ID_About,
)
from wx.lib.wordwrap import wordwrap
import csv
from xml.sax.saxutils import escape
from datetime import datetime
from jinja2 import Template
SEVERITY = {0:"Other", 1:"Low", 2:"Med", 3:"High", 4:"Critical"}
OUTPUT_TEMPLATE=Template("""\
{{item.name}}
{{hosts_count}} hosts with this issue
{% for host in hosts %}
{{host}}{% endfor %}
---------------------------------------------
{% for host in identical_hosts %}
{{host}}{% endfor %}
{{ initial_output }}
""")
RST_TEMPLATE=Template("""\
{%- for vuln in vulns %}{% if not vuln.name.startswith("PORT:") %}{{ vuln.name }}
{% for a in vuln.name %}={% endfor %}
.. affectedhosts::{% for host in merged_scans.hosts_with_pid(vuln.pid) %}{% for item in host.items_for_pid(vuln.pid) %}
{{ host.address }}, {{ item.info_dict.port }}/{{ item.info_dict.protocol }}
{%- endfor %}{%- endfor %}
:severity:`{{ vuln.item.info_dict["severity_text"] }}`
:cvss:`{{ vuln.item.info_dict["cvss_base_score"] }}`
:cvss:`{{ vuln.item.info_dict["cvss_vector"] }}`
Description
-----------
{{ "\n".join(vuln.issue.initial_output.splitlines()[7:])|replace("Plugin Output:", "Plugin Output::\n") }}
{% endif %}
Recommendation
--------------
References
----------
{% if vuln.item.info_dict["cve"] %}
CVE:
{% for cve in vuln.item.info_dict["cve"] %}
{{ cve }}: `http://web.nvd.nist.gov/view/vuln/detail?vulnId={{ cve }}`
{%- endfor %}
{%- endif %}
{% if vuln.item.info_dict["bid"] %}
BID:
{% for bid in vuln.item.info_dict["bid"] %}
{{ bid }}: `http://www.securityfocus.com/bid/{{ bid }}`
{%- endfor %}
{%- endif %}
{% if vuln.item.info_dict["xref"] %}
Other References:
{% for xref in vuln.item.info_dict["xref"] %}
{{ xref }}
{%- endfor %}
{%- endif %}
{% if vuln.item.info_dict["see_also"] %}
See also:
{% for xref in vuln.item.info_dict["see_also"] %}
{{ xref }}
{%- endfor %}
{%- endif %}
{% endfor %}
""")
VULNXML_TEMPLATE=Template("""<?xml version="1.0"?>
<Results Date="{{ timestamp|e }}" Tool="Lapith">
<Hosts>{% for host in hosts %}
<Host dnsname="{{ host.dns_name|e }}" ipv6="" ipv4="{{ host.address|e }}">
<Vulns>
{% for vuln in host.items %}<Vuln TestPhase="" id="{{ vuln.pid|e }}">
<Data Type="afh:TCP Ports" encoding="">{{ vuln.info_dict.port }}/{{ vuln.info_dict.protocol }}</Data>
</Vuln>
{% endfor %}</Vulns>
</Host>
{% endfor %}</Hosts>
<Vulns>
{% for vuln in vulns %}
<Vuln group="" id="{{ vuln.pid|e }}">
<Title>{{ vuln.name|e }}</Title>
<Description encoding="">
{{ "\n".join(vuln.issue.initial_output.splitlines()[7:])|replace("Plugin Output:", "Plugin Output::\n") | e}}
------------------------
{{ vuln.diffs|e }}
</Description>
<Recommendation encoding=""></Recommendation>
<References/>
<Category/>
<Patches/>
<CVSS>
<OverallScore>{% if vuln.item.info_dict["cvss_base_score"] %}{{ vuln.item.info_dict["cvss_base_score"]|e }}{% else %}{{ vuln.severity|e }}{% endif %}</OverallScore>
<Vector>{{ vuln.item.info_dict["cvss_vector"]|replace("CVSS2#", "")|e }}</Vector>
</CVSS>
<Severity>{{ vuln.severity|e }}</Severity>
</Vuln>
{% endfor %}
</Vulns>
<Groups/>
</Results>
""")
ID_Save_Results = wx.NewId()
class ViewerController:
def __init__(self):
# def initView(self):
self.view = ViewerView()
## Instance vars
self.files = []
self.tests = []
self.tree_hooks = {}
self._search_text = ""
## Flags
self._in_search = False
## Dialog paths
self._save_path = os.getcwd()
self._open_path = os.getcwd()
self.create_tree()
drop_target = MyFileDropTarget(self.view.tree,
{
"nessus": self.drop_action,
},
self.view.display.write
)
self.view.tree.SetDropTarget(drop_target)
self.bind_events()
self.view.Layout()
self.view.Show()
#self.view.search.SetFocus()
def drop_action(self, file_):
self.files.append(NessusFile(file_))
self.create_scan_trees()
def on_do_search(self, event):
text = self.view.search.GetValue()
self.search(text)
def search(self, text):
self._in_search = True
self._search_text = text
for host in self.files:
pass
#hook = self.hooks[host.name][FILES]
#if self.view.tree.IsExpanded(hook): ## Only need to do it for expanded
#files = host.get_full_files(search=text)
#self.view.tree.DeleteChildren(hook)
#for f in files:
#item = self.view.tree.AppendItem(hook, f.name, 0)
#self.view.tree.SetPyData(item, f)
#self.view.tree.SortChildren(hook)
self.view.search.SetFocus()
self._in_search = False
def add_output_page(self, title, text, font="Courier New"):
display = self.view.CreateTextCtrl(font=font)
display.SetValue(text)
self.delete_page_with_title(title)
self.view.notebook.AddPage(display, title)
return self.view.notebook.GetPageIndex(display)
def load_files(self, event):
wildcard = "Nessus files (*.nessus)|*.nessus|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(
self.view, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=wildcard,
style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
# This returns a Python list of files that were selected.
paths = dlg.GetPaths()
if paths:
for path in paths:
self.files.append(NessusFile(path))
self._open_path = paths[0].rsplit(os.sep, 1)[0]
dlg.Destroy()
self.create_scan_trees()
def delete_page_with_title(self, title):
notebook = self.view.notebook
page_count = notebook.GetPageCount()
for i in xrange(page_count):
if notebook.GetPageText(i) == title:
notebook.DeletePage(i)
def create_tree(self):
self.view.tree.DeleteAllItems()
self.view.tree.AddRoot("Scans")
self.create_scan_trees()
self.view.tree.Expand(self.view.tree.GetRootItem())
def create_scan_trees(self):
scans = self.view.tree.GetRootItem()
self.view.tree.DeleteChildren(scans)
for file_ in self.files:
self.create_scan_tree(file_, scans)
self.view.tree.Expand(scans)
def sorted_tree_items(self, report, items):
list_ = list(set([NessusTreeItem(report, i) for i in items]))
list_.sort()
return list_
def create_scan_tree(self, file_, hosts):
reports = file_.get_all_reports()
scans_hook = self.view.tree.GetRootItem()
file_hook = self.view.tree.AppendItem(scans_hook, file_.short_name, 0)
for report in reports:
scan = self.view.tree.AppendItem(file_hook, report.reportname, 0)
self.view.tree.SetPyData(scan, report)
info = self.view.tree.AppendItem(scan, "Info", 0)
self.view.tree.SetPyData(info, report.info)
if report.policy:
policy = self.view.tree.AppendItem(scan, "Policy", 0)
self.view.tree.SetPyData(policy, report.policy)
hosts = self.view.tree.AppendItem(scan, "Hosts", 0)
self.view.tree.SetPyData(hosts, "\n".join(str(h) for h in report.hosts))
items_hook = self.view.tree.AppendItem(scan, "Findings", 0)
self.view.tree.SetPyData(items_hook, self.sorted_tree_items(report, report.criticals+report.highs+report.meds+report.lows+report.others))
critical_hook = self.view.tree.AppendItem(items_hook, "Criticals", 0)
self.view.tree.SetPyData(critical_hook, self.sorted_tree_items(report, report.criticals))
high_hook = self.view.tree.AppendItem(items_hook, "Highs", 0)
self.view.tree.SetPyData(high_hook, self.sorted_tree_items(report, report.highs))
med_hook = self.view.tree.AppendItem(items_hook, "Meds", 0)
self.view.tree.SetPyData(med_hook, self.sorted_tree_items(report, report.meds))
low_hook = self.view.tree.AppendItem(items_hook, "Lows", 0)
self.view.tree.SetPyData(low_hook, self.sorted_tree_items(report, report.lows))
other_hook = self.view.tree.AppendItem(items_hook, "Others", 0)
self.view.tree.SetPyData(other_hook, self.sorted_tree_items(report, report.others))
for crit in self.sorted_tree_items(report, report.criticals):
item = self.view.tree.AppendItem(critical_hook, str(crit), 0)
self.view.tree.SetPyData(item, crit)
for high in self.sorted_tree_items(report, report.highs):
item = self.view.tree.AppendItem(high_hook, str(high), 0)
self.view.tree.SetPyData(item, high)
for med in self.sorted_tree_items(report, report.meds):
item = self.view.tree.AppendItem(med_hook, str(med), 0)
self.view.tree.SetPyData(item, med)
for low in self.sorted_tree_items(report, report.lows):
item = self.view.tree.AppendItem(low_hook, str(low), 0)
self.view.tree.SetPyData(item, low)
for other in [NessusTreeItem(report, o) for o in report.others]:
item = self.view.tree.AppendItem(other_hook, str(other), 0)
self.view.tree.SetPyData(item, other)
def get_item_output(self, item):
hosts = item.report.hosts_with_pid(item.pid)
initial_output = hosts[0].plugin_output(item.pid)
diffs = []
for host in hosts[1:]:
diff = difflib.unified_diff(initial_output.splitlines(), host.plugin_output(item.pid).splitlines())
diffs.append((host, "\n".join(list(diff))))
initial_output = item.name.strip() + "\n\n" + initial_output
diff_output = ""
identical_hosts = [hosts[0]]
for (host, diff) in diffs:
if diff:
diff_output += "=" * 70 + "\n\n%s\n%s\n\n" % (host, diff)
else:
identical_hosts.append(host)
output = OUTPUT_TEMPLATE.render(
item=item,
hosts_count=len(hosts),
hosts=hosts,
identical_hosts=identical_hosts,
initial_output=initial_output
)
return output, diff_output, dict(item=item, hosts=hosts, identical_hosts=identical_hosts, initial_output=initial_output)
# output = item.name+"\n"
# output += "%s hosts with this issue\n" % len(hosts)
# output += "\n".join(str(i).split()[0] for i in hosts)
# output += "\n"+"-"*20+"\n"
# output += "\n".join(str(i) for i in identical_hosts) + "\n\n" + initial_output
# return output, diff_output
def show_nessus_item(self, item):
output, diff_output, _ = self.get_item_output(item)
diff_title = "Diffs"
self.delete_page_with_title(diff_title)
display = self.view.display
if diff_output:
self.add_output_page(diff_title, diff_output, font="Courier New")
display.SetValue(output)
def generate_rst(self, event):
saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save RST as...").get_choice()
if saveas:
merged_scans = MergedNessusReport(self.files)
if not saveas.endswith(".rst"):
saveas = saveas+".rst"
sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others)
with open(saveas, "wb") as f:
for item in sorted_tree_items:
issue, diffs, meta = self.get_item_output(item)
item.issue = meta
item.diffs = diffs
item.severity = SEVERITY[item.item.severity]
f.write(RST_TEMPLATE.render(
timestamp=datetime.now(),
hosts=merged_scans.hosts,
vulns=sorted_tree_items,
merged_scans=merged_scans,
)
)
def generate_vulnxml(self, event):
saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save VulnXML as...").get_choice()
if saveas:
merged_scans = MergedNessusReport(self.files)
if not saveas.endswith(".xml"):
saveas = saveas+".xml"
sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others)
with open(saveas, "wb") as f:
for item in sorted_tree_items:
issue, diffs, meta = self.get_item_output(item)
item.issue = meta
item.diffs = diffs
item.severity = SEVERITY[item.item.severity]
f.write(VULNXML_TEMPLATE.render(
timestamp=datetime.now(),
hosts=merged_scans.hosts,
vulns=sorted_tree_items,
merged_scans=merged_scans,
)
)
def generate_csv(self, event):
saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save csv as...").get_choice()
if saveas:
merged_scans = MergedNessusReport(self.files)
if not saveas.endswith(".csv"):
saveas = saveas+".csv"
sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others)
with open(saveas, "wb") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(["PID","Severity","Hosts","Output","Diffs"])
for item in sorted_tree_items:
csv_writer.writerow([
item.pid,
SEVERITY[item.item.severity],
"\n".join(x.address for x in merged_scans.hosts_with_pid(item.pid)),
self.get_item_output(item)[0],
self.get_item_output(item)[1],
]
)
def combine_files(self, event):
scans_hook = self.view.tree.GetRootItem()
merged_scans = MergedNessusReport(self.files)
if merged_scans.get_all_reports():
merge_hook = self.view.tree.AppendItem(scans_hook, "Merged Files", 0)
items_hook = self.view.tree.AppendItem(merge_hook, "Findings", 0)
self.view.tree.SetPyData(items_hook, self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others))
critical_hook = self.view.tree.AppendItem(items_hook, "Critical", 0)
self.view.tree.SetPyData(critical_hook, self.sorted_tree_items(merged_scans, merged_scans.criticals))
high_hook = self.view.tree.AppendItem(items_hook, "Highs", 0)
self.view.tree.SetPyData(high_hook, self.sorted_tree_items(merged_scans, merged_scans.highs))
med_hook = self.view.tree.AppendItem(items_hook, "Meds", 0)
self.view.tree.SetPyData(med_hook, self.sorted_tree_items(merged_scans, merged_scans.meds))
low_hook = self.view.tree.AppendItem(items_hook, "Lows", 0)
self.view.tree.SetPyData(low_hook, self.sorted_tree_items(merged_scans, merged_scans.lows))
other_hook = self.view.tree.AppendItem(items_hook, "Others", 0)
self.view.tree.SetPyData(other_hook, self.sorted_tree_items(merged_scans, merged_scans.others))
for crit in self.sorted_tree_items(merged_scans, merged_scans.criticals):
item = self.view.tree.AppendItem(critical_hook, str(crit), 0)
self.view.tree.SetPyData(item, crit)
for high in self.sorted_tree_items(merged_scans, merged_scans.highs):
item = self.view.tree.AppendItem(high_hook, str(high), 0)
self.view.tree.SetPyData(item, high)
for med in self.sorted_tree_items(merged_scans, merged_scans.meds):
item = self.view.tree.AppendItem(med_hook, str(med), 0)
self.view.tree.SetPyData(item, med)
for low in self.sorted_tree_items(merged_scans, merged_scans.lows):
item = self.view.tree.AppendItem(low_hook, str(low), 0)
self.view.tree.SetPyData(item, low)
for other in merged_scans.others:
item = self.view.tree.AppendItem(other_hook, str(other), 0)
self.view.tree.SetPyData(item, other)
self.view.tree.Expand(scans_hook)
def bind_events(self):
# Toolbar events
self.view.Bind(wx.EVT_TOOL, self.load_files, id=ID_Load_Files)
self.view.Bind(wx.EVT_TOOL, self.combine_files, id=ID_Merge_Files)
self.view.Bind(wx.EVT_TOOL, self.generate_csv, id=ID_Generate_CSV)
self.view.Bind(wx.EVT_TOOL, self.generate_vulnxml, id=ID_Generate_VulnXML)
self.view.Bind(wx.EVT_TOOL, self.generate_rst, id=ID_Generate_RST)
# Tree clicking and selections
self.view.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_sel_changed, self.view.tree)
self.view.tree.Bind(wx.EVT_TREE_ITEM_MENU, self.on_right_click, self.view.tree)
# Tab close event - will prevent closing the output tab
self.view.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_page_close)
# Menu stuff
self.view.Bind(wx.EVT_MENU, self.load_files, id=wx.ID_OPEN)
self.view.Bind(wx.EVT_MENU, self.extract_results, id=ID_Save_Results)
self.view.Bind(wx.EVT_MENU, self.on_exit, id=wx.ID_EXIT)
self.view.Bind(wx.EVT_MENU, self.on_about, id=ID_About)
## Search
#self.view.search.Bind(wx.EVT_TEXT_ENTER, self.on_do_search)
#self.view.search.Bind(wx.EVT_TEXT, self.on_do_search)
def extract_results(self, event):
item = self.view.tree.GetSelection()
data = self.view.tree.GetItemData(item).GetData()
saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save results as...").get_choice()
if saveas:
with open(saveas, "w") as f:
output = ""
if isinstance(data, list):
for item in data:
output, diff_output, _ = self.get_item_output(item)
f.write("="*20+"\n")
f.write(output)
f.write(diff_output)
elif isinstance(data, NessusReport):
pass
elif isinstance(data, MergedNessusReport):
pass
def on_right_click(self, event):
item = event.GetItem()
self.view.tree.SelectItem(item)
data = self.view.tree.GetItemData(item).GetData()
if isinstance(data, NessusReport) or isinstance(data, MergedNessusReport) or isinstance(data, list):
menu = wx.Menu()
menu.Append(ID_Save_Results, "Save all results")
self.view.PopupMenu(menu)
menu.Destroy()
def on_page_close(self, event):
## We don't want the user to be able to close any tabs
        ## TODO Find a way to disable the cross on the GUI
event.Veto()
def on_sel_changed(self, event):
item = event.GetItem()
tree = self.view.tree
data = tree.GetItemData(item).GetData()
if isinstance(data, NessusReport):
self.view.display.Clear()
self.view.display.SetValue(data.reportname)
self.view.notebook.SetSelection(0)
self.view.tree.SetFocus()
elif isinstance(data, NessusItem):
self.view.display.Clear()
self.view.display.SetValue(data.output.replace('\\n', "\n"))
self.view.notebook.SetSelection(0)
self.view.tree.SetFocus()
elif isinstance(data, NessusTreeItem):
self.show_nessus_item(data)
self.view.tree.SetFocus()
elif isinstance(data, str):
self.view.display.Clear()
self.view.display.SetValue(data.replace('\\n', "\n"))
self.view.notebook.SetSelection(0)
self.view.tree.SetFocus()
def on_exit(self, event):
self.view.Close()
def on_about(self, event):
## Just display a dialog box
info = wx.AboutDialogInfo()
info.Name = "Nessus Results - The right way around"
info.Version = "1.0.2\n"
info.Copyright = "(C) 2012 Felix Ingram\n"
info.Description = wordwrap(
"Sometimes you need Nessus results on a per-issue basis, "
"sometimes you need to combine a load of reports into one.",
350, wx.ClientDC(self.view))
info.Developers = [ "Felix Ingram",]
## Then we call wx.AboutBox giving it that info object
wx.AboutBox(info)
| nccgroup/lapith | controller/viewer_controller.py | Python | agpl-3.0 | 22,502 | 0.004 |
# -*- coding: utf-8 -*-
import re
import logging
from completor.utils import check_subseq
from .utils import parse_uri
word_pat = re.compile(r'([\d\w]+)', re.U)
word_ends = re.compile(r'[\d\w]+$', re.U)
logger = logging.getLogger("completor")
# [
# [{
# u'range': {
# u'start': {u'line': 273, u'character': 5},
# u'end': {u'line': 273, u'character': 12}
# },
# u'uri': u'file:///home/linuxbrew/.linuxbrew/Cellar/go/1.12.4/libexec/src/fmt/print.go' # noqa
# }]
# ]
def gen_jump_list(ft, name, data):
res = []
if not data:
return res
items = data[0]
if items is None:
return res
for item in items:
uri = parse_uri(item['uri'])
if ft == 'go':
uri = uri.replace('%21', '!')
start = item['range']['start']
res.append({
'filename': uri,
'lnum': start['line'] + 1,
'col': start['character'] + 1,
'name': name,
})
return res
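# Illustrative example (hypothetical values): given the response shape shown
# above, a call such as
#   gen_jump_list('go', 'Println',
#                 [[{'uri': 'file:///usr/lib/go/src/fmt/print.go',
#                    'range': {'start': {'line': 273, 'character': 5},
#                              'end': {'line': 273, 'character': 12}}}]])
# would return
#   [{'filename': '/usr/lib/go/src/fmt/print.go', 'lnum': 274, 'col': 6,
#     'name': 'Println'}]
# i.e. the 0-based LSP positions are converted to Vim's 1-based lnum/col.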
# [
# [
# {
# u'newText': u'',
# u'range': {
# u'start': {u'line': 8, u'character': 0},
# u'end': {u'line': 9, u'character': 0}
# }
# }, {
# u'newText': u'',
# u'range': {
# u'start': {u'line': 9, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'\tfmt.Println()\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'}\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }
# ]
# ]
def format_text(data):
if not data:
return
for item in data[0]:
pass
def get_completion_word(item, insert_text):
if insert_text != b'label':
try:
return item['textEdit']['newText'], \
item['textEdit']['range']['start']['character']
except KeyError:
pass
label = item['label'].strip()
match = word_pat.match(label)
return match.groups()[0] if match else '', -1
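# Illustrative example (hypothetical completion item):
#   item = {'label': 'Println(a ...interface{})',
#           'textEdit': {'newText': 'Println',
#                        'range': {'start': {'line': 10, 'character': 4},
#                                  'end': {'line': 10, 'character': 6}}}}
# get_completion_word(item, b'textEdit') returns ('Println', 4), while
# get_completion_word(item, b'label') ignores the text edit and falls back
# to the first word of the label, returning ('Println', -1).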
hiddenLines = ["on pkg.go.dev"]
escapes = re.compile(r'''\\([\\\x60*{}[\]()#+\-.!_>~|"$%&'\/:;<=?@^])''',
re.UNICODE)
escape_types = ['go', 'json']
def _shouldHidden(line):
for item in hiddenLines:
if item in line:
return True
return False
def gen_hover_doc(ft, value):
if ft not in escape_types:
return value
lines = []
for l in value.split("\n"):
if _shouldHidden(l):
continue
lines.append(escapes.sub(r"\1", l).replace(' ', ' '))
return "\n".join(lines)
def filter_items(items, input_data):
target = ''
match = word_ends.search(input_data)
if match:
target = match.group()
if not target:
return items
filtered = []
for item in items:
score = check_subseq(target, item[1])
if score is None:
continue
filtered.append((item, score))
filtered.sort(key=lambda x: x[1])
return [e for e, _ in filtered]
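# Illustrative example (hypothetical candidates): each item's second element
# is the completion word; with input_data 'fmt.Pr' the trailing word 'Pr' is
# the filter target, candidates whose word does not contain it as a
# subsequence (per check_subseq) are dropped, and the remainder is returned
# sorted by ascending score - assuming lower scores mean tighter matches.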
| maralla/completor.vim | pythonx/completers/lsp/action.py | Python | mit | 3,344 | 0.000299 |
from ctypes import POINTER
from ctypes import c_long
from ctypes import c_uint32
from ctypes import c_void_p
CFIndex = c_long
CFStringEncoding = c_uint32
CFString = c_void_p
CFArray = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFAllocatorRef = c_void_p
CFStringRef = POINTER(CFString)
CFArrayRef = POINTER(CFArray)
CFDictionaryRef = POINTER(CFDictionary)
CFErrorRef = POINTER(CFError)
CFTypeRef = POINTER(CFType)
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
kCGWindowListOptionAll = 0
kCGWindowListOptionOnScreenOnly = (1 << 0)
kCGNullWindowID = 0
| vasily-v-ryabov/ui-automation-course | 1_Lyalyushkin/objc_constants.py | Python | bsd-3-clause | 588 | 0 |
#!/usr/bin/env python
from numpy import array, dtype, int32
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
label_traindat = '../data/label_train_multiclass.dat'
# set both input attributes as continuous i.e. 2
feattypes = array([2, 2],dtype=int32)
parameter_list = [[traindat,testdat,label_traindat,feattypes]]
def multiclass_chaidtree_modular(train=traindat,test=testdat,labels=label_traindat,ft=feattypes):
try:
from modshogun import RealFeatures, MulticlassLabels, CSVFile, CHAIDTree
except ImportError:
print("Could not import Shogun modules")
return
# wrap features and labels into Shogun objects
feats_train=RealFeatures(CSVFile(train))
feats_test=RealFeatures(CSVFile(test))
train_labels=MulticlassLabels(CSVFile(labels))
# CHAID Tree formation with nominal dependent variable
c=CHAIDTree(0,feattypes,10)
c.set_labels(train_labels)
c.train(feats_train)
# Classify test data
output=c.apply_multiclass(feats_test).get_labels()
return c,output
if __name__=='__main__':
print('CHAIDTree')
multiclass_chaidtree_modular(*parameter_list[0])
| AzamYahya/shogun | examples/undocumented/python_modular/multiclass_chaidtree_modular.py | Python | gpl-3.0 | 1,100 | 0.033636 |
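# For each query (A, B, C, D): the first two checks answer "No" when A < B
# or C + D < B and "Yes" when C >= B - 1 without simulating. Otherwise the
# loop below does cycle detection on `now` mod B: a repeated remainder
# means "Yes"; if the remainder is at most C, D is added and the loop
# continues, otherwise the answer is "No".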
for _ in range(int(input())):
A, B, C, D = map(int, input().split())
if A < B or C + D < B:
print("No")
continue
elif C >= B - 1:
print("Yes")
continue
ret = []
s_set = set()
now = A
while True:
now %= B
if now in s_set:
print("Yes", ret)
break
else:
s_set.add(now)
if now <= C:
now += D
ret.append(now)
else:
print("No", ret)
break
| knuu/competitive-programming | atcoder/agc/agc026_b.py | Python | mit | 517 | 0 |
import pytest
import pwny
target_little_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.little)
target_big_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.big)
def test_pack():
assert pwny.pack('I', 0x41424344) == b'DCBA'
def test_pack_format_with_endian():
assert pwny.pack('>I', 0x41424344) == b'ABCD'
def test_pack_explicit_endian():
assert pwny.pack('I', 0x41424344, endian=pwny.Target.Endian.big) == b'ABCD'
def test_pack_explicit_target():
assert pwny.pack('I', 0x41424344, target=target_big_endian) == b'ABCD'
@pytest.mark.xfail(raises=NotImplementedError)
def test_pack_invalid_endian():
pwny.pack('I', 1, endian='invalid')
def test_unpack():
assert pwny.unpack('I', b'DCBA') == (0x41424344,)
def test_unpack_format_with_endian():
assert pwny.unpack('>I', b'ABCD') == (0x41424344,)
def test_unpack_explicit_endian():
assert pwny.unpack('I', b'ABCD', endian=pwny.Target.Endian.big) == (0x41424344,)
def test_unpack_explicit_target():
assert pwny.unpack('I', b'ABCD', target=target_big_endian) == (0x41424344,)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unpack_invalid_endian():
pwny.unpack('I', 'AAAA', endian='invalid')
def test_pack_size():
# This tests both pack_size in general as well as not padding the byte.
assert pwny.pack_size('bq') == 9
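# Each entry below is (bit width, value, big-endian byte string); the
# generator tests derive the little-endian expectation by reversing the
# byte string.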
short_signed_data = [
[8, -0x7f, b'\x81'],
[16, -0x7fff, b'\x80\x01'],
[32, -0x7fffffff, b'\x80\x00\x00\x01'],
[64, -0x7fffffffffffffff, b'\x80\x00\x00\x00\x00\x00\x00\x01'],
]
short_unsigned_data = [
[8, 0x61, b'a'],
[16, 0x6162, b'ab'],
[32, 0x61626364, b'abcd'],
[64, 0x6162636465666768, b'abcdefgh'],
]
def test_short_form_pack():
for width, num, bytestr in short_signed_data:
f = 'p%d' % width
yield check_short_form_pack, f, num, bytestr[::-1]
yield check_short_form_pack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_pack_endian, f, num, bytestr, pwny.Target.Endian.big
for width, num, bytestr in short_unsigned_data:
f = 'P%d' % width
yield check_short_form_pack, f, num, bytestr[::-1]
yield check_short_form_pack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_pack_endian, f, num, bytestr, pwny.Target.Endian.big
def test_short_form_unpack():
for width, num, bytestr in short_signed_data:
f = 'u%d' % width
yield check_short_form_unpack, f, num, bytestr[::-1]
yield check_short_form_unpack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_unpack_endian, f, num, bytestr, pwny.Target.Endian.big
for width, num, bytestr in short_unsigned_data:
f = 'U%d' % width
yield check_short_form_unpack, f, num, bytestr[::-1]
yield check_short_form_unpack_endian, f, num, bytestr[::-1], pwny.Target.Endian.little
yield check_short_form_unpack_endian, f, num, bytestr, pwny.Target.Endian.big
def test_pointer_pack():
yield check_short_form_pack, 'p', -66052, b'\xfc\xfd\xfe\xff'
yield check_short_form_pack_endian, 'p', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_pack_endian, 'p', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
yield check_short_form_pack, 'P', 4294901244, b'\xfc\xfd\xfe\xff'
yield check_short_form_pack_endian, 'P', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_pack_endian, 'P', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
def test_pointer_unpack():
yield check_short_form_unpack, 'u', -66052, b'\xfc\xfd\xfe\xff'
yield check_short_form_unpack_endian, 'u', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_unpack_endian, 'u', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
yield check_short_form_unpack, 'U', 4294901244, b'\xfc\xfd\xfe\xff'
yield check_short_form_unpack_endian, 'U', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little
yield check_short_form_unpack_endian, 'U', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big
def check_short_form_pack(f, num, bytestr):
assert getattr(pwny, f)(num) == bytestr
def check_short_form_pack_endian(f, num, bytestr, endian):
assert getattr(pwny, f)(num, endian=endian) == bytestr
def check_short_form_unpack(f, num, bytestr):
assert getattr(pwny, f)(bytestr) == num
def check_short_form_unpack_endian(f, num, bytestr, endian):
assert getattr(pwny, f)(bytestr, endian=endian) == num
| edibledinos/pwnypack | tests/test_packing.py | Python | mit | 4,642 | 0.004093 |
from django.conf.urls import include, url
from django.views.generic import TemplateView
from kuma.attachments.feeds import AttachmentsFeed
from kuma.attachments.views import edit_attachment
from . import feeds, views
from .constants import DOCUMENT_PATH_RE
# These patterns inherit (?P<document_path>[^\$]+).
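# For example, '<document_path>$edit' resolves to the 'wiki.edit' view and
# '<document_path>$history' to 'wiki.document_revisions'; the document_path
# prefix itself is captured by the last entry of `urlpatterns` below.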
document_patterns = [
url(r'^$',
views.document.document,
name='wiki.document'),
url(r'^\$revision/(?P<revision_id>\d+)$',
views.revision.revision,
name='wiki.revision'),
url(r'^\$history$',
views.list.revisions,
name='wiki.document_revisions'),
url(r'^\$edit$',
views.edit.edit,
name='wiki.edit'),
url(r'^\$files$',
edit_attachment,
name='attachments.edit_attachment'),
url(r'^\$edit/(?P<revision_id>\d+)$',
views.edit.edit,
name='wiki.new_revision_based_on'),
url(r'^\$compare$',
views.revision.compare,
name='wiki.compare_revisions'),
url(r'^\$children$',
views.document.children,
name='wiki.children'),
url(r'^\$translate$',
views.translate.translate,
name='wiki.translate'),
url(r'^\$locales$',
views.translate.select_locale,
name='wiki.select_locale'),
url(r'^\$json$',
views.document.as_json,
name='wiki.json_slug'),
url(r'^\$styles$',
views.document.styles,
name='wiki.styles'),
url(r'^\$toc$',
views.document.toc,
name='wiki.toc'),
url(r'^\$move$',
views.document.move,
name='wiki.move'),
url(r'^\$quick-review$',
views.revision.quick_review,
name='wiki.quick_review'),
url(r'^\$samples/(?P<sample_name>.+)/files/(?P<attachment_id>\d+)/(?P<filename>.+)$',
views.code.raw_code_sample_file,
name='wiki.raw_code_sample_file'),
url(r'^\$samples/(?P<sample_name>.+)$',
views.code.code_sample,
name='wiki.code_sample'),
url(r'^\$revert/(?P<revision_id>\d+)$',
views.delete.revert_document,
name='wiki.revert_document'),
url(r'^\$repair_breadcrumbs$',
views.document.repair_breadcrumbs,
name='wiki.repair_breadcrumbs'),
url(r'^\$delete$',
views.delete.delete_document,
name='wiki.delete_document'),
url(r'^\$restore$',
views.delete.restore_document,
name='wiki.restore_document'),
url(r'^\$purge$',
views.delete.purge_document,
name='wiki.purge_document'),
# Un/Subscribe to document edit notifications.
url(r'^\$subscribe$',
views.document.subscribe,
name='wiki.subscribe'),
# Un/Subscribe to document tree edit notifications.
url(r'^\$subscribe_to_tree$',
views.document.subscribe_to_tree,
name='wiki.subscribe_to_tree'),
]
urlpatterns = [
url(r'^/ckeditor_config.js$',
views.misc.ckeditor_config,
name='wiki.ckeditor_config'),
# internals
url(r'^.json$',
views.document.as_json,
name='wiki.json'),
url(r'^/preview-wiki-content$',
views.revision.preview,
name='wiki.preview'),
url(r'^/move-requested$',
TemplateView.as_view(template_name='wiki/move_requested.html'),
name='wiki.move_requested'),
url(r'^/get-documents$',
views.misc.autosuggest_documents,
name='wiki.autosuggest_documents'),
url(r'^/load/$',
views.misc.load_documents,
name='wiki.load_documents'),
# Special pages
url(r'^/templates$',
views.list.templates,
name='wiki.list_templates'),
url(r'^/tags$',
views.list.tags,
name='wiki.list_tags'),
url(r'^/tag/(?P<tag>.+)$',
views.list.documents,
name='wiki.tag'),
url(r'^/new$',
views.create.create,
name='wiki.create'),
url(r'^/all$',
views.list.documents,
name='wiki.all_documents'),
url(r'^/with-errors$',
views.list.with_errors,
name='wiki.errors'),
url(r'^/without-parent$',
views.list.without_parent,
name='wiki.without_parent'),
url(r'^/top-level$',
views.list.top_level,
name='wiki.top_level'),
url(r'^/needs-review/(?P<tag>[^/]+)$',
views.list.needs_review,
name='wiki.list_review_tag'),
url(r'^/needs-review/?',
views.list.needs_review,
name='wiki.list_review'),
url(r'^/localization-tag/(?P<tag>[^/]+)$',
views.list.with_localization_tag,
name='wiki.list_with_localization_tag'),
url(r'^/localization-tag/?',
views.list.with_localization_tag,
name='wiki.list_with_localization_tags'),
# Akismet Revision
url(r'^/submit_akismet_spam$',
views.akismet_revision.submit_akismet_spam,
name='wiki.submit_akismet_spam'),
# Feeds
url(r'^/feeds/(?P<format>[^/]+)/all/?',
feeds.DocumentsRecentFeed(),
name="wiki.feeds.recent_documents"),
url(r'^/feeds/(?P<format>[^/]+)/l10n-updates/?',
feeds.DocumentsUpdatedTranslationParentFeed(),
name="wiki.feeds.l10n_updates"),
url(r'^/feeds/(?P<format>[^/]+)/tag/(?P<tag>[^/]+)',
feeds.DocumentsRecentFeed(),
name="wiki.feeds.recent_documents"),
url(r'^/feeds/(?P<format>[^/]+)/needs-review/(?P<tag>[^/]+)',
feeds.DocumentsReviewFeed(),
name="wiki.feeds.list_review_tag"),
url(r'^/feeds/(?P<format>[^/]+)/needs-review/?',
feeds.DocumentsReviewFeed(),
name="wiki.feeds.list_review"),
url(r'^/feeds/(?P<format>[^/]+)/revisions/?',
feeds.RevisionsFeed(),
name="wiki.feeds.recent_revisions"),
url(r'^/feeds/(?P<format>[^/]+)/files/?',
AttachmentsFeed(),
name="attachments.feeds.recent_files"),
url(r'^/(?P<document_path>%s)' % DOCUMENT_PATH_RE.pattern,
include(document_patterns)),
]
| jgmize/kuma | kuma/wiki/urls.py | Python | mpl-2.0 | 5,894 | 0.00017 |
#!/usr/bin/env python
from datetime import timedelta
import numpy as np
from opendrift.readers import reader_basemap_landmask
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift
o = OceanDrift(loglevel=0) # Set loglevel to 0 for debug information
reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
# Landmask (Basemap)
reader_basemap = reader_basemap_landmask.Reader(
llcrnrlon=4.0, llcrnrlat=59.9,
urcrnrlon=5.5, urcrnrlat=61.2,
resolution='h', projection='merc')
o.add_reader([reader_basemap, reader_norkyst])
# Seeding some particles
lons = np.linspace(4.4, 4.6, 10)
lats = np.linspace(60.0, 60.1, 10)
lons, lats = np.meshgrid(lons, lats)
lons = lons.ravel()
lats = lats.ravel()
# Seed oil elements on a grid at regular time interval
start_time = reader_norkyst.start_time
time_step = timedelta(hours=6)
num_steps = 10
for i in range(num_steps+1):
o.seed_elements(lons, lats, radius=0, number=100,
time=start_time + i*time_step)
# Running model (until end of driver data)
o.run(steps=66*4, time_step=900)
# Print and plot results
print(o)
o.animation()
| knutfrode/opendrift | examples/example_grid_time.py | Python | gpl-2.0 | 1,294 | 0.001546 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Invoices Reference',
'version': '1.0',
'author': 'Camptocamp',
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'category',
'complexity': "easy",
'depends': ['account',
],
'description': """
Invoices Reference
==================
Aims to simplify the "references" fields on the invoices.
We observed that users have difficulties filling in the references (name,
origin, free reference) and, above all, understanding which field will be
copied into the reference field of the move and move lines.
The approach here is to state simple rules with one concern: consistency.
The reference of the move lines must be the number of the document at their very
origin (number of a sales order, of an external document like a supplier
invoice, ...). The goal is for the accountant to be able to trace back to the
source document from a ledger.
The description of a line should always be... well, a description. Not a number
or a cryptic reference.
It fits particularly well with other modules of the bank-statement-reconcile
series, such as account_advanced_reconcile_transaction_ref.
Fields
------
Enumerating the information we need in an invoice, we find that the
mandatory fields are:
* Invoice Number
* Description
* Internal Reference ("our reference")
* External Reference ("customer or supplier reference")
* Optionally, a technical transaction reference (credit card payment gateways,
SEPA, ...)
Now, on the move lines:
* Name
* Reference
* Optionally, a technical transaction reference (added by the module
`base_transaction_id`)
Let's see how the information will be organized with this module.
Customers Invoices / Refunds
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------+-----------------+------------------------------+
| Information | Invoice field | Instead of (in base modules) |
+=================+=================+==============================+
| Invoice number | Invoice number | Invoice number |
+-----------------+-----------------+------------------------------+
| Description | Name | -- |
+-----------------+-----------------+------------------------------+
| Internal Ref | Origin | Origin |
+-----------------+-----------------+------------------------------+
| External Ref | Reference | Name |
+-----------------+-----------------+------------------------------+
Information propagated to the move lines:
+-----------------+------------------------------------+
| Move line field | Invoice field |
+=================+====================================+
| Description | Name |
+-----------------+------------------------------------+
| Reference | Origin, or Invoice number if empty |
+-----------------+------------------------------------+
Supplier Invoices / Refunds
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Supplier invoices have an additional field `supplier_invoice_number`
that we consider as redundant with the reference field. This field is kept
and even set as mandatory, while the reference field is hidden.
+-----------------+-----------------+------------------------------+
| Information | Invoice field | Instead of (in base modules) |
+=================+=================+==============================+
| Invoice number | Invoice number | Invoice number |
+-----------------+-----------------+------------------------------+
| Description | Name | -- |
+-----------------+-----------------+------------------------------+
| Internal Ref | Origin | Origin |
+-----------------+-----------------+------------------------------+
| External Ref | Supplier number | Supplier number |
+-----------------+-----------------+------------------------------+
The reference field is hidden when the reference type is "free reference",
because it is already filed in the Supplier invoice number.
Information propagated to the move lines:
+-----------------+---------------------------------------------+
| Move line field | Invoice field |
+=================+=============================================+
| Description | Name |
+-----------------+---------------------------------------------+
| Reference | Supplier number, or Invoice number if empty |
+-----------------+---------------------------------------------+
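In pseudo-code (illustrative only, using the field names above), the
propagation rules are roughly::
    move_line.name = invoice.name                       # description
    move_line.ref = invoice.origin or invoice.number    # customer documents
    move_line.ref = invoice.supplier_invoice_number or invoice.number  # supplier documents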
""",
'website': 'http://www.camptocamp.com',
'data': ['account_invoice_view.xml',
],
'test': ['test/out_invoice_with_origin.yml',
'test/out_invoice_without_origin.yml',
'test/in_invoice_with_supplier_number.yml',
'test/in_invoice_without_supplier_number.yml',
'test/out_refund_with_origin.yml',
'test/out_refund_without_origin.yml',
'test/in_refund_with_supplier_number.yml',
'test/in_refund_without_supplier_number.yml',
],
'installable': False,
'auto_install': False,
}
| kmee/bank-statement-reconcile | __unported__/account_invoice_reference/__openerp__.py | Python | agpl-3.0 | 6,213 | 0.000161 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for GL/GLES extension wrangler."""
import os
import collections
import re
import sys
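# Each entry in GL_FUNCTIONS below describes one GL entry point to wrap:
# 'return_type' is the C return type, 'names' lists the alternative export
# names accepted for that function (core name plus EXT/ARB/ANGLE variants),
# 'arguments' is the C argument list as a single string, and the optional
# 'logging_code' is extra code emitted into the generated logging wrapper.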
GL_FUNCTIONS = [
{ 'return_type': 'void',
'names': ['glActiveTexture'],
'arguments': 'GLenum texture', },
{ 'return_type': 'void',
'names': ['glAttachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'names': ['glBeginQuery'],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'names': ['glBeginQueryARB', 'glBeginQueryEXT'],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'names': ['glBindAttribLocation'],
'arguments': 'GLuint program, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindBuffer'],
'arguments': 'GLenum target, GLuint buffer', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocation'],
'arguments': 'GLuint program, GLuint colorNumber, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocationIndexed'],
'arguments':
'GLuint program, GLuint colorNumber, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFramebufferEXT', 'glBindFramebuffer'],
'arguments': 'GLenum target, GLuint framebuffer', },
{ 'return_type': 'void',
'names': ['glBindRenderbufferEXT', 'glBindRenderbuffer'],
'arguments': 'GLenum target, GLuint renderbuffer', },
{ 'return_type': 'void',
'names': ['glBindTexture'],
'arguments': 'GLenum target, GLuint texture', },
{ 'return_type': 'void',
'names': ['glBlendColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'names': ['glBlendEquation'],
'arguments': ' GLenum mode ', },
{ 'return_type': 'void',
'names': ['glBlendEquationSeparate'],
'arguments': 'GLenum modeRGB, GLenum modeAlpha', },
{ 'return_type': 'void',
'names': ['glBlendFunc'],
'arguments': 'GLenum sfactor, GLenum dfactor', },
{ 'return_type': 'void',
'names': ['glBlendFuncSeparate'],
'arguments':
'GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferEXT', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferANGLE', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBufferData'],
'arguments': 'GLenum target, GLsizei size, const void* data, GLenum usage', },
{ 'return_type': 'void',
'names': ['glBufferSubData'],
'arguments': 'GLenum target, GLint offset, GLsizei size, const void* data', },
{ 'return_type': 'GLenum',
'names': ['glCheckFramebufferStatusEXT',
'glCheckFramebufferStatus'],
'arguments': 'GLenum target',
'logging_code': """
GL_SERVICE_LOG("GL_RESULT: " << GLES2Util::GetStringEnum(result));
""", },
{ 'return_type': 'void',
'names': ['glClear'],
'arguments': 'GLbitfield mask', },
{ 'return_type': 'void',
'names': ['glClearColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'names': ['glClearDepth'],
'arguments': 'GLclampd depth', },
{ 'return_type': 'void',
'names': ['glClearDepthf'],
'arguments': 'GLclampf depth', },
{ 'return_type': 'void',
'names': ['glClearStencil'],
'arguments': 'GLint s', },
{ 'return_type': 'void',
'names': ['glColorMask'],
'arguments':
'GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha', },
{ 'return_type': 'void',
'names': ['glCompileShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'names': ['glCompressedTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLsizei width, '
'GLsizei height, GLint border, GLsizei imageSize, const void* data', },
{ 'return_type': 'void',
'names': ['glCompressedTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, '
'const void* data', },
{ 'return_type': 'void',
'names': ['glCopyTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, '
'GLsizei width, GLsizei height, GLint border', },
{ 'return_type': 'void',
'names': ['glCopyTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, '
'GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'GLuint',
'names': ['glCreateProgram'],
'arguments': 'void', },
{ 'return_type': 'GLuint',
'names': ['glCreateShader'],
'arguments': 'GLenum type', },
{ 'return_type': 'void',
'names': ['glCullFace'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDeleteBuffersARB', 'glDeleteBuffers'],
'arguments': 'GLsizei n, const GLuint* buffers', },
{ 'return_type': 'void',
'names': ['glDeleteFramebuffersEXT', 'glDeleteFramebuffers'],
'arguments': 'GLsizei n, const GLuint* framebuffers', },
{ 'return_type': 'void',
'names': ['glDeleteProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glDeleteQueries'],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'names': ['glDeleteQueriesARB', 'glDeleteQueriesEXT'],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'names': ['glDeleteRenderbuffersEXT', 'glDeleteRenderbuffers'],
'arguments': 'GLsizei n, const GLuint* renderbuffers', },
{ 'return_type': 'void',
'names': ['glDeleteShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'names': ['glDeleteTextures'],
'arguments': 'GLsizei n, const GLuint* textures', },
{ 'return_type': 'void',
'names': ['glDepthFunc'],
'arguments': 'GLenum func', },
{ 'return_type': 'void',
'names': ['glDepthMask'],
'arguments': 'GLboolean flag', },
{ 'return_type': 'void',
'names': ['glDepthRange'],
'arguments': 'GLclampd zNear, GLclampd zFar', },
{ 'return_type': 'void',
'names': ['glDepthRangef'],
'arguments': 'GLclampf zNear, GLclampf zFar', },
{ 'return_type': 'void',
'names': ['glDetachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'names': ['glDisable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glDisableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'names': ['glDrawArrays'],
'arguments': 'GLenum mode, GLint first, GLsizei count', },
{ 'return_type': 'void',
'names': ['glDrawBuffer'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDrawBuffersARB'],
'arguments': 'GLsizei n, const GLenum* bufs', },
{ 'return_type': 'void',
'names': ['glDrawElements'],
'arguments':
'GLenum mode, GLsizei count, GLenum type, const void* indices', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetTexture2DOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetRenderbufferStorageOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEnable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glEnableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'names': ['glEndQuery'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glEndQueryARB', 'glEndQueryEXT'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glFinish'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glFlush'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glFramebufferRenderbufferEXT', 'glFramebufferRenderbuffer'],
  'arguments':
'GLenum target, GLenum attachment, GLenum renderbuffertarget, '
'GLuint renderbuffer', },
{ 'return_type': 'void',
'names': ['glFramebufferTexture2DEXT', 'glFramebufferTexture2D'],
'arguments':
'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, '
'GLint level', },
{ 'return_type': 'void',
'names': ['glFrontFace'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glGenBuffersARB', 'glGenBuffers'],
'arguments': 'GLsizei n, GLuint* buffers', },
{ 'return_type': 'void',
'names': ['glGenQueries'],
'arguments': 'GLsizei n, GLuint* ids', },
{ 'return_type': 'void',
'names': ['glGenQueriesARB', 'glGenQueriesEXT'],
'arguments': 'GLsizei n, GLuint* ids', },
{ 'return_type': 'void',
'names': ['glGenerateMipmapEXT', 'glGenerateMipmap'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glGenFramebuffersEXT', 'glGenFramebuffers'],
'arguments': 'GLsizei n, GLuint* framebuffers', },
{ 'return_type': 'void',
'names': ['glGenRenderbuffersEXT', 'glGenRenderbuffers'],
'arguments': 'GLsizei n, GLuint* renderbuffers', },
{ 'return_type': 'void',
'names': ['glGenTextures'],
'arguments': 'GLsizei n, GLuint* textures', },
{ 'return_type': 'void',
'names': ['glGetActiveAttrib'],
'arguments':
'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, '
'GLint* size, GLenum* type, char* name', },
{ 'return_type': 'void',
'names': ['glGetActiveUniform'],
'arguments':
'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, '
'GLint* size, GLenum* type, char* name', },
{ 'return_type': 'void',
'names': ['glGetAttachedShaders'],
'arguments':
'GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders', },
{ 'return_type': 'GLint',
'names': ['glGetAttribLocation'],
'arguments': 'GLuint program, const char* name', },
{ 'return_type': 'void',
'names': ['glGetBooleanv'],
'arguments': 'GLenum pname, GLboolean* params', },
{ 'return_type': 'void',
'names': ['glGetBufferParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'GLenum',
'names': ['glGetError'],
'arguments': 'void',
'logging_code': """
GL_SERVICE_LOG("GL_RESULT: " << GLES2Util::GetStringError(result));
""", },
{ 'return_type': 'void',
'names': ['glGetFloatv'],
'arguments': 'GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetFramebufferAttachmentParameterivEXT',
'glGetFramebufferAttachmentParameteriv'],
'arguments': 'GLenum target, '
'GLenum attachment, GLenum pname, GLint* params', },
{ 'return_type': 'GLenum',
'names': ['glGetGraphicsResetStatusARB'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glGetIntegerv'],
'arguments': 'GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetProgramiv'],
'arguments': 'GLuint program, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetProgramInfoLog'],
'arguments':
'GLuint program, GLsizei bufsize, GLsizei* length, char* infolog', },
{ 'return_type': 'void',
'names': ['glGetQueryiv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryivARB', 'glGetQueryivEXT'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjecti64v'],
'arguments': 'GLuint id, GLenum pname, GLint64* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectiv'],
'arguments': 'GLuint id, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectui64v'],
'arguments': 'GLuint id, GLenum pname, GLuint64* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectuiv'],
'arguments': 'GLuint id, GLenum pname, GLuint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectuivARB', 'glGetQueryObjectuivEXT'],
'arguments': 'GLuint id, GLenum pname, GLuint* params', },
{ 'return_type': 'void',
'names': ['glGetRenderbufferParameterivEXT', 'glGetRenderbufferParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetShaderiv'],
'arguments': 'GLuint shader, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetShaderInfoLog'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* infolog', },
{ 'return_type': 'void',
'names': ['glGetShaderPrecisionFormat'],
'arguments': 'GLenum shadertype, GLenum precisiontype, '
'GLint* range, GLint* precision', },
{ 'return_type': 'void',
'names': ['glGetShaderSource'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', },
{ 'return_type': 'const GLubyte*',
'names': ['glGetString'],
'arguments': 'GLenum name', },
{ 'return_type': 'void',
'names': ['glGetTexLevelParameterfv'],
'arguments': 'GLenum target, GLint level, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetTexLevelParameteriv'],
'arguments': 'GLenum target, GLint level, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetTexParameterfv'],
'arguments': 'GLenum target, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetTexParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetTranslatedShaderSourceANGLE'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', },
{ 'return_type': 'void',
'names': ['glGetUniformfv'],
'arguments': 'GLuint program, GLint location, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetUniformiv'],
'arguments': 'GLuint program, GLint location, GLint* params', },
{ 'return_type': 'GLint',
'names': ['glGetUniformLocation'],
'arguments': 'GLuint program, const char* name', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribfv'],
'arguments': 'GLuint index, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribiv'],
'arguments': 'GLuint index, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribPointerv'],
'arguments': 'GLuint index, GLenum pname, void** pointer', },
{ 'return_type': 'void',
'names': ['glHint'],
'arguments': 'GLenum target, GLenum mode', },
{ 'return_type': 'GLboolean',
'names': ['glIsBuffer'],
'arguments': 'GLuint buffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsEnabled'],
'arguments': 'GLenum cap', },
{ 'return_type': 'GLboolean',
'names': ['glIsFramebufferEXT', 'glIsFramebuffer'],
'arguments': 'GLuint framebuffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'GLboolean',
'names': ['glIsQueryARB', 'glIsQueryEXT'],
'arguments': 'GLuint query', },
{ 'return_type': 'GLboolean',
'names': ['glIsRenderbufferEXT', 'glIsRenderbuffer'],
'arguments': 'GLuint renderbuffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'GLboolean',
'names': ['glIsTexture'],
'arguments': 'GLuint texture', },
{ 'return_type': 'void',
'names': ['glLineWidth'],
'arguments': 'GLfloat width', },
{ 'return_type': 'void',
'names': ['glLinkProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void*',
'names': ['glMapBuffer', 'glMapBufferOES'],
'arguments': 'GLenum target, GLenum access', },
{ 'return_type': 'void',
'names': ['glPixelStorei'],
'arguments': 'GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glPointParameteri'],
'arguments': 'GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glPolygonOffset'],
'arguments': 'GLfloat factor, GLfloat units', },
{ 'return_type': 'void',
'names': ['glQueryCounter'],
'arguments': 'GLuint id, GLenum target', },
{ 'return_type': 'void',
'names': ['glReadBuffer'],
'arguments': 'GLenum src', },
{ 'return_type': 'void',
'names': ['glReadPixels'],
'arguments':
'GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, '
'GLenum type, void* pixels', },
{ 'return_type': 'void',
'names': ['glReleaseShaderCompiler'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisampleEXT',
'glRenderbufferStorageMultisample'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisampleANGLE',
'glRenderbufferStorageMultisample'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageEXT', 'glRenderbufferStorage'],
'arguments':
'GLenum target, GLenum internalformat, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glSampleCoverage'],
'arguments': 'GLclampf value, GLboolean invert', },
{ 'return_type': 'void',
'names': ['glScissor'],
'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glShaderBinary'],
'arguments': 'GLsizei n, const GLuint* shaders, GLenum binaryformat, '
'const void* binary, GLsizei length', },
{ 'return_type': 'void',
'names': ['glShaderSource'],
'arguments':
'GLuint shader, GLsizei count, const char** str, const GLint* length',
'logging_code': """
GL_SERVICE_LOG_CODE_BLOCK({
for (GLsizei ii = 0; ii < count; ++ii) {
if (str[ii]) {
if (length && length[ii] >= 0) {
std::string source(str[ii], length[ii]);
GL_SERVICE_LOG(" " << ii << ": ---\\n" << source << "\\n---");
} else {
GL_SERVICE_LOG(" " << ii << ": ---\\n" << str[ii] << "\\n---");
}
} else {
GL_SERVICE_LOG(" " << ii << ": NULL");
}
}
});
""", },
{ 'return_type': 'void',
'names': ['glStencilFunc'],
'arguments': 'GLenum func, GLint ref, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilFuncSeparate'],
'arguments': 'GLenum face, GLenum func, GLint ref, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilMask'],
'arguments': 'GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilMaskSeparate'],
'arguments': 'GLenum face, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilOp'],
'arguments': 'GLenum fail, GLenum zfail, GLenum zpass', },
{ 'return_type': 'void',
'names': ['glStencilOpSeparate'],
'arguments': 'GLenum face, GLenum fail, GLenum zfail, GLenum zpass', },
{ 'return_type': 'void',
'names': ['glTexImage2D'],
'arguments':
'GLenum target, GLint level, GLint internalformat, GLsizei width, '
'GLsizei height, GLint border, GLenum format, GLenum type, '
'const void* pixels', },
{ 'return_type': 'void',
'names': ['glTexParameterf'],
'arguments': 'GLenum target, GLenum pname, GLfloat param', },
{ 'return_type': 'void',
'names': ['glTexParameterfv'],
'arguments': 'GLenum target, GLenum pname, const GLfloat* params', },
{ 'return_type': 'void',
'names': ['glTexParameteri'],
'arguments': 'GLenum target, GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glTexParameteriv'],
'arguments': 'GLenum target, GLenum pname, const GLint* params', },
{ 'return_type': 'void',
'names': ['glTexStorage2DEXT'],
'arguments': 'GLenum target, GLsizei levels, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, GLenum format, GLenum type, '
'const void* pixels', },
{ 'return_type': 'void',
'names': ['glUniform1f'],
'arguments': 'GLint location, GLfloat x', },
{ 'return_type': 'void',
'names': ['glUniform1fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform1i'],
'arguments': 'GLint location, GLint x', },
{ 'return_type': 'void',
'names': ['glUniform1iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniform2f'],
'arguments': 'GLint location, GLfloat x, GLfloat y', },
{ 'return_type': 'void',
'names': ['glUniform2fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform2i'],
'arguments': 'GLint location, GLint x, GLint y', },
{ 'return_type': 'void',
'names': ['glUniform2iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniform3f'],
'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z', },
{ 'return_type': 'void',
'names': ['glUniform3fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform3i'],
'arguments': 'GLint location, GLint x, GLint y, GLint z', },
{ 'return_type': 'void',
'names': ['glUniform3iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniform4f'],
'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w', },
{ 'return_type': 'void',
'names': ['glUniform4fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform4i'],
'arguments': 'GLint location, GLint x, GLint y, GLint z, GLint w', },
{ 'return_type': 'void',
'names': ['glUniform4iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniformMatrix2fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'names': ['glUniformMatrix3fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'names': ['glUniformMatrix4fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'GLboolean',
'names': ['glUnmapBuffer', 'glUnmapBufferOES'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glUseProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glValidateProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glVertexAttrib1f'],
'arguments': 'GLuint indx, GLfloat x', },
{ 'return_type': 'void',
'names': ['glVertexAttrib1fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib2f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y', },
{ 'return_type': 'void',
'names': ['glVertexAttrib2fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib3f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z', },
{ 'return_type': 'void',
'names': ['glVertexAttrib3fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib4f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w', },
{ 'return_type': 'void',
'names': ['glVertexAttrib4fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttribPointer'],
'arguments': 'GLuint indx, GLint size, GLenum type, GLboolean normalized, '
'GLsizei stride, const void* ptr', },
{ 'return_type': 'void',
'names': ['glViewport'],
'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glGenFencesNV'],
'arguments': 'GLsizei n, GLuint* fences', },
{ 'return_type': 'void',
'names': ['glDeleteFencesNV'],
'arguments': 'GLsizei n, const GLuint* fences', },
{ 'return_type': 'void',
'names': ['glSetFenceNV'],
'arguments': 'GLuint fence, GLenum condition', },
{ 'return_type': 'GLboolean',
'names': ['glTestFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glFinishFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'GLboolean',
'names': ['glIsFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glGetFenceivNV'],
'arguments': 'GLuint fence, GLenum pname, GLint* params', },
{ 'return_type': 'GLsync',
'names': ['glFenceSync'],
'arguments': 'GLenum condition, GLbitfield flags', },
{ 'return_type': 'void',
'names': ['glDeleteSync'],
'arguments': 'GLsync sync', },
{ 'return_type': 'void',
'names': ['glGetSynciv'],
'arguments':
'GLsync sync, GLenum pname, GLsizei bufSize, GLsizei* length,'
'GLint* values', },
{ 'return_type': 'void',
'names': ['glDrawArraysInstancedANGLE', 'glDrawArraysInstancedARB'],
'arguments': 'GLenum mode, GLint first, GLsizei count, GLsizei primcount', },
{ 'return_type': 'void',
'names': ['glDrawElementsInstancedANGLE', 'glDrawElementsInstancedARB'],
'arguments':
'GLenum mode, GLsizei count, GLenum type, const void* indices, '
'GLsizei primcount', },
{ 'return_type': 'void',
'names': ['glVertexAttribDivisorANGLE', 'glVertexAttribDivisorARB'],
'arguments':
'GLuint index, GLuint divisor', },
]
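# Each function entry above and below has a 'return_type', a 'names' list (the
# first name is used for the generated typedef and binding variable; later names
# are looked up as fallbacks), an 'arguments' string, and optionally
# 'logging_code' and 'other_extensions'.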
OSMESA_FUNCTIONS = [
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaCreateContext'],
'arguments': 'GLenum format, OSMesaContext sharelist', },
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaCreateContextExt'],
'arguments':
'GLenum format, GLint depthBits, GLint stencilBits, GLint accumBits, '
'OSMesaContext sharelist', },
{ 'return_type': 'void',
'names': ['OSMesaDestroyContext'],
'arguments': 'OSMesaContext ctx', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaMakeCurrent'],
'arguments': 'OSMesaContext ctx, void* buffer, GLenum type, GLsizei width, '
'GLsizei height', },
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['OSMesaPixelStore'],
'arguments': 'GLint pname, GLint value', },
{ 'return_type': 'void',
'names': ['OSMesaGetIntegerv'],
'arguments': 'GLint pname, GLint* value', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaGetDepthBuffer'],
'arguments':
'OSMesaContext c, GLint* width, GLint* height, GLint* bytesPerValue, '
'void** buffer', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaGetColorBuffer'],
'arguments': 'OSMesaContext c, GLint* width, GLint* height, GLint* format, '
'void** buffer', },
{ 'return_type': 'OSMESAproc',
'names': ['OSMesaGetProcAddress'],
'arguments': 'const char* funcName', },
{ 'return_type': 'void',
'names': ['OSMesaColorClamp'],
'arguments': 'GLboolean enable', },
]
EGL_FUNCTIONS = [
{ 'return_type': 'EGLint',
'names': ['eglGetError'],
'arguments': 'void', },
{ 'return_type': 'EGLDisplay',
'names': ['eglGetDisplay'],
'arguments': 'EGLNativeDisplayType display_id', },
{ 'return_type': 'EGLBoolean',
'names': ['eglInitialize'],
'arguments': 'EGLDisplay dpy, EGLint* major, EGLint* minor', },
{ 'return_type': 'EGLBoolean',
'names': ['eglTerminate'],
'arguments': 'EGLDisplay dpy', },
{ 'return_type': 'const char*',
'names': ['eglQueryString'],
'arguments': 'EGLDisplay dpy, EGLint name', },
{ 'return_type': 'EGLBoolean',
'names': ['eglGetConfigs'],
'arguments': 'EGLDisplay dpy, EGLConfig* configs, EGLint config_size, '
'EGLint* num_config', },
{ 'return_type': 'EGLBoolean',
'names': ['eglChooseConfig'],
'arguments': 'EGLDisplay dpy, const EGLint* attrib_list, EGLConfig* configs, '
'EGLint config_size, EGLint* num_config', },
{ 'return_type': 'EGLBoolean',
'names': ['eglGetConfigAttrib'],
'arguments':
'EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLImageKHR',
'names': ['eglCreateImageKHR'],
'arguments':
'EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, '
'const EGLint* attrib_list',
'other_extensions': ['EGL_KHR_image_base'] },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroyImageKHR'],
'arguments': 'EGLDisplay dpy, EGLImageKHR image',
'other_extensions': ['EGL_KHR_image_base'] },
{ 'return_type': 'EGLSurface',
'names': ['eglCreateWindowSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePbufferSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, const EGLint* attrib_list', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePixmapSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroySurface'],
'arguments': 'EGLDisplay dpy, EGLSurface surface', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQuerySurface'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglBindAPI'],
'arguments': 'EGLenum api', },
{ 'return_type': 'EGLenum',
'names': ['eglQueryAPI'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitClient'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglReleaseThread'],
'arguments': 'void', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePbufferFromClientBuffer'],
'arguments':
'EGLDisplay dpy, EGLenum buftype, void* buffer, EGLConfig config, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSurfaceAttrib'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglBindTexImage'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
{ 'return_type': 'EGLBoolean',
'names': ['eglReleaseTexImage'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSwapInterval'],
'arguments': 'EGLDisplay dpy, EGLint interval', },
{ 'return_type': 'EGLContext',
'names': ['eglCreateContext'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLContext share_context, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroyContext'],
'arguments': 'EGLDisplay dpy, EGLContext ctx', },
{ 'return_type': 'EGLBoolean',
'names': ['eglMakeCurrent'],
'arguments':
'EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx', },
{ 'return_type': 'EGLContext',
'names': ['eglGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'EGLSurface',
'names': ['eglGetCurrentSurface'],
'arguments': 'EGLint readdraw', },
{ 'return_type': 'EGLDisplay',
'names': ['eglGetCurrentDisplay'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQueryContext'],
'arguments':
'EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitGL'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitNative'],
'arguments': 'EGLint engine', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSwapBuffers'],
'arguments': 'EGLDisplay dpy, EGLSurface surface', },
{ 'return_type': 'EGLBoolean',
'names': ['eglCopyBuffers'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target', },
{ 'return_type': '__eglMustCastToProperFunctionPointerType',
'names': ['eglGetProcAddress'],
'arguments': 'const char* procname', },
{ 'return_type': 'EGLBoolean',
'names': ['eglPostSubBufferNV'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, '
'EGLint x, EGLint y, EGLint width, EGLint height', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQuerySurfacePointerANGLE'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, void** value', },
]
WGL_FUNCTIONS = [
{ 'return_type': 'HGLRC',
'names': ['wglCreateContext'],
'arguments': 'HDC hdc', },
{ 'return_type': 'HGLRC',
'names': ['wglCreateLayerContext'],
'arguments': 'HDC hdc, int iLayerPlane', },
{ 'return_type': 'BOOL',
'names': ['wglCopyContext'],
'arguments': 'HGLRC hglrcSrc, HGLRC hglrcDst, UINT mask', },
{ 'return_type': 'BOOL',
'names': ['wglDeleteContext'],
'arguments': 'HGLRC hglrc', },
{ 'return_type': 'HGLRC',
'names': ['wglGetCurrentContext'],
'arguments': '', },
{ 'return_type': 'HDC',
'names': ['wglGetCurrentDC'],
'arguments': '', },
{ 'return_type': 'BOOL',
'names': ['wglMakeCurrent'],
'arguments': 'HDC hdc, HGLRC hglrc', },
{ 'return_type': 'BOOL',
'names': ['wglShareLists'],
'arguments': 'HGLRC hglrc1, HGLRC hglrc2', },
{ 'return_type': 'BOOL',
'names': ['wglSwapIntervalEXT'],
'arguments': 'int interval', },
{ 'return_type': 'BOOL',
'names': ['wglSwapLayerBuffers'],
'arguments': 'HDC hdc, UINT fuPlanes', },
{ 'return_type': 'const char*',
'names': ['wglGetExtensionsStringARB'],
'arguments': 'HDC hDC', },
{ 'return_type': 'const char*',
'names': ['wglGetExtensionsStringEXT'],
'arguments': '', },
{ 'return_type': 'BOOL',
'names': ['wglChoosePixelFormatARB'],
'arguments':
'HDC dc, const int* int_attrib_list, const float* float_attrib_list, '
'UINT max_formats, int* formats, UINT* num_formats', },
{ 'return_type': 'HPBUFFERARB',
'names': ['wglCreatePbufferARB'],
'arguments': 'HDC hDC, int iPixelFormat, int iWidth, int iHeight, '
'const int* piAttribList', },
{ 'return_type': 'HDC',
'names': ['wglGetPbufferDCARB'],
'arguments': 'HPBUFFERARB hPbuffer', },
{ 'return_type': 'int',
'names': ['wglReleasePbufferDCARB'],
'arguments': 'HPBUFFERARB hPbuffer, HDC hDC', },
{ 'return_type': 'BOOL',
'names': ['wglDestroyPbufferARB'],
'arguments': 'HPBUFFERARB hPbuffer', },
{ 'return_type': 'BOOL',
'names': ['wglQueryPbufferARB'],
'arguments': 'HPBUFFERARB hPbuffer, int iAttribute, int* piValue', },
]
GLX_FUNCTIONS = [
{ 'return_type': 'XVisualInfo*',
'names': ['glXChooseVisual'],
'arguments': 'Display* dpy, int screen, int* attribList', },
{ 'return_type': 'void',
'names': ['glXCopySubBufferMESA'],
'arguments': 'Display* dpy, GLXDrawable drawable, '
'int x, int y, int width, int height', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateContext'],
'arguments':
'Display* dpy, XVisualInfo* vis, GLXContext shareList, int direct', },
{ 'return_type': 'void',
'names': ['glXBindTexImageEXT'],
'arguments':
'Display* dpy, GLXDrawable drawable, int buffer, int* attribList', },
{ 'return_type': 'void',
'names': ['glXReleaseTexImageEXT'],
'arguments': 'Display* dpy, GLXDrawable drawable, int buffer', },
{ 'return_type': 'void',
'names': ['glXDestroyContext'],
'arguments': 'Display* dpy, GLXContext ctx', },
{ 'return_type': 'int',
'names': ['glXMakeCurrent'],
'arguments': 'Display* dpy, GLXDrawable drawable, GLXContext ctx', },
{ 'return_type': 'void',
'names': ['glXCopyContext'],
'arguments':
'Display* dpy, GLXContext src, GLXContext dst, unsigned long mask', },
{ 'return_type': 'void',
'names': ['glXSwapBuffers'],
'arguments': 'Display* dpy, GLXDrawable drawable', },
{ 'return_type': 'GLXPixmap',
'names': ['glXCreateGLXPixmap'],
'arguments': 'Display* dpy, XVisualInfo* visual, Pixmap pixmap', },
{ 'return_type': 'void',
'names': ['glXDestroyGLXPixmap'],
'arguments': 'Display* dpy, GLXPixmap pixmap', },
{ 'return_type': 'int',
'names': ['glXQueryExtension'],
'arguments': 'Display* dpy, int* errorb, int* event', },
{ 'return_type': 'int',
'names': ['glXQueryVersion'],
'arguments': 'Display* dpy, int* maj, int* min', },
{ 'return_type': 'int',
'names': ['glXIsDirect'],
'arguments': 'Display* dpy, GLXContext ctx', },
{ 'return_type': 'int',
'names': ['glXGetConfig'],
'arguments': 'Display* dpy, XVisualInfo* visual, int attrib, int* value', },
{ 'return_type': 'GLXContext',
'names': ['glXGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'GLXDrawable',
'names': ['glXGetCurrentDrawable'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glXWaitGL'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glXWaitX'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glXUseXFont'],
'arguments': 'Font font, int first, int count, int list', },
{ 'return_type': 'const char*',
'names': ['glXQueryExtensionsString'],
'arguments': 'Display* dpy, int screen', },
{ 'return_type': 'const char*',
'names': ['glXQueryServerString'],
'arguments': 'Display* dpy, int screen, int name', },
{ 'return_type': 'const char*',
'names': ['glXGetClientString'],
'arguments': 'Display* dpy, int name', },
{ 'return_type': 'Display*',
'names': ['glXGetCurrentDisplay'],
'arguments': 'void', },
{ 'return_type': 'GLXFBConfig*',
'names': ['glXChooseFBConfig'],
'arguments':
'Display* dpy, int screen, const int* attribList, int* nitems', },
{ 'return_type': 'int',
'names': ['glXGetFBConfigAttrib'],
'arguments': 'Display* dpy, GLXFBConfig config, int attribute, int* value', },
{ 'return_type': 'GLXFBConfig*',
'names': ['glXGetFBConfigs'],
'arguments': 'Display* dpy, int screen, int* nelements', },
{ 'return_type': 'XVisualInfo*',
'names': ['glXGetVisualFromFBConfig'],
'arguments': 'Display* dpy, GLXFBConfig config', },
{ 'return_type': 'GLXWindow',
'names': ['glXCreateWindow'],
'arguments':
'Display* dpy, GLXFBConfig config, Window win, const int* attribList', },
{ 'return_type': 'void',
'names': ['glXDestroyWindow'],
'arguments': 'Display* dpy, GLXWindow window', },
{ 'return_type': 'GLXPixmap',
'names': ['glXCreatePixmap'],
'arguments': 'Display* dpy, GLXFBConfig config, '
'Pixmap pixmap, const int* attribList', },
{ 'return_type': 'void',
'names': ['glXDestroyPixmap'],
'arguments': 'Display* dpy, GLXPixmap pixmap', },
{ 'return_type': 'GLXPbuffer',
'names': ['glXCreatePbuffer'],
'arguments': 'Display* dpy, GLXFBConfig config, const int* attribList', },
{ 'return_type': 'void',
'names': ['glXDestroyPbuffer'],
'arguments': 'Display* dpy, GLXPbuffer pbuf', },
{ 'return_type': 'void',
'names': ['glXQueryDrawable'],
'arguments':
'Display* dpy, GLXDrawable draw, int attribute, unsigned int* value', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateNewContext'],
'arguments': 'Display* dpy, GLXFBConfig config, int renderType, '
'GLXContext shareList, int direct', },
{ 'return_type': 'int',
'names': ['glXMakeContextCurrent'],
'arguments':
'Display* dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx', },
{ 'return_type': 'GLXDrawable',
'names': ['glXGetCurrentReadDrawable'],
'arguments': 'void', },
{ 'return_type': 'int',
'names': ['glXQueryContext'],
'arguments': 'Display* dpy, GLXContext ctx, int attribute, int* value', },
{ 'return_type': 'void',
'names': ['glXSelectEvent'],
'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long mask', },
{ 'return_type': 'void',
'names': ['glXGetSelectedEvent'],
'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long* mask', },
{ 'return_type': 'void',
'names': ['glXSwapIntervalMESA'],
'arguments': 'unsigned int interval', },
{ 'return_type': 'void',
'names': ['glXSwapIntervalEXT'],
'arguments': 'Display* dpy, GLXDrawable drawable, int interval', },
{ 'return_type': 'GLXFBConfig',
'names': ['glXGetFBConfigFromVisualSGIX'],
'arguments': 'Display* dpy, XVisualInfo* visualInfo', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateContextAttribsARB'],
'arguments':
'Display* dpy, GLXFBConfig config, GLXContext share_context, int direct, '
'const int* attrib_list', },
]
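# Each FUNCTION_SETS entry is [function list, set name, extension header paths,
# extra extension names], matching how main() unpacks it below.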
FUNCTION_SETS = [
[GL_FUNCTIONS, 'gl', ['../../third_party/mesa/MesaLib/include/GL/glext.h',
'../../third_party/khronos/GLES2/gl2ext.h'], []],
[OSMESA_FUNCTIONS, 'osmesa', [], []],
[EGL_FUNCTIONS, 'egl', ['../../third_party/khronos/EGL/eglext.h'],
[
'EGL_ANGLE_d3d_share_handle_client_buffer',
'EGL_ANGLE_surface_d3d_texture_2d_share_handle',
],
],
[WGL_FUNCTIONS, 'wgl', [
'../../third_party/mesa/MesaLib/include/GL/wglext.h'], []],
[GLX_FUNCTIONS, 'glx', [
'../../third_party/mesa/MesaLib/include/GL/glx.h',
'../../third_party/mesa/MesaLib/include/GL/glxext.h'], []],
]
def GenerateHeader(file, functions, set_name, used_extension_functions):
"""Generates gl_binding_autogen_x.h"""
# Write file header.
file.write(
"""// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#ifndef UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
#define UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
namespace gfx {
class GLContext;
void InitializeGLBindings%(name)s();
void InitializeGLExtensionBindings%(name)s(GLContext* context);
void InitializeDebugGLBindings%(name)s();
void ClearGLBindings%(name)s();
""" % {'name': set_name.upper()})
# Write typedefs for function pointer types. Always use the GL name for the
# typedef.
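  # For example, the glClear entry becomes:
  #   typedef void (GL_BINDING_CALL *glClearProc)(GLbitfield mask);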
file.write('\n')
for func in functions:
file.write('typedef %s (GL_BINDING_CALL *%sProc)(%s);\n' %
(func['return_type'], func['names'][0], func['arguments']))
# Write declarations for booleans indicating which extensions are available.
file.write('\n')
for extension, ext_functions in used_extension_functions:
file.write('GL_EXPORT extern bool g_%s;\n' % extension)
# Write declarations for function pointers. Always use the GL name for the
# declaration.
file.write('\n')
for func in functions:
file.write('GL_EXPORT extern %sProc g_%s;\n' %
(func['names'][0], func['names'][0]))
file.write('\n')
file.write( '} // namespace gfx\n')
# Write macros to invoke function pointers. Always use the GL name for the
# macro.
file.write('\n')
for func in functions:
file.write('#define %s ::gfx::g_%s\n' %
(func['names'][0], func['names'][0]))
file.write('\n')
file.write('#endif // UI_GFX_GL_GL_BINDINGS_AUTOGEN_%s_H_\n' %
set_name.upper())
def GenerateSource(file, functions, set_name, used_extension_functions):
"""Generates gl_binding_autogen_x.cc"""
# Write file header.
file.write(
"""// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#include <string>
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_implementation.h"
using gpu::gles2::GLES2Util;
namespace gfx {
""")
# Write definitions for booleans indicating which extensions are available.
for extension, ext_functions in used_extension_functions:
file.write('bool g_%s;\n' % extension)
# Write definitions of function pointers.
file.write('\n')
file.write('static bool g_debugBindingsInitialized;\n')
file.write('static void UpdateDebugGLExtensionBindings();\n')
file.write('\n')
for func in functions:
file.write('%sProc g_%s;\n' % (func['names'][0], func['names'][0]))
file.write('\n')
for func in functions:
file.write('static %sProc g_debug_%s;\n' %
(func['names'][0], func['names'][0]))
# Write function to initialize the core function pointers. The code assumes
# any non-NULL pointer returned by GetGLCoreProcAddress() is valid, although
# it may be overwritten by an extension function pointer later.
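  # For example (illustrative), for the ['glGenBuffersARB', 'glGenBuffers'] entry
  # the generated C++ is roughly:
  #   g_glGenBuffersARB = reinterpret_cast<glGenBuffersARBProc>(
  #       GetGLCoreProcAddress("glGenBuffersARB"));
  #   if (!g_glGenBuffersARB)
  #     g_glGenBuffersARB = reinterpret_cast<glGenBuffersARBProc>(
  #         GetGLCoreProcAddress("glGenBuffers"));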
file.write('\n')
file.write('void InitializeGLBindings%s() {\n' % set_name.upper())
for func in functions:
first_name = func['names'][0]
for i, name in enumerate(func['names']):
if i:
file.write(' if (!g_%s)\n ' % first_name)
file.write(
' g_%s = reinterpret_cast<%sProc>(GetGLCoreProcAddress("%s"));\n' %
(first_name, first_name, name))
file.write('}\n')
file.write('\n')
# Write function to initialize the extension function pointers. This function
# uses a current context to query which extensions are actually supported.
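  # For example (illustrative), for GL_NV_fence this emits roughly:
  #   g_GL_NV_fence = context->HasExtension("GL_NV_fence");
  #   if (g_GL_NV_fence) {
  #     g_glGenFencesNV =
  #         reinterpret_cast<glGenFencesNVProc>(GetGLProcAddress("glGenFencesNV"));
  #     ...
  #   }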
file.write('void InitializeGLExtensionBindings%s(GLContext* context) {\n' %
set_name.upper())
file.write(' DCHECK(context && context->IsCurrent(NULL));\n')
for extension, ext_functions in used_extension_functions:
file.write(' g_%s = context->HasExtension("%s");\n' %
(extension, extension))
file.write(' if (g_%s) {\n' %
(extension))
queried_entry_points = set()
for entry_point_name, function_name in ext_functions:
# Replace the pointer unconditionally unless this extension has several
# alternatives for the same entry point (e.g.,
# GL_ARB_blend_func_extended).
if entry_point_name in queried_entry_points:
file.write(' if (!g_%s)\n ' % entry_point_name)
file.write(
' g_%s = reinterpret_cast<%sProc>(GetGLProcAddress("%s"));\n' %
(entry_point_name, entry_point_name, function_name))
queried_entry_points.add(entry_point_name)
file.write(' }\n')
file.write(' if (g_debugBindingsInitialized)\n')
file.write(' UpdateDebugGLExtensionBindings();\n')
file.write('}\n')
file.write('\n')
# Write logging wrappers for each function.
file.write('extern "C" {\n')
for func in functions:
names = func['names']
return_type = func['return_type']
arguments = func['arguments']
file.write('\n')
file.write('static %s GL_BINDING_CALL Debug_%s(%s) {\n' %
(return_type, names[0], arguments))
argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', arguments)
argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', argument_names)
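    # argument_names now holds only the parameter names, e.g. (illustrative)
    # 'GLenum target, GLenum pname, GLint* params' becomes 'target, pname, params'.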
log_argument_names = re.sub(
r'const char\* ([a-zA-Z0-9_]+)', r'CONSTCHAR_\1', arguments)
log_argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\* ([a-zA-Z0-9_]+)',
r'CONSTVOID_\2', log_argument_names)
log_argument_names = re.sub(
r'(?<!E)GLenum ([a-zA-Z0-9_]+)', r'GLenum_\1', log_argument_names)
log_argument_names = re.sub(
r'(?<!E)GLboolean ([a-zA-Z0-9_]+)', r'GLboolean_\1', log_argument_names)
log_argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2',
log_argument_names)
log_argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2',
log_argument_names)
log_argument_names = re.sub(
r'CONSTVOID_([a-zA-Z0-9_]+)',
        r'static_cast<const void*>(\1)', log_argument_names)
log_argument_names = re.sub(
        r'CONSTCHAR_([a-zA-Z0-9_]+)', r'\1', log_argument_names)
log_argument_names = re.sub(
r'GLenum_([a-zA-Z0-9_]+)', r'GLES2Util::GetStringEnum(\1)',
log_argument_names)
log_argument_names = re.sub(
r'GLboolean_([a-zA-Z0-9_]+)', r'GLES2Util::GetStringBool(\1)',
log_argument_names)
log_argument_names = log_argument_names.replace(',', ' << ", " <<')
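    # log_argument_names is now a C++ stream expression, e.g. (illustrative)
    # 'GLenum target, GLint* params' becomes
    # 'GLES2Util::GetStringEnum(target) << ", " << static_cast<const void*>(params)'.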
if argument_names == 'void' or argument_names == '':
argument_names = ''
log_argument_names = ''
else:
log_argument_names = " << " + log_argument_names
function_name = names[0]
if return_type == 'void':
file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' %
(function_name, log_argument_names))
file.write(' g_debug_%s(%s);\n' %
(function_name, argument_names))
if 'logging_code' in func:
file.write("%s\n" % func['logging_code'])
else:
file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' %
(function_name, log_argument_names))
file.write(' %s result = g_debug_%s(%s);\n' %
(return_type, function_name, argument_names))
if 'logging_code' in func:
file.write("%s\n" % func['logging_code'])
else:
file.write(' GL_SERVICE_LOG("GL_RESULT: " << result);\n');
file.write(' return result;\n')
file.write('}\n')
file.write('} // extern "C"\n')
# Write function to initialize the debug function pointers.
file.write('\n')
file.write('void InitializeDebugGLBindings%s() {\n' % set_name.upper())
for func in functions:
first_name = func['names'][0]
file.write(' if (!g_debug_%s) {\n' % first_name)
file.write(' g_debug_%s = g_%s;\n' % (first_name, first_name))
file.write(' g_%s = Debug_%s;\n' % (first_name, first_name))
file.write(' }\n')
file.write(' g_debugBindingsInitialized = true;\n')
file.write('}\n')
# Write function to update the debug function pointers to extension functions
# after the extensions have been initialized.
file.write('\n')
file.write('static void UpdateDebugGLExtensionBindings() {\n')
for extension, ext_functions in used_extension_functions:
for name, _ in ext_functions:
file.write(' if (g_debug_%s != g_%s &&\n' % (name, name))
file.write(' g_%s != Debug_%s) {\n' % (name, name))
file.write(' g_debug_%s = g_%s;\n' % (name, name))
file.write(' g_%s = Debug_%s;\n' % (name, name))
file.write(' }\n')
file.write('}\n')
# Write function to clear all function pointers.
file.write('\n')
file.write('void ClearGLBindings%s() {\n' % set_name.upper())
# Clear the availability of GL extensions.
for extension, ext_functions in used_extension_functions:
file.write(' g_%s = false;\n' % extension)
# Clear GL bindings.
file.write('\n')
for func in functions:
file.write(' g_%s = NULL;\n' % func['names'][0])
# Clear debug GL bindings.
file.write('\n')
for func in functions:
file.write(' g_debug_%s = NULL;\n' % func['names'][0])
file.write(' g_debugBindingsInitialized = false;\n')
file.write('}\n')
file.write('\n')
file.write('} // namespace gfx\n')
def GenerateMockSource(file, functions):
"""Generates functions that invoke a mock GLInterface"""
file.write(
"""// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#include <string.h>
#include "ui/gl/gl_interface.h"
namespace gfx {
""")
  # Write functions that trampoline into the GLInterface.
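  # For example (illustrative), the glClear entry becomes:
  #   void GL_BINDING_CALL Mock_glClear(GLbitfield mask) {
  #     GLInterface::GetGLInterface()->Clear(mask);
  #   }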
for func in functions:
file.write('\n')
file.write('%s GL_BINDING_CALL Mock_%s(%s) {\n' %
(func['return_type'], func['names'][0], func['arguments']))
argument_names = re.sub(r'(const )?[a-zA-Z0-9]+\** ([a-zA-Z0-9]+)', r'\2',
func['arguments'])
if argument_names == 'void':
argument_names = ''
function_name = func['names'][0][2:]
if func['return_type'] == 'void':
file.write(' GLInterface::GetGLInterface()->%s(%s);\n' %
(function_name, argument_names))
else:
file.write(' return GLInterface::GetGLInterface()->%s(%s);\n' %
(function_name, argument_names))
file.write('}\n')
# Write an 'invalid' function to catch code calling through uninitialized
# function pointers or trying to interpret the return value of
# GLProcAddress().
file.write('\n')
file.write('static void MockInvalidFunction() {\n')
file.write(' NOTREACHED();\n')
file.write('}\n')
# Write a function to lookup a mock GL function based on its name.
file.write('\n')
file.write('void* GL_BINDING_CALL GetMockGLProcAddress(const char* name) {\n')
for func in functions:
first_name = func['names'][0]
file.write(' if (strcmp(name, "%s") == 0)\n' % first_name)
file.write(' return reinterpret_cast<void*>(Mock_%s);\n' % first_name)
# Always return a non-NULL pointer like some EGL implementations do.
file.write(' return reinterpret_cast<void*>(&MockInvalidFunction);\n')
  file.write('}\n')
file.write('\n')
file.write('} // namespace gfx\n')
def ParseExtensionFunctionsFromHeader(header_file):
"""Parse a C extension header file and return a map from extension names to
a list of functions.
Args:
header_file: Line-iterable C header file.
Returns:
Map of extension name => functions.
"""
extension_start = re.compile(r'#define ([A-Z]+_[A-Z]+_[a-zA-Z]\w+) 1')
extension_function = re.compile(r'.+\s+([a-z]+\w+)\s*\(.+\);')
typedef = re.compile(r'typedef .*')
macro_start = re.compile(r'^#(if|ifdef|ifndef).*')
macro_end = re.compile(r'^#endif.*')
macro_depth = 0
current_extension = None
current_extension_depth = 0
extensions = collections.defaultdict(lambda: [])
for line in header_file:
if macro_start.match(line):
macro_depth += 1
elif macro_end.match(line):
macro_depth -= 1
if macro_depth < current_extension_depth:
current_extension = None
match = extension_start.match(line)
if match:
current_extension = match.group(1)
current_extension_depth = macro_depth
assert current_extension not in extensions, \
"Duplicate extension: " + current_extension
match = extension_function.match(line)
if match and current_extension and not typedef.match(line):
extensions[current_extension].append(match.group(1))
return extensions
def GetExtensionFunctions(extension_headers):
"""Parse extension functions from a list of header files.
Args:
extension_headers: List of header file names.
Returns:
Map of extension name => list of functions.
"""
extensions = {}
for header in extension_headers:
extensions.update(ParseExtensionFunctionsFromHeader(open(header)))
return extensions
def GetFunctionToExtensionMap(extensions):
"""Construct map from a function names to extensions which define the
function.
Args:
extensions: Map of extension name => functions.
Returns:
Map of function name => extension name.
"""
function_to_extensions = {}
for extension, functions in extensions.items():
for function in functions:
if not function in function_to_extensions:
function_to_extensions[function] = []
function_to_extensions[function].append(extension)
return function_to_extensions
def LooksLikeExtensionFunction(function):
"""Heuristic to see if a function name is consistent with extension function
naming."""
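  # For example, 'glGenFencesNV' ends in the vendor suffix 'NV' and is treated as
  # an extension function, while 'wglGetCurrentDC' ends in the whitelisted 'DC'
  # and is not.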
vendor = re.match(r'\w+?([A-Z][A-Z]+)$', function)
return vendor is not None and not vendor.group(1) in ['GL', 'API', 'DC']
def GetUsedExtensionFunctions(functions, extension_headers, extra_extensions):
"""Determine which functions belong to extensions.
Args:
functions: List of (return type, function names, arguments).
    extension_headers: List of header file names.
    extra_extensions: List of extra extension names to track even if they do not
      contribute any functions.
Returns:
List of (extension name, [function name alternatives]) sorted with least
preferred extensions first.
"""
# Parse known extensions.
extensions = GetExtensionFunctions(extension_headers)
functions_to_extensions = GetFunctionToExtensionMap(extensions)
# Collect all used extension functions.
used_extension_functions = collections.defaultdict(lambda: [])
for func in functions:
for name in func['names']:
# Make sure we know about all extension functions.
if (LooksLikeExtensionFunction(name) and
not name in functions_to_extensions):
raise RuntimeError('%s looks like an extension function but does not '
'belong to any of the known extensions.' % name)
if name in functions_to_extensions:
extensions = functions_to_extensions[name][:]
if 'other_extensions' in func:
extensions.extend(func['other_extensions'])
for extension in extensions:
used_extension_functions[extension].append((func['names'][0], name))
# Add extensions that do not have any functions.
used_extension_functions.update(dict(
[(e, []) for e in extra_extensions if e not in used_extension_functions]))
def ExtensionSortKey(name):
# Prefer ratified extensions and EXTs.
preferences = ['_ARB_', '_OES_', '_EXT_', '']
for i, category in enumerate(preferences):
if category in name:
return -i
used_extension_functions = sorted(used_extension_functions.items(),
key = lambda item: ExtensionSortKey(item[0]))
return used_extension_functions
def main(argv):
"""This is the main function."""
if len(argv) >= 1:
dir = argv[0]
else:
dir = '.'
for [functions, set_name, extension_headers, extensions] in FUNCTION_SETS:
used_extension_functions = GetUsedExtensionFunctions(
functions, extension_headers, extensions)
header_file = open(
os.path.join(dir, 'gl_bindings_autogen_%s.h' % set_name), 'wb')
GenerateHeader(header_file, functions, set_name, used_extension_functions)
header_file.close()
source_file = open(
os.path.join(dir, 'gl_bindings_autogen_%s.cc' % set_name), 'wb')
GenerateSource(source_file, functions, set_name, used_extension_functions)
source_file.close()
source_file = open(os.path.join(dir, 'gl_bindings_autogen_mock.cc'), 'wb')
GenerateMockSource(source_file, GL_FUNCTIONS)
source_file.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| keishi/chromium | ui/gl/generate_bindings.py | Python | bsd-3-clause | 57,794 | 0.014759 |
import sys
import numpy as np
def check_symmetric(a, tol=1e-8):
return np.allclose(a, a.T, atol=tol)
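# Each stdin line is expected to hold a square matrix in MATLAB-style syntax
# that numpy.matrix can parse, e.g. "1 2; 2 1".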
for line in sys.stdin:
a = np.matrix(line)
f = check_symmetric(a)
if not f:
print("Not symmetric")
else:
print("Symmetric")
| DT9/programming-problems | 2017/microsoft17/1.py | Python | apache-2.0 | 262 | 0.007634 |
from diofant import (Derivative, Function, Integral, bell, besselj, cos, exp,
legendre, oo, symbols)
from diofant.printing.conventions import requires_partial, split_super_sub
__all__ = ()
def test_super_sub():
assert split_super_sub('beta_13_2') == ('beta', [], ['13', '2'])
assert split_super_sub('beta_132_20') == ('beta', [], ['132', '20'])
assert split_super_sub('beta_13') == ('beta', [], ['13'])
assert split_super_sub('x_a_b') == ('x', [], ['a', 'b'])
assert split_super_sub('x_1_2_3') == ('x', [], ['1', '2', '3'])
assert split_super_sub('x_a_b1') == ('x', [], ['a', 'b1'])
assert split_super_sub('x_a_1') == ('x', [], ['a', '1'])
assert split_super_sub('x_1_a') == ('x', [], ['1', 'a'])
assert split_super_sub('x_1^aa') == ('x', ['aa'], ['1'])
assert split_super_sub('x_1__aa') == ('x', ['aa'], ['1'])
assert split_super_sub('x_11^a') == ('x', ['a'], ['11'])
assert split_super_sub('x_11__a') == ('x', ['a'], ['11'])
assert split_super_sub('x_a_b_c_d') == ('x', [], ['a', 'b', 'c', 'd'])
assert split_super_sub('x_a_b^c^d') == ('x', ['c', 'd'], ['a', 'b'])
assert split_super_sub('x_a_b__c__d') == ('x', ['c', 'd'], ['a', 'b'])
assert split_super_sub('x_a^b_c^d') == ('x', ['b', 'd'], ['a', 'c'])
assert split_super_sub('x_a__b_c__d') == ('x', ['b', 'd'], ['a', 'c'])
assert split_super_sub('x^a^b_c_d') == ('x', ['a', 'b'], ['c', 'd'])
assert split_super_sub('x__a__b_c_d') == ('x', ['a', 'b'], ['c', 'd'])
assert split_super_sub('x^a^b^c^d') == ('x', ['a', 'b', 'c', 'd'], [])
assert split_super_sub('x__a__b__c__d') == ('x', ['a', 'b', 'c', 'd'], [])
assert split_super_sub('alpha_11') == ('alpha', [], ['11'])
assert split_super_sub('alpha_11_11') == ('alpha', [], ['11', '11'])
assert split_super_sub('') == ('', [], [])
def test_requires_partial():
x, y, z, t, nu = symbols('x y z t nu')
n = symbols('n', integer=True)
f = x * y
assert requires_partial(Derivative(f, x)) is True
assert requires_partial(Derivative(f, y)) is True
# integrating out one of the variables
assert requires_partial(Derivative(Integral(exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False
# bessel function with smooth parameter
f = besselj(nu, x)
assert requires_partial(Derivative(f, x)) is True
assert requires_partial(Derivative(f, nu)) is True
# bessel function with integer parameter
f = besselj(n, x)
assert requires_partial(Derivative(f, x)) is False
# this is not really valid (differentiating with respect to an integer)
# but there's no reason to use the partial derivative symbol there. make
# sure we don't throw an exception here, though
assert requires_partial(Derivative(f, n)) is False
# bell polynomial
f = bell(n, x)
assert requires_partial(Derivative(f, x)) is False
# again, invalid
assert requires_partial(Derivative(f, n)) is False
# legendre polynomial
f = legendre(0, x)
assert requires_partial(Derivative(f, x)) is False
f = legendre(n, x)
assert requires_partial(Derivative(f, x)) is False
# again, invalid
assert requires_partial(Derivative(f, n)) is False
f = x ** n
assert requires_partial(Derivative(f, x)) is False
assert requires_partial(Derivative(Integral((x*y) ** n * exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False
# parametric equation
f = (exp(t), cos(t))
g = sum(f)
assert requires_partial(Derivative(g, t)) is False
# function of unspecified variables
f = symbols('f', cls=Function)
assert requires_partial(Derivative(f, x)) is False
assert requires_partial(Derivative(f, x, y)) is True
| skirpichev/omg | diofant/tests/printing/test_conventions.py | Python | bsd-3-clause | 3,727 | 0.000537 |
#---------------------------------------------------------------------------
# Introduction to Computer Programming (IPC)
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Gabriel de Queiroz Sousa 1715310044
# Lucas Gabriel Silveira Duarte 1715310053
# Matheus de Oliveira Marques 1515310514
# Rodrigo Duarte de Souza 1115140049
#
# Leet is a way of writing the Latin alphabet with other symbols in place of the
# letters, numbers for example. The word leet itself admits many variations,
# such as l33t or 1337. The use of leet reflects a subculture tied to the world
# of computer games and the internet, often used to confuse beginners and to
# assert membership in a group. Research the main ways the letters are
# translated, then write a program that asks for a text and converts it to
# leet speak spelling.
#---------------------------------------------------------------------------
leet = (('a', '4'), ('l', '1'), ('e', '3'), ('s', '5'), ('g', '6'), ('r', '12'), ('t', '7'), ('q', '9'))
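# With this mapping, for example, "leet" becomes "1337".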
sring = input("Enter a word = ")
nova = sring
print("Inicialmente: ", sring)
for antigo, novo in leet:
nova = nova.replace(antigo, novo)
print("Finalmente = ", nova) | jucimarjr/IPC_2017-1 | lista06/lista06_lista04_questao14.py | Python | apache-2.0 | 1,317 | 0.006897 |
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, cast, List, Optional
from PyQt5.QtCore import pyqtProperty, pyqtSignal, QObject
from UM.Application import Application
from UM.Decorators import override
from UM.FlameProfiler import pyqtSlot
from UM.Logger import Logger
from UM.Settings.ContainerStack import ContainerStack, InvalidContainerStackError
from UM.Settings.InstanceContainer import InstanceContainer
from UM.Settings.DefinitionContainer import DefinitionContainer
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Settings.Interfaces import ContainerInterface, DefinitionContainerInterface
from cura.Settings import cura_empty_instance_containers
from . import Exceptions
## Base class for Cura related stacks that want to enforce certain containers are available.
#
# This class makes sure that the stack has the following containers set: user changes, quality
# changes, quality, material, variant, definition changes and finally definition. Initially,
# these will be equal to the empty instance container.
#
# The container types are determined based on the following criteria:
# - user: An InstanceContainer with the metadata entry "type" set to "user".
# - quality changes: An InstanceContainer with the metadata entry "type" set to "quality_changes".
# - quality: An InstanceContainer with the metadata entry "type" set to "quality".
# - material: An InstanceContainer with the metadata entry "type" set to "material".
# - variant: An InstanceContainer with the metadata entry "type" set to "variant".
# - definition changes: An InstanceContainer with the metadata entry "type" set to "definition_changes".
# - definition: A DefinitionContainer.
#
# Internally, this class ensures the mentioned containers are always there and kept in a specific order.
#  This also means that operations on the stack that modify the container ordering are prohibited and
#  will raise an exception.
class CuraContainerStack(ContainerStack):
def __init__(self, container_id: str) -> None:
super().__init__(container_id)
self._empty_instance_container = cura_empty_instance_containers.empty_container #type: InstanceContainer
self._empty_quality_changes = cura_empty_instance_containers.empty_quality_changes_container #type: InstanceContainer
self._empty_quality = cura_empty_instance_containers.empty_quality_container #type: InstanceContainer
self._empty_material = cura_empty_instance_containers.empty_material_container #type: InstanceContainer
self._empty_variant = cura_empty_instance_containers.empty_variant_container #type: InstanceContainer
self._containers = [self._empty_instance_container for i in range(len(_ContainerIndexes.IndexTypeMap))] #type: List[ContainerInterface]
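        # Every slot starts out as the shared empty container; the typed empty
        # containers below replace the slots that have dedicated empties.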
self._containers[_ContainerIndexes.QualityChanges] = self._empty_quality_changes
self._containers[_ContainerIndexes.Quality] = self._empty_quality
self._containers[_ContainerIndexes.Material] = self._empty_material
self._containers[_ContainerIndexes.Variant] = self._empty_variant
self.containersChanged.connect(self._onContainersChanged)
import cura.CuraApplication #Here to prevent circular imports.
self.setMetaDataEntry("setting_version", cura.CuraApplication.CuraApplication.SettingVersion)
# This is emitted whenever the containersChanged signal from the ContainerStack base class is emitted.
pyqtContainersChanged = pyqtSignal()
## Set the user changes container.
#
# \param new_user_changes The new user changes container. It is expected to have a "type" metadata entry with the value "user".
def setUserChanges(self, new_user_changes: InstanceContainer) -> None:
self.replaceContainer(_ContainerIndexes.UserChanges, new_user_changes)
## Get the user changes container.
#
# \return The user changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setUserChanges, notify = pyqtContainersChanged)
def userChanges(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.UserChanges])
## Set the quality changes container.
#
# \param new_quality_changes The new quality changes container. It is expected to have a "type" metadata entry with the value "quality_changes".
def setQualityChanges(self, new_quality_changes: InstanceContainer, postpone_emit = False) -> None:
self.replaceContainer(_ContainerIndexes.QualityChanges, new_quality_changes, postpone_emit = postpone_emit)
## Get the quality changes container.
#
# \return The quality changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setQualityChanges, notify = pyqtContainersChanged)
def qualityChanges(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.QualityChanges])
## Set the quality container.
#
# \param new_quality The new quality container. It is expected to have a "type" metadata entry with the value "quality".
def setQuality(self, new_quality: InstanceContainer, postpone_emit: bool = False) -> None:
self.replaceContainer(_ContainerIndexes.Quality, new_quality, postpone_emit = postpone_emit)
## Get the quality container.
#
# \return The quality container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setQuality, notify = pyqtContainersChanged)
def quality(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.Quality])
## Set the material container.
#
# \param new_material The new material container. It is expected to have a "type" metadata entry with the value "material".
def setMaterial(self, new_material: InstanceContainer, postpone_emit: bool = False) -> None:
self.replaceContainer(_ContainerIndexes.Material, new_material, postpone_emit = postpone_emit)
## Get the material container.
#
# \return The material container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setMaterial, notify = pyqtContainersChanged)
def material(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.Material])
## Set the variant container.
#
# \param new_variant The new variant container. It is expected to have a "type" metadata entry with the value "variant".
def setVariant(self, new_variant: InstanceContainer) -> None:
self.replaceContainer(_ContainerIndexes.Variant, new_variant)
## Get the variant container.
#
# \return The variant container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setVariant, notify = pyqtContainersChanged)
def variant(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.Variant])
## Set the definition changes container.
#
# \param new_definition_changes The new definition changes container. It is expected to have a "type" metadata entry with the value "definition_changes".
def setDefinitionChanges(self, new_definition_changes: InstanceContainer) -> None:
self.replaceContainer(_ContainerIndexes.DefinitionChanges, new_definition_changes)
## Get the definition changes container.
#
# \return The definition changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setDefinitionChanges, notify = pyqtContainersChanged)
def definitionChanges(self) -> InstanceContainer:
return cast(InstanceContainer, self._containers[_ContainerIndexes.DefinitionChanges])
## Set the definition container.
#
# \param new_definition The new definition container. It is expected to have a "type" metadata entry with the value "definition".
def setDefinition(self, new_definition: DefinitionContainerInterface) -> None:
self.replaceContainer(_ContainerIndexes.Definition, new_definition)
def getDefinition(self) -> "DefinitionContainer":
return cast(DefinitionContainer, self._containers[_ContainerIndexes.Definition])
definition = pyqtProperty(QObject, fget = getDefinition, fset = setDefinition, notify = pyqtContainersChanged)
@override(ContainerStack)
def getBottom(self) -> "DefinitionContainer":
return self.definition
@override(ContainerStack)
def getTop(self) -> "InstanceContainer":
return self.userChanges
## Check whether the specified setting has a 'user' value.
#
# A user value here is defined as the setting having a value in either
# the UserChanges or QualityChanges container.
#
# \return True if the setting has a user value, False if not.
@pyqtSlot(str, result = bool)
def hasUserValue(self, key: str) -> bool:
if self._containers[_ContainerIndexes.UserChanges].hasProperty(key, "value"):
return True
if self._containers[_ContainerIndexes.QualityChanges].hasProperty(key, "value"):
return True
return False
## Set a property of a setting.
#
# This will set a property of a specified setting. Since the container stack does not contain
# any settings itself, it is required to specify a container to set the property on. The target
# container is matched by container type.
#
# \param key The key of the setting to set.
# \param property_name The name of the property to set.
# \param new_value The new value to set the property to.
def setProperty(self, key: str, property_name: str, property_value: Any, container: "ContainerInterface" = None, set_from_cache: bool = False) -> None:
container_index = _ContainerIndexes.UserChanges
self._containers[container_index].setProperty(key, property_name, property_value, container, set_from_cache)
## Overridden from ContainerStack
#
# Since we have a fixed order of containers in the stack and this method would modify the container
# ordering, we disallow this operation.
@override(ContainerStack)
def addContainer(self, container: ContainerInterface) -> None:
raise Exceptions.InvalidOperationError("Cannot add a container to Global stack")
## Overridden from ContainerStack
#
# Since we have a fixed order of containers in the stack and this method would modify the container
# ordering, we disallow this operation.
@override(ContainerStack)
def insertContainer(self, index: int, container: ContainerInterface) -> None:
raise Exceptions.InvalidOperationError("Cannot insert a container into Global stack")
## Overridden from ContainerStack
#
# Since we have a fixed order of containers in the stack and this method would modify the container
# ordering, we disallow this operation.
@override(ContainerStack)
def removeContainer(self, index: int = 0) -> None:
raise Exceptions.InvalidOperationError("Cannot remove a container from Global stack")
## Overridden from ContainerStack
#
# Replaces the container at the specified index with another container.
# This version performs checks to make sure the new container has the expected metadata and type.
#
# \throws Exception.InvalidContainerError Raised when trying to replace a container with a container that has an incorrect type.
@override(ContainerStack)
def replaceContainer(self, index: int, container: ContainerInterface, postpone_emit: bool = False) -> None:
expected_type = _ContainerIndexes.IndexTypeMap[index]
if expected_type == "definition":
if not isinstance(container, DefinitionContainer):
raise Exceptions.InvalidContainerError("Cannot replace container at index {index} with a container that is not a DefinitionContainer".format(index = index))
elif container != self._empty_instance_container and container.getMetaDataEntry("type") != expected_type:
raise Exceptions.InvalidContainerError("Cannot replace container at index {index} with a container that is not of {type} type, but {actual_type} type.".format(index = index, type = expected_type, actual_type = container.getMetaDataEntry("type")))
current_container = self._containers[index]
if current_container.getId() == container.getId():
return
super().replaceContainer(index, container, postpone_emit)
## Overridden from ContainerStack
#
# This deserialize will make sure the internal list of containers matches with what we expect.
# It will first check to see if the container at a certain index already matches with what we
# expect. If it does not, it will search for a matching container with the correct type. Should
# no container with the correct type be found, it will use the empty container.
#
# \throws InvalidContainerStackError Raised when no definition can be found for the stack.
@override(ContainerStack)
def deserialize(self, serialized: str, file_name: Optional[str] = None) -> str:
# update the serialized data first
serialized = super().deserialize(serialized, file_name)
new_containers = self._containers.copy()
while len(new_containers) < len(_ContainerIndexes.IndexTypeMap):
new_containers.append(self._empty_instance_container)
# Validate and ensure the list of containers matches with what we expect
for index, type_name in _ContainerIndexes.IndexTypeMap.items():
container = None
try:
container = new_containers[index]
except IndexError:
pass
if type_name == "definition":
if not container or not isinstance(container, DefinitionContainer):
definition = self.findContainer(container_type = DefinitionContainer)
if not definition:
raise InvalidContainerStackError("Stack {id} does not have a definition!".format(id = self.getId()))
new_containers[index] = definition
continue
if not container or container.getMetaDataEntry("type") != type_name:
actual_container = self.findContainer(type = type_name)
if actual_container:
new_containers[index] = actual_container
else:
new_containers[index] = self._empty_instance_container
self._containers = new_containers
# CURA-5281
# Some stacks can have empty definition_changes containers which will cause problems.
# Make sure that all stacks here have non-empty definition_changes containers.
if isinstance(new_containers[_ContainerIndexes.DefinitionChanges], type(self._empty_instance_container)):
from cura.Settings.CuraStackBuilder import CuraStackBuilder
CuraStackBuilder.createDefinitionChangesContainer(self, self.getId() + "_settings")
        ## TODO: Deserialize the containers.
return serialized
## protected:
# Helper to make sure we emit a PyQt signal on container changes.
def _onContainersChanged(self, container: Any) -> None:
Application.getInstance().callLater(self.pyqtContainersChanged.emit)
# Helper that can be overridden to get the "machine" definition, that is, the definition that defines the machine
# and its properties rather than, for example, the extruder. Defaults to simply returning the definition property.
def _getMachineDefinition(self) -> DefinitionContainer:
return self.definition
## Find the ID that should be used when searching for instance containers for a specified definition.
#
# This handles the situation where the definition specifies we should use a different definition when
# searching for instance containers.
#
# \param machine_definition The definition to find the "quality definition" for.
#
# \return The ID of the definition container to use when searching for instance containers.
@classmethod
def _findInstanceContainerDefinitionId(cls, machine_definition: DefinitionContainerInterface) -> str:
quality_definition = machine_definition.getMetaDataEntry("quality_definition")
if not quality_definition:
return machine_definition.id #type: ignore
definitions = ContainerRegistry.getInstance().findDefinitionContainers(id = quality_definition)
if not definitions:
Logger.log("w", "Unable to find parent definition {parent} for machine {machine}", parent = quality_definition, machine = machine_definition.id) #type: ignore
return machine_definition.id #type: ignore
return cls._findInstanceContainerDefinitionId(definitions[0])
## getProperty for extruder positions, with translation from -1 to default extruder number
def getExtruderPositionValueWithDefault(self, key):
value = self.getProperty(key, "value")
if value == -1:
value = int(Application.getInstance().getMachineManager().defaultExtruderPosition)
return value
## private:
# Private helper class to keep track of container positions and their types.
class _ContainerIndexes:
UserChanges = 0
QualityChanges = 1
Quality = 2
Material = 3
Variant = 4
DefinitionChanges = 5
Definition = 6
# Simple hash map to map from index to "type" metadata entry
IndexTypeMap = {
UserChanges: "user",
QualityChanges: "quality_changes",
Quality: "quality",
Material: "material",
Variant: "variant",
DefinitionChanges: "definition_changes",
Definition: "definition",
}
# Reverse lookup: type -> index
TypeIndexMap = dict([(v, k) for k, v in IndexTypeMap.items()])
| Patola/Cura | cura/Settings/CuraContainerStack.py | Python | lgpl-3.0 | 18,426 | 0.011397 |
import nose
import angr
import logging
l = logging.getLogger("angr_tests.path_groups")
import os
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
addresses_fauxware = {
'armel': 0x8524,
'armhf': 0x104c9, # addr+1 to force thumb
#'i386': 0x8048524, # commenting out because of the freaking stack check
'mips': 0x400710,
'mipsel': 0x4006d0,
'ppc': 0x1000054c,
'ppc64': 0x10000698,
'x86_64': 0x400664
}
def run_fauxware(arch, threads):
p = angr.Project(location + '/' + arch + '/fauxware', load_options={'auto_load_libs': False})
pg = p.factory.path_group(threads=threads)
nose.tools.assert_equal(len(pg.active), 1)
nose.tools.assert_equal(pg.active[0].length, 0)
# step until the backdoor split occurs
pg2 = pg.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune())
nose.tools.assert_equal(len(pg2.active), 2)
nose.tools.assert_true(any("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
nose.tools.assert_false(all("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
# separate out the backdoor and normal paths
pg3 = pg2.stash(lambda path: "SOSNEAKY" in path.state.posix.dumps(0), to_stash="backdoor").stash_all(to_stash="auth")
nose.tools.assert_equal(len(pg3.active), 0)
nose.tools.assert_equal(len(pg3.backdoor), 1)
nose.tools.assert_equal(len(pg3.auth), 1)
# step the backdoor path until it returns to main
pg4 = pg3.step(until=lambda lpg: lpg.backdoor[0].jumpkinds[-1] == 'Ijk_Ret', stash='backdoor')
main_addr = pg4.backdoor[0].addr
nose.tools.assert_equal(len(pg4.active), 0)
nose.tools.assert_equal(len(pg4.backdoor), 1)
nose.tools.assert_equal(len(pg4.auth), 1)
# now step the real path until the real authentication paths return to the same place
pg5 = pg4.explore(find=main_addr, num_find=2, stash='auth').unstash_all(from_stash='found', to_stash='auth')
nose.tools.assert_equal(len(pg5.active), 0)
nose.tools.assert_equal(len(pg5.backdoor), 1)
nose.tools.assert_equal(len(pg5.auth), 2)
# now unstash everything
pg6 = pg5.unstash_all(from_stash='backdoor').unstash_all(from_stash='auth')
nose.tools.assert_equal(len(pg6.active), 3)
nose.tools.assert_equal(len(pg6.backdoor), 0)
nose.tools.assert_equal(len(pg6.auth), 0)
nose.tools.assert_equal(len(set(pg6.mp_active.addr.mp_items)), 1)
# now merge them!
pg7 = pg6.merge()
nose.tools.assert_equal(len(pg7.active), 1)
nose.tools.assert_equal(len(pg7.backdoor), 0)
nose.tools.assert_equal(len(pg7.auth), 0)
#import ipdb; ipdb.set_trace()
#print pg2.mp_active.addr.mp_map(hex).mp_items
# test selecting paths to step
pg_a = p.factory.path_group(immutable=True)
pg_b = pg_a.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
pg_c = pg_b.step(selector_func=lambda p: p is pg_b.active[0], step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
nose.tools.assert_is(pg_b.active[1], pg_c.active[0])
nose.tools.assert_is_not(pg_b.active[0], pg_c.active[1])
total_active = len(pg_c.active)
# test special stashes
nose.tools.assert_equals(len(pg_c.stashed), 0)
pg_d = pg_c.stash(filter_func=lambda p: p is pg_c.active[1], to_stash='asdf')
nose.tools.assert_equals(len(pg_d.stashed), 0)
nose.tools.assert_equals(len(pg_d.asdf), 1)
nose.tools.assert_equals(len(pg_d.active), total_active-1)
pg_e = pg_d.stash(from_stash=pg_d.ALL, to_stash='fdsa')
nose.tools.assert_equals(len(pg_e.asdf), 0)
nose.tools.assert_equals(len(pg_e.active), 0)
nose.tools.assert_equals(len(pg_e.fdsa), total_active)
pg_f = pg_e.stash(from_stash=pg_e.ALL, to_stash=pg_e.DROP)
nose.tools.assert_true(all(len(s) == 0 for s in pg_f.stashes.values()))
def test_fauxware():
for arch in addresses_fauxware:
yield run_fauxware, arch, None
yield run_fauxware, arch, 2
if __name__ == "__main__":
for func, march, threads in test_fauxware():
print 'testing ' + march
func(march, threads)
| haylesr/angr | tests/test_path_groups.py | Python | bsd-2-clause | 4,184 | 0.00478 |
#!/usr/bin/env python3
import os # makedirs
import sys # argv, exit
import csv # DictReader
def cutoffdict(cdict):
rdict = dict()
for key in cdict.keys():
candi = cdict[key]
top = max(candi, key = candi.get)
if candi[top] > (sum(candi.values())*0.5):
rdict[key] = top
return rdict
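# Rough illustration with made-up data: for
#     {'1.2.3': {'DistrictA CityA': 3, 'DistrictB CityB': 1}}
# cutoffdict returns {'1.2.3': 'DistrictA CityA'}, because that value holds a
# strict majority (3 of 4 observations); prefixes with no majority are dropped.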
def groupbyprefix(src_path):
des_path = src_path.split('/')[-1]
src_file = open(src_path, 'r')
src_csv = csv.DictReader(src_file)
des_file = open('./dbdays/' + des_path, 'w')
des_csv = csv.DictWriter(des_file, fieldnames = [
'ipprefix', 'district', 'city'])
des_csv.writeheader()
cdict = dict()
    for row in src_csv:
        cprefix = row['ipprefix']
        ccity = row['district'] + ' ' + row['city']
        # Accumulate counts per prefix instead of overwriting the whole dict,
        # which would throw away the counts of other cities for this prefix.
        candidates = cdict.setdefault(cprefix, dict())
        candidates[ccity] = candidates.get(ccity, 0) + 1
wdict = cutoffdict(cdict)
for prefix in wdict.keys():
district = wdict[prefix].split(' ')[0]
city = wdict[prefix].split(' ')[1]
des_csv.writerow({'ipprefix': prefix,
'district': district,
'city': city})
def main(argv):
if len(argv) < 2:
print('We need 1 arguments')
print('.py [SRC]')
sys.exit()
src_path = argv[1]
os.makedirs('./dbdays', exist_ok = True)
sit = os.scandir(src_path)
for entry in sit:
if not entry.name.startswith('.') and entry.is_file():
cip = entry.path
groupbyprefix(cip)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| munhyunsu/UsedMarketAnalysis | ruliweb_analyzer/db_5th_group.py | Python | gpl-3.0 | 1,617 | 0.009895 |
# -*- coding: utf-8 -*-
"""The EWF image path specification implementation."""
from dfvfs.lib import definitions
from dfvfs.path import factory
from dfvfs.path import path_spec
class EWFPathSpec(path_spec.PathSpec):
"""EWF image path specification."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_EWF
def __init__(self, parent=None, **kwargs):
"""Initializes a path specification.
Note that the EWF file path specification must have a parent.
Args:
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
"""
if not parent:
raise ValueError('Missing parent value.')
super(EWFPathSpec, self).__init__(parent=parent, **kwargs)
factory.Factory.RegisterPathSpec(EWFPathSpec)
| joachimmetz/dfvfs | dfvfs/path/ewf_path_spec.py | Python | apache-2.0 | 777 | 0.005148 |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from django import forms
from django.utils.translation import ugettext_lazy as _
from envelope.forms import ContactForm
class ContactForm(ContactForm):
template_name = "envelope/contact_email.txt"
html_template_name = "envelope/contact_email.html"
phone = forms.CharField(label='Teléfono', required=False)
country = forms.CharField(label='País', required=False)
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['email'].required = False
ContactForm.base_fields = OrderedDict(
(k, ContactForm.base_fields[k])
for k in [
'sender', 'subject', 'email', 'phone', 'country', 'message',
]
)
| CacaoMovil/guia-de-cacao-django | cacao_app/configuracion/forms.py | Python | bsd-3-clause | 762 | 0.001316 |
from django import template
register = template.Library()
@register.assignment_tag(takes_context=True)
def has_bookmark_permission(context, action):
"""Checks if the current user can bookmark the action item.
Returns a boolean.
Syntax::
{% has_bookmark_permission action %}
"""
request = context['request']
if not request.user.is_authenticated():
return False
has_permission = True
if action.target.approval_required and not request.user.can_access_all_projects:
has_permission = False
if not has_permission:
return False
return True
@register.assignment_tag(takes_context=True)
def get_existing_bookmark(context, action):
request = context['request']
if not request.user.is_authenticated():
return None
existing_bookmark = request.user.bookmark_set.filter(
object_pk=action.action_object.pk, content_type=action.action_object_content_type
).first()
return existing_bookmark
| HMSBeagle1831/rapidscience | rlp/bookmarks/templatetags/bookmarks.py | Python | mit | 990 | 0.00202 |
# Natural Language Toolkit: Clusterer Utilities
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import copy
from sys import stdout
from math import sqrt
try:
import numpy
except ImportError:
pass
from nltk.cluster.api import ClusterI
from nltk.compat import python_2_unicode_compatible
class VectorSpaceClusterer(ClusterI):
"""
Abstract clusterer which takes tokens and maps them into a vector space.
Optionally performs singular value decomposition to reduce the
dimensionality.
"""
def __init__(self, normalise=False, svd_dimensions=None):
"""
:param normalise: should vectors be normalised to length 1
:type normalise: boolean
:param svd_dimensions: number of dimensions to use in reducing vector
dimensionsionality with SVD
:type svd_dimensions: int
"""
self._Tt = None
self._should_normalise = normalise
self._svd_dimensions = svd_dimensions
def cluster(self, vectors, assign_clusters=False, trace=False):
assert len(vectors) > 0
# normalise the vectors
if self._should_normalise:
vectors = list(map(self._normalise, vectors))
# use SVD to reduce the dimensionality
if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
[u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
S = d[:self._svd_dimensions] * \
numpy.identity(self._svd_dimensions, numpy.float64)
T = u[:,:self._svd_dimensions]
Dt = vt[:self._svd_dimensions,:]
vectors = numpy.transpose(numpy.dot(S, Dt))
self._Tt = numpy.transpose(T)
# call abstract method to cluster the vectors
self.cluster_vectorspace(vectors, trace)
# assign the vectors to clusters
if assign_clusters:
print(self._Tt, vectors)
return [self.classify(vector) for vector in vectors]
def cluster_vectorspace(self, vectors, trace):
"""
Finds the clusters using the given set of vectors.
"""
raise NotImplementedError()
def classify(self, vector):
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
cluster = self.classify_vectorspace(vector)
return self.cluster_name(cluster)
def classify_vectorspace(self, vector):
"""
Returns the index of the appropriate cluster for the vector.
"""
raise NotImplementedError()
def likelihood(self, vector, label):
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
return self.likelihood_vectorspace(vector, label)
def likelihood_vectorspace(self, vector, cluster):
"""
Returns the likelihood of the vector belonging to the cluster.
"""
predicted = self.classify_vectorspace(vector)
return (1.0 if cluster == predicted else 0.0)
def vector(self, vector):
"""
Returns the vector after normalisation and dimensionality reduction
"""
if self._should_normalise:
vector = self._normalise(vector)
if self._Tt is not None:
vector = numpy.dot(self._Tt, vector)
return vector
def _normalise(self, vector):
"""
Normalises the vector to unit length.
"""
return vector / sqrt(numpy.dot(vector, vector))
def euclidean_distance(u, v):
"""
Returns the euclidean distance between vectors u and v. This is equivalent
to the length of the vector (u - v).
"""
diff = u - v
return sqrt(numpy.dot(diff, diff))
def cosine_distance(u, v):
"""
Returns 1 minus the cosine of the angle between vectors v and u. This is equal to
1 - (u.v / |u||v|).
"""
return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
class _DendrogramNode(object):
""" Tree node of a dendrogram. """
def __init__(self, value, *children):
self._value = value
self._children = children
def leaves(self, values=True):
if self._children:
leaves = []
for child in self._children:
leaves.extend(child.leaves(values))
return leaves
elif values:
return [self._value]
else:
return [self]
def groups(self, n):
queue = [(self._value, self)]
while len(queue) < n:
priority, node = queue.pop()
if not node._children:
                queue.append((priority, node))  # list has no push(); put the leaf back before stopping
break
for child in node._children:
if child._children:
queue.append((child._value, child))
else:
queue.append((0, child))
# makes the earliest merges at the start, latest at the end
queue.sort()
groups = []
for priority, node in queue:
groups.append(node.leaves())
return groups
@python_2_unicode_compatible
class Dendrogram(object):
"""
Represents a dendrogram, a tree with a specified branching order. This
must be initialised with the leaf items, then iteratively call merge for
each branch. This class constructs a tree representing the order of calls
to the merge function.
"""
def __init__(self, items=[]):
"""
:param items: the items at the leaves of the dendrogram
:type items: sequence of (any)
"""
self._items = [_DendrogramNode(item) for item in items]
self._original_items = copy.copy(self._items)
self._merge = 1
def merge(self, *indices):
"""
Merges nodes at given indices in the dendrogram. The nodes will be
combined which then replaces the first node specified. All other nodes
involved in the merge will be removed.
:param indices: indices of the items to merge (at least two)
:type indices: seq of int
"""
assert len(indices) >= 2
node = _DendrogramNode(self._merge, *[self._items[i] for i in indices])
self._merge += 1
self._items[indices[0]] = node
for i in indices[1:]:
del self._items[i]
def groups(self, n):
"""
Finds the n-groups of items (leaves) reachable from a cut at depth n.
:param n: number of groups
:type n: int
"""
if len(self._items) > 1:
root = _DendrogramNode(self._merge, *self._items)
else:
root = self._items[0]
return root.groups(n)
def show(self, leaf_labels=[]):
"""
Print the dendrogram in ASCII art to standard out.
:param leaf_labels: an optional list of strings to use for labeling the leaves
:type leaf_labels: list
"""
# ASCII rendering characters
JOIN, HLINK, VLINK = '+', '-', '|'
# find the root (or create one)
if len(self._items) > 1:
root = _DendrogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = self._original_items
if leaf_labels:
last_row = leaf_labels
else:
last_row = ["%s" % leaf._value for leaf in leaves]
# find the bottom row and the best cell width
width = max(map(len, last_row)) + 1
        lhalf = width // 2  # integer division so the string padding below works on Python 3
rhalf = width - lhalf - 1
# display functions
def format(centre, left=' ', right=' '):
return '%s%s%s' % (lhalf*left, centre, right*rhalf)
def display(str):
stdout.write(str)
# for each merge, top down
queue = [(root._value, root)]
verticals = [ format(' ') for leaf in leaves ]
while queue:
priority, node = queue.pop()
child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children))
indices = list(map(leaves.index, child_left_leaf))
if child_left_leaf:
min_idx = min(indices)
max_idx = max(indices)
for i in range(len(leaves)):
if leaves[i] in child_left_leaf:
if i == min_idx: display(format(JOIN, ' ', HLINK))
elif i == max_idx: display(format(JOIN, HLINK, ' '))
else: display(format(JOIN, HLINK, HLINK))
verticals[i] = format(VLINK)
elif min_idx <= i <= max_idx:
display(format(HLINK, HLINK, HLINK))
else:
display(verticals[i])
display('\n')
for child in node._children:
if child._children:
queue.append((child._value, child))
queue.sort()
for vertical in verticals:
display(vertical)
display('\n')
# finally, display the last line
display(''.join(item.center(width) for item in last_row))
display('\n')
def __repr__(self):
if len(self._items) > 1:
root = _DendrogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = root.leaves(False)
return '<Dendrogram with %d leaves>' % len(leaves)
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/cluster/util.py | Python | gpl-2.0 | 9,689 | 0.001858 |
#!/usr/bin/env python
# REQUIRES both rst2pdf and wikir project from google code.
import sys
import subprocess
sys.path.insert(0, '../../rson/py2x')
from rson import loads
from simplejson import dumps
subprocess.call('../../rst2pdf/bin/rst2pdf manual.txt -e preprocess -e dotted_toc -o manual.pdf'.split())
lines = iter(open('manual.txt', 'rb').read().splitlines())
badstuff = 'page:: space:: footer:: ##Page## contents::'.split()
result = []
for line in lines:
for check in badstuff:
if check in line:
break
else:
result.append(line)
result.append('')
result = '\n'.join(result)
from wikir import publish_string
result = publish_string(result)
f = open('manual.wiki', 'wb')
f.write(result)
f.close()
| pmaupin/playtag | doc/make.py | Python | mit | 745 | 0.005369 |
# -*- coding: utf-8 -*-
from requests import (get, post, delete)
from .base import Base
class System(Base):
def __init__(self, host, secret, endpoint='/plugins/restapi/v1/system/properties'):
"""
:param host: Scheme://Host/ for API requests
:param secret: Shared secret key for API requests
:param endpoint: Endpoint for API requests
"""
super(System, self).__init__(host, secret, endpoint)
def get_props(self):
"""
Retrieve all system properties
"""
return self._submit_request(get, self.endpoint)
def get_prop(self, key):
"""
Retrieve system property
:param key: The name of system property
"""
endpoint = '/'.join([self.endpoint, key])
return self._submit_request(get, endpoint)
def update_prop(self, key, value):
"""
Create or update a system property
:param key: The name of system property
:param value: The value of system property
"""
payload = {
'@key': key,
'@value': value,
}
return self._submit_request(post, self.endpoint, json=payload)
def delete_prop(self, key):
"""
Delete a system property
:param key: The name of system property
"""
endpoint = '/'.join([self.endpoint, key])
return self._submit_request(delete, endpoint)
def get_concurrent_sessions(self):
"""
Retrieve concurrent sessions
"""
endpoint = '/'.join([self.endpoint.rpartition('/')[0], 'statistics', 'sessions'])
return self._submit_request(get, endpoint)
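# Illustrative usage (host and secret are placeholders; 'xmpp.domain' is just an example key):
#
#     api = System('http://localhost:9090', 'secret')
#     api.update_prop('xmpp.domain', 'example.org')
#     props = api.get_props()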
| etutionbd/openfire-restapi | ofrestapi/system.py | Python | gpl-3.0 | 1,675 | 0.001194 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from django.contrib.auth.models import User
from sentry.constants import MEMBER_USER
from sentry.models import Project
from sentry.web.helpers import get_project_list
from tests.base import TestCase
class GetProjectListTest(TestCase):
def setUp(self):
self.user = User.objects.create(username="admin", email="admin@localhost")
self.project = Project.objects.get()
assert self.project.public is True
self.project2 = Project.objects.create(name='Test', slug='test', owner=self.user, public=False)
@mock.patch('sentry.models.Team.objects.get_for_user', mock.Mock(return_value={}))
def test_includes_public_projects_without_access(self):
project_list = get_project_list(self.user)
self.assertEquals(len(project_list), 1)
self.assertIn(self.project.id, project_list)
@mock.patch('sentry.models.Team.objects.get_for_user', mock.Mock(return_value={}))
def test_does_exclude_public_projects_without_access(self):
project_list = get_project_list(self.user, MEMBER_USER)
self.assertEquals(len(project_list), 0)
@mock.patch('sentry.models.Team.objects.get_for_user')
def test_does_include_private_projects_without_access(self, get_for_user):
get_for_user.return_value = {self.project2.team.id: self.project2.team}
project_list = get_project_list(self.user)
get_for_user.assert_called_once_with(self.user, None)
self.assertEquals(len(project_list), 2)
self.assertIn(self.project.id, project_list)
self.assertIn(self.project2.id, project_list)
@mock.patch('sentry.models.Team.objects.get_for_user')
def test_does_exclude_public_projects_but_include_private_with_access(self, get_for_user):
get_for_user.return_value = {self.project2.team.id: self.project2.team}
project_list = get_project_list(self.user, MEMBER_USER)
get_for_user.assert_called_once_with(self.user, MEMBER_USER)
self.assertEquals(len(project_list), 1)
self.assertIn(self.project2.id, project_list)
| chayapan/django-sentry | tests/sentry/web/helpers/tests.py | Python | bsd-3-clause | 2,132 | 0.002345 |
#!/usr/bin/python
# Written by Stjepan Horvat
# ( zvanstefan@gmail.com )
# based on the exercises from David Lucas Burge - Perfect Pitch Ear Training Supercourse
# Thanks to Wojciech M. Zabolotny ( wzab@ise.pw.edu.pl ) for snd-virmidi example
# ( wzab@ise.pw.edu.pl )
import random
import time
import sys
import re
fname="/dev/snd/midiC2D0"
#fname=sys.argv[1]
fin=open(fname,"rb")
fout=open(fname,"wb")
#keymin=int(sys.argv[2])
#keymax=int(sys.argv[3])
#keymin=int(60)
#keymax=int(72)
#c major scale
print ("Exercise 10-17:")
print ("Aural chord analisys. First you have to unlock the sound by ear. And then you have to indentify. It's a very powerful tehnique to stabilize perfect pitch.")
#from c to c'' white tones
#c major scale
notes = [ 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96 ]
noteC = [ 36, 48, 60, 72, 84, 96 ]
def playNote(noteOne, noteTwo, noteThree):
fout.write((chr(0x90)+chr(noteOne)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(noteOne)+chr(127)).encode('utf-8'))
fout.flush()
fout.write((chr(0x90)+chr(noteTwo)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(noteTwo)+chr(127)).encode('utf-8'))
fout.flush()
fout.write((chr(0x90)+chr(noteThree)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(noteThree)+chr(127)).encode('utf-8'))
fout.flush()
def nameNote(note):
if note in noteC:
return("C")
elif note-2 in noteC:
return("D")
elif note-4 in noteC:
return("E")
elif note-5 in noteC:
return("F")
elif note-7 in noteC:
return("G")
elif note-9 in noteC:
return("A")
elif note-11 in noteC:
return("H")
def name2Note(name):
if name == "c":
return(60)
if name == "d":
return(62)
if name == "e":
return(64)
if name == "f":
return(65)
if name == "g":
return(67)
if name == "a":
return(69)
if name == "h":
return(71)
usage = "Usage: 1-repeat, <note> <note> \"c d e\", ?-usage."
round = 1
a = re.compile("^[acdefgh] [acdefgh] [acdefgh]$")  # only valid note names (no 'b'; German H notation is used)
try:
print(usage)
while True:
noteOne = random.choice(notes)
while True:
while True:
noteTwo = random.choice(notes)
if nameNote(noteOne) != nameNote(noteTwo):
break
while True:
noteThree = random.choice(notes)
				if nameNote(noteTwo) != nameNote(noteThree):  # noteThree must differ from noteTwo; noteOne is checked below
break
if nameNote(noteOne) != nameNote(noteThree):
break
match = False
while not match:
done = False
playNote(noteOne, noteTwo, noteThree)
while not done:
n = input("? ")
if n == "1":
playNote(noteOne, noteTwo, noteThree)
if n == "?":
print(usage)
if n == "help":
print(nameNote(noteOne).lower(), nameNote(noteTwo).lower(), nameNote(noteThree).lower())
elif a.match(n):
splitNote = n.split()
if splitNote[0] == nameNote(noteOne).lower() and splitNote[1] == nameNote(noteTwo).lower() and splitNote[2] == nameNote(noteThree).lower():
round += 1
print("Correct. Next round. " + str(round) + ".:")
done = True
match = True
else:
playNote(name2Note(splitNote[0]), name2Note(splitNote[1]), name2Note(splitNote[2]))
except KeyboardInterrupt:
pass
| schef/schef.github.io | source/11/mc-11-05-sk-mt.py | Python | mit | 3,429 | 0.022164 |
#!/usr/bin/env python
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.dates as mdates
#import matplotlib.cbook as cbook
#from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from statsmodels.distributions.empirical_distribution import ECDF
from collections import Counter
from ..guifiwrapper.guifiwrapper import *
#root = 3671
#root = 2444
root = 17711
g = CNMLWrapper(root)
import os
basedir = os.path.join(os.getcwd(), 'figs')
baseservicesdir = os.path.join(basedir, 'services')
for d in [basedir, baseservicesdir]:
if not os.path.exists(d):
os.makedirs(d)
user = ['meteo', 'radio', 'web', 'VPS', 'tv', 'wol', 'Proxy', 'mail', 'irc',
'teamspeak', 'ftp', 'asterisk', 'apt-cache', 'AP', 'IM', 'p2p',
'VPN', 'Streaming', 'games', 'cam']
mgmt = ['iperf', 'LDAP', 'DNS', 'SNPgraphs', 'NTP', 'AirControl']
# Extract user services and frequencies
#userServices = [s.type for s in g.services.values() if s.type in user]
#totalServices = len(userServices)
#userServices = Counter(userServices).items()
#userServicesNumber = len(userServices)
#userTypes = [typ for (typ,values) in userServices]
#userValues = [float(value)/float(totalServices) for (typ,value) in userServices]
# Extract mgmt services and frequencies
services = [s.type for s in g.services.values() if s.type in user]
totalServices = len(services)
services = Counter(services).items()
from operator import itemgetter
services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
#ax = fig.add_subplot(121)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title('User Services Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=45, fontsize=13)
services1 = [s.type for s in g.services.values() if s.type in mgmt]
totalServices1 = len(services1)
services1 = Counter(services1).items()
services1.sort(key=itemgetter(1), reverse=True)
servicesNumber1 = len(services1)
types1 = [typ for (typ, value1) in services1]
values1 = [float(value) / float(totalServices1) for (typ, value) in services1]
if False:
# Disable analytical mgmt frequency image
ind1 = np.arange(servicesNumber1)
ax1 = fig.add_subplot(122)
rects = ax1.bar(ind1, values1, width, color='black')
ax1.set_xlim(-width, len(ind1) + width)
ax1.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
# ax1.set_ylabel('Frequency')
#ax1.set_xlabel('Service Type')
ax1.set_title('Management Services Frequency')
xTickMarks1 = [str(i) for i in types1]
ax1.set_xticks(ind1 + width)
xtickNames1 = ax1.set_xticklabels(xTickMarks1)
plt.setp(xtickNames1, rotation=0, fontsize=13)
plt.show()
figfile = os.path.join(baseservicesdir, str(root) + "services_frequency")
fig.savefig(figfile, format='png', dpi=fig.dpi)
# Other categories
for s in g.services.values():
if s.type in mgmt:
s.type = "Management"
elif s.type != "Proxy":
s.type = "Other services"
services = [s.type for s in g.services.values()]
totalServices = len(services)
services = Counter(services).items()
services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title(' Service Categories Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=0, fontsize=12)
plt.show()
figfile = os.path.join(
baseservicesdir,
str(root) +
"services_frequency_categories")
fig.savefig(figfile, format='png', dpi=fig.dpi)
| emmdim/guifiAnalyzer | plot/plotsServices.py | Python | gpl-3.0 | 4,467 | 0.004477 |
# -*- coding: utf-8 -*-
import gensim, logging
class SemanticVector:
model = ''
def __init__(self, structure):
self.structure = structure
def model_word2vec(self, min_count=15, window=15, size=100):
print 'preparing sentences list'
sentences = self.structure.prepare_list_of_words_in_sentences()
print 'start modeling'
self.model = gensim.models.Word2Vec(sentences, size=size, window=window, min_count=min_count, workers=4, sample=0.001, sg=0)
return self.model
def save_model(self, name):
self.model.save(name)
def load_model(self, name):
self.model = gensim.models.Word2Vec.load(name)
| arashzamani/lstm_nlg_ver1 | language_parser/SemanticVector.py | Python | gpl-3.0 | 679 | 0.002946 |
# Heap sort implemented with a class-based min-heap
class DLinkHeap(object):
def __init__(self, list=None, N = 0):
self.dList = list
self.lengthSize = N
    # Insert a value and keep the heap property
    def insert_heap(self, data):
        self.dList.append(data)
        self.lengthSize += 1
        self.sift_up(self.lengthSize)
    # Initialize the heap structure (build a min-heap)
    def init_heap(self):
        n = self.lengthSize
        # Sift each element up into the growing heap; sifting down from the
        # root for every prefix size does not yield a valid heap.
        for i in range(2, n + 1):
            self.sift_up(i)
    # Swap two elements
def swap(self, a, b):
tmp = self.dList[a]
self.dList[a] = self.dList[b]
self.dList[b] = tmp
    # Sift the root down, restricted to the first 'size' elements
def sift_down(self, size):
n = size
t = 0
tmp_pos = 0
        # Note: Python 3's / is float division, hence the int()
while t < int(n/2):
if self.dList[t] > self.dList[2*t+1]:
tmp_pos = 2*t+1
else:
tmp_pos = t
if 2*t+2 < n:
if self.dList[tmp_pos] > self.dList[2*t+2]:
tmp_pos = 2*t+2
if t != tmp_pos:
self.swap(tmp_pos, t)
t = tmp_pos
else:
break
    # Sift the element at index size-1 up towards the root
def sift_up(self, size):
n = size
i = n - 1
flag = 0
while i > 0 and flag == 0:
            parent_i = (i - 1) // 2  # parent of index i in a 0-indexed heap
if self.dList[i] < self.dList[parent_i]:
self.swap(i, parent_i)
i = parent_i
else:
flag = 1
    # Heap sort: repeatedly swap the minimum to the end; the list ends up in descending order
def heap_sort(self):
n = self.lengthSize
while n > 0:
self.swap(0, n-1)
n -= 1
self.sift_down(n)
    # Print the first 'size' heap elements
def print_heap(self, size):
for idx in range(size):
print(self.dList[idx], end=" ")
print()
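# A rough usage sketch (made-up values):
#
#     heap = DLinkHeap([5, 4, 3, 2, 1], 5)
#     heap.init_heap()     # builds a min-heap, so index 0 holds the minimum
#     heap.heap_sort()     # min-heap plus swap-to-end leaves the list in descending order
#     heap.print_heap(5)   # prints: 5 4 3 2 1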
if __name__ == "__main__":
    # Read n, the number of values
    n = int(input())
    # Read the list of n values
    input_L = list(map(int, input().split()))
L = input_L
dLinkHeap = DLinkHeap(L, n)
dLinkHeap.init_heap()
dLinkHeap.print_heap(n)
print("-----after sort-----")
dLinkHeap.heap_sort()
dLinkHeap.print_heap(n)
| jinzekid/codehub | 数据结构与算法/heap_sort/类定义操作定义堆结果以及排序.py | Python | gpl-3.0 | 2,192 | 0.008205 |
from . import slide_channel_technology_category
from . import slide_channel_technology
from . import slide_channel
| avanzosc/odoo-addons | slide_channel_technology/models/__init__.py | Python | agpl-3.0 | 115 | 0 |
#!/usr/bin/python3
__author__ = 'ivan.shynkarenka'
import argparse
from TTWebClient.TickTraderWebClient import TickTraderWebClient
def main():
parser = argparse.ArgumentParser(description='TickTrader Web API sample')
parser.add_argument('web_api_address', help='TickTrader Web API address')
args = parser.parse_args()
# Create instance of the TickTrader Web API client
client = TickTraderWebClient(args.web_api_address)
# Public currencies
currencies = client.get_public_all_currencies()
for c in currencies:
print('Currency: {0}'.format(c['Name']))
currency = client.get_public_currency(currencies[0]['Name'])
print("{0} currency precision: {1}".format(currency[0]['Name'], currency[0]['Precision']))
if __name__ == '__main__':
main() | SoftFx/TTWebClient-Python | TTWebClientSample/public_currencies.py | Python | mit | 796 | 0.002513 |
from django.contrib import admin
# Register your models here.
from rango.models import Category, Page
class PageAdmin(admin.ModelAdmin):
list_display = ('title', 'category', 'url')
admin.site.register(Category)
admin.site.register(Page,PageAdmin)
| ramprasathdgl/TangoWithDjango | TangoWithDjango/rango/admin.py | Python | gpl-3.0 | 287 | 0.017422 |
#!/usr/bin/env python
# coding: utf-8
# # rede_gephi_com_ipca_csv
# In[6]:
ano_eleicao = '2014'
rede =f'rede{ano_eleicao}'
csv_dir = f'/home/neilor/{rede}'
# In[7]:
dbschema = f'rede{ano_eleicao}'
table_edges = f"{dbschema}.gephi_edges_com_ipca_2018"
table_nodes = f"{dbschema}.gephi_nodes_com_ipca_2018"
table_receitas = f"{dbschema}.receitas_com_ipca_2018"
table_candidaturas = f"{dbschema}.candidaturas_com_ipca_2018"
table_municipios = f"{dbschema}.municipios_{ano_eleicao}"
# In[8]:
import sys
sys.path.append('../')
import mod_tse as mtse
# In[9]:
import os
home = os.environ["HOME"]
local_dir = f'{home}/temp'
# In[10]:
mtse.execute_query(f"update {table_municipios} set rede= 'N';")
# ## BRAZIL NETWORK
# In[11]:
def salva_rede_brasil(csv_dir,rede):
rede_dir_BR = f'{csv_dir}/{rede}_Brasil'
os.makedirs(rede_dir_BR)
edges_csv_query=f"""copy
(
select * from {table_edges}
)
TO '{rede_dir_BR}/{rede}_Brasil_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(edges_csv_query)
nodes_csv_query=f"""copy
(
select * from {table_nodes}
)
TO '{rede_dir_BR}/{rede}_Brasil_nodes.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_csv_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas}
)
TO '{rede_dir_BR}/{rede}_Brasil_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas}
)
TO '{rede_dir_BR}/{rede}_Brasil_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# ## NETWORKS BY STATE
# In[12]:
def salva_rede_csv_uf(csv_dir,rede,sg_uf):
rede_dir_uf = f'{csv_dir}/{rede}_{sg_uf}'
os.makedirs(rede_dir_uf)
edges_query=f"""copy
(
select * from {table_edges} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(edges_query)
nodes_query=f"""copy
(
select * from {table_nodes} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_nodes.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas} where sg_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas} where receptor_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# In[13]:
import pandas as pd
import shutil
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.makedirs(csv_dir)
salva_rede_brasil(csv_dir,rede)
df_uf = mtse.pandas_query(f'select sg_uf from {table_candidaturas} group by sg_uf order by sg_uf')
for index, row in df_uf.iterrows():
sg_uf = row['sg_uf']
salva_rede_csv_uf(csv_dir,rede,sg_uf)
# In[14]:
import datetime
print(datetime.datetime.now())
# In[ ]:
| elivre/arfe | e2014/SCRIPTS/055-rede2014_rede_gephi_com_ipca_csv.py | Python | mit | 3,896 | 0.014117 |
import pytest
from django.db import connection, IntegrityError
from .models import MyTree
def flush_constraints():
# the default db setup is to have constraints DEFERRED.
# So IntegrityErrors only happen when the transaction commits.
# Django's testcase thing does eventually flush the constraints but to
# actually test it *within* a testcase we have to flush it manually.
connection.cursor().execute("SET CONSTRAINTS ALL IMMEDIATE")
def test_node_creation_simple(db):
MyTree.objects.create(label='root1')
MyTree.objects.create(label='root2')
def test_node_creation_with_no_label(db):
# You need a label
with pytest.raises(ValueError):
MyTree.objects.create(label='')
with pytest.raises(ValueError):
MyTree.objects.create(label=None)
with pytest.raises(ValueError):
MyTree.objects.create()
def test_root_node_already_exists(db):
MyTree.objects.create(label='root1')
with pytest.raises(IntegrityError):
MyTree.objects.create(label='root1')
def test_same_label_but_different_parent(db):
root1 = MyTree.objects.create(label='root1')
MyTree.objects.create(label='root1', parent=root1)
def test_same_label_as_sibling(db):
root1 = MyTree.objects.create(label='root1')
MyTree.objects.create(label='child', parent=root1)
with pytest.raises(IntegrityError):
MyTree.objects.create(label='child', parent=root1)
def test_parent_is_self_errors(db):
root1 = MyTree.objects.create(label='root1')
root1.parent = root1
with pytest.raises(IntegrityError):
root1.save()
flush_constraints()
def test_parent_is_remote_ancestor_errors(db):
root1 = MyTree.objects.create(label='root1')
child2 = MyTree.objects.create(label='child2', parent=root1)
desc3 = MyTree.objects.create(label='desc3', parent=child2)
with pytest.raises(IntegrityError):
# To test this integrity error, have to update table without calling save()
# (because save() changes `ltree` to match `parent_id`)
MyTree.objects.filter(pk=desc3.pk).update(parent=root1)
flush_constraints()
def test_parent_is_descendant_errors(db):
root1 = MyTree.objects.create(label='root1')
child2 = MyTree.objects.create(label='child2', parent=root1)
desc3 = MyTree.objects.create(label='desc3', parent=child2)
child2.parent = desc3
with pytest.raises(IntegrityError):
child2.save()
flush_constraints()
| craigds/django-mpathy | tests/test_db_consistency.py | Python | bsd-3-clause | 2,474 | 0.000404 |
"""Unit test for the SNES nonlinear solver"""
# Copyright (C) 2012 Patrick E. Farrell
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2012-10-17
# Last changed: 2012-10-26
"""Solve the Yamabe PDE which arises in the differential geometry of general
relativity. http://arxiv.org/abs/1107.0360.
The Yamabe equation is highly nonlinear and supports many solutions. However,
only one of these is of physical relevance -- the positive solution.
This unit test demonstrates the capability of the SNES solver to accept bounds
on the resulting solution. The plain Newton method converges to an unphysical
negative solution, while the SNES solution with {sign: nonnegative} converges
to the physical positive solution.
"""
from dolfin import *
import unittest
try:
parameters["linear_algebra_backend"] = "PETSc"
except RuntimeError:
import sys; sys.exit(0)
parameters["form_compiler"]["quadrature_degree"] = 5
mesh = Mesh("doughnut.xml.gz")
V = FunctionSpace(mesh, "CG", 1)
bcs = [DirichletBC(V, 1.0, "on_boundary")]
u = Function(V)
v = TestFunction(V)
u.interpolate(Constant(-1000.0))
r = sqrt(triangle.x[0]**2 + triangle.x[1]**2)
rho = 1.0/r**3
F = (8*inner(grad(u), grad(v))*dx +
rho * inner(u**5, v)*dx +
(-1.0/8.0)*inner(u, v)*dx)
newton_solver_parameters = {"nonlinear_solver": "newton",
"linear_solver": "lu",
"newton_solver": {"maximum_iterations": 100,
"report": False}}
snes_solver_parameters = {"nonlinear_solver": "snes",
"linear_solver": "lu",
"snes_solver": {"maximum_iterations": 100,
"sign": "nonnegative",
"report": False}}
class SNESSolverTester(unittest.TestCase):
def test_snes_solver(self):
solve(F == 0, u, bcs, solver_parameters=snes_solver_parameters)
self.assertTrue(u.vector().min() >= 0)
def test_newton_solver(self):
solve(F == 0, u, bcs, solver_parameters=newton_solver_parameters)
self.assertTrue(u.vector().min() < 0)
if __name__ == "__main__":
# Turn off DOLFIN output
set_log_active(False)
print ""
print "Testing DOLFIN nls/PETScSNESSolver interface"
print "--------------------------------------------"
unittest.main()
| alogg/dolfin | test/unit/nls/python/PETScSNESSolver.py | Python | gpl-3.0 | 2,986 | 0.005023 |
# Copyright (c) 2016-2017 Dustin Doloff
# Licensed under Apache License v2.0
import argparse
import difflib
import hashlib
import os
import subprocess
import zipfile
# Resets color formatting
COLOR_END = '\33[0m'
# Modifies characters or color
COLOR_BOLD = '\33[1m'
COLOR_DISABLED = '\33[02m' # Mostly just means darker
# Sets the text color
COLOR_GREEN = '\33[32m'
COLOR_YELLOW = '\33[33m'
COLOR_RED = '\33[31m'
def parse_args():
parser = argparse.ArgumentParser(description='Asserts files are the same')
parser.add_argument('--stamp', type=argparse.FileType('w+'), required=True,
help='Stamp file to record action completed')
parser.add_argument('--files', type=str, nargs='+', required=True)
return parser.parse_args()
def bytes_to_str(bytes):
    # summarize() already returns str; only raw file bytes need decoding
    if isinstance(bytes, str):
        return bytes
    return bytes.decode('utf-8', 'backslashreplace')
def color_diff(text_a, text_b):
"""
Compares two pieces of text and returns a tuple
The first value is a colorized diff of the texts.
The second value is a boolean, True if there was a diff, False if there wasn't.
"""
sequence_matcher = difflib.SequenceMatcher(None, text_a, text_b)
colorized_diff = ''
diff = False
for opcode, a0, a1, b0, b1 in sequence_matcher.get_opcodes():
if opcode == 'equal':
colorized_diff += bytes_to_str(sequence_matcher.a[a0:a1])
elif opcode == 'insert':
colorized_diff += COLOR_BOLD + COLOR_GREEN + bytes_to_str(sequence_matcher.b[b0:b1]) + COLOR_END
diff = True
elif opcode == 'delete':
colorized_diff += COLOR_BOLD + COLOR_RED + bytes_to_str(sequence_matcher.a[a0:a1]) + COLOR_END
diff = True
elif opcode == 'replace':
colorized_diff += (COLOR_BOLD + COLOR_YELLOW + bytes_to_str(sequence_matcher.a[a0:a1]) +
COLOR_DISABLED + bytes_to_str(sequence_matcher.b[b0:b1]) + COLOR_END)
diff = True
else:
raise RuntimeError('unexpected opcode ' + opcode)
return colorized_diff, diff
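# For example (illustrative values only): color_diff(b'abc', b'abd') returns the
# text "ab" followed by a highlighted "c"/"d" replacement, together with True.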
def hash_file(file):
"""
Computes the SHA-256 hash of the file
file - The file to hash
"""
hasher = hashlib.sha256()
with open(file, 'rb') as f:
for block in iter(lambda: f.read(1024), b''):
hasher.update(block)
return hasher.digest()
def summarize(file):
"""
Summarizes a file via it's metadata to provide structured text for diffing
"""
summary = None
if zipfile.is_zipfile(file):
with zipfile.ZipFile(file) as zf:
summary = ''
for info in zf.infolist():
summary += 'Entry: ('
summary += ', '.join(s + ': ' + repr(getattr(info, s)) for s in info.__slots__)
summary += ') ' + os.linesep
assert summary is not None, 'Unable to summarize %s' % file
return summary
def main():
args = parse_args()
files = args.files
assert len(files) >= 2, 'There must be at least two files to compare'
files_hashes = set()
max_file_size = 0
for file in files:
files_hashes.add(hash_file(file))
max_file_size = max(max_file_size, os.stat(file).st_size)
# Check hashes first
if len(files_hashes) != 1:
for i in range(len(files) - 1):
file_a = files[i]
file_b = files[i + 1]
file_a_contents = None
file_b_contents = None
if max_file_size > 1024 * 1024:
file_a_contents = summarize(file_a)
file_b_contents = summarize(file_b)
else:
with open(file_a, 'rb') as a:
file_a_contents = a.read()
with open(file_b, 'rb') as b:
file_b_contents = b.read()
diff, problem = color_diff(file_a_contents, file_b_contents)
assert not problem, 'File {a} does not match {b}:{newline}{diff}'.format(
a = file_a,
b = file_b,
newline = os.linesep,
diff = diff)
assert False, 'File hashes don\'t match.'
with args.stamp as stamp_file:
stamp_file.write(str(args))
if __name__ == '__main__':
main()
| quittle/bazel_toolbox | assert/scripts/assert_equal.py | Python | apache-2.0 | 4,276 | 0.005847 |
#!/usr/bin/env python3
# Copyright 2015 Dietrich Epp.
# This file is part of SGGL. SGGL is licensed under the terms of the
# 2-clause BSD license. For more information, see LICENSE.txt.
import glgen.__main__
glgen.__main__.main()
| depp/sggl | sggl.py | Python | bsd-2-clause | 232 | 0 |
# Copyright 2015 Google Inc. All Rights Reserved.
"""Command for setting target pools of instance group manager."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import utils
class SetTargetPools(base_classes.BaseAsyncMutator):
"""Set instances target pools of instance group manager."""
@staticmethod
def Args(parser):
parser.add_argument('instance_group_manager',
help='Instance group manager name.')
mutually_exclusive_group = parser.add_mutually_exclusive_group()
mutually_exclusive_group.add_argument(
'--clear-target-pools',
action='store_true',
help='Do not add instances to any Compute Engine Target Pools.')
mutually_exclusive_group.add_argument(
'--target-pools',
type=arg_parsers.ArgList(min_length=1),
action=arg_parsers.FloatingListValuesCatcher(),
metavar='TARGET_POOL',
help=('Compute Engine Target Pools to add the instances to. '
              'Target Pools must be specified by name or by URL. Example: '
              '--target-pools target-pool-1,target-pool-2'))
utils.AddZoneFlag(
parser,
resource_type='instance group manager',
operation_type='set target pools')
@property
def method(self):
return 'SetTargetPools'
@property
def service(self):
return self.compute.instanceGroupManagers
@property
def resource_type(self):
return 'instanceGroupManagers'
def _ValidateArgs(self, args):
if not args.clear_target_pools and args.target_pools is None:
raise exceptions.InvalidArgumentException(
          '--target-pools', 'not passed and --clear-target-pools not present '
          'either.')
def CreateRequests(self, args):
self._ValidateArgs(args)
ref = self.CreateZonalReference(args.instance_group_manager, args.zone)
region = utils.ZoneNameToRegionName(ref.zone)
if args.clear_target_pools:
pool_refs = []
else:
pool_refs = self.CreateRegionalReferences(
args.target_pools, region, resource_type='targetPools')
pools = [pool_ref.SelfLink() for pool_ref in pool_refs]
request = (
self.messages.ComputeInstanceGroupManagersSetTargetPoolsRequest(
instanceGroupManager=ref.Name(),
instanceGroupManagersSetTargetPoolsRequest=(
self.messages.InstanceGroupManagersSetTargetPoolsRequest(
targetPools=pools,
)
),
project=self.project,
zone=ref.zone,)
)
return [request]
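# Illustrative invocation handled by this command (command path and flag names
# inferred from this module's location and argument definitions, not from
# official documentation):
#   gcloud compute instance-groups managed set-target-pools my-group \
#       --zone us-central1-a --target-pools target-pool-1,target-pool-2
# Passing --clear-target-pools instead sets an empty target pool list for the
# group.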
SetTargetPools.detailed_help = {
    'brief': 'Set target pools of instance group manager.',
'DESCRIPTION': """
*{command}* sets the target pools for an existing instance group
manager.
The new target pools won't apply to existing instances in the group
unless they are recreated using the recreate-instances command. But any
new instances created in the managed instance group will be added to all
of the provided target pools for load balancing purposes.
""",
}
| wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/instance_groups/managed/set_target_pools.py | Python | apache-2.0 | 3,208 | 0.004364 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Incident',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=150)),
('description', models.TextField(max_length=1000)),
('severity', models.CharField(default='ME', max_length=2, choices=[('UR', 'Urgent'), ('HI', 'High'), ('ME', 'Medium'), ('LO', 'Low'), ('IN', 'Info')])),
('closed', models.BooleanField(default=False)),
('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
]
| vivek8943/tracker_project | tracker_project/tracker/migrations/0001_initial.py | Python | mit | 1,059 | 0.002833 |
"""
/***************************************************************************
Name : ProfileTenureView
Description : A widget for rendering a profile's social tenure
relationship.
Date : 9/October/2016
copyright : John Kahiu
email : gkahiu at gmail dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import math
from qgis.PyQt.QtCore import (
pyqtSignal,
QFile,
QIODevice,
QLineF,
QPointF,
QRect,
QRectF,
QSize,
QSizeF,
Qt
)
from qgis.PyQt.QtGui import (
QBrush,
QColor,
QCursor,
QFont,
QFontMetrics,
QIcon,
QImage,
QLinearGradient,
QKeyEvent,
QPainter,
QPainterPath,
QPen,
QPolygonF,
QTextLayout
)
from qgis.PyQt.QtWidgets import (
QApplication,
QComboBox,
QDialog,
QGraphicsItem,
QGraphicsLineItem,
QGraphicsScene,
QGraphicsTextItem,
QGraphicsView,
QGridLayout,
QLabel,
QMessageBox,
QSizePolicy,
QSpacerItem,
QToolButton,
QWidget
)
from stdm.ui.gui_utils import GuiUtils
from stdm.ui.image_export_settings import ImageExportSettings
class Arrow(QGraphicsLineItem):
"""
Renders an arrow object (with line and arrow head) from one item to
another. The arrow head size can be customized by specifying the angle
and width of the arrow base.
"""
def __init__(self, start_item, end_item, base_width=None,
tip_angle=None, fill_arrow_head=False,
parent_item=None):
"""
Class constructor
        :param start_item: Arrow start item.
        :type start_item: BaseTenureItem
        :param end_item: Arrow end item.
        :type end_item: BaseTenureItem
:param base_width: Width (in pixels) of the arrow base. If not
specified, it defaults to 9.0.
:type base_width: float
:param tip_angle: Angle (in radians) between the two line components
at the tip of the arrow. If not specified, it defaults to
math.radians(50.0).
        Minimum: math.radians(10.0)
        Maximum: less than math.radians(90.0)
:type tip_angle: float
:param fill_arrow_head: True to close and fill the arrow head with
the specified pen and brush settings. Defaults to False.
:type fill_arrow_head: bool
:param parent_item: Parent item.
:type parent_item: QGraphicsItem
"""
super(Arrow, self).__init__(parent_item)
self._start_item = start_item
self._end_item = end_item
self.base_width = base_width
if self.base_width is None:
self.base_width = 9.0
self._angle = tip_angle
if tip_angle is None:
self._angle = math.radians(50.0)
self.fill_arrow_head = fill_arrow_head
self.setPen(
QPen(
Qt.black,
1,
Qt.SolidLine,
Qt.RoundCap,
Qt.MiterJoin
)
)
self.brush = QBrush(Qt.black)
self._arrow_head_points = []
@property
def start_item(self):
"""
:return: Returns the start item for the arrow.
:rtype: BaseTenureItem
"""
return self._start_item
@property
def end_item(self):
"""
:return: Returns the end item for the arrow.
:rtype: BaseTenureItem
"""
return self._end_item
@property
def start_point(self):
"""
:return: Returns the arrow start point.
:rtype: QPointF
"""
return self._start_item.pos()
@property
def end_point(self):
"""
:return: Returns the arrow end point.
:rtype: QPointF
"""
return self._end_item.pos()
def boundingRect(self):
extra = (self.base_width + self.pen().widthF()) / 2.0
p1 = self.line().p1()
p2 = self.line().p2()
rect = QRectF(
p1, QSizeF(p2.x() - p1.x(), p2.y() - p1.y())
).normalized().adjusted(-extra, -extra, extra, extra)
return rect
def arrow_head_polygon(self):
"""
:return: Returns the arrow head as a QPolygonF object.
:rtype: QPolygonF
"""
return QPolygonF(self._arrow_head_points)
def shape(self):
path = super(Arrow, self).shape()
path.addPolygon(self.arrow_head_polygon())
return path
@property
def angle(self):
"""
:return: Returns the value of the angle at the tip in radians.
:rtype: float
"""
return self._angle
@angle.setter
def angle(self, angle):
"""
Sets the value of the angle to be greater than or equal to
math.radians(10.0) and less than math.radians(90).
:param angle: Angle at the tip of the arrow in radians.
:type angle: float
"""
min_angle = math.radians(10.0)
max_angle = math.radians(90)
if angle < min_angle:
self._angle = min_angle
elif angle > max_angle:
self._angle = max_angle
else:
self._angle = angle
self.update()
@property
def arrow_points(self):
"""
:return: Returns a collection of points used to draw the arrow head.
:rtype: list(QPointF)
"""
return self._arrow_head_points
def update_position(self):
"""
Updates the position of the line and arrowhead when the positions of
the start and end items change.
"""
line = QLineF(
self.mapFromScene(self.start_item.center()),
self.mapFromScene(self.end_item.center())
)
self.setLine(line)
def _intersection_point(self, item, reference_line):
# Computes the intersection point between the item's line segments
# with the reference line.
intersect_point = QPointF()
for l in item.line_segments():
intersect_type = l.intersect(reference_line, intersect_point)
if intersect_type == QLineF.BoundedIntersection:
return intersect_point
return None
def paint(self, painter, option, widget):
"""
Draw the arrow item.
"""
if self._start_item.collidesWithItem(self._end_item):
return
painter.setPen(self.pen())
center_line = QLineF(self.start_item.center(), self.end_item.center())
# Get intersection points
start_intersection_point = self._intersection_point(
self._start_item,
center_line
)
end_intersection_point = self._intersection_point(
self._end_item,
center_line
)
# Do not draw if there are no intersection points
if start_intersection_point is None or end_intersection_point is None:
return
arrow_line = QLineF(start_intersection_point, end_intersection_point)
self.setLine(arrow_line)
arrow_length = arrow_line.length()
# Setup computation parameters
cnt_factor = (self.base_width / 2.0) / (
math.tan(self._angle / 2.0) * arrow_length
)
cnt_point_delta = (self.base_width / 2.0) / arrow_length
# Get arrow base along the line
arrow_base_x = end_intersection_point.x() - (arrow_line.dx() * cnt_factor)
arrow_base_y = end_intersection_point.y() - (arrow_line.dy() * cnt_factor)
# Get deltas to arrow points from centre point of arrow base
cnt_point_dx = -(arrow_line.dy() * cnt_point_delta)
cnt_point_dy = arrow_line.dx() * cnt_point_delta
# Compute absolute arrow positions
A1 = QPointF(arrow_base_x - cnt_point_dx, arrow_base_y - cnt_point_dy)
A2 = QPointF(arrow_base_x + cnt_point_dx, arrow_base_y + cnt_point_dy)
# Update arrow points
self._arrow_head_points = [A1, A2, end_intersection_point]
# Draw main arrow line
painter.drawLine(arrow_line)
# Draw arrow head
if not self.fill_arrow_head:
painter.drawLine(A1, end_intersection_point)
painter.drawLine(end_intersection_point, A2)
else:
painter.setPen(Qt.NoPen)
painter.setBrush(self.brush)
painter.drawPolygon(self.arrow_head_polygon())
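# A minimal usage sketch for Arrow (names are hypothetical; assumes two
# BaseTenureItem-derived items already added to a QGraphicsScene):
#   arrow = Arrow(party_item, str_item, base_width=9.0,
#                 tip_angle=math.radians(40.0), fill_arrow_head=True)
#   party_item.add_arrow(arrow)
#   str_item.add_arrow(arrow)
#   scene.addItem(arrow)
#   arrow.update_position()
# ProfileTenureView.add_arrow() further down wraps essentially this sequence,
# plus z-ordering of the arrow relative to its items.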
class BaseIconRender(object):
"""Renders an icon on the tenure item's header section. This icon can be
can be used to visually depict the nature of the context of the tenure
item. See bounding_rect function for positioning of the icon in the
tenure item. This is an abstract class and needs to be sub-classed for
custom renderers."""
def __init__(self):
# Icon area is 16px by 16px
# TODO: Set location based on screen resolution
self.upper_left = QPointF(142.5, 14.0)
self.bottom_right = QPointF(158.5, 30.0)
def bounding_rect(self):
"""
:return: Returns the bounds of the icon and does not factor in the
pen width.
:rtype: QRectF
"""
return QRectF(self.upper_left, self.bottom_right)
@property
def width(self):
"""
:return: Returns the width of the icon plane area.
:rtype: float
"""
return self.bottom_right.x() - self.upper_left.x()
@property
def height(self):
"""
:return: Returns the height of the icon plane area.
:rtype: float
"""
return self.bottom_right.y() - self.upper_left.y()
@property
def pen(self):
"""
:return: Returns a default pen for use in the renderer's painter.
:rtype: QPen
"""
return QPen(Qt.black, 1.0, Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin)
def draw(self, painter, item):
"""
Custom code for rendering the icon. To be implemented by subclasses.
:param painter: Painter object
:type painter: QPainter
:param item: Tenure item object.
:type item: BaseTenureItem
"""
raise NotImplementedError
class EntityIconRenderer(BaseIconRender):
"""Renderer for an icon depicting a data table."""
def draw(self, p, item):
# Save painter state
p.save()
p.setPen(self.pen)
# Draw outline
# Define gradient
grad = QLinearGradient(self.upper_left, self.bottom_right)
grad.setColorAt(0.0, Qt.white)
grad.setColorAt(0.65, QColor('#D2F6FC'))
grad.setColorAt(1.0, QColor('#50E3FC'))
grad_bush = QBrush(grad)
p.setBrush(grad_bush)
p.drawRect(self.bounding_rect())
# Draw column header
cols_header_rect = QRectF(
self.upper_left.x() + 0.5,
self.upper_left.y() + 0.5,
self.width - 1.0,
3.5
)
p.setBrush(QColor('#1399FC'))
p.setPen(Qt.NoPen)
p.drawRect(cols_header_rect)
# Draw horizontal separators
h1_start_point = self.upper_left + QPointF(0, 4.0)
h1_end_point = self.upper_left + QPointF(self.width, 4.0)
h1_sep = QLineF(h1_start_point, h1_end_point)
p.setPen(self.pen)
p.drawLine(h1_sep)
h_col_pen = QPen(self.pen)
h_col_pen.setColor(QColor('#32A7BB'))
p.setPen(h_col_pen)
delta_v = 12 / 3.0
y = 4.0 + delta_v
for i in range(2):
h_start_point = self.upper_left + QPointF(1.0, y)
h_end_point = self.upper_left + QPointF(self.width - 1.0, y)
h_sep = QLineF(h_start_point, h_end_point)
y += delta_v
p.drawLine(h_sep)
# Draw vertical column separator
v_start_point = self.upper_left + QPointF(8.0, 0)
v_end_point = self.upper_left + QPointF(8.0, 16.0)
col_vertical_sep = QLineF(v_start_point, v_end_point)
p.setPen(self.pen)
p.drawLine(col_vertical_sep)
p.restore()
class DocumentIconRenderer(BaseIconRender):
"""Renderer for document icon."""
def draw(self, p, item):
p.save()
# Draw primary folder
outline = QPen(self.pen)
outline.setColor(QColor('#1399FC'))
p.setPen(outline)
back_leaf_brush = QBrush(QColor('#C2E4F8'))
p.setBrush(back_leaf_brush)
leaf_1 = QPainterPath()
leaf_1.moveTo(self.upper_left + QPointF(0, (self.height - 1.5)))
leaf_1.lineTo(self.upper_left + QPointF(0, 5.0))
leaf_1.lineTo(self.upper_left + QPointF(2.0, 5.0))
leaf_1.lineTo(self.upper_left + QPointF(4.0, 2.5))
leaf_1.lineTo(self.upper_left + QPointF(8.0, 2.5))
leaf_1.lineTo(self.upper_left + QPointF(10.0, 5.0))
leaf_1.lineTo(self.upper_left + QPointF(13.0, 5.0))
leaf_1.lineTo(self.upper_left + QPointF(13.0, self.height - 1.5))
leaf_1.closeSubpath()
p.drawPath(leaf_1)
# Front folder leaf
p.setBrush(QBrush(Qt.white))
leaf_2 = QPainterPath()
leaf_2.moveTo(self.upper_left + QPointF(0.5, (self.height - 0.5)))
leaf_2.lineTo(self.upper_left + QPointF(3.0, 8.5))
leaf_2.lineTo(self.upper_left + QPointF(15.5, 8.5))
leaf_2.lineTo(self.upper_left + QPointF(13.0, self.height - 0.5))
leaf_2.closeSubpath()
p.drawPath(leaf_2)
p.restore()
class TenureLinkRenderer(BaseIconRender):
"""Renders an icon depicting a link between the party and
spatial unit."""
def draw(self, p, item):
p.save()
outline = QPen(self.pen)
outline.setColor(QColor('#1399FC'))
outline.setCapStyle(Qt.RoundCap)
outline.setWidthF(1.6)
p.setPen(outline)
# Set segment fill brush
seg_brush = QBrush(QColor('#ECF8FF'))
p.setBrush(seg_brush)
# Draw link segment
link_path = QPainterPath()
link_path.moveTo(self.upper_left + QPointF(2.0, 5.0))
rect_pos = self.upper_left + QPointF(0.5, 5.0)
arc_rect = QRectF(rect_pos, QSizeF(3.0, 6.0))
link_path.arcTo(arc_rect, 90, 180.0)
link_path.lineTo(self.upper_left + QPointF(5.5, 11.0))
rect_pos_2 = self.upper_left + QPointF(4.0, 5.0)
arc_rect_2 = QRectF(rect_pos_2, QSizeF(3.0, 6.0))
link_path.arcTo(arc_rect_2, -90, 180)
link_path.closeSubpath()
p.drawPath(link_path)
# Draw 2nd segment
p.translate(8.5, 0)
p.drawPath(link_path)
# Draw segment connector
p.translate(-8.5, 0)
start_p = self.upper_left + QPointF(5.0, 8.0)
end_p = self.upper_left + QPointF(11.0, 8.0)
p.drawLine(QLineF(start_p, end_p))
p.restore()
class BaseTenureItem(QGraphicsItem):
"""Abstract class that provides core functionality for rendering entity and
social tenure relationship objects corresponding to the entities in a
given profile."""
Type = QGraphicsItem.UserType + 1
def __init__(self, parent=None, **kwargs):
super(BaseTenureItem, self).__init__(parent)
self.setFlag(QGraphicsItem.ItemIsMovable)
# Renderer for header icon
self.icon_renderer = kwargs.get('icon_renderer', None)
self.arrows = []
self.pen = QPen(
Qt.black,
0.9,
Qt.SolidLine,
Qt.RoundCap,
Qt.RoundJoin
)
# Display properties
self._default_header = QApplication.translate(
'ProfileTenureView',
'Not Defined'
)
self.header = self._default_header
self.items_title = ''
self.icon_painter = kwargs.pop('icon_painter', None)
self.items = []
self.font_name = 'Consolas'
self._entity = None
# Distance between the primary shape and its shadow
self.shadow_thickness = 4
self._side = 156
self._height = self._side
self._start_pos = 10
# The start and stop positions match the size of the item
stop_position = self._start_pos + self._side
# Main item gradient
self._gradient = QLinearGradient(
self._start_pos,
self._start_pos,
stop_position,
stop_position
)
self._gradient_light = QColor('#fcf2e3')
self._gradient_dark = QColor('#e9dac2')
self._gradient.setColorAt(0.0, self._gradient_light)
self._gradient.setColorAt(1.0, self._gradient_dark)
self._brush = QBrush(self._gradient)
# Shadow gradient
# The start and stop positions match the size of the item
shadow_start_pos = self._start_pos + self.shadow_thickness
shadow_stop_pos = self._start_pos + self._side + self.shadow_thickness
self._shadow_gradient = QLinearGradient(
shadow_start_pos,
shadow_start_pos,
shadow_stop_pos,
shadow_stop_pos
)
self._shadow_gradient.setColorAt(0.0, QColor('#f7f8f9'))
self._shadow_gradient.setColorAt(1.0, QColor('#d1d1d1'))
self._brush = QBrush(self._gradient)
self._text_highlight_color = QColor('#E74C3C')
self._text_item_color = QColor('#CC0000')
self._normal_text_color = Qt.black
def type(self):
return BaseTenureItem.Type
def remove_arrow(self, arrow):
"""
Removes an arrow from the collection.
:param arrow: Arrow item.
:type arrow: Arrow
"""
try:
self.arrows.remove(arrow)
except ValueError:
pass
def remove_arrows(self):
"""
Removes all arrows associated with this item and related item.
"""
for ar in self.arrows[:]:
ar.start_item.remove_arrow(ar)
ar.end_item.remove_arrow(ar)
self.scene().removeItem(ar)
def add_arrow(self, arrow):
"""
Adds arrow item to the collection.
:param arrow: Arrow item.
:type arrow: Arrow
"""
self.arrows.append(arrow)
def boundingRect(self):
extra = self.pen.widthF() / 2.0
return QRectF(
self._start_pos - extra,
self._start_pos - extra,
self.width + self.shadow_thickness + extra,
self.height + self.shadow_thickness + extra
)
def invalidate(self):
"""
Reset the title and items.
"""
self.header = self._default_header
self.items = []
self.update()
@property
def brush(self):
"""
:return: Returns the brush used for rendering the entity item.
:rtype: QBrush
"""
return self._brush
@property
def header_font(self):
"""
:return: Returns the font object used to render the header text.
:rtype: QFont
"""
return QFont(self.font_name, 10, 63)
@property
def items_title_font(self):
"""
:return: Returns the font object used to render the items header text.
:rtype: QFont
"""
return QFont(self.font_name, 10)
@property
def items_font(self):
"""
:return: Returns the font object used to render multiline items.
:rtype: QFont
"""
return QFont(self.font_name, 9)
@property
def entity(self):
"""
        :return: Returns the entity associated with the renderer.
:rtype: Entity
"""
return self._entity
def auto_adjust_height(self):
"""
:return: True if the height should be automatically adjusted to fit
the number of items specified. Otherwise, False; in this case, the
height is equal to the default height of the item. Items that exceed
the height of the items area will not be shown.
To be overridden by sub-classes.
:rtype: bool
"""
return True
@entity.setter
def entity(self, entity):
"""
Sets the current entity object.
:param entity: Entity object.
:type entity: Entity
"""
self._entity = entity
self.prepareGeometryChange()
self._on_set_entity()
def _on_set_entity(self):
"""
Update attributes based on the entity's attributes. To be implemented
by subclasses.
"""
raise NotImplementedError
@property
def width(self):
"""
:return: Returns the logical width of the item.
:rtype: float
"""
return float(self._side + self.shadow_thickness)
@property
def height(self):
"""
:return: Returns the logical height of the item. If
auto_adjust_height is True then the height will be automatically
adjusted to match number of items, else it will be equal to the width
of the item.
"""
return float(self._height + self.shadow_thickness)
def scene_bounding_rect(self):
"""
:return: Returns the bounding rect of the primary item in scene
coordinates, this does not include the shadow thickness.
:rtype: QRectF
"""
local_start_point = QPointF(self._start_pos, self._start_pos)
scene_start_point = self.mapToScene(local_start_point)
return QRectF(scene_start_point, QSizeF(self._side, self._height))
def center(self):
"""
:return: Returns the center point of the item in scene coordinates.
:rtype: QPointF
"""
return self.scene_bounding_rect().center()
def line_segments(self):
"""
:return: Returns a list of QLineF objects that constitute the scene
bounding rect. The line segments are in scene coordinates.
        :rtype: list(QLineF)
"""
lines = []
rect = self.scene_bounding_rect()
poly = QPolygonF(rect)
for i, p in enumerate(poly):
if i == len(poly) - 1:
break
p1 = poly[i]
# Close to first point if the last item is reached
if i + 1 == len(poly):
p2 = poly[0]
else:
p2 = poly[i + 1]
# Construct line object
line = QLineF(p1, p2)
lines.append(line)
return lines
def _elided_text(self, font, text, width):
# Returns elided version of the text if greater than the width
fm = QFontMetrics(font)
return str(fm.elidedText(text, Qt.ElideRight, width))
def _elided_items(self, font, width):
# Formats each item text to incorporate an elide if need be and
# return the items in a list.
return [self._elided_text(font, item, width) for item in self.items]
def items_size(self, items):
"""
Computes an appropriate width and height of an items' text separated
by a new line.
:param items: Iterable containing string items for which the size
will be computed.
:type items: list
:return: Returns a size object that fits the items' text in the list.
:rtype: QSize
"""
fm = QFontMetrics(self.items_font)
return fm.size(Qt.TextWordWrap, '\n'.join(items))
def items_by_height(self, height, items):
"""
        :param height: Height in pixels in which the subset of items will fit.
        :type height: int
        :param items: Items from which the fitting subset will be extracted.
        :type items: list
:return: Returns a subset of items which fit the specified height.
:rtype: list
"""
items_sub = []
fm = QFontMetrics(self.items_font)
for i in items:
sz = self.items_size(items_sub)
if sz.height() > height:
break
items_sub.append(i)
return items_sub
def _font_height(self, font, text):
"""
Computes the height for the given font object.
:param font: Font object.
:type font: QFont
:param text: Text
:type text: str
:return: Returns the minimum height for the given font object.
:rtype: int
"""
fm = QFontMetrics(font)
return fm.size(Qt.TextSingleLine, text).height()
def draw_text(self, painter, text, font, bounds, alignment=Qt.AlignCenter):
"""
Provides a device independent mechanism for rendering fonts
regardless of the device's resolution. By default, the text will be
centred. This is a workaround for the font scaling issue for devices
with different resolutions.
:param painter: Painter object.
:type painter: QPainter
:param text: Text to be rendered.
:type text: str
:param font: Font for rendering the text.
:type font: QFont
:param bounds: Rect object which will provide the reference point for
drawing the text.
:type bounds: QRectF
:param alignment: Qt enums used to describe alignment. AlignCenter is
the default. Accepts bitwise OR for horizontal and vertical flags.
:type alignment: int
"""
layout = QTextLayout(text, font)
layout.beginLayout()
# Create the required number of lines in the layout
while layout.createLine().isValid():
pass
layout.endLayout()
y = 0
max_width = 0
# Set line positions relative to the layout
for i in range(layout.lineCount()):
line = layout.lineAt(i)
max_width = max(max_width, line.naturalTextWidth())
line.setPosition(QPointF(0, y))
y += line.height()
# Defaults
start_x = bounds.left()
start_y = bounds.top()
# Horizontal flags
if (alignment & Qt.AlignLeft) == Qt.AlignLeft:
start_x = bounds.left()
elif (alignment & Qt.AlignCenter) == Qt.AlignCenter or \
(alignment & Qt.AlignHCenter) == Qt.AlignHCenter:
start_x = bounds.left() + (bounds.width() - max_width) / 2.0
# Vertical flags
        if (alignment & Qt.AlignTop) == Qt.AlignTop:
start_y = bounds.top()
elif (alignment & Qt.AlignCenter) == Qt.AlignCenter or \
(alignment & Qt.AlignVCenter) == Qt.AlignVCenter:
start_y = bounds.top() + (bounds.height() - y) / 2.0
layout.draw(painter, QPointF(start_x, start_y))
def paint(self, painter, option, widget=None):
"""
Performs the painting of the tenure item based on the object's
attributes.
:param painter: Performs painting operation on the item.
:type painter: QPainter
:param option: Provides style option for the item.
:type option: QStyleOptionGraphicsItem
        :param widget: Pointer to the widget that is being painted on.
:type widget: QWidget
"""
shadow_start_pos = self._start_pos + self.shadow_thickness
# Use height of subsections to compute the appropriate height
header_height = self._font_height(self.header_font, self.header) + 7
items_title_height = self._font_height(
self.items_title_font,
self.items_title
)
margin = 1
fixed_height = header_height + items_title_height + (6 * margin)
if self.auto_adjust_height():
items_height = self.items_size(self.items).height() + 2
main_item_height = max(self._side, fixed_height + items_height)
else:
items_height = self._side - fixed_height
main_item_height = self._side
self._height = main_item_height
shadow_rect = QRect(
shadow_start_pos,
shadow_start_pos,
self._side,
main_item_height
)
main_item_rect = QRect(
self._start_pos,
self._start_pos,
self._side,
main_item_height
)
painter_pen = painter.pen()
painter_pen.setColor(self._normal_text_color)
painter_pen.setWidth(0)
# Create shadow effect using linear gradient
painter.setBrush(self._shadow_gradient)
painter.setPen(Qt.NoPen)
painter.drawRect(shadow_rect)
painter.setPen(self.pen)
painter.setBrush(self._brush)
# Main item outline
painter.drawRect(main_item_rect)
line_y_pos = header_height + margin * 2
painter.drawLine(
self._start_pos,
self._start_pos + line_y_pos,
self._start_pos + self._side,
self._start_pos + line_y_pos
)
# Draw header text
header_start_pos = self._start_pos + margin
header_rect = QRect(
header_start_pos,
header_start_pos,
self._side - (margin * 2),
header_height
)
# Adjust header text area if there is an icon renderer
if not self.icon_renderer is None:
init_width = header_rect.width()
adj_width = init_width - (self.icon_renderer.width + 6)
header_rect.setWidth(adj_width)
# Draw header icon if renderer is available
if not self.icon_renderer is None:
if isinstance(self.icon_renderer, BaseIconRender):
self.icon_renderer.draw(painter, self)
painter.setFont(self.header_font)
if self.header == self._default_header:
painter.setPen(self._text_highlight_color)
else:
painter.setPen(self._normal_text_color)
elided_header = self._elided_text(
self.header_font,
self.header,
header_rect.width()
)
# print(elided_header)
self.draw_text(painter, elided_header, self.header_font, header_rect)
# Draw items header
items_title_rect = QRect(
header_start_pos + 1,
header_height + items_title_height - 1,
self._side - (margin * 4),
items_title_height
)
painter.setFont(self.items_title_font)
painter.setPen(QColor('#c3b49c'))
items_title_brush = QBrush(self._gradient_dark)
painter.setBrush(items_title_brush)
painter.drawRect(items_title_rect)
# Adjust left margin of items title
items_title_rect.adjust(1, 0, 0, 0)
painter.setPen(self._normal_text_color)
self.draw_text(
painter,
self.items_title,
self.items_title_font,
items_title_rect
)
# Items listing
items_margin = 6
items_vertical_pos = header_height + items_title_height + 16
items_w = self._side - (items_margin * 2)
items_rect = QRect(
header_start_pos + items_margin,
items_vertical_pos,
items_w,
items_height
)
# Draw if there are items
if len(self.items) > 0:
painter.setFont(self.items_font)
painter.setPen(self._text_item_color)
multiline_items = self._elided_items(self.items_font, items_w)
# If auto-adjust is disabled then extract subset that will fit
if not self.auto_adjust_height():
multiline_items = self.items_by_height(
items_height,
multiline_items
)
# QTextLayout requires the unicode character of the line separator
multiline_items = '\u2028'.join(multiline_items)
self.draw_text(
painter,
multiline_items,
self.items_font,
items_rect,
Qt.AlignLeft | Qt.AlignTop
)
class EntityItem(BaseTenureItem):
"""
Represents a Party or a SpatialUnit items in a profile's social tenure
relationship.
"""
Type = QGraphicsItem.UserType + 2
def __init__(self, *args, **kwargs):
super(EntityItem, self).__init__(*args, **kwargs)
columns = QApplication.translate(
'ProfileTenureView',
'columns'
)
self.items_title = '<<{0}>>'.format(columns)
# Use default renderer if none is specified
if self.icon_renderer is None:
self.icon_renderer = EntityIconRenderer()
def type(self):
return EntityItem.Type
def _on_set_entity(self):
if not self._entity is None:
self.header = self.entity.short_name
self.items = list(self.entity.columns.keys())
self.update()
def _updated_code_values(value_list):
vl = []
# Use updated values in the value list
for cd in value_list.values.values():
lk_value = cd.value
if cd.updated_value:
lk_value = cd.updated_value
vl.append(lk_value)
return vl
class TenureRelationshipItem(BaseTenureItem):
"""
Renders the profile's tenure relationship by listing the tenure types.
"""
Type = QGraphicsItem.UserType + 3
def __init__(self, *args, **kwargs):
super(TenureRelationshipItem, self).__init__(*args, **kwargs)
tenure_types = QApplication.translate(
'ProfileTenureView',
'tenure types'
)
self.items_title = '<<{0}>>'.format(tenure_types)
self.header = QApplication.translate(
'ProfileTenureView',
'Social Tenure'
)
# Use default renderer if none is specified
if self.icon_renderer is None:
self.icon_renderer = TenureLinkRenderer()
def type(self):
return TenureRelationshipItem.Type
def auto_adjust_height(self):
# Base class override
return False
def _on_set_entity(self):
if not self._entity is None:
self.items = _updated_code_values(
self.entity.tenure_type_lookup.value_list
)
self.update()
class TenureDocumentItem(BaseTenureItem):
"""
Renders the document types for the social tenure relationship.
"""
Type = QGraphicsItem.UserType + 4
def __init__(self, *args, **kwargs):
super(TenureDocumentItem, self).__init__(*args, **kwargs)
tenure_types = QApplication.translate(
'ProfileTenureView',
'document types'
)
self.items_title = '<<{0}>>'.format(tenure_types)
self.header = QApplication.translate(
'ProfileTenureView',
'Documents'
)
# Use default renderer if none is specified
if self.icon_renderer is None:
self.icon_renderer = DocumentIconRenderer()
def type(self):
return TenureDocumentItem.Type
def auto_adjust_height(self):
# Base class override
return False
def _on_set_entity(self):
if not self._entity is None:
supporting_doc = self.entity.supporting_doc
self.items = _updated_code_values(
supporting_doc.doc_type.value_list
)
self.update()
class Annotation(QGraphicsTextItem):
"""Add major or minor annotation item to the view. The only difference
between major and minor annotations is the font size and underline
(for the former)."""
Minor, Major = list(range(2))
lost_focus = pyqtSignal(QGraphicsTextItem)
def __init__(self, parent=None, size=0):
super(Annotation, self).__init__(parent)
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setFlag(QGraphicsItem.ItemIsSelectable)
self.size = size
self.setDefaultTextColor(Qt.black)
font = 'Consolas'
# Set font size
if self.size == Annotation.Minor:
self.setFont(QFont(font, 10, 50))
else:
font = QFont(font, 14, 75)
font.setUnderline(True)
self.setFont(font)
def focusOutEvent(self, event):
# Disable text interaction
self.setTextInteractionFlags(Qt.NoTextInteraction)
self.lost_focus.emit(self)
super(Annotation, self).focusOutEvent(event)
def mouseDoubleClickEvent(self, event):
# Enable text interaction
if self.textInteractionFlags() == Qt.NoTextInteraction:
self.setTextInteractionFlags(Qt.TextEditorInteraction)
super(Annotation, self).mouseDoubleClickEvent(event)
class ProfileTenureScene(QGraphicsScene):
"""
Custom scene for handling annotation items.
"""
InsertMajorAnnotation, InsertMinorAnnotation, MoveItem = list(range(3))
annotation_inserted = pyqtSignal(QGraphicsTextItem)
def __init__(self, parent=None):
super(ProfileTenureScene, self).__init__(parent)
self.mode = ProfileTenureScene.MoveItem
def editor_lost_focus(self, item):
"""
Check if the annotation item is empty and delete if it is.
:param item: Annotation item.
:type item: QGraphicsTextItem
"""
cursor = item.textCursor()
cursor.clearSelection()
item.setTextCursor(cursor)
if not item.toPlainText():
self.removeItem(item)
item.deleteLater()
def mousePressEvent(self, event):
"""
Handles insert of annotation item.
:param event: Mouse press event.
:type event: QGraphicsSceneMouseEvent
"""
if event.button() != Qt.LeftButton:
return
if self.mode == ProfileTenureScene.InsertMajorAnnotation:
sz = Annotation.Major
self._insert_annotation_item(sz, event.scenePos())
elif self.mode == ProfileTenureScene.InsertMinorAnnotation:
sz = Annotation.Minor
self._insert_annotation_item(sz, event.scenePos())
super(ProfileTenureScene, self).mousePressEvent(event)
def _insert_annotation_item(self, size, scene_pos):
# Insert major or minor annotation based on size
annotation = Annotation(size=size)
annotation.setTextInteractionFlags(Qt.TextEditorInteraction)
annotation.setZValue(1000.0)
annotation.lost_focus.connect(self.editor_lost_focus)
self.addItem(annotation)
annotation.setPos(scene_pos)
self.annotation_inserted.emit(annotation)
class ProfileTenureView(QGraphicsView):
"""
A widget for rendering a profile's social tenure relationship. It also
includes functionality for saving the view as an image.
"""
MIN_DPI = 72
MAX_DPI = 600
# Enums for add party policy
ADD_TO_EXISTING, REMOVE_PREVIOUS = list(range(2))
def __init__(self, parent=None, profile=None):
super(ProfileTenureView, self).__init__(parent)
# Specify STR graphic items adding policy
self.add_party_policy = ProfileTenureView.ADD_TO_EXISTING
self.add_spatial_unit_policy = ProfileTenureView.ADD_TO_EXISTING
# Init items
# Container for party entities and corresponding items
self._default_party_item = EntityItem()
self._party_items = {}
self._sp_unit_items = {}
self._default_sp_item = EntityItem()
self._str_item = TenureRelationshipItem()
self._supporting_doc_item = TenureDocumentItem()
self.profile = profile
scene_rect = QRectF(0, 0, 960, 540)
scene = ProfileTenureScene(self)
scene.setItemIndexMethod(QGraphicsScene.NoIndex)
scene.setSceneRect(scene_rect)
self.setRenderHint(QPainter.Antialiasing)
self.setRenderHint(QPainter.TextAntialiasing)
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.setResizeAnchor(QGraphicsView.AnchorViewCenter)
self.setScene(scene)
# Connect signals
scene.annotation_inserted.connect(self.annotation_inserted)
# Add items to view
self.scene().addItem(self._default_party_item)
self.scene().addItem(self._str_item)
self.scene().addItem(self._default_sp_item)
self.scene().addItem(self._supporting_doc_item)
# Position items
self._default_party_item.setPos(210, 20)
self._str_item.setPos(400, 20)
self._default_sp_item.setPos(590, 20)
self._supporting_doc_item.setPos(400, 220)
# Ensure vertical scroll is at the top
self.centerOn(490.0, 20.0)
# Link social tenure item to supporting documents item
self.add_arrow(self._supporting_doc_item, self._str_item)
def annotation_inserted(self, item):
"""
Slot raised when an annotation item has been inserted.
:param item: Annotation item.
:type item: Annotation
"""
self.scene().mode = ProfileTenureScene.MoveItem
@property
def profile(self):
"""
:return: The profile object being rendered.
:rtype: Profile
"""
return self._profile
def _update_profile(self):
# Update profile objects and render
if self._profile is None:
return
# Remove existing party items
party_items = list(self._party_items.keys())
for p in party_items:
self.remove_party(p)
# Remove spatial unit items
spatial_unit_items = list(self._sp_unit_items.keys())
for sp in spatial_unit_items:
self.remove_spatial_unit(sp)
str_ent = self._profile.social_tenure
# Set renderer entities
self._str_item.entity = str_ent
self._supporting_doc_item.entity = str_ent
# Add party entities
parties = str_ent.parties
if len(parties) == 0:
self._default_party_item.show()
else:
self.add_parties(parties)
# Add spatial unit entities
sp_units = str_ent.spatial_units
if len(sp_units) == 0:
self._default_sp_item.show()
else:
self.add_spatial_units(sp_units)
def add_parties(self, parties):
"""
Add party items to the view.
:param parties: List of party entities.
:type parties: list
"""
for p in parties:
self.add_party_entity(p)
def add_spatial_units(self, spatial_units):
"""
Add spatial unit items to the view.
:param spatial_units: List of spatial unit entities.
:type spatial_units: list
"""
for sp in spatial_units:
self.add_spatial_unit_entity(sp)
@profile.setter
def profile(self, profile):
"""
        Sets the profile object whose STR view is to be rendered.
:param profile: Profile object to be rendered.
:type profile: Profile
"""
self._profile = profile
self._update_profile()
def _highest_party_z_order(self):
# Returns the highest z-order of party graphic items.
return self._highest_item_z_order(list(self._party_items.values()))
def _highest_sp_unit_z_order(self):
# Returns the highest z-order of spatial unit graphic items.
return self._highest_item_z_order(list(self._sp_unit_items.values()))
def _highest_item_z_order(self, items):
# Get the highest z-order of the graphic items in the list.
z = 0
for gi in items:
if gi.zValue() > z:
z = gi.zValue()
return z
def add_party_entity(self, party):
"""
        Adds a party entity to the view. If there is an existing one with the
same name then it will be removed before adding this party.
:param party: Party entity.
:type party: Entity
"""
if party.short_name in self._party_items:
self.remove_party(party.short_name)
# Remove previous if set in the policy
if self.add_party_policy == ProfileTenureView.REMOVE_PREVIOUS:
            for p in list(self._party_items.keys()):
self.remove_party(p)
# Hide default party placeholder
self._default_party_item.hide()
p_item = EntityItem()
p_item.entity = party
# Set z-order
z = self._highest_party_z_order()
if z == 0:
z = 1.0
else:
z = z + 1.1
p_item.setZValue(z)
self.scene().addItem(p_item)
if len(self._party_items) == 0:
p_item.setPos(210, 20)
else:
self.auto_position(p_item)
# Add to collection
self._party_items[party.short_name] = p_item
# Add connection arrow to social tenure item
self.add_arrow(p_item, self._str_item)
def add_spatial_unit_entity(self, spatial_unit):
"""
        Adds a spatial unit entity to the view. If there is an existing one
with the same name then it will be removed before adding this spatial
unit.
.. versionadded:: 1.7
:param spatial_unit: Spatial unit entity.
:type spatial_unit: Entity
"""
if spatial_unit.short_name in self._sp_unit_items:
self.remove_spatial_unit(spatial_unit.short_name)
# Remove previous if specified in the policy
if self.add_spatial_unit_policy == ProfileTenureView.REMOVE_PREVIOUS:
            for sp in list(self._sp_unit_items.keys()):
self.remove_spatial_unit(sp)
# Hide default spatial unit placeholder
self._default_sp_item.hide()
sp_item = EntityItem()
sp_item.entity = spatial_unit
# Set z-order
z = self._highest_sp_unit_z_order()
if z == 0:
z = 1.0
else:
z = z + 1.1
sp_item.setZValue(z)
self.scene().addItem(sp_item)
if len(self._sp_unit_items) == 0:
sp_item.setPos(590, 20)
else:
self.auto_position_spatial_unit(sp_item)
# Add to collection
self._sp_unit_items[spatial_unit.short_name] = sp_item
# Add connection arrow to social tenure item
self.add_arrow(self._str_item, sp_item)
def auto_position(self, item):
"""
Automatically positions the party item to prevent it from overlapping
the others.
:param item: Party entity item.
:type item: EntityItem
"""
item_count = len(self._party_items)
# Just in case it is called externally
if item_count == 0:
return
factor = item_count + 1
dx, dy = 5 * factor, 10 * factor
pos_x, pos_y = 205 + dx, 10 + dy
item.setPos(pos_x, pos_y)
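        # Worked example: when the second party item is positioned, one item is
        # already in the collection, so item_count == 1, factor == 2,
        # (dx, dy) == (10, 20) and the item lands at (215, 30), cascading away
        # from the first item's position.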
def auto_position_spatial_unit(self, item):
"""
Automatically positions the spatial unit item to prevent it from
overlapping the others.
.. versionadded:: 1.7
:param item: Spatial unit entity item.
:type item: EntityItem
"""
item_count = len(self._sp_unit_items)
# Just in case it is called externally
if item_count == 0:
return
factor = item_count + 1
dx, dy = 5 * factor, 10 * factor
pos_x, pos_y = 585 + dx, 10 + dy
item.setPos(pos_x, pos_y)
def remove_party(self, name):
"""
Removes the party with the specified name from the collection.
:param name: Party name
:type name: str
:return: Returns True if the operation succeeded, otherwise False if
the party with the specified name does not exist in the collection.
:rtype: bool
"""
if not name in self._party_items:
return False
p_item = self._party_items.pop(name)
p_item.remove_arrows()
self.scene().removeItem(p_item)
del p_item
# Show default party item
if len(self._party_items) == 0:
self._default_party_item.show()
return True
def remove_spatial_unit(self, name):
"""
Removes the spatial unit graphics item with the specified name from
the collection.
.. versionadded:: 1.7
:param name: Spatial unit name
:type name: str
:return: Returns True if the operation succeeded, otherwise False if
the spatial unit item with the specified name does not exist in the
collection.
:rtype: bool
"""
if not name in self._sp_unit_items:
return False
sp_item = self._sp_unit_items.pop(name)
sp_item.remove_arrows()
self.scene().removeItem(sp_item)
del sp_item
# Show default spatial unit item
if len(self._sp_unit_items) == 0:
self._default_sp_item.show()
return True
def invalidate_spatial_unit(self):
"""
Clears the spatial unit entity.
.. deprecated:: 1.7
"""
pass
def set_spatial_unit(self, spatial_unit):
"""
Set the spatial unit entity.
.. deprecated:: 1.7
:param spatial_unit: Entity corresponding to a spatial unit in a
profile's STR relationship.
:type spatial_unit: Entity
"""
pass
def add_arrow(self, start_item, end_item, **kwargs):
"""
Adds an arrow item running from the start to the end item.
:param start_item: Start item for the arrow.
:type start_item: BaseTenureItem
:param end_item: End item for the arrow.
:type end_item: BaseTenureItem
:param kwargs: Optional arrow arguments such as angle, base width
etc. See arguments for the Arrow class.
:type kwargs: dict
"""
arrow = Arrow(start_item, end_item, **kwargs)
start_item.add_arrow(arrow)
end_item.add_arrow(arrow)
# Set z-value
ref_z = end_item.zValue()
if start_item.zValue() > end_item.zValue():
ref_z = start_item.zValue()
arrow.setZValue(ref_z + 1.0)
self.scene().addItem(arrow)
arrow.update_position()
def keyPressEvent(self, event):
"""
Capture delete key to remove selected annotation items.
:param event: Key event.
:type event: QKeyEvent
"""
if event.key() == Qt.Key_Delete:
self._delete_selected_annotation_items()
super(ProfileTenureView, self).keyPressEvent(event)
def deselect_items(self):
"""
Deselects all graphic items in the scene.
"""
for item in self.scene().selectedItems():
item.setSelected(False)
def _delete_selected_annotation_items(self):
# Deletes selected annotation items in the scene
for item in self.scene().selectedItems():
if isinstance(item, Annotation):
# Only remove if item is not on interactive text edit mode
if item.textInteractionFlags() == Qt.NoTextInteraction:
self.scene().removeItem(item)
item.deleteLater()
def save_image_to_file(self, path, resolution=96, background=Qt.white):
"""
Saves the profile tenure view image to file using A4 paper size.
:param path: Absolute path where the image will be saved.
:type path: str
:param resolution: Resolution in dpi. Default is 96.
:type resolution: int
:param background: Background color of the image:
:type background: QColor
:return: Returns True if the operation succeeded, otherwise False. If
False then a corresponding message is returned as well.
:rtype: (bool, str)
"""
image = self.image(resolution, background)
if image.isNull():
msg = self.tr('Constructed image is null.')
return False, msg
# Test if file is writeable
fl = QFile(path)
if not fl.open(QIODevice.WriteOnly):
msg = self.tr('The image file cannot be saved in the '
'specified location.')
return False, msg
# Attempt to save to file
save_op = image.save(fl)
if not save_op:
msg = self.tr('Image operation failed.')
return False, msg
return True, ''
def _resolution_in_mm(self, resolution):
        # Converts the resolution (dpi) to dots per millimetre
return resolution / 25.4
def _resolution_in_m(self, resolution):
        # Converts the resolution (dpi) to dots per metre
return self._resolution_in_mm(resolution) * 1000
def image_size(self, resolution):
"""
Computes the image size from the given resolution in dpi.
:param resolution: Resolution in dpi.
:type resolution: int
:return: Image size in pixels.
:rtype: QSize
"""
res = resolution / 25.4
# A4 landscape size
width = 297 * res
height = 210 * res
return QSize(int(width), int(height))
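        # Worked example (A4 landscape): at 96 dpi the scale factor is
        # 96 / 25.4 ~= 3.78 px per mm, giving roughly 1122 x 793 pixels for
        # the 297 mm x 210 mm page.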
def image(self, resolution, background=Qt.white):
"""
Renders the view onto a QImage object.
:param resolution: Resolution of the image in dpi.
:type resolution: int
:param background: Set background color of the image. Default is a
white background.
:type background: QColor
:return: Returns a QImage object corresponding to the profile STR
view.
:rtype: QImage
"""
# Ensure resolution is within limits
if resolution < ProfileTenureView.MIN_DPI:
resolution = ProfileTenureView.MIN_DPI
if resolution > ProfileTenureView.MAX_DPI:
resolution = ProfileTenureView.MAX_DPI
# In metres
dpm = self._resolution_in_m(resolution)
image_size = self.image_size(resolution)
img = QImage(
image_size.width(),
image_size.height(),
QImage.Format_ARGB32
)
img.setDotsPerMeterX(int(dpm))
img.setDotsPerMeterY(int(dpm))
img.fill(background)
# Deselect selected items
self.deselect_items()
painter = QPainter(img)
painter.setRenderHint(QPainter.Antialiasing, True)
painter.setRenderHint(QPainter.TextAntialiasing, True)
self.scene().render(painter)
painter.end()
return img
def valid(self):
"""
:return: Returns False if the respective party and spatial unit
entities have not been set. Otherwise True.
:rtype: bool
"""
if len(self._party_items) == 0:
return False
if len(self._sp_unit_items) == 0:
return False
return True
def minimumSizeHint(self):
return QSize(480, 270)
def sizeHint(self):
return QSize(560, 315)
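# A minimal usage sketch for ProfileTenureView (assumes a running Qt
# application and a configured STDM Profile instance named `profile`; names
# are illustrative):
#   view = ProfileTenureView(profile=profile)
#   view.show()
#   if view.valid():
#       ok, msg = view.save_image_to_file('/tmp/tenure.png', resolution=150)
# In practice the view is normally embedded via ProfileTenureDiagram below,
# which adds zoom, annotation and image-export controls around it.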
class ProfileTenureDiagram(QWidget):
"""
Widget for visualizing a profile's social tenure relationship definition.
It provides controls for zooming, adding text and exporting the view to
an image file, and wraps most of the ProfileTenureView functionality.
"""
def __init__(self, parent=None, profile=None):
super(ProfileTenureDiagram, self).__init__(parent)
self._profile_view = ProfileTenureView(self, profile)
self.set_scene_mode(ProfileTenureScene.MoveItem)
self._profile_view.scene().annotation_inserted.connect(
self.on_annotation_inserted
)
self._setup_widgets()
self._current_zoom_factor = 1.0
# Image export options
self._path = ''
self._resolution = 96
self._bg_color = Qt.transparent
def scene_mode(self):
"""
:return: Returns the current state of the scene.
:rtype: int
"""
return self._profile_view.scene().mode
def set_scene_mode(self, mode):
"""
Sets the current state of the scene.
:param mode: Scene mode i.e. move item, insert major or minor
annotation.
:type mode: int
"""
if self.scene_mode() != mode:
self._profile_view.scene().mode = mode
def _setup_widgets(self):
self.layout = QGridLayout(self)
self.layout.setContentsMargins(2, 4, 2, 9)
self.minor_annotation = QToolButton(self)
self.minor_annotation.setMaximumSize(QSize(24, 24))
minor_icon = QIcon()
minor_icon.addPixmap(
GuiUtils.get_icon_pixmap('minor_annotation.png')
)
self.minor_annotation.setIcon(minor_icon)
self.minor_annotation.setCheckable(True)
self.minor_annotation.setToolTip(self.tr('Add Minor Annotation'))
self.minor_annotation.toggled.connect(self.on_minor_annotation_toggled)
self.layout.addWidget(self.minor_annotation, 0, 0, 1, 1)
self.major_annotation = QToolButton(self)
self.major_annotation.setMinimumSize(QSize(24, 24))
major_icon = QIcon()
major_icon.addPixmap(
GuiUtils.get_icon_pixmap('major_annotation.png')
)
self.major_annotation.setIcon(major_icon)
self.major_annotation.setCheckable(True)
self.major_annotation.setToolTip(self.tr('Add Major Annotation'))
self.major_annotation.toggled.connect(self.on_major_annotation_toggled)
self.layout.addWidget(self.major_annotation, 0, 1, 1, 1)
self.export_image = QToolButton(self)
self.export_image.setMinimumSize(QSize(24, 24))
export_image_icon = QIcon()
export_image_icon.addPixmap(
GuiUtils.get_icon_pixmap('save_image.png')
)
self.export_image.setIcon(export_image_icon)
self.export_image.setToolTip(self.tr('Save Image...'))
self.export_image.clicked.connect(self.on_image_export_settings)
self.layout.addWidget(self.export_image, 0, 2, 1, 1)
spacer_item = QSpacerItem(
288,
20,
QSizePolicy.Expanding,
QSizePolicy.Minimum
)
self.layout.addItem(spacer_item, 0, 3, 1, 1)
self.label = QLabel(self)
self.label.setText(self.tr('Zoom'))
self.layout.addWidget(self.label, 0, 4, 1, 1)
self.zoom_cbo = QComboBox(self)
self.zoom_cbo.addItem(self.tr('50%'), 50 / 100.0)
self.zoom_cbo.addItem(self.tr('75%'), 75 / 100.0)
self.zoom_cbo.addItem(self.tr('100%'), 100 / 100.0)
self.zoom_cbo.addItem(self.tr('125%'), 125 / 100.0)
self.zoom_cbo.addItem(self.tr('150%'), 150 / 100.0)
self.zoom_cbo.setCurrentIndex(2)
self.zoom_cbo.currentIndexChanged.connect(self.on_zoom_changed)
self.layout.addWidget(self.zoom_cbo, 0, 5, 1, 1)
self.layout.addWidget(self._profile_view, 1, 0, 1, 6)
def minimumSizeHint(self):
return QSize(500, 320)
def sizeHint(self):
return QSize(600, 360)
def image_size(self, resolution):
"""
Computes the image size based on the specified resolution.
:param resolution: Resolution in dpi.
:type resolution: int
:return: Returns a QSize object containing the width and height of
the image.
:rtype: QSize
"""
return self._profile_view.image_size(resolution)
def on_image_export_settings(self):
"""
Slot raised to show the dialog for image export settings.
"""
img_export = ImageExportSettings(
self,
image_path=self._path,
resolution=self._resolution,
background=self._bg_color
)
if img_export.exec_() == QDialog.Accepted:
self._path = img_export.path
self._resolution = img_export.resolution
self._bg_color = img_export.background_color
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
# Attempt to save the image
status, msg = self.save_image_to_file(
self._path,
self._resolution,
self._bg_color
)
QApplication.restoreOverrideCursor()
if status:
QMessageBox.information(
self,
self.tr('Profile Tenure View'),
self.tr('Image successfully saved.')
)
else:
QMessageBox.critical(
self,
self.tr('Profile Tenure View'),
msg
)
def on_major_annotation_toggled(self, state):
"""
Slot raised when the major annotation tool button has been toggled.
:param state: Button state
:type state: bool
"""
if not state and self.scene_mode() != ProfileTenureScene.MoveItem:
self.set_scene_mode(ProfileTenureScene.MoveItem)
if state:
if self.minor_annotation.isChecked():
self.minor_annotation.setChecked(False)
self.set_scene_mode(ProfileTenureScene.InsertMajorAnnotation)
def on_minor_annotation_toggled(self, state):
"""
Slot raised when the minor annotation tool button has been toggled.
:param state: Button state
:type state: bool
"""
if not state and self.scene_mode() != ProfileTenureScene.MoveItem:
self.set_scene_mode(ProfileTenureScene.MoveItem)
if state:
if self.major_annotation.isChecked():
self.major_annotation.setChecked(False)
self.set_scene_mode(ProfileTenureScene.InsertMinorAnnotation)
def on_annotation_inserted(self, item):
"""
Slot raised when an annotation item has been inserted. It unchecks
the correct tool button based on the annotation type.
:param item: Annotation item.
:type item: Annotation
"""
if not isinstance(item, Annotation):
return
anno_type = item.size
if anno_type == Annotation.Minor:
self.minor_annotation.setChecked(False)
elif anno_type == Annotation.Major:
self.major_annotation.setChecked(False)
def on_zoom_changed(self, idx):
"""
Slot raised when the zoom level changes to change the scale of the
view.
:param idx: Item index for the combo.
:type idx: int
"""
if idx == -1:
return
factor = self.zoom_cbo.itemData(idx)
# Compute relative scale
scale = factor / self._current_zoom_factor
self.scale(scale)
self._current_zoom_factor = factor
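        # Worked example: moving the combo from 100% to 150% calls
        # scale(1.5 / 1.0); selecting 100% again then calls scale(1.0 / 1.5),
        # so the applied zoom always reflects the absolute factor chosen.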
def scale(self, factor):
"""
Scales the view by the given scale factor.
:param factor: Scale factor
:type factor: float
"""
if factor <= 0:
return
self._profile_view.scale(factor, factor)
def valid(self):
"""
:return: Returns False if the respective party and spatial unit
entities have not been set. Otherwise True.
:rtype: bool
"""
return self._profile_view.valid()
def save_image_to_file(self, path, resolution, background=Qt.white):
"""
Saves the profile tenure view image to file using A4 paper size.
:param path: Absolute path where the image will be saved.
:type path: str
        :param resolution: Resolution in dpi.
:type resolution: int
:param background: Background color of the image.
:type background: QColor
:return: Returns True if the operation succeeded, otherwise False. If
False then a corresponding message is returned as well.
:rtype: (bool, str)
"""
return self._profile_view.save_image_to_file(
path,
resolution,
background
)
def set_spatial_unit(self, spatial_unit):
"""
Set the spatial unit entity.
.. deprecated:: 1.7
:param spatial_unit: Entity corresponding to a spatial unit in a
profile's STR relationship.
:type spatial_unit: Entity
"""
self._profile_view.set_spatial_unit(spatial_unit)
def invalidate_spatial_unit(self):
"""
Clears the spatial unit entity.
.. deprecated:: 1.7
"""
self._profile_view.invalidate_spatial_unit()
def add_parties(self, parties):
"""
Add party items to the view.
:param parties: List of party entities.
:type parties: list
"""
for p in parties:
self.add_party_entity(p)
def add_party_entity(self, party):
"""
        Adds a party entity to the view. If there is an existing one with the
same name then it will be removed before adding this party.
:param party: Party entity.
:type party: Entity
"""
self._profile_view.add_party_entity(party)
def add_spatial_units(self, spatial_units):
"""
Add spatial unit items to the view.
.. versionadded:: 1.7
:param spatial_units: List of spatial unit entities.
:type spatial_units: list
"""
for sp in spatial_units:
self.add_spatial_unit_entity(sp)
def add_spatial_unit_entity(self, spatial_unit):
"""
        Adds a spatial unit entity to the view. If there is an existing one
with the same name then it will be removed before adding this
spatial unit.
.. versionadded:: 1.7
:param spatial_unit: Spatial unit entity.
:type spatial_unit: Entity
"""
self._profile_view.add_spatial_unit_entity(spatial_unit)
def remove_party(self, name):
"""
Removes the party with the specified name from the collection.
:param name: Party name
:return: Returns True if the operation succeeded, otherwise False if
the party with the specified name does not exist in the collection.
:rtype: bool
"""
return self._profile_view.remove_party(name)
def remove_spatial_unit(self, name):
"""
Removes the spatial unit with the specified name from the collection.
.. versionadded:: 1.7
:param name: Spatial unit name.
:return: Returns True if the operation succeeded, otherwise False if
the spatial unit with the specified name does not exist in the
collection.
:rtype: bool
"""
return self._profile_view.remove_spatial_unit(name)
@property
def profile(self):
"""
:return: The profile object being rendered.
:rtype: Profile
"""
return self._profile_view.profile
@profile.setter
def profile(self, profile):
"""
        Sets the profile object whose STR view is to be rendered.
:param profile: Profile object to be rendered.
:type profile: Profile
"""
self._profile_view.profile = profile
| gltn/stdm | stdm/ui/wizard/profile_tenure_view.py | Python | gpl-2.0 | 67,557 | 0.000148 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-SVC (Support Vector Classification)
=========================================================
The classification application of the SVM is used below. The
`Iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_
dataset has been used for this example
The decision boundaries, are shown with all the points in the training-set.
"""
print __doc__
import sys
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
#iris = datasets.load_iris()
#X = iris.data[:, :2] # we only take the first two features.
#Y = iris.target
XTmp, Y = datasets.load_svmlight_file("../SVMData.txt")
X = XTmp.toarray()
h = .02 # step size in the mesh
clf = svm.SVC(C=1.0, kernel='linear')
# we create an instance of SVM Classifier and fit the data.
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
| hackliff/domobot | kinect/pySVM/test/plotLinearSVC.py | Python | apache-2.0 | 1,628 | 0.0043 |
"""
Test the pulp.server.db.manage module.
"""
from argparse import Namespace
from cStringIO import StringIO
import os
from mock import call, inPy3k, MagicMock, patch
from mongoengine.queryset import DoesNotExist
from ... import base
from pulp.common.compat import all, json
from pulp.server.db import manage
from pulp.server.db.migrate import models
from pulp.server.db.model import MigrationTracker
import pulp.plugins.types.database as types_db
import migration_packages.a
import migration_packages.b
import migration_packages.duplicate_versions
import migration_packages.platform
import migration_packages.raise_exception
import migration_packages.version_gap
import migration_packages.version_zero
import migration_packages.z
# This is used for mocking
_test_type_json = '''{"types": [{
"id" : "test_type_id",
"display_name" : "Test Type",
"description" : "Test Type",
"unit_key" : ["attribute_1", "attribute_2", "attribute_3"],
"search_indexes" : ["attribute_1", "attribute_3"]
}]}'''
# This is used to mock the entry_point system for discovering migration packages.
def iter_entry_points(name):
class FakeEntryPoint(object):
def __init__(self, migration_package):
self._migration_package = migration_package
def load(self):
return self._migration_package
test_packages = [
migration_packages.a,
migration_packages.duplicate_versions,
migration_packages.raise_exception,
migration_packages.version_gap,
migration_packages.version_zero,
migration_packages.z,
]
if name == models.MIGRATIONS_ENTRY_POINT:
return [FakeEntryPoint(package) for package in test_packages]
return []
# Mock 1.0.0 has a built in mock_open, and one day when we upgrade to 1.0.0 we can use that. In the
# meantime, I've included the example for mock_open as listed in the Mock 0.8 docs, slightly
# modified to allow read_data to just be a str.
# http://www.voidspace.org.uk/python/mock/0.8/examples.html?highlight=open#mocking-open
if inPy3k:
file_spec = [
'_CHUNK_SIZE', '__enter__', '__eq__', '__exit__',
'__format__', '__ge__', '__gt__', '__hash__', '__iter__', '__le__',
'__lt__', '__ne__', '__next__', '__repr__', '__str__',
'_checkClosed', '_checkReadable', '_checkSeekable',
'_checkWritable', 'buffer', 'close', 'closed', 'detach',
'encoding', 'errors', 'fileno', 'flush', 'isatty',
'line_buffering', 'mode', 'name',
'newlines', 'peek', 'raw', 'read', 'read1', 'readable',
'readinto', 'readline', 'readlines', 'seek', 'seekable', 'tell',
'truncate', 'writable', 'write', 'writelines']
else:
file_spec = file
def mock_open(mock=None, read_data=None):
if mock is None:
mock = MagicMock(spec=file_spec)
handle = MagicMock(spec=file_spec)
handle.write.return_value = None
fake_file = StringIO(read_data)
if read_data is None:
if hasattr(handle, '__enter__'):
handle.__enter__.return_value = handle
else:
if hasattr(handle, '__enter__'):
handle.__enter__.return_value = fake_file
handle.read = fake_file.read
mock.return_value = handle
return mock
class MigrationTest(base.PulpServerTests):
def clean(self):
super(MigrationTest, self).clean()
# Make sure each test doesn't have any lingering MigrationTrackers
MigrationTracker.objects().delete()
class TestMigrateDatabase(MigrationTest):
@patch('pulp.server.db.manage.logging.getLogger')
@patch('pulp.server.db.migrate.models.get_migration_packages', auto_spec=True)
def test_migration_removed(self, mock_get_packages, mock_getLogger):
"""
ensure that if a migration raises the MigrationRemovedError, it bubbles up.
"""
mock_package = MagicMock()
mock_package.current_version = 6
mock_package.latest_available_version = 7
mock_package.name = 'foo'
mock_migration = MagicMock()
mock_migration.version = 7
mock_package.unapplied_migrations = [mock_migration]
e = models.MigrationRemovedError('0006', '1.2.0', '1.1.0', 'foo')
mock_package.apply_migration.side_effect = e
mock_get_packages.return_value = [mock_package]
options = MagicMock()
options.dry_run = False
with self.assertRaises(models.MigrationRemovedError) as assertion:
manage.migrate_database(options)
self.assertTrue(assertion.exception is e)
class TestManageDB(MigrationTest):
def clean(self):
super(self.__class__, self).clean()
types_db.clean()
@patch.object(manage, 'PluginManager')
@patch.object(manage, 'model')
def test_ensure_database_indexes(self, mock_model, mock_plugin_manager):
"""
Make sure that the ensure_indexes method is called for all
the appropriate platform models
"""
test_model = MagicMock()
mock_plugin_manager.return_value.unit_models.items.return_value = [('test-unit',
test_model)]
manage.ensure_database_indexes()
test_model.ensure_indexes.assert_called_once_with()
@patch.object(manage, 'PluginManager')
@patch.object(manage, 'model')
def test_ensure_database_indexes_throws_exception(self, mock_model, mock_plugin_manager):
"""
        Make sure that a ValueError is raised if a content unit model
        explicitly defines an index for its unit key.
"""
test_model = MagicMock()
test_model.unit_key_fields = ('1', '2', '3')
unit_key_index = {'fields': test_model.unit_key_fields, 'unique': True}
test_model._meta.__getitem__.side_effect = [[unit_key_index]]
mock_plugin_manager.return_value.unit_models.items.return_value = [('test-unit',
test_model)]
with self.assertRaises(ValueError) as context:
manage.ensure_database_indexes()
self.assertEqual(context.exception.message, "Content unit type 'test-unit' explicitly "
"defines an index for its unit key. This is "
"not allowed because the platform handlesit "
"for you.")
@patch.object(manage, 'ensure_database_indexes')
@patch('logging.config.fileConfig')
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.manage.factory')
@patch('pulp.server.db.manage.logging.getLogger')
@patch('pulp.server.db.manage.RoleManager.ensure_super_user_role')
@patch('pulp.server.db.manage.managers.UserManager.ensure_admin')
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db"])
@patch.object(models.MigrationPackage, 'apply_migration')
def test_admin_is_ensured(self, apply_migration, ensure_admin, ensure_super_user_role,
getLogger, factory, initialize, fileConfig, ensure_db_indexes):
"""
pulp-manage-db is responsible for making sure the admin user and role are in place. This
test makes sure the manager methods that do that are called.
"""
logger = MagicMock()
getLogger.return_value = logger
code = manage.main()
self.assertEqual(code, os.EX_OK)
# Make sure all the right logging happens
expected_messages = ('Ensuring the admin role and user are in place.',
'Admin role and user are in place.')
info_messages = ''.join([mock_call[1][0] for mock_call in logger.info.mock_calls])
for msg in expected_messages:
self.assertTrue(msg in info_messages)
# Make sure the admin user and role creation methods were called. We'll leave it up to other
# tests to make sure they work.
ensure_admin.assert_called_once_with()
ensure_super_user_role.assert_called_once_with()
# Also, make sure the factory was initialized
factory.initialize.assert_called_once_with()
initialize.assert_called_once_with(max_timeout=1)
@patch('logging.config.fileConfig')
@patch('pulp.server.db.manage.logging.getLogger')
@patch('pulp.server.db.manage._auto_manage_db')
@patch('sys.argv', ["pulp-manage-db"])
@patch('pulp.server.db.connection.initialize')
def test_set_connection_timeout(self, mock_initialize, *unused_mocks):
manage.main()
mock_initialize.assert_called_once_with(max_timeout=1)
@patch('sys.stderr')
@patch('os.getuid', return_value=0)
def test_wont_run_as_root(self, mock_getuid, mock_stderr):
ret = manage.main()
# make sure the exit code reflect a usage error
self.assertEqual(ret, os.EX_USAGE)
# make sure a message was written to stderr with appropriate keywords
self.assertTrue(mock_stderr.write.call_count > 0)
self.assertTrue('root' in mock_stderr.write.call_args_list[0][0][0])
self.assertTrue('apache' in mock_stderr.write.call_args_list[0][0][0])
@patch('pulp.server.db.manage.logging.getLogger')
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db"])
@patch('logging.config.fileConfig')
def test_current_version_too_high(self, mocked_file_config, initialize, getLogger):
"""
Set the current package version higher than latest available version, then sit back and eat
popcorn.
"""
logger = MagicMock()
getLogger.return_value = logger
# Make sure we start out with a clean slate
self.assertEquals(MigrationTracker.objects().count(), 0)
# Make sure that our mock works and sees the right number of packages
self.assertEquals(len(models.get_migration_packages()), 5)
# Set all versions to ridiculously high values
for package in models.get_migration_packages():
package._migration_tracker.version = 9999999
package._migration_tracker.save()
error_code = manage.main()
self.assertEqual(error_code, os.EX_DATAERR)
# There should have been a critical log about the Exception
expected_messages = (
'The database for migration package unit.server.db.migration_packages.'
            'platform is at version 9999999, which is larger than the latest version available, 1.',)
critical_messages = ''.join([mock_call[1][0] for mock_call in logger.critical.mock_calls])
for msg in expected_messages:
self.assertTrue(msg in critical_messages)
initialize.assert_called_once_with(max_timeout=1)
@patch('pulp.server.db.manage.logging.getLogger')
@patch.object(models.MigrationPackage, 'apply_migration',
side_effect=models.MigrationPackage.apply_migration, autospec=True)
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db"])
@patch('logging.config.fileConfig')
def test_migrate(self, mock_file_config, initialize, mocked_apply_migration, getLogger):
"""
Let's set all the packages to be at version 0, and then check that the migrations get
called in the correct order.
"""
logger = MagicMock()
getLogger.return_value = logger
# Make sure we start out with a clean slate
self.assertEquals(MigrationTracker.objects().count(), 0)
# Make sure that our mock works and sees the right number of packages.
self.assertEquals(len(models.get_migration_packages()), 5)
# Set all versions back to 0
for package in models.get_migration_packages():
package._migration_tracker.version = 0
package._migration_tracker.save()
manage.main()
# There should have been a critical log about the Exception
expected_messages = (
'Applying migration '
'unit.server.db.migration_packages.raise_exception.0002_oh_no failed.\n\n'
'Halting migrations due to a migration failure.',
"Bet you didn\'t see this coming."
)
critical_messages = ''.join([mock_call[1][0] for mock_call in logger.critical.mock_calls])
for msg in expected_messages:
self.assertTrue(msg in critical_messages)
migration_modules_called = [
mock_call[1][1].name for mock_call in mocked_apply_migration.mock_calls]
        # Note that none of the migrations that don't meet our criteria show up in this list. Also,
        # note that migration_packages.raise_exception.0003_shouldnt_run doesn't appear
        # since migration_packages.raise_exception.0002_oh_no raised an Exception. Note
        # also that because the raise_exception package raised an Exception, the remaining
        # migrations were halted, so none of the z migrations are applied.
expected_migration_modules_called = [
'unit.server.db.migration_packages.platform.0001_stuff_and_junk',
'unit.server.db.migration_packages.raise_exception.0001_works_fine',
'unit.server.db.migration_packages.raise_exception.0002_oh_no']
self.assertEquals(migration_modules_called, expected_migration_modules_called)
# Assert that our precious versions have been updated correctly
for package in models.get_migration_packages():
if package.name == 'unit.server.db.migration_packages.platform':
self.assertEqual(package.current_version, package.latest_available_version)
elif package.name == 'unit.server.db.migration_packages.raise_exception':
# The raised Exception should have prevented us from getting past version 1
self.assertEquals(package.current_version, 1)
else:
# raise_exception should cause the migrations to stop
self.assertEqual(package.current_version, 0)
initialize.assert_called_once_with(max_timeout=1)
@patch('sys.stderr')
@patch('sys.stdout')
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db"])
@patch('pulp.server.db.manage._logger')
@patch('pulp.server.db.manage._start_logging')
@patch('pulp.server.db.manage.connection.initialize')
def test_migrate_with_new_packages(self, initialize, start_logging_mock, logger_mock,
mocked_stdout, mocked_stderr):
"""
Adding new packages to a system that doesn't have any trackers should advance
each package to the latest available version, applying no migrate() functions along the
way.
"""
# Make sure we start out with a clean slate
self.assertEquals(MigrationTracker.objects().count(), 0)
# Make sure that our mock works. There are five valid packages.
self.assertEquals(len(models.get_migration_packages()), 5)
manage.main()
for package in models.get_migration_packages():
if 'raise_exception' in str(package):
# The Exception raising package should get to version 3, despite the fact that
# version 2 raises an exception, because new trackers get fast-forwarded.
self.assertEqual(package.current_version, 3)
else:
# All other packages should reach their top versions
self.assertEqual(package.current_version, package.latest_available_version)
initialize.assert_called_once_with(max_timeout=1)
@patch('pulp.plugins.types.database._drop_indexes')
@patch('pulp.plugins.loader.api._generate_plugin_definitions', return_value=[])
@patch('__builtin__.open', mock_open(read_data=_test_type_json))
@patch('os.listdir', return_value=['test_type.json'])
@patch('sys.argv', ["pulp-manage-db"])
@patch('sys.stdout', MagicMock())
@patch('pulp.server.db.manage._start_logging')
@patch('pulp.server.db.manage.connection.initialize')
def test_pulp_manage_db_loads_types(self, initialize, start_logging_mock, listdir_mock,
mock_plugin_definitions, mock_drop_indices):
"""
Test calling pulp-manage-db imports types on a clean types database.
"""
manage.main()
all_collection_names = types_db.all_type_collection_names()
self.assertFalse(mock_drop_indices.called)
self.assertEqual(len(all_collection_names), 1)
self.assertEqual(['units_test_type_id'], all_collection_names)
# Let's make sure we loaded the type definitions correctly
db_type_definitions = types_db.all_type_definitions()
self.assertEquals(len(db_type_definitions), 1)
test_json = json.loads(_test_type_json)
for attribute in ['id', 'display_name', 'description', 'unit_key', 'search_indexes']:
self.assertEquals(test_json['types'][0][attribute], db_type_definitions[0][attribute])
# Now let's ensure that we have the correct indexes
collection = types_db.type_units_collection('test_type_id')
indexes = collection.index_information()
self.assertEqual(indexes['_id_']['key'], [(u'_id', 1)])
# Make sure we have the unique constraint on all three attributes
self.assertEqual(indexes['attribute_1_1_attribute_2_1_attribute_3_1']['unique'], True)
self.assertEqual(indexes['attribute_1_1_attribute_2_1_attribute_3_1']['key'],
[(u'attribute_1', 1), (u'attribute_2', 1), (u'attribute_3', 1)])
# Make sure we indexed attributes 1 and 3
self.assertEqual(indexes['attribute_1_1']['key'], [(u'attribute_1', 1)])
self.assertEqual(indexes['attribute_3_1']['key'], [(u'attribute_3', 1)])
# Make sure we only have the indexes that we've hand inspected here
self.assertEqual(indexes.keys(), [u'_id_', u'attribute_1_1_attribute_2_1_attribute_3_1',
u'attribute_1_1', u'attribute_3_1'])
initialize.assert_called_once_with(max_timeout=1)
@patch('pulp.server.db.manage.logging.getLogger')
@patch.object(models.MigrationPackage, 'apply_migration',
side_effect=models.MigrationPackage.apply_migration, autospec=True)
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db", "--test"])
@patch('logging.config.fileConfig')
def test_migrate_with_test_flag(self, mock_file_config, initialize, mocked_apply_migration,
getLogger):
"""
Let's set all the packages to be at version 0, and then check that the migrations get called
in the correct order. We will also set the --test flag and ensure that the migration
versions do not get updated.
"""
logger = MagicMock()
getLogger.return_value = logger
# Make sure we start out with a clean slate
self.assertEquals(MigrationTracker.objects().count(), 0)
# Make sure that our mock works. There are five valid packages.
self.assertEquals(len(models.get_migration_packages()), 5)
# Set all versions back to 0
for package in models.get_migration_packages():
package._migration_tracker.version = 0
package._migration_tracker.save()
manage.main()
# There should have been a critical log about the Exception
expected_messages = (
'Applying migration '
'unit.server.db.migration_packages.raise_exception.0002_oh_no failed.\n\n'
'Halting migrations due to a migration failure.',
'Bet you didn\'t see this coming.'
)
critical_messages = [mock_call[1][0] for mock_call in logger.critical.mock_calls]
for msg in expected_messages:
self.assertTrue(msg in critical_messages)
migration_modules_called = [
mock_call[1][1].name for mock_call in mocked_apply_migration.mock_calls]
        # Note that none of the migrations that don't meet our criteria show up in this list. Also,
        # note that migration_packages.raise_exception.0003_shouldnt_run doesn't appear
        # since migration_packages.raise_exception.0002_oh_no raised an Exception. Note
        # also that because the raise_exception package raised an Exception, the remaining
        # migrations were halted, so none of the z migrations are applied.
expected_migration_modules_called = [
'unit.server.db.migration_packages.platform.0001_stuff_and_junk',
'unit.server.db.migration_packages.raise_exception.0001_works_fine',
'unit.server.db.migration_packages.raise_exception.0002_oh_no']
self.assertEquals(migration_modules_called, expected_migration_modules_called)
# Assert that our precious versions have not been updated, since we have the --test flag
for package in models.get_migration_packages():
self.assertEqual(package.current_version, 0)
initialize.assert_called_once_with(max_timeout=1)
@patch('pulp.server.db.manage.logging.getLogger')
@patch.object(models.MigrationPackage, 'apply_migration',
side_effect=models.MigrationPackage.apply_migration, autospec=True)
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db", "--dry-run"])
@patch('logging.config.fileConfig')
def test_migrate_with_dry_run_flag(self, mock_file_config, initialize, mocked_apply_migration,
getLogger):
"""
Test that when a dry run is performed, no migrations actually occur.
"""
logger = MagicMock()
getLogger.return_value = logger
# Make sure we start out with a clean slate
self.assertEquals(MigrationTracker.objects().count(), 0)
        # Make sure that our mock works. There are five valid packages.
self.assertEquals(len(models.get_migration_packages()), 5)
# Set all versions back to 0
for package in models.get_migration_packages():
package._migration_tracker.version = 0
package._migration_tracker.save()
result = manage.main()
# Test that none of the mock objects were actually called
migration_modules_called = [
mock_call[1][1].name for mock_call in mocked_apply_migration.mock_calls]
self.assertEquals(0, len(migration_modules_called))
self.assertEquals(1, result)
for package in models.get_migration_packages():
self.assertEqual(package.current_version, 0)
initialize.assert_called_once_with(max_timeout=1)
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.manage.RoleManager.ensure_super_user_role')
@patch('pulp.server.db.manage.managers.UserManager.ensure_admin')
@patch('pulp.server.db.manage.logging.getLogger')
@patch.object(models.MigrationPackage, 'apply_migration',
side_effect=models.MigrationPackage.apply_migration, autospec=True)
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('sys.argv', ["pulp-manage-db", "--dry-run"])
@patch('logging.config.fileConfig')
def test_admin_creation_dry_run(self, mock_file_config, mocked_apply_migration, getLogger,
mock_ensure_admin, mock_ensure_super_role, initialize):
logger = MagicMock()
getLogger.return_value = logger
exit_code = manage.main()
self.assertEqual(exit_code, 1)
# Make sure the admin user and role creation methods were not called
self.assertEquals(0, mock_ensure_admin.call_count)
self.assertEquals(0, mock_ensure_super_role.call_count)
initialize.assert_called_once_with(max_timeout=1)
@patch.object(manage, 'ensure_database_indexes')
@patch('pulp.server.db.manage.logging.getLogger')
@patch.object(models.MigrationPackage, 'apply_migration',
side_effect=models.MigrationPackage.apply_migration, autospec=True)
@patch('pkg_resources.iter_entry_points')
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('pulp.server.db.manage.parse_args', autospec=True)
@patch('logging.config.fileConfig')
def test_dry_run_no_changes(self, mock_file_config, mock_parse_args, initialize,
mocked_apply_migration, mock_entry, getLogger, mock_ensure_indexes):
logger = MagicMock()
getLogger.return_value = logger
mock_args = Namespace(dry_run=True, test=False)
mock_parse_args.return_value = mock_args
# Test that when dry run is on, it returns 1 if migrations remain
exit_code = manage.main()
self.assertEqual(exit_code, 1)
self.assertFalse(mock_ensure_indexes.called)
initialize.assert_called_once_with(max_timeout=1)
# Actually apply the migrations
mock_args.dry_run = False
mock_ensure_indexes.reset_mock()
initialize.reset_mock()
exit_code = manage.main()
self.assertEqual(exit_code, 0)
self.assertTrue(mock_ensure_indexes.called)
initialize.assert_called_once_with(max_timeout=1)
# Perform another dry run and check the return value is now 0
mock_args.dry_run = True
mock_ensure_indexes.reset_mock()
initialize.reset_mock()
exit_code = manage.main()
self.assertEquals(exit_code, 0)
self.assertFalse(mock_ensure_indexes.called)
initialize.assert_called_once_with(max_timeout=1)
@patch('pulp.server.db.manage.logging.getLogger')
@patch('pulp.server.db.manage.connection.initialize')
@patch('pulp.server.db.manage.parse_args', autospec=True)
@patch.object(manage, '_auto_manage_db')
def test_migration_removed(self, mock_auto_manage_db, mock_parse_args, mock_init,
mock_getLogger):
e = models.MigrationRemovedError('0002', '1.2.0', '1.1.0', 'foo')
mock_auto_manage_db.side_effect = e
ret = manage.main()
self.assertEqual(ret, os.EX_SOFTWARE)
class TestMigrationModule(MigrationTest):
def test___cmp__(self):
mm_2 = models.MigrationModule('unit.server.db.migration_packages.z.0002_test')
mm_3 = models.MigrationModule('unit.server.db.migration_packages.z.0003_test')
self.assertEquals(cmp(mm_2, mm_3), -1)
def test___init__(self):
mm = models.MigrationModule('unit.server.db.migration_packages.z.0002_test')
self.assertEquals(mm._module.__name__,
'unit.server.db.migration_packages.z.0002_test')
self.assertEquals(mm.version, 2)
# It should have a migrate attribute that is callable
self.assertTrue(hasattr(mm.migrate, '__call__'))
def test___repr__(self):
mm = models.MigrationModule('unit.server.db.migration_packages.z.0003_test')
self.assertEqual(repr(mm), 'unit.server.db.migration_packages.z.0003_test')
def test__get_version(self):
mm = models.MigrationModule('unit.server.db.migration_packages.z.0003_test')
self.assertEquals(mm._get_version(), 3)
def test_name(self):
mm = models.MigrationModule('unit.server.db.migration_packages.z.0003_test')
self.assertEqual(mm.name, 'unit.server.db.migration_packages.z.0003_test')
class TestMigrationPackage(MigrationTest):
def test___init__(self):
mp = models.MigrationPackage(migration_packages.z)
self.assertEquals(mp._package.__name__, 'unit.server.db.migration_packages.z')
self.assertEquals(mp._migration_tracker.name, 'unit.server.db.migration_packages.z')
# By default, MigrationPackages should start at version -1
self.assertEquals(mp._migration_tracker.version, -1)
@patch('pulp.server.db.migrate.models.MigrationTracker')
def test__init__2(self, mock_tracker):
"""
Test package does not exist.
"""
mock_tracker.objects.return_value.get.side_effect = DoesNotExist()
models.MigrationPackage(migration_packages.b)
self.assertEqual(mock_tracker.return_value.save.call_count, 1)
@patch('pulp.server.db.migrate.models.MigrationTracker')
def test__init__3(self, mock_tracker):
"""
Test package exists.
"""
mock_some = MagicMock()
mock_tracker.objects.return_value.get = mock_some
models.MigrationPackage(migration_packages.a)
self.assertEqual(mock_tracker.return_value.save.call_count, 0)
def test_apply_migration(self):
mp = models.MigrationPackage(migration_packages.z)
# Let's fake the migration version being at 2 instead of 3
mp._migration_tracker.version = 2
mp._migration_tracker.save()
# Now, let's apply version 3
mm_v3 = mp.unapplied_migrations[-1]
self.assertEqual(mm_v3.version, 3)
# Let's change the migrate() function to one that tracks that it gets called.
mm_v3.migrate = MagicMock(name='migrate')
self.assertEquals(mm_v3.migrate.called, False)
# Now try to run the migration and assert that it gets called
mp.apply_migration(mm_v3)
self.assertEquals(mm_v3.migrate.called, True)
# Now the mp should be at v3
self.assertEqual(mp.current_version, 3)
def test_available_versions(self):
mp = models.MigrationPackage(migration_packages.z)
self.assertEquals(mp.available_versions, [1, 2, 3])
def test_current_version(self):
mp = models.MigrationPackage(migration_packages.z)
# By default, a MigrationPackage will be at version -1
self.assertEqual(mp.current_version, -1)
# Now let's change the version to 4 and see what happens
mp._migration_tracker.version = 4
mp._migration_tracker.save()
# Now we should be able to reinstantiate this mammajamma and see that the version is right
mp = models.MigrationPackage(migration_packages.z)
self.assertEqual(mp.current_version, 4)
def test_duplicate_versions(self):
error_message = 'There are two migration modules that share version 2 in ' +\
'unit.server.db.migration_packages.duplicate_versions.'
try:
models.MigrationPackage(migration_packages.duplicate_versions)
self.fail('The MigrationPackage.DuplicateVersions exception should have been raised, '
'but was not raised.')
except models.MigrationPackage.DuplicateVersions, e:
self.assertEquals(str(e), error_message)
def test_latest_available_version(self):
# This one has no migrations, so the latest is 0
self.assertEqual(
models.MigrationPackage(migration_packages.a).latest_available_version, 0)
self.assertEqual(models.MigrationPackage(
migration_packages.platform).latest_available_version, 1)
self.assertEqual(
models.MigrationPackage(migration_packages.z).latest_available_version, 3)
def test_migrations(self):
migration_package = models.MigrationPackage(migration_packages.z)
migrations = migration_package.migrations
self.assertEqual(len(migrations), 3)
self.assertTrue(all([isinstance(migration, models.MigrationModule)
for migration in migrations]))
# Make sure their versions are set and sorted appropriately
self.assertEqual([1, 2, 3], [migration.version for migration in migrations])
# Check the names
self.assertEqual(['unit.server.db.migration_packages.z.0001_test',
'unit.server.db.migration_packages.z.0002_test',
'unit.server.db.migration_packages.z.0003_test'],
[migration._module.__name__ for migration in migrations])
def test_name(self):
mp = models.MigrationPackage(migration_packages.z)
self.assertEqual(mp.name, 'unit.server.db.migration_packages.z')
@patch('pulp.server.db.migrate.models._logger.debug')
def test_nonconforming_modules(self, log_mock):
# The z package has a module called doesnt_conform_to_naming_convention.py. This shouldn't
# count as a migration module, but it also should not interfere with the existing migration
# modules, and the debug log should mention that the file was found but was not found to be
# a migration module. The z package also has a module called 0004_doesnt_have_migrate.py.
# Since it doesn't have a migrate function, it should just be logged and things should keep
# going as usual.
mp = models.MigrationPackage(migration_packages.z)
migrations = mp.migrations
self.assertEqual(len(migrations), 3)
self.assertTrue(all([isinstance(migration, models.MigrationModule)
for migration in migrations]))
# Make sure their versions are set and sorted appropriately
self.assertEqual([1, 2, 3], [migration.version for migration in migrations])
# Check the names
self.assertEqual(['unit.server.db.migration_packages.z.0001_test',
'unit.server.db.migration_packages.z.0002_test',
'unit.server.db.migration_packages.z.0003_test'],
[migration.name for migration in migrations])
# Now let's assert that the non-conforming dealios were logged
# They actually get logged twice each, once for when we initialized the MP, and the other
# when we retrieved the migrations
log_mock.assert_has_calls([
call('The module unit.server.db.migration_packages.z.0004_doesnt_have_migrate '
'doesn\'t have a migrate function. It will be ignored.'),
call('The module '
'unit.server.db.migration_packages.z.doesnt_conform_to_naming_convention '
'doesn\'t conform to the migration package naming conventions. It will be '
'ignored.'),
call('The module unit.server.db.migration_packages.z.0004_doesnt_have_migrate '
'doesn\'t have a migrate function. It will be ignored.'),
call('The module '
'unit.server.db.migration_packages.z.doesnt_conform_to_naming_convention '
'doesn\'t conform to the migration package naming conventions. It will be '
'ignored.')])
def test_unapplied_migrations(self):
mp = models.MigrationPackage(migration_packages.z)
# Drop the version to 1, which should make this method return two migrations
mp._migration_tracker.version = 1
mp._migration_tracker.save()
unapplied = mp.unapplied_migrations
self.assertEqual(len(unapplied), 2)
self.assertEqual([m.version for m in unapplied], [2, 3])
self.assertEqual(
[m._module.__name__ for m in unapplied],
['unit.server.db.migration_packages.z.0002_test',
'unit.server.db.migration_packages.z.0003_test'])
def test_migration_version_cant_be_zero(self):
"""
Make sure that we reserve migration zero.
"""
error_message = (
'0 is a reserved migration version number, but the module '
'unit.server.db.migration_packages.version_zero.0000_not_allowed has been '
'assigned that version.')
try:
models.MigrationPackage(migration_packages.version_zero)
self.fail('The MigrationPackage.DuplicateVersions exception should have been raised, '
'but was not raised.')
except models.MigrationPackage.DuplicateVersions, e:
self.assertEquals(str(e), error_message)
def test_migration_version_gap(self):
"""
Make sure that we allow migrations to have version gaps. Previously version gaps were not
allowed, and this call would have caused a validation failure. This test makes sure no such
failure happens.
"""
models.MigrationPackage(migration_packages.version_gap)
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
def test___cmp__(self):
mp_1 = models.MigrationPackage(migration_packages.a)
mp_2 = models.MigrationPackage(migration_packages.platform)
mp_3 = models.MigrationPackage(migration_packages.z)
        # platform should always sort first, and the others should otherwise sort alphabetically
self.assertEqual(cmp(mp_1, mp_1), 0)
self.assertEqual(cmp(mp_1, mp_2), 1)
self.assertEqual(cmp(mp_1, mp_3), -1)
self.assertEqual(cmp(mp_2, mp_1), -1)
self.assertEqual(cmp(mp_2, mp_2), 0)
self.assertEqual(cmp(mp_2, mp_3), -1)
self.assertEqual(cmp(mp_3, mp_1), 1)
self.assertEqual(cmp(mp_3, mp_2), 1)
self.assertEqual(cmp(mp_3, mp_3), 0)
def test___repr__(self):
mp = models.MigrationPackage(migration_packages.z)
self.assertEqual(repr(mp), 'unit.server.db.migration_packages.z')
def test___str__(self):
mp = models.MigrationPackage(migration_packages.z)
self.assertEqual(str(mp), 'unit.server.db.migration_packages.z')
class TestMigrationUtils(MigrationTest):
def test__import_all_the_way(self):
"""
Make sure that models._import_all_the_way() gives back the most specific module.
"""
module = models._import_all_the_way('unit.server.db.migration_packages.z.0001_test')
self.assertEqual(module.__name__, 'unit.server.db.migration_packages.z.0001_test')
@patch('pkg_resources.iter_entry_points', iter_entry_points)
@patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
migration_packages.platform)
@patch('pulp.server.db.migrate.models._logger.error')
def test_get_migration_packages(self, log_mock):
"""
Ensure that pulp.server.db.migrate.models.get_migration_packages functions correctly.
"""
packages = models.get_migration_packages()
self.assertEquals(len(packages), 5)
self.assertTrue(
all([isinstance(package, models.MigrationPackage) for package in packages]))
# Make sure that the packages are sorted correctly, with platform first
self.assertEquals(packages[0].name, 'unit.server.db.migration_packages.platform')
self.assertEquals(packages[1].name, 'unit.server.db.migration_packages.a')
self.assertEquals(packages[2].name,
'unit.server.db.migration_packages.raise_exception')
self.assertEquals(packages[3].name, 'unit.server.db.migration_packages.version_gap')
self.assertEquals(packages[4].name, 'unit.server.db.migration_packages.z')
# Assert that we logged the duplicate version exception and the version gap exception
expected_log_calls = [call('There are two migration modules that share version 2 in '
'unit.server.db.migration_packages.duplicate_versions.')]
log_mock.assert_has_calls(expected_log_calls)
| ulif/pulp | server/test/unit/server/db/test_manage.py | Python | gpl-2.0 | 40,647 | 0.003174 |
from flask import Flask
from flask import request
from flask import jsonify
from flask import abort
import time
app = Flask(__name__)
@app.route('/api/1', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/1/<path:path>', methods=['GET', 'POST'])
def api1(path):
time.sleep(20)
return jsonify({
'userinfo': {
'username': 'zhouyang',
'pk': 10,
'birthday': '2010101'
}
})
@app.route('/api/2', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/2/<path:path>', methods=['GET', 'POST'])
def api2(path):
return abort(400, 'you did a bad request')
@app.route('/api/3', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/3/<path:path>', methods=['GET', 'POST'])
def api3(path):
userId = request.args.get('userId')
return jsonify({
'userinfo': {
'userId': userId
}
})
@app.route('/usercenter/userinfo', methods=['GET', 'POST'])
def api4():
return jsonify({
'userinfo': {
'username': 'zhouyang'
}
})
if __name__ == '__main__':
app.run(port=1330, host='0.0.0.0')
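# --- Hypothetical usage notes (not part of the original module) ---
# With the server running on port 1330, the endpoints above behave as follows:
#   GET http://localhost:1330/api/1                  -> responds after ~20s with a fixed userinfo payload
#   GET http://localhost:1330/api/2                  -> always aborts with HTTP 400 ("you did a bad request")
#   GET http://localhost:1330/api/3?userId=7         -> echoes the userId query parameter back in the JSON body
#   GET http://localhost:1330/usercenter/userinfo    -> returns {"userinfo": {"username": "zhouyang"}}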
| jie/microgate | test_server.py | Python | mit | 1,151 | 0.001738 |
"""
Simple utility code for animations.
"""
# Author: Prabhu Ramachandran <prabhu at aerodotiitbdotacdotin>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
import types
from functools import wraps
try:
from decorator import decorator
HAS_DECORATOR = True
except ImportError:
HAS_DECORATOR = False
from pyface.timer.api import Timer
from traits.api import HasTraits, Button, Instance, Range
from traitsui.api import View, Group, Item
###############################################################################
# `Animator` class.
###############################################################################
class Animator(HasTraits):
""" Convenience class to manage a timer and present a convenient
UI. This is based on the code in `tvtk.tools.visual`.
Here is a simple example of using this class::
>>> from mayavi import mlab
>>> def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> anim = anim()
>>> t = Animator(500, anim.next)
>>> t.edit_traits()
This makes it very easy to animate your visualizations and control
it from a simple UI.
**Notes**
If you want to modify the data plotted by an `mlab` function call,
please refer to the section on: :ref:`mlab-animating-data`
"""
########################################
# Traits.
start = Button('Start Animation')
stop = Button('Stop Animation')
delay = Range(10, 100000, 500,
desc='frequency with which timer is called')
# The internal timer we manage.
timer = Instance(Timer)
######################################################################
# User interface view
traits_view = View(Group(Item('start'),
Item('stop'),
show_labels=False),
Item('_'),
Item(name='delay'),
title='Animation Controller',
buttons=['OK'])
######################################################################
# Initialize object
def __init__(self, millisec, callable, *args, **kwargs):
"""Constructor.
**Parameters**
:millisec: int specifying the delay in milliseconds
between calls to the callable.
:callable: callable function to call after the specified
delay.
:\*args: optional arguments to be passed to the callable.
:\*\*kwargs: optional keyword arguments to be passed to the callable.
"""
HasTraits.__init__(self)
self.delay = millisec
self.ui = None
self.timer = Timer(millisec, callable, *args, **kwargs)
######################################################################
# `Animator` protocol.
######################################################################
def show(self):
"""Show the animator UI.
"""
self.ui = self.edit_traits()
def close(self):
"""Close the animator UI.
"""
if self.ui is not None:
self.ui.dispose()
######################################################################
# Non-public methods, Event handlers
def _start_fired(self):
self.timer.Start(self.delay)
def _stop_fired(self):
self.timer.Stop()
def _delay_changed(self, value):
t = self.timer
if t is None:
return
if t.IsRunning():
t.Stop()
t.Start(value)
###############################################################################
# Decorators.
def animate(func=None, delay=500, ui=True):
""" A convenient decorator to animate a generator that performs an
animation. The `delay` parameter specifies the delay (in
milliseconds) between calls to the decorated function. If `ui` is
True, then a simple UI for the animator is also popped up. The
decorated function will return the `Animator` instance used and a
user may call its `Stop` method to stop the animation.
If an ordinary function is decorated a `TypeError` will be raised.
**Parameters**
:delay: int specifying the time interval in milliseconds between
calls to the function.
:ui: bool specifying if a UI controlling the animation is to be
provided.
**Returns**
The decorated function returns an `Animator` instance.
**Examples**
Here is the example provided in the Animator class documentation::
>>> from mayavi import mlab
>>> @mlab.animate
... def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> a = anim() # Starts the animation.
For more specialized use you can pass arguments to the decorator::
>>> from mayavi import mlab
>>> @mlab.animate(delay=500, ui=False)
... def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> a = anim() # Starts the animation without a UI.
**Notes**
If you want to modify the data plotted by an `mlab` function call,
please refer to the section on: :ref:`mlab-animating-data`.
"""
class Wrapper(object):
# The wrapper which calls the decorated function.
def __init__(self, function):
self.func = function
self.ui = ui
self.delay = delay
def __call__(self, *args, **kw):
if isinstance(self.func, types.GeneratorType):
f = self.func
else:
f = self.func(*args, **kw)
if isinstance(f, types.GeneratorType):
a = Animator(self.delay, f.next)
if self.ui:
a.show()
return a
else:
msg = 'The function "%s" must be a generator '\
'(use yield)!' % (self.func.__name__)
raise TypeError(msg)
def decorator_call(self, func, *args, **kw):
return self(*args, **kw)
def _wrapper(function):
# Needed to create the Wrapper in the right scope.
if HAS_DECORATOR:
# The decorator calls a callable with (func, *args, **kw) signature
return decorator(Wrapper(function).decorator_call, function)
else:
return wraps(function)(Wrapper(function))
if func is None:
return _wrapper
else:
return _wrapper(func)
| liulion/mayavi | mayavi/tools/animator.py | Python | bsd-3-clause | 7,087 | 0.000564 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'NewMangaDialog.ui'
#
# Created: Wed Jul 24 19:06:21 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_NewMangaDialog(object):
def setupUi(self, NewMangaDialog):
NewMangaDialog.setObjectName(_fromUtf8("NewMangaDialog"))
NewMangaDialog.resize(231, 78)
self.gridLayout = QtGui.QGridLayout(NewMangaDialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.splitter = QtGui.QSplitter(NewMangaDialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.label = QtGui.QLabel(self.splitter)
self.label.setObjectName(_fromUtf8("label"))
self.mangaLineEdit = QtGui.QLineEdit(self.splitter)
self.mangaLineEdit.setObjectName(_fromUtf8("mangaLineEdit"))
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(NewMangaDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.retranslateUi(NewMangaDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), NewMangaDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), NewMangaDialog.reject)
QtCore.QMetaObject.connectSlotsByName(NewMangaDialog)
def retranslateUi(self, NewMangaDialog):
NewMangaDialog.setWindowTitle(_translate("NewMangaDialog", "Dialog", None))
self.label.setText(_translate("NewMangaDialog", "Manga:", None))
| ilbay/PyMangaDownloader | Ui_NewMangaDialog.py | Python | gpl-2.0 | 2,334 | 0.003428 |
"""engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py 5023 2010/06/14 22:05:46 scons"
import posix
def generate(env):
posix.generate(env)
env['SHLIBSUFFIX'] = '.dylib'
env['ENV']['PATH'] = env['ENV']['PATH'] + ':/sw/bin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| faarwa/EngSocP5 | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Platform/darwin.py | Python | gpl-3.0 | 1,758 | 0.001706 |
# -*- coding: utf-8 -*-
# This file is part of https://github.com/26fe/jsonstat.py
# Copyright (C) 2016-2021 gf <gf@26fe.com>
# See LICENSE file
# stdlib
import time
import os
import hashlib
# packages
import requests
# jsonstat
from jsonstat.exceptions import JsonStatException
class Downloader:
"""Helper class to download json stat files.
    It has a very simple cache mechanism.
"""
def __init__(self, cache_dir="./data", time_to_live=None):
"""initialize downloader
        :param cache_dir: directory in which to store downloaded files; if cache_dir is None files are not stored
        :param time_to_live: how many seconds to keep a file on disk; None means forever, 0 means never store
"""
if cache_dir is not None:
self.__cache_dir = os.path.abspath(cache_dir)
else:
self.__cache_dir = None
self.__time_to_live = time_to_live
self.__session = requests.session()
def cache_dir(self):
return self.__cache_dir
def download(self, url, filename=None, time_to_live=None):
"""Download url from internet.
Store the downloaded content into <cache_dir>/file.
        If <cache_dir>/file already exists, the content is returned from disk.
        :param url: page to be downloaded
        :param filename: file in which to store the content of url, None if it should not be stored
        :param time_to_live: how many seconds to keep the file on disk,
            None to use the default time_to_live,
            0 to bypass any cached version
:returns: the content of url (str type)
"""
pathname = self.__build_pathname(filename, url)
        # note: html must be a str, not bytes
if time_to_live == 0 or not self.__is_cached(pathname):
response = self.__session.get(url)
response.raise_for_status()
html = response.text
self.__write_page_to_cache(pathname, html)
else:
html = self.__read_page_from_file(pathname)
return html
def __build_pathname(self, filename, url):
if self.__cache_dir is None:
return None
if filename is None:
filename = hashlib.md5(url.encode('utf-8')).hexdigest()
pathname = os.path.join(self.__cache_dir, filename)
return pathname
def __is_cached(self, pathname):
"""check if pathname exists
:param pathname:
:returns: True if the file can be retrieved from the disk (cache)
"""
if pathname is None:
return False
if not os.path.exists(pathname):
return False
if self.__time_to_live is None:
return True
cur = time.time()
mtime = os.stat(pathname).st_mtime
# print("last modified: %s" % time.ctime(mtime))
return cur - mtime < self.__time_to_live
def __write_page_to_cache(self, pathname, content):
"""write content to pathname
:param pathname:
:param content:
"""
if pathname is None:
return
# create cache directory only the fist time it is needed
if not os.path.exists(self.__cache_dir):
os.makedirs(self.__cache_dir)
if not os.path.isdir(self.__cache_dir):
msg = "cache_dir '{}' is not a directory".format(self.__cache_dir)
raise JsonStatException(msg)
# note:
        # in python 3 the file must be opened without the b (binary) option to write a string
# otherwise the following error will be generated
# TypeError: a bytes-like object is required, not 'str'
with open(pathname, 'w') as f:
f.write(content)
@staticmethod
def __read_page_from_file(pathname):
"""it reads content from pathname
:param pathname:
"""
with open(pathname, 'r') as f:
content = f.read()
return content
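# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates the cache behaviour described in the docstrings above; the URL
# and file name are placeholders, not real project resources.
if __name__ == '__main__':
    d = Downloader(cache_dir="./data", time_to_live=3600)
    # first call downloads the page and stores it under ./data/example.json
    body = d.download("http://example.com/jsonstat/example.json", filename="example.json")
    # a second call within an hour is served from the cached copy on disk
    cached = d.download("http://example.com/jsonstat/example.json", filename="example.json")
    # time_to_live=0 ignores the cached copy and downloads the page again
    fresh = d.download("http://example.com/jsonstat/example.json", filename="example.json", time_to_live=0)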
| 26fe/jsonstat.py | jsonstat/downloader.py | Python | lgpl-3.0 | 3,966 | 0.001261 |
# Use default debug configuration or local configuration
try:
from .config_local import *
except ImportError:
from .config_default import *
| steelart/ask-navalny | django-backend/config/config.py | Python | mit | 148 | 0 |
#-------------------------------------------------------------------------------
# Name: ModSlaveSettingsRTU
# Purpose:
#
# Author: ElBar
#
# Created: 17/04/2012
# Copyright: (c) ElBar 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from PyQt4 import QtGui,QtCore
from Ui_settingsModbusRTU import Ui_SettingsModbusRTU
import Utils
#add logging capability
import logging
#-------------------------------------------------------------------------------
class ModSlaveSettingsRTUWindow(QtGui.QDialog):
""" Class wrapper for RTU settings ui """
def __init__(self):
super(ModSlaveSettingsRTUWindow,self).__init__()
#init value
self.rtu_port = 1
self.baud_rate = 9600
self.byte_size = 8
self.parity = 'None'
self.stop_bits = '1'
self._logger = logging.getLogger("modbus_tk")
self.setupUI()
def setupUI(self):
#create window from ui
self.ui=Ui_SettingsModbusRTU()
self.ui.setupUi(self)
#set init values
self._set_values()
#signals-slots
self.accepted.connect(self._OK_pressed)
self.rejected.connect(self._cancel_pressed)
def _set_values(self):
"""set param values to ui"""
self._logger.info("Set param values to UI")
self.ui.cmbPort.setEditText(str(self.rtu_port))
self.ui.cmbBaud.setCurrentIndex(self.ui.cmbBaud.findText(str(self.baud_rate)))
self.ui.cmbDataBits.setCurrentIndex(self.ui.cmbDataBits.findText(str(self.byte_size)))
self.ui.cmbParity.setCurrentIndex(self.ui.cmbParity.findText(self.parity))
self.ui.cmbStopBits.setCurrentIndex(self.ui.cmbStopBits.findText(str(self.stop_bits)))
def _get_values(self):
"""get param values from ui"""
self._logger.info("Get param values from UI")
self.rtu_port = int(self.ui.cmbPort.currentText())
self.baud_rate = self.ui.cmbBaud.currentText()
self.byte_size = self.ui.cmbDataBits.currentText()
self.parity = self.ui.cmbParity.currentText()
self.stop_bits = self.ui.cmbStopBits.currentText()
def _OK_pressed(self):
"""new values are accepted"""
port = str(self.ui.cmbPort.currentText())
if (port.isdigit() and int(port) >= 1 and int(port) <= 16):#port must be an integer
self._get_values()
else:
self.rtu_port = 1
self._set_values()
self._get_values()
self._logger.error("Port must be an integer between 1 and 16")
Utils.errorMessageBox("Port must be an integer between 1 and 16")
def _cancel_pressed(self):
"""new values are rejected"""
self._set_values()
def showEvent(self,QShowEvent):
"""set values for controls"""
self._set_values()
#------------------------------------------------------------------------------- | zhanglongqi/pymodslave | ModSlaveSettingsRTU.py | Python | gpl-2.0 | 2,993 | 0.009021 |
"""
Helper for views.py
"""
from base_handler import base_handler
import traceback
import app.model
from flask import g, render_template
class single_access_handler(base_handler):
def __init__(self):
"""
        Manages all the operations involved in associating a single port with EPGs
        (for virtual port channel associations the vpc_access_handler is used)
:return:
"""
try:
self.cobra_apic_object = single_access_handler.init_connections()
self.exception = None
except Exception as e:
self.exception = e
print traceback.print_exc()
def get_create_single_access_networks(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_create_single_access_network select with the networks within the selected group
try:
network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_create_single_access_group'])
item_list = []
if network_ap is not None:
networks = self.cobra_apic_object.get_epg_by_ap(str(network_ap.dn))
for network in networks:
# Creates a dynamic object
network_do = type('network_do', (object,), {})
network_do.key = str(network.dn)
network_do.text = network.name
item_list.append(network_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_create_single_access_network", html_response)
except Exception as e:
print traceback.print_exc()
obj_response.script("create_notification('Can not retrieve networks', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def get_create_single_access_ports(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_create_single_access_port select with the available ports within the selected leaf
try:
ports = self.cobra_apic_object.get_available_ports(form_values['sel_create_single_access_leaf'])
item_list = []
for i in range(0, len(ports[0])):
# Creates a dynamic object
port_do = type('port_do', (object,), {})
port_do.key = ports[0][i]
port_do.text = ports[1][i]
item_list.append(port_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_create_single_access_port", html_response)
except Exception as e:
print traceback.print_exc()
obj_response.script("create_notification('Can not retrieve ports', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def create_single_access(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Creates switch profiles, interface profiles, policy groups and static bindings to associate a port
# to an EPG
try:
port_id = form_values['sel_create_single_access_port'].split('[')[-1][:-1].replace('/','-')
switch_id = form_values['sel_create_single_access_leaf'].split('/')[-1]
if form_values['create_port_access_type'] == 'single_vlan':
network_o = app.model.network.select().where(app.model.network.epg_dn ==
form_values['sel_create_single_access_network'])
if len(network_o) > 0:
self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
form_values['sel_create_single_access_leaf'],
form_values['sel_create_single_access_port'],
network_o[0].encapsulation,
'migration-tool',
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
obj_response.script("create_notification('Assigned', '', 'success', 5000)")
else:
obj_response.script(
"create_notification('Network not found in local database', '', 'danger', 0)")
elif form_values['create_port_access_type'] == 'vlan_profile':
network_profilexnetworks = app.model.network_profilexnetwork.select().where(
app.model.network_profilexnetwork.network_profile == int(form_values['sel_profile_create_port_access']))
for network_profile in network_profilexnetworks:
network_o = app.model.network.select().where(app.model.network.id == network_profile.network.id)
if len(network_o) > 0:
self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
form_values['sel_create_single_access_leaf'],
form_values['sel_create_single_access_port'],
network_o[0].encapsulation,
'migration-tool',
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
else:
ex = Exception()
                        ex.message = 'Some networks were not assigned because they are not in the local database'
raise ex
obj_response.script("create_notification('Assigned', '', 'success', 5000)")
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not create single access', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def get_delete_single_access_networks(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_delete_single_access_network select with the network within the selected group
try:
network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_delete_single_access_group'])
item_list = []
if network_ap is not None:
networks = self.cobra_apic_object.get_epg_by_ap(str(network_ap.dn))
for network in networks:
# Creates a dynamic object
network_do = type('network_do', (object,), {})
network_do.key = str(network.dn)
network_do.text = network.name
item_list.append(network_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_delete_single_access_network", html_response)
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not retrieve networks', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#delete_single_access_response", '')
def get_delete_single_access_ports(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_delete_single_access_port select with the available ports from the selected leaf
try:
ports = self.cobra_apic_object.get_available_ports(form_values['sel_delete_single_access_leaf'])
item_list = []
for i in range(0, len(ports[0])):
# Creates a dynamic object
port_do = type('port_do', (object,), {})
port_do.key = ports[0][i]
port_do.text = ports[1][i]
item_list.append(port_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_delete_single_access_port", html_response)
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not retrieve ports', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#delete_single_access_response", '')
def delete_single_access(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Removes the static binding between a port and an EPG. If no other EPG is using this port the system
# removes also the switch profile, interface profile and policy group associated with the port
try:
port_id = form_values['sel_delete_single_access_port'].split('[')[-1][:-1].replace('/','-')
switch_id = form_values['sel_delete_single_access_leaf'].split('/')[-1]
if form_values['delete_port_access_type'] == 'single_vlan':
network_o = app.model.network.select().where(app.model.network.epg_dn ==
form_values['sel_delete_single_access_network'])
if len(network_o) > 0:
self.cobra_apic_object.delete_single_access(form_values['sel_delete_single_access_network'],
form_values['sel_delete_single_access_port'],
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
obj_response.script("create_notification('Removed', '', 'success', 5000)")
else:
obj_response.script(
"create_notification('Network not found in local database', '', 'danger', 0)")
elif form_values['delete_port_access_type'] == 'vlan_profile':
network_profilexnetworks = app.model.network_profilexnetwork.select().where(
app.model.network_profilexnetwork.network_profile == int(form_values['sel_profile_delete_port_access']))
for network_profile in network_profilexnetworks:
network_o = app.model.network.select().where(app.model.network.id == network_profile.network.id)
if len(network_o) > 0:
self.cobra_apic_object.delete_single_access(network_o[0].epg_dn,
form_values['sel_delete_single_access_port'],
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
obj_response.script("create_notification('Removed', '', 'success', 5000)")
except Exception as e:
            traceback.print_exc()
obj_response.script(
"create_notification('Can not delete single access', '" + str(e).replace("'", "").replace('"', '').
replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#delete_single_access_response", '') | sfloresk/NCA-Container-Builder | NCABase/app/sijax_handlers/single_access_handler.py | Python | apache-2.0 | 13,867 | 0.00786 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 12:28:32 2015
@author: boland
"""
import sys
sys.path.append('/home/boland/Anaconda/lib/python2.7/site-packages')
import pickle
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
import multiprocessing as mp
import pyproj
import os
import itertools
import datetime
import pointshape as ps
from math import sqrt, atan2, radians,degrees, cos, tan, sin, asin
import random
import uuid
shape_path = "/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
N = 130
#enter km spacing between path density points
km_points = 20.0
# reference ellipsoid to calculate distance
wgs84 = pyproj.Geod(ellps='WGS84')
nbins = 200
def haversine(coordinates):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1, lat1, lon2, lat2= coordinates[0],coordinates[1],\
coordinates[2],coordinates[3]
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
def haversine2(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
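# Rough sanity check for the two haversine helpers: one degree of longitude
# along the equator is about 111 km, and with the 6367 km radius used here
#     >>> round(haversine2(0.0, 0.0, 1.0, 0.0), 1)
#     111.1
# haversine() takes the same four values packed into a single sequence.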
def geodesic(coord1, coord2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=coord1[0], lat1=coord1[1],
lon2=coord2[0], lat2=coord2[1],
npts=npts-2)
return np.array([coord1] + path + [coord2])
def new_geodesic(lon1,lat1,lon2,lat2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def cluster_points(coord_points, N):
"""
Function that returns k which is an nx2 matrix of lon-lat vector columns
containing the optimal cluster centroid spacings within a large set of random
numbers e.g. those produced by the many_points() function above!
"""
k = kmeans(coord_points, N)
return k[0]
def paths_func(path_info, km=km_points):
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
#lon1, lat1, lon2, lat2, dist = path_info[0], \
#path_info[1], path_info[2], path_info[3], \
#path_info[4]
dist = haversine2(lon1, lat1, lon2, lat2)
# interpoint distance <= 1 km, and nb of points >= 100
npts = max(int((np.ceil(dist) + 1)/km), 100)
path = new_geodesic(lon1,lat1,lon2,lat2, npts)
#print("still going strong\n")
length = len(path)
lons = [lon1 for i in range(0,length)]
lats = [lat1 for i in range(0,length)]
path = np.column_stack((path,lons,lats))
return path
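# Example of how paths_func sizes a path with km_points = 20.0: a 1000 km
# great-circle leg gives int((np.ceil(1000.0) + 1) / 20.0) == 50 samples,
# which the max() then raises to the floor of 100, so short paths are never
# under-sampled.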
def HIST2D(nbins,paths, grad=False):
H, xedges, yedges = np.histogram2d(paths[:,0],paths[:,1],bins=nbins)
#name = "path_density_2Dhist.png"
if grad:
H = np.abs(np.asarray(np.gradient(H)[0]))#name = "path_density_2Dhist_grad.png"
# H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
return Hmasked
#fig = plt.figure()
#plt.pcolormesh(xedges,yedges,Hmasked)
#plt.xlabel('longitude (degrees)')
#plt.ylabel('longitude (degrees)')
#cbar = plt.colorbar()
#cbar.ax.set_ylabel('Counts')
#fig.savefig(name)
def latitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lat = degrees(asin(cos(alpha0)*sin(sigma)))
#alpha = atan2(tan(alpha0),cos(sigma))
return lat
def longitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lon = degrees(atan2(sin(alpha0)*sin(sigma), cos(sigma))) + degrees(lon0)
#alpha = atan2(tan(alpha0),cos(sigma))
return lon
vlat_func = np.vectorize(latitude)
vlon_func = np.vectorize(longitude)
def waypoint_init(path_info, km=km_points):
R = 6371
lon1, lat1, lon2, lat2, dist = radians(path_info[0]), \
radians(path_info[1]), radians(path_info[2]), \
radians(path_info[3]), radians(path_info[4])
#lon1, lat1, lon2, lat2, dist = map(radians, [path_info[0],path_info[1],path_info[2],path_info[3],path_info[4]])
lon_diff = lon2-lon1
alpha1 = atan2(sin(lon_diff),(cos(lat1)*tan(lat2)-sin(lat1)*cos(lon_diff)))
#alpha2 = atan2(sin(lon_diff),(-cos(lat2)*tan(lat1)+sin(lat2)*cos(lon_diff)))
#try:
#sigma12 = acos(sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(lon_diff))
#except:
#return
sigma01, alpha0 = atan2(tan(lat1), cos(alpha1)), asin(sin(alpha1)*cos(lat1))
#sigma02 = sigma01+sigma12
lon01 = atan2(sin(alpha0)*sin(sigma01), cos(sigma01))
lon0 = lon1 - lon01
npts = max(int((np.ceil(dist) + 1)/km), 100)
all_d = np.linspace(0,dist,npts)/R
lons, lats = vlon_func(all_d, sigma01, alpha0, lon0), vlat_func(all_d, sigma01, alpha0, lon0)
return np.column_stack((lons, lats))
t_total0 = datetime.datetime.now()
t0 = datetime.datetime.now()
ideal_path = 'ideal_coordinates.pickle'
#if no paths have been done before, start afresh!
if not os.path.exists(ideal_path):
M = 1e5
many_points = ps.points_in_shape(shape_path, M)
coords = cluster_points(many_points,N)
#else import already processed coordinates if the program has already done so.
else:
f = open(name=ideal_path, mode='rb')
coords = pickle.load(f)
f.close()
#generate N kmeans cluster points from massive M number of randomly distributed
#points inside the shape file.
lonmin = np.floor(min(coords[:,0]))
lonmax = np.ceil(max(coords[:,0]))
latmin = np.floor(min(coords[:,1]))
latmax = np.ceil(max(coords[:,1]))
print lonmin,lonmax,latmin,latmax
#coords1 = [coord1 for coord1 in coords for coord2 in coords]
#coords2 = [coord2 for coord1 in coords for coord2 in coords]
#columns = np.column_stack((coords1, coords2))
kappa = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
def spread_paths(nets):
#pool = mp.Pool()
#paths = pool.map(new_paths, nets)
#pool.close()
#pool.join()
paths = map(paths_func, nets)
#create a flattened numpy array of size 2xN from the paths created!
#paths = np.asarray(list(itertools.chain(*paths)))
#keep all but the repeated coordinates by keeping only unique whole rows!
#method is slowed without the b contiguous array
#b = np.ascontiguousarray(paths).view(np.dtype((np.void, paths.dtype.itemsize * paths.shape[1])))
#_, idx = np.unique(b, return_index=True)
#paths = np.unique(b).view(paths.dtype).reshape(-1, paths.shape[1])
#plt.figure()
#plt.scatter(paths[:,0],paths[:,1])
#name = uuid.uuid4()
#plt.savefig('{}.png'.format(name))
return paths
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, kappa)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
#paths = list(paths)
counter = 0
#cd Desktop/Link\ to\ SIMULATIONS/Network_Tracks/smarter_model/
grad_ideal, grad_check1, grad_check2, H_avg1, H_avg2 = 0, 0, 0, 0, 0
SHAPE = (1,1)
counter2 = 0
perc_high = 0.01
#counter of how many times the points
#have been chosen from the lowest path density spots
low_counter = 0
#counter of how many times the points
#have been chosen from the random spots.
random_counter = 0
new_coord = 0
infinite_counter = 0
while infinite_counter <= 1:
t0 = datetime.datetime.now()
    #the following while loop is a workaround for a bug where the
    #new paths shape comes out as (130, 100, 4) rather than (130,) like it should be!
while SHAPE != (130,):
#if counter2 >= len(paths)-1:
# counter2 = 0
#cycle through paths
#----------------------------------------------------------------------
#old_path = paths[counter2]
#del paths[counter2]
#old_coord = [old_path[0][0][0],old_path[0][0][1]]
#itemindex = np.where(coords==old_coord)[0][0]
#coords = list(coords)
#find index of array in nested array to remove!
#del coords[itemindex]
#print(counter2)
#----------------------------------------------------------------------
#or random selection of paths?!
#----------------------------------------------------------------------
#remove a random set of paths associated with a single one of the N coordinates
rand_int = random.randint(0,len(paths)-1)
old_path = paths[rand_int]
#figure out which old coordinate to remove from the coordinates list
old_coord = [old_path[0][0][0],old_path[0][0][1]]
#print "old coord:", old_coord
#NEED TO REMOVE OLD POINT FROM COORDS!
#find index of array in nested array to remove!
itemindex = np.where(coords==old_coord)[0][0]
coords = list(coords)
#find index of array in nested array to remove!
del coords[itemindex]
coords = np.asarray(coords)
new_coord_first = new_coord
#----------------------------------------------------------------------
#generate new point coordinate
if not counter >= 1:
new_coord = ps.points_in_shape(shape_path, 1)[0]
else:
new_coord = new_coord
#place new coordinate in old set of coordinates
coords = np.append(coords, [new_coord], axis=0)
#generate new array of points in conjunction with the new randomly generated point!
new_coord_set = np.vstack([[new_coord[0],new_coord[1],coord1[0],\
coord1[1]] for coord1 in coords])
#generate new random point in place of all 'popped' points!
new_paths = map(paths_func, new_coord_set)
SHAPE = np.asarray(new_paths).shape
if not SHAPE == (130,):
            #substitute the old coordinate back in place of the new coordinate!
coords = list(coords)
#find index of array in nested array to remove!
del coords[-1]
coords = np.asarray(coords)
#place new coordinate in old set of coordinates
coords = np.append(coords, [old_coord], axis=0)
#print "new paths shape:", SHAPE
#paths = np.asarray(paths)
#if np.asarray(new_paths).shape != (130,):
# print("This one's trouble")
# print np.asarray(new_paths).shape
# new_paths = np.asarray(new_paths[0]).reshape(130,)
del paths[rand_int]
SHAPE = (1,1)
#place new_paths in original path set!
#paths = np.insert(paths, [1], [new_paths], axis=0)
paths = np.append(paths, [new_paths], axis=0)
#paths = paths.append(new_paths)
#paths = np.concatenate((paths, [new_paths]), axis=0)
#paths = np.append(paths, new_paths, axis=0)
#create a flattened numpy array of size 2xN from the paths created!
paths_density_check = list(itertools.chain(*paths))
paths_density_check = np.asarray(list(itertools.chain(*paths_density_check)))
#keep all but the repeated coordinates by keeping only unique whole rows!
#method is slowed without the b contiguous array
b = np.ascontiguousarray(paths_density_check).view(np.dtype\
((np.void, paths_density_check.dtype.itemsize * \
paths_density_check.shape[1])))
_, idx = np.unique(b, return_index=True)
paths_density_check = np.unique(b).view(paths_density_check.dtype)\
.reshape(-1, paths_density_check.shape[1])
#plt.figure()
#plt.scatter(paths_density_check[:,0],paths_density_check[:,1])
#plt.savefig('{}.png'.format(counter))
#remove 3rd and 4th columns
#paths_density_check = np.column_stack((paths_density_check[:,0],
# paths_density_check[:,1]))
#remove all path points that lay outside the shape file polygon
#paths_density_check = ps.paths_in_shape(paths_density_check)
paths = list(paths)
# Estimate the 2D histogram
H, xedges, yedges = np.histogram2d(paths_density_check[:,0],
paths_density_check[:,1],
bins=nbins)
#edges_new = ps.paths_in_shape(np.column_stack((xedges,yedges)))
GRAD = np.abs(np.asarray(np.gradient(H)[0]))
# H needs to be rotated and flipped
H = np.rot90(H)
GRAD = np.rot90(GRAD)
H = np.flipud(H)
GRAD = np.flipud(GRAD)
# Mask zeros
H = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
GRAD = np.ma.masked_where(GRAD==0,GRAD) # Mask pixels with a value of zero
H_avg1 = np.average(H)
grad_check1 = np.std(GRAD)
rand_indicator = random.randint(1,10)
if 0 < rand_indicator <= 5:
#half the time move the coordinates to low density locations.
WHERE = np.where(H < perc_high*H_avg1)
#scale these points with respect to the lat-lon limits!
Hminx, Hminy = WHERE[1], WHERE[0]
Hminx = (lonmax-lonmin)/(nbins) * Hminx + lonmin
Hminy = (latmax-latmin)/(nbins) * Hminy + latmin
#make sure all low density coordinates ARE within shapefile!
low_density_coords = ps.paths_in_shape(np.column_stack((Hminx, Hminy)))
if len(low_density_coords) == 0:
new_coord = ps.points_in_shape(shape_path, 1)[0]
#increase percentage of search if no new low density points are created!
perc_high +=0.05
elif len(low_density_coords) == 1:
new_coord = low_density_coords[0]
perc_high +=0.05
else:
new_coord = low_density_coords[random.randint(0,len(low_density_coords)-1)]
elif 5 < rand_indicator <= 10:
#half the time move coordinates to random locations.
new_coord = ps.points_in_shape(shape_path, 1)[0]
if counter == 0:
grad_ideal = 1e6
avg_ideal = 0
if grad_check1 < grad_ideal and avg_ideal < H_avg1:
#counter >= 1 and
#dump the coordinates!
#print grad_check1, grad_ideal
#print avg_ideal, H_avg1
print "Exporting new ideal coordinates."
with open(u'ideal_coordinates.pickle', 'wb') as f:
print "\nExporting new ideal coordinates."
pickle.dump(coords, f, protocol=2)
grad_ideal = grad_check1
avg_ideal = H_avg1
# find indices of pixels where H==HMIN
#HMATMIN = np.ma.masked_where(H>HMIN,H)
#only select coordinates where the density is 10% of the average or below!
fig = plt.figure()
plt.pcolormesh(xedges,yedges,H)
plt.xlabel('longitude (degrees)')
plt.ylabel('latitude (degrees)')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
#plt.scatter(low_density_coords[:,0], low_density_coords[:,1], color='red')
fig.savefig("min_density.png".format(counter))
#print(len(paths))
#print(len(KEEP_PATHS))
else:
#RESET!
#remove new coordinate and replace with old coordinate
coords = list(coords)
del coords[-1]
coords = np.asarray(coords)
#place new coordinate in old set of coordinates
coords = np.append(coords, [old_coord], axis=0)
#remove new path and replace it with the old set!
paths = list(paths)
del paths[-1]
paths = list(np.append(paths, [old_path], axis=0))
#plt.scatter(Hminx, Hminy, color='yellow')
#grad_check2 = grad_check1
#H_avg2 = H_avg1
#print(counter)
counter+=1
counter2+=1
t1 = datetime.datetime.now()
print t1-t0 | boland1992/SeisSuite | seissuite/sort_later/find_holes.py | Python | gpl-3.0 | 17,030 | 0.021315 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from tempfile import mkstemp
from shutil import move
from django.db import transaction
from django.conf import settings
from rest_framework.response import Response
from storageadmin.models import (NetworkInterface, Appliance)
from storageadmin.util import handle_exception
from storageadmin.serializers import NetworkInterfaceSerializer
from system.osi import (config_network_device, get_net_config, update_issue)
from system.samba import update_samba_discovery
from system.services import superctl
import rest_framework_custom as rfc
import logging
logger = logging.getLogger(__name__)
class NetworkMixin(object):
@staticmethod
def _update_ni_obj(nio, values):
nio.dname = values.get('dname', None)
nio.mac = values.get('mac', None)
nio.method = values.get('method', 'manual')
nio.autoconnect = values.get('autoconnect', 'no')
nio.netmask = values.get('netmask', None)
nio.ipaddr = values.get('ipaddr', None)
nio.gateway = values.get('gateway', None)
nio.dns_servers = values.get('dns_servers', None)
nio.ctype = values.get('ctype', None)
nio.dtype = values.get('dtype', None)
nio.dspeed = values.get('dspeed', None)
nio.state = values.get('state', None)
return nio
@staticmethod
def _update_nginx(ipaddr=None):
#update nginx config and restart the service
conf = '%s/etc/nginx/nginx.conf' % settings.ROOT_DIR
fo, npath = mkstemp()
with open(conf) as ifo, open(npath, 'w') as tfo:
for line in ifo.readlines():
if (re.search('listen.*80 default_server', line) is not None):
substr = 'listen 80'
if (ipaddr is not None):
substr = 'listen %s:80' % ipaddr
line = re.sub(r'listen.*80', substr, line)
elif (re.search('listen.*443 default_server', line) is not None):
substr = 'listen 443'
if (ipaddr is not None):
substr = 'listen %s:443' % ipaddr
line = re.sub(r'listen.*443', substr, line)
tfo.write(line)
move(npath, conf)
superctl('nginx', 'restart')
class NetworkListView(rfc.GenericView, NetworkMixin):
serializer_class = NetworkInterfaceSerializer
def get_queryset(self, *args, **kwargs):
with self._handle_exception(self.request):
self._net_scan()
#to be deprecated soon
update_samba_discovery()
return NetworkInterface.objects.all()
@classmethod
@transaction.atomic
def _net_scan(cls):
config_d = get_net_config(all=True)
for dconfig in config_d.values():
ni = None
if (NetworkInterface.objects.filter(
name=dconfig['name']).exists()):
ni = NetworkInterface.objects.get(name=dconfig['name'])
ni = cls._update_ni_obj(ni, dconfig)
else:
ni = NetworkInterface(name=dconfig.get('name', None),
dname=dconfig.get('dname', None),
dtype=dconfig.get('dtype', None),
dspeed=dconfig.get('dspeed', None),
mac=dconfig.get('mac', None),
method=dconfig.get('method', None),
autoconnect=dconfig.get('autoconnect', None),
netmask=dconfig.get('netmask', None),
ipaddr=dconfig.get('ipaddr', None),
gateway=dconfig.get('gateway', None),
dns_servers=dconfig.get('dns_servers', None),
ctype=dconfig.get('ctype', None),
state=dconfig.get('state', None))
ni.save()
devices = []
for ni in NetworkInterface.objects.all():
if (ni.dname not in config_d):
logger.debug('network interface(%s) does not exist in the '
'system anymore. Removing from db' % (ni.name))
ni.delete()
else:
devices.append(ni)
serializer = NetworkInterfaceSerializer(devices, many=True)
return Response(serializer.data)
def post(self, request):
with self._handle_exception(request):
return self._net_scan()
class NetworkDetailView(rfc.GenericView, NetworkMixin):
serializer_class = NetworkInterfaceSerializer
def get(self, *args, **kwargs):
try:
data = NetworkInterface.objects.get(name=self.kwargs['iname'])
serialized_data = NetworkInterfaceSerializer(data)
return Response(serialized_data.data)
except:
return Response()
@transaction.atomic
def delete(self, request, iname):
with self._handle_exception(request):
if (NetworkInterface.objects.filter(name=iname).exists()):
i = NetworkInterface.objects.get(name=iname)
i.delete()
return Response()
def _validate_netmask(self, request):
netmask = request.data.get('netmask', None)
e_msg = ('Provided netmask value(%s) is invalid. You can provide it '
'in a IP address format(eg: 255.255.255.0) or number of '
'bits(eg: 24)' % netmask)
if (netmask is None):
handle_exception(Exception(e_msg), request)
bits = 0
try:
bits = int(netmask)
except ValueError:
#assume ip address format was provided
            bits = sum([bin(int(x)).count('1') for x in netmask.split('.')])
if (bits < 1 or bits > 32):
e_msg = ('Provided netmask value(%s) is invalid. Number of '
'bits in netmask must be between 1-32' % netmask)
handle_exception(Exception(e_msg), request)
return bits
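    # Example of the dotted-quad branch above, e.g. for netmask '255.255.255.0':
    #     >>> sum(bin(int(x)).count('1') for x in '255.255.255.0'.split('.'))
    #     24
    # while a plain '24' is accepted directly through the int() fast path.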
@transaction.atomic
def put(self, request, iname):
with self._handle_exception(request):
if (not NetworkInterface.objects.filter(name=iname).exists()):
                e_msg = ('Network interface(%s) does not exist.' % iname)
handle_exception(Exception(e_msg), request)
ni = NetworkInterface.objects.get(name=iname)
itype = request.data.get('itype')
if (itype != 'management'):
itype = 'io'
method = request.data.get('method')
ni.onboot = 'yes'
if (method == 'auto'):
config_network_device(ni.name)
elif (method == 'manual'):
ipaddr = request.data.get('ipaddr')
for i in NetworkInterface.objects.filter(ipaddr=ipaddr):
if (i.id != ni.id):
e_msg = ('IP: %s already in use by another '
'interface: %s' % (ni.ipaddr, i.name))
handle_exception(Exception(e_msg), request)
netmask = self._validate_netmask(request)
gateway = request.data.get('gateway', None)
dns_servers = request.data.get('dns_servers', None)
config_network_device(ni.name, dtype=ni.dtype, method='manual',
ipaddr=ipaddr, netmask=netmask,
gateway=gateway, dns_servers=dns_servers)
else:
                e_msg = ('Method must be auto (for dhcp) or manual (for '
                         'static IP), not: %s' % method)
handle_exception(Exception(e_msg), request)
dconfig = get_net_config(name=ni.name)[ni.dname]
ni = self._update_ni_obj(ni, dconfig)
if (itype == 'management' and ni.itype != 'management'):
for i in NetworkInterface.objects.filter(itype='management'):
if (i.name != ni.name):
e_msg = ('Another interface(%s) is already configured '
'for management. You must disable it first '
'before making this change.' % i.name)
handle_exception(Exception(e_msg), request)
a = Appliance.objects.get(current_appliance=True)
a.ip = ni.ipaddr
a.save()
try:
self._update_nginx(ni.ipaddr)
except Exception, e:
logger.error('Failed to update Nginx. Exception: %s' % e.__str__())
elif (itype == 'io' and ni.itype == 'management'):
try:
self._update_nginx()
except Exception, e:
logger.error('Failed to update Nginx. Exception: %s' % e.__str__())
ni.itype = itype
ni.save()
if (ni.itype == 'management'):
try:
update_issue(ni.ipaddr)
except Exception, e:
logger.error('Unable to update /etc/issue. Exception: %s' % e.__str__())
return Response(NetworkInterfaceSerializer(ni).data)
| nkhare/rockstor-core | src/rockstor/storageadmin/views/network.py | Python | gpl-3.0 | 10,047 | 0.001294 |
from unittest import TestCase
from rfxcom.protocol.temperature import Temperature
from rfxcom.exceptions import (InvalidPacketLength, UnknownPacketSubtype,
UnknownPacketType)
class TemperatureTestCase(TestCase):
def setUp(self):
self.data = bytearray(b'\x08\x50\x02\x11\x70\x02\x00\xA7\x89')
self.parser = Temperature()
def test_parse_bytes(self):
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'packet_length': 8,
'packet_type': 80,
'packet_type_name': 'Temperature sensors',
'sequence_number': 17,
'packet_subtype': 2,
'packet_subtype_name':
'THC238/268,THN132,THWR288,THRN122,THN122,AW129/131',
'temperature': 16.7,
'id': '0x7002',
# 'channel': 2, TBC
'signal_level': 8,
'battery_level': 9
})
self.assertEquals(str(self.parser), "<Temperature ID:0x7002>")
def test_parse_bytes2(self):
self.data = bytearray(b'\x08\x50\x03\x02\xAE\x01\x00\x63\x59')
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'packet_length': 8,
'packet_type': 80,
'packet_type_name': 'Temperature sensors',
'sequence_number': 2,
'packet_subtype': 3,
'packet_subtype_name': 'THWR800',
'temperature': 9.9,
'id': '0xAE01',
# 'channel': 1, TBC
'signal_level': 5,
'battery_level': 9
})
self.assertEquals(str(self.parser), "<Temperature ID:0xAE01>")
def test_parse_bytes_negative_temp(self):
self.data = bytearray(b'\x08\x50\x06\x02\xAE\x01\x80\x55\x59')
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'packet_length': 8,
'packet_type': 80,
'packet_type_name': 'Temperature sensors',
'sequence_number': 2,
'packet_subtype': 6,
'packet_subtype_name': 'TS15C',
'temperature': -8.5,
'id': '0xAE01',
# 'channel': 1, TBC
'signal_level': 5,
'battery_level': 9
})
self.assertEquals(str(self.parser), "<Temperature ID:0xAE01>")
def test_validate_bytes_short(self):
data = self.data[:1]
with self.assertRaises(InvalidPacketLength):
self.parser.validate_packet(data)
def test_validate_unkown_packet_type(self):
self.data[1] = 0xFF
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketType):
self.parser.validate_packet(self.data)
def test_validate_unknown_sub_type(self):
self.data[2] = 0xEE
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketSubtype):
self.parser.validate_packet(self.data)
def test_log_name(self):
self.assertEquals(self.parser.log.name, 'rfxcom.protocol.Temperature')
| skimpax/python-rfxcom | tests/protocol/test_temperature.py | Python | bsd-3-clause | 3,484 | 0 |
#! /usr/bin/env python3
import sys
in_class = False
for l in sys.stdin:
if l.startswith("class"):
in_class = True
if in_class:
if l.startswith("};"):
in_class = False
continue
else:
print(l, end='')
| ctongfei/nexus | torch/remove_classes.py | Python | mit | 259 | 0.003861 |
"""
Copyright 2013 Shine Wang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib
import re
from HTMLParser import HTMLParser
from courseClasses import Course, Lecture, Tutorial, Reserve
class CustomHTMLParser(HTMLParser):
"""this class reads a HTML stream, then parses out the "data" fields"""
def __init__(self, webData):
HTMLParser.__init__(self)
self.webData = webData
def handle_data(self, data):
"""takes out the data"""
self.webData.append(data.strip())
class WebParser:
""""A WebParser is created for each and every course,
to parse the corresponding web page"""
requestURL = "http://www.adm.uwaterloo.ca/cgi-bin/" \
"cgiwrap/infocour/salook.pl"
def __init__(self):
self.webData = []
self.index = -1
self.session = None
self.thisCourse = None
def run(self, courseString, sessionString):
"""this is the method that the main class can call
if successful, returns the Course class
if not, returns an error message"""
self.session = self.parseSession(sessionString)
if self.session is None:
return "SessionNameWrongError"
courseString = map(lambda x: x.upper(), courseString.split())
try:
self.thisCourse = Course(self.session, courseString[0],
courseString[1])
except:
return "CourseNameWrongError"
if self.getWebData(self.thisCourse):
return "WebPageError"
elif self.parseWebData():
return "CourseNotFoundError"
else:
self.processCourseInfo()
self.postProcess(self.thisCourse)
return self.thisCourse
def parseSession(self, sessionString):
try:
ret = "1"
ret += sessionString.split()[1][-2:] # last 2 digits of year
tempMap = (("fall", "9"), ("winter", "1"), ("spring", "5"))
for season in tempMap:
if season[0] in sessionString.lower():
ret += season[1]
return ret
except:
return None
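    # Worked example for parseSession: "Spring 2015" gives "1" + "15" + "5",
    # i.e. "1155", and "Fall 2013" gives "1139"; anything that does not split
    # into a recognisable "<season> <year>" pair falls through to the except
    # clause and returns None.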
def getWebData(self, course):
"""submits a POST query, initializes HTMLParser"""
try:
params = urllib.urlencode({"sess": course.session,
"subject": course.subject,
"cournum": course.catalogNumber})
page = urllib.urlopen(WebParser.requestURL, params)
parser = CustomHTMLParser(self.webData)
# we use .replace() because HTMLParser ignores " ",
            # which would screw up our table
parser.feed(page.read().replace(" ", " "))
except:
return "WebPageError"
def parseWebData(self):
"""We try to find the beginning of the desired table"""
# now, we find the start index and pass that on along
# with the webData
for i in xrange(len(self.webData)-3):
if self.webData[i] == self.thisCourse.subject \
and self.webData[i+2] == self.thisCourse.catalogNumber:
self.index = i
break
if self.index == -1: # website not found
return "CourseNotFound"
def processCourseInfo(self):
"""now, we do the heavy-duty processing of the data table"""
# sets basic attrs of thisCourse
self.thisCourse.units = self.webData[self.index+4]
self.thisCourse.title = self.webData[self.index+6]
while self.webData[self.index] != "Instructor":
self.index += 1
# processing row-by-row
while not self.endOfRow(self.webData[self.index]):
if self.webData[self.index] != "":
self.processSlot()
self.index += 1
if self.index == len(self.webData):
return
def processSlot(self):
"""we check to see if this is the BEGINNING of a valid row"""
if (self.webData[self.index+1][:3].upper() == "LEC"
or self.webData[self.index+1][:3].upper() == "LAB") \
and "ONLINE" not in self.webData[self.index+2]:
# we don't want online classes!
# processing a lecture row
lec = Lecture()
if self.processClass(lec, self.index, self.webData):
return
self.thisCourse.lectures.append(lec)
elif self.webData[self.index+1][:3].upper() == "TUT":
# processing a tutorial row
tut = Tutorial()
if self.processClass(tut, self.index, self.webData):
return
self.thisCourse.tutorials.append(tut)
elif self.webData[self.index][:7].upper() == "RESERVE":
# processing a reserve row
res = Reserve()
self.processReserve(res, self.index, self.webData)
if self.thisCourse.lectures:
self.thisCourse.lectures[-1].reserves.append(res)
# note: we leave out the TST (exam?) times for now
def processReserve(self, res, index, webData):
"""processing reservations for certain types of students"""
res.name = webData[index][9:]
# we remove the "only" suffix (which is annoyingly pointless)
if "only" in res.name:
res.name = res.name[:-5]
        # also, the "students" suffix
if "students" in res.name or "Students" in res.name:
res.name = res.name[:-9]
# now, we merge the match list
while not webData[index].isdigit():
index += 1
        # retrieving enrollment numbers
res.enrlCap = int(webData[index])
res.enrlTotal = int(webData[index+1])
def processClass(self, lec, index, webData):
"""we process a typical lecture or tutorial row"""
attr1 = ["classNumber", "compSec", "campusLocation"]
for i in xrange(len(attr1)):
setattr(lec, attr1[i], webData[index+i].strip())
index += 6
attr2 = ["enrlCap", "enrlTotal", "waitCap", "waitTotal"]
for i in xrange(len(attr2)):
setattr(lec, attr2[i], int(webData[index+i]))
index += 4
# parsing the "Times Days/Date" field
match = re.search(r"([:\d]+)-([:\d]+)(\w+)", webData[index])
if not match:
# we return an error message in the "TBA" case
return "NoTimeError"
attr3 = ["startTime", "endTime", "days"]
for i in xrange(len(attr3)):
setattr(lec, attr3[i], match.group(i+1).strip())
index += 1
if len(webData[index].split()) == 2:
# sometimes, no building, room, and instructor will be given
# this is mostly for Laurier courses
lec.building, lec.room = webData[index].split()
lec.instructor = webData[index+1].strip()
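        # Example of the "Times Days/Date" parsing above: a cell such as
        # "10:00-11:20MWF" yields startTime "10:00", endTime "11:20" and
        # days "MWF", while a "TBA" cell fails the regex and the slot is
        # skipped via the "NoTimeError" return.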
def endOfRow(self, data):
"""returns true if the current data-cell is the last cell
of this course; else - false"""
# the last cell is of the form: ##/##-##/## or
# "Information last updated
if re.search(r"\d+/\d+-\d+/\d+", data) or \
"Information last updated" in data:
return True
else:
return False
def postProcess(self, course):
"""this function will convert the class times to minutes-past-
the-previous-midnight, and converts the days to numbers.
Also, some reservation-postprocessing"""
map(lambda x: x.calcMiscSeats(), course.lectures)
for lec in course.lectures:
lec.courseID = course.subject + " " + course.catalogNumber
for tut in course.tutorials:
tut.courseID = course.subject + " " + course.catalogNumber
for slot in course.lectures + course.tutorials:
# first, we convert time to 24hr time
# earliest start time for a class is 8:30am
# night classes start at/before 7:00pm
if 1 <= int(slot.startTime.split(":")[0]) <= 7:
slot.startTime, slot.endTime = \
map(lambda x: "{}:{}".format(str(int(x.split(":")[0])
+ 12), x[-2:]), [slot.startTime,
slot.endTime])
elif int(slot.startTime.split(":")[0]) > int(
slot.endTime.split(":")[0]):
# e.g. 12:00 to 1:00
slot.endTime = "{}:{}".format(str(int(
slot.endTime.split(":")[0])+12), slot.endTime[-2:])
# now, we write to slot.sTime, slot.eTime
# (minutes-past-midnight...)
slot.sTime, slot.eTime = map(lambda x: int(x[:2]) * 60 +
int(x[-2:]),
[slot.startTime, slot.endTime])
# we write to slot.ndays, where ndays is a string of numbers,
# 0->4
if "M" in slot.days:
slot.ndays += "0"
i = slot.days.find("T")
if i != -1 and (i == len(slot.days) - 1 or
slot.days[i+1] != 'h'):
# basically, if not Th (for Thursday)
slot.ndays += "1"
# now, for the rest of the days...
for i in [("W", "2"), ("Th", "3"), ("F", "4")]:
if i[0] in slot.days:
slot.ndays += i[1]
# we make a small adjustment to campusLocation,
# removing whitespace
slot.campusLocation = slot.campusLocation.split()[0]
# we make the prof name "first last" instead of
# "last,first middle"
if slot.instructor != "":
s = slot.instructor.split(" ")
for i in s:
if "," in i:
# we want the 2 words connected by the ","
slot.instructor = " ".join(reversed(list(
i.split(","))))
| shinexwang/Classy | Main/webParser.py | Python | apache-2.0 | 10,545 | 0.000379 |
# coding=utf-8
'''
cron trigger
@author: Huiyugeng
'''
import datetime
import trigger
class CronTrigger(trigger.Trigger):
def __init__(self, cron):
trigger.Trigger.__init__(self, 0, 1);
self.cron = cron
def _is_match(self):
parser = CronParser(self.cron)
_date = datetime.date.today()
_time = datetime.datetime.now()
return parser._is_match(_date, _time)
class CronParser():
def __init__(self, cron):
cron_item = cron.split(' ')
if len(cron_item) == 6 or len(cron_item) == 7:
self.second_set = self._parse_integer(cron_item[0], 0, 59)
self.minute_set = self._parse_integer(cron_item[1], 0, 59)
self.hour_set = self._parse_integer(cron_item[2], 0, 23)
self.day_of_month_set = self._parse_integer(cron_item[3], 1, 31)
self.month_set = self._parse_month(cron_item[4])
self.day_of_week_set = self._parse_day_of_week(cron_item[5])
            if len(cron_item) == 7:
                self.year_set = self._parse_integer(cron_item[6], 1970, 2100)
            else:
                self.year_set = self._parse_integer('*', 1970, 2100)
def _parse_integer(self, value, min_val, max_val):
result = []
range_items = []
if ',' in value:
range_items = value.split(',')
else:
range_items.append(value)
for range_item in range_items:
temp_result = []
interval = 1
if '/' in range_item:
temp = range_item.split('/')
range_item = temp[0]
interval = int(temp[1])
if interval < 1:
interval = 1
if '*' in range_item:
temp_result.extend(self._add_to_set(min_val, max_val))
elif '-' in range_item:
item = range_item.split('-')
temp_result.extend(self._add_to_set(int(item[0]), int(item[1])))
else:
temp_result.append(int(range_item))
count = 0
for item in temp_result:
if count % interval == 0:
result.append(item)
count = count + 1
return result
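    # Examples of the field grammar handled above (minute field, 0-59):
    #   "*/15"      -> [0, 15, 30, 45]
    #   "5-8,20"    -> [5, 6, 7, 8, 20]
    #   "0-10/5,30" -> [0, 5, 10, 30]
    # Comma lists, "a-b" ranges, "*" wildcards and "/step" suffixes compose
    # as shown.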
def _add_to_set(self, start, end):
result = [i for i in range(start, end + 1)]
return result
def _parse_month(self, value):
months = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
for i in range(0, 12):
value = value.replace(months[i], str(i + 1))
return self._parse_integer(value, 1, 12);
def _parse_day_of_week(self, value):
day_of_weeks = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
for i in range(0, 7):
value = value.replace(day_of_weeks[i], str(i + 1));
return self._parse_integer(value, 1, 7);
def _is_match(self, _date, _time):
# In Python datetime's weekday Monday is 0 and Sunday is 6
day_of_week = _date.weekday() + 1
result = True and \
_time.second in self.second_set and \
_time.minute in self.minute_set and \
_time.hour in self.hour_set and \
_date.day in self.day_of_month_set and \
_date.month in self.month_set and \
_date.year in self.year_set and \
day_of_week in self.day_of_week_set
return result
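    # End-to-end example: CronParser("0 */10 9-17 * * MON-FRI") matches any
    # moment whose second is 0, whose minute is a multiple of 10, whose hour
    # lies in 9-17 and whose weekday is Monday through Friday, for any day of
    # month, month and year in 1970-2100.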
| interhui/py_task | task/trigger/cron_trigger.py | Python | artistic-2.0 | 3,634 | 0.008255 |
# Copyright (c) 2014, Hubert Kario
#
# Efthimios Iosifidis - Speck Cipher Additions
# See the LICENSE file for legal information regarding use of this file.
"""Implementation of the TLS Record Layer protocol"""
import socket
import errno
import hashlib
from .constants import ContentType, CipherSuite
from .messages import RecordHeader3, RecordHeader2, Message
from .utils.cipherfactory import createAESGCM, createAES, createRC4, \
createTripleDES, createCHACHA20,createSPECK, createSPECK128GCM, createSPECK192GCM
from .utils.codec import Parser, Writer
from .utils.compat import compatHMAC
from .utils.cryptomath import getRandomBytes
from .utils.constanttime import ct_compare_digest, ct_check_cbc_mac_and_pad
from .errors import TLSRecordOverflow, TLSIllegalParameterException,\
TLSAbruptCloseError, TLSDecryptionFailed, TLSBadRecordMAC
from .mathtls import createMAC_SSL, createHMAC, PRF_SSL, PRF, PRF_1_2, \
PRF_1_2_SHA384
class RecordSocket(object):
"""Socket wrapper for reading and writing TLS Records"""
def __init__(self, sock):
"""
Assign socket to wrapper
@type sock: socket.socket
"""
self.sock = sock
self.version = (0, 0)
def _sockSendAll(self, data):
"""
Send all data through socket
@type data: bytearray
@param data: data to send
@raise socket.error: when write to socket failed
"""
while 1:
try:
bytesSent = self.sock.send(data)
except socket.error as why:
if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
yield 1
continue
raise
if bytesSent == len(data):
return
data = data[bytesSent:]
yield 1
def send(self, msg):
"""
Send the message through socket.
@type msg: bytearray
@param msg: TLS message to send
@raise socket.error: when write to socket failed
"""
data = msg.write()
header = RecordHeader3().create(self.version,
msg.contentType,
len(data))
data = header.write() + data
for result in self._sockSendAll(data):
yield result
def _sockRecvAll(self, length):
"""
Read exactly the amount of bytes specified in L{length} from raw socket.
@rtype: generator
@return: generator that will return 0 or 1 in case the socket is non
blocking and would block and bytearray in case the read finished
@raise TLSAbruptCloseError: when the socket closed
"""
buf = bytearray(0)
if length == 0:
yield buf
while True:
try:
socketBytes = self.sock.recv(length - len(buf))
except socket.error as why:
if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
yield 0
continue
else:
raise
#if the connection closed, raise socket error
if len(socketBytes) == 0:
raise TLSAbruptCloseError()
buf += bytearray(socketBytes)
if len(buf) == length:
yield buf
def _recvHeader(self):
"""Read a single record header from socket"""
#Read the next record header
buf = bytearray(0)
ssl2 = False
result = None
for result in self._sockRecvAll(1):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
if buf[0] in ContentType.all:
ssl2 = False
# SSLv3 record layer header is 5 bytes long, we already read 1
result = None
for result in self._sockRecvAll(4):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
# XXX this should be 'buf[0] & 128', otherwise hello messages longer
# than 127 bytes won't be properly parsed
elif buf[0] == 128:
ssl2 = True
# in SSLv2 we need to read 2 bytes in total to know the size of
# header, we already read 1
result = None
for result in self._sockRecvAll(1):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
else:
raise TLSIllegalParameterException(
"Record header type doesn't specify known type")
#Parse the record header
if ssl2:
record = RecordHeader2().parse(Parser(buf))
else:
record = RecordHeader3().parse(Parser(buf))
yield record
def recv(self):
"""
Read a single record from socket, handle SSLv2 and SSLv3 record layer
@rtype: generator
@return: generator that returns 0 or 1 in case the read would be
blocking or a tuple containing record header (object) and record
data (bytearray) read from socket
@raise socket.error: In case of network error
@raise TLSAbruptCloseError: When the socket was closed on the other
side in middle of record receiving
@raise TLSRecordOverflow: When the received record was longer than
allowed by TLS
@raise TLSIllegalParameterException: When the record header was
malformed
"""
record = None
for record in self._recvHeader():
if record in (0, 1):
yield record
else: break
assert record is not None
#Check the record header fields
# 18432 = 2**14 (basic record size limit) + 1024 (maximum compression
# overhead) + 1024 (maximum encryption overhead)
if record.length > 18432:
raise TLSRecordOverflow()
#Read the record contents
buf = bytearray(0)
result = None
for result in self._sockRecvAll(record.length):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
yield (record, buf)
class ConnectionState(object):
"""Preserve the connection state for reading and writing data to records"""
def __init__(self):
"""Create an instance with empty encryption and MACing contexts"""
self.macContext = None
self.encContext = None
self.fixedNonce = None
self.seqnum = 0
def getSeqNumBytes(self):
"""Return encoded sequence number and increment it."""
writer = Writer()
writer.add(self.seqnum, 8)
self.seqnum += 1
return writer.bytes
class RecordLayer(object):
"""
Implementation of TLS record layer protocol
@ivar version: the TLS version to use (tuple encoded as on the wire)
@ivar sock: underlying socket
@ivar client: whether the connection should use encryption
@ivar encryptThenMAC: use the encrypt-then-MAC mechanism for record
integrity
"""
def __init__(self, sock):
self.sock = sock
self._recordSocket = RecordSocket(sock)
self._version = (0, 0)
self.client = True
self._writeState = ConnectionState()
self._readState = ConnectionState()
self._pendingWriteState = ConnectionState()
self._pendingReadState = ConnectionState()
self.fixedIVBlock = None
self.encryptThenMAC = False
@property
def blockSize(self):
"""Return the size of block used by current symmetric cipher (R/O)"""
return self._writeState.encContext.block_size
@property
def version(self):
"""Return the TLS version used by record layer"""
return self._version
@version.setter
def version(self, val):
"""Set the TLS version used by record layer"""
self._version = val
self._recordSocket.version = val
def getCipherName(self):
"""
Return the name of the bulk cipher used by this connection
@rtype: str
@return: The name of the cipher, like 'aes128', 'rc4', etc.
"""
if self._writeState.encContext is None:
return None
return self._writeState.encContext.name
def getCipherImplementation(self):
"""
Return the name of the implementation used for the connection
'python' for tlslite internal implementation, 'openssl' for M2crypto
and 'pycrypto' for pycrypto
@rtype: str
@return: Name of cipher implementation used, None if not initialised
"""
if self._writeState.encContext is None:
return None
return self._writeState.encContext.implementation
def shutdown(self):
"""Clear read and write states"""
self._writeState = ConnectionState()
self._readState = ConnectionState()
self._pendingWriteState = ConnectionState()
self._pendingReadState = ConnectionState()
def isCBCMode(self):
"""Returns true if cipher uses CBC mode"""
if self._writeState and self._writeState.encContext and \
self._writeState.encContext.isBlockCipher:
return True
else:
return False
#
# sending messages
#
def addPadding(self, data):
"""Add padding to data so that it is multiple of block size"""
currentLength = len(data)
blockLength = self.blockSize
paddingLength = blockLength - 1 - (currentLength % blockLength)
paddingBytes = bytearray([paddingLength] * (paddingLength+1))
data += paddingBytes
return data
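        # Worked example of the CBC padding above: with a 16-byte block size
        # and 20 bytes of payload plus MAC, paddingLength is
        # 16 - 1 - (20 % 16) == 11, so twelve bytes of value 0x0b are appended
        # and the record grows to exactly two blocks (32 bytes).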
def calculateMAC(self, mac, seqnumBytes, contentType, data):
"""Calculate the SSL/TLS version of a MAC"""
mac.update(compatHMAC(seqnumBytes))
mac.update(compatHMAC(bytearray([contentType])))
assert self.version in ((3, 0), (3, 1), (3, 2), (3, 3))
if self.version != (3, 0):
mac.update(compatHMAC(bytearray([self.version[0]])))
mac.update(compatHMAC(bytearray([self.version[1]])))
mac.update(compatHMAC(bytearray([len(data)//256])))
mac.update(compatHMAC(bytearray([len(data)%256])))
mac.update(compatHMAC(data))
return bytearray(mac.digest())
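        # The bytes fed into the MAC above are, for TLS 1.0-1.2:
        #   seq_num(8) || content_type(1) || version(2) || length(2) || fragment
        # SSL 3.0 omits the two version bytes, as required by that protocol.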
def _macThenEncrypt(self, data, contentType):
"""MAC, pad then encrypt data"""
if self._writeState.macContext:
seqnumBytes = self._writeState.getSeqNumBytes()
mac = self._writeState.macContext.copy()
macBytes = self.calculateMAC(mac, seqnumBytes, contentType, data)
data += macBytes
#Encrypt for Block or Stream Cipher
if self._writeState.encContext:
#Add padding (for Block Cipher):
if self._writeState.encContext.isBlockCipher:
#Add TLS 1.1 fixed block
if self.version >= (3, 2):
data = self.fixedIVBlock + data
data = self.addPadding(data)
#Encrypt
data = self._writeState.encContext.encrypt(data)
return data
def _encryptThenMAC(self, buf, contentType):
"""Pad, encrypt and then MAC the data"""
if self._writeState.encContext:
# add IV for TLS1.1+
if self.version >= (3, 2):
buf = self.fixedIVBlock + buf
buf = self.addPadding(buf)
buf = self._writeState.encContext.encrypt(buf)
# add MAC
if self._writeState.macContext:
seqnumBytes = self._writeState.getSeqNumBytes()
mac = self._writeState.macContext.copy()
# append MAC
macBytes = self.calculateMAC(mac, seqnumBytes, contentType, buf)
buf += macBytes
return buf
def _encryptThenSeal(self, buf, contentType):
"""Encrypt with AEAD cipher"""
#Assemble the authenticated data.
seqNumBytes = self._writeState.getSeqNumBytes()
authData = seqNumBytes + bytearray([contentType,
self.version[0],
self.version[1],
len(buf)//256,
len(buf)%256])
#The nonce is always the fixed nonce and the sequence number.
nonce = self._writeState.fixedNonce + seqNumBytes
assert len(nonce) == self._writeState.encContext.nonceLength
buf = self._writeState.encContext.seal(nonce, buf, authData)
        #AES-GCM and Speck-GCM have an explicit variable nonce.
        if "aes" in self._writeState.encContext.name or \
                "speck" in self._writeState.encContext.name:
buf = seqNumBytes + buf
return buf
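        # For the AES-GCM and Speck-GCM suites this yields the usual TLS 1.2
        # AEAD record body: an 8-byte explicit nonce (the sequence number)
        # followed by the ciphertext and authentication tag, while the full
        # nonce passed to seal() is fixed_iv(4) || seq_num(8).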
def sendRecord(self, msg):
"""
Encrypt, MAC and send arbitrary message as-is through socket.
Note that if the message was not fragmented to below 2**14 bytes
it will be rejected by the other connection side.
@param msg: TLS message to send
@type msg: ApplicationData, HandshakeMessage, etc.
"""
data = msg.write()
contentType = msg.contentType
if self._writeState and \
self._writeState.encContext and \
self._writeState.encContext.isAEAD:
data = self._encryptThenSeal(data, contentType)
elif self.encryptThenMAC:
data = self._encryptThenMAC(data, contentType)
else:
data = self._macThenEncrypt(data, contentType)
encryptedMessage = Message(contentType, data)
for result in self._recordSocket.send(encryptedMessage):
yield result
#
# receiving messages
#
def _decryptStreamThenMAC(self, recordType, data):
"""Decrypt a stream cipher and check MAC"""
if self._readState.encContext:
assert self.version in ((3, 0), (3, 1), (3, 2), (3, 3))
data = self._readState.encContext.decrypt(data)
if self._readState.macContext:
#Check MAC
macGood = True
macLength = self._readState.macContext.digest_size
endLength = macLength
if endLength > len(data):
macGood = False
else:
#Read MAC
startIndex = len(data) - endLength
endIndex = startIndex + macLength
checkBytes = data[startIndex : endIndex]
#Calculate MAC
seqnumBytes = self._readState.getSeqNumBytes()
data = data[:-endLength]
mac = self._readState.macContext.copy()
macBytes = self.calculateMAC(mac, seqnumBytes, recordType,
data)
#Compare MACs
if not ct_compare_digest(macBytes, checkBytes):
macGood = False
if not macGood:
raise TLSBadRecordMAC()
return data
def _decryptThenMAC(self, recordType, data):
"""Decrypt data, check padding and MAC"""
if self._readState.encContext:
assert self.version in ((3, 0), (3, 1), (3, 2), (3, 3))
assert self._readState.encContext.isBlockCipher
assert self._readState.macContext
#
# decrypt the record
#
blockLength = self._readState.encContext.block_size
if len(data) % blockLength != 0:
raise TLSDecryptionFailed()
data = self._readState.encContext.decrypt(data)
if self.version >= (3, 2): #For TLS 1.1, remove explicit IV
data = data[self._readState.encContext.block_size : ]
#
# check padding and MAC
#
seqnumBytes = self._readState.getSeqNumBytes()
if not ct_check_cbc_mac_and_pad(data,
self._readState.macContext,
seqnumBytes,
recordType,
self.version):
raise TLSBadRecordMAC()
#
# strip padding and MAC
#
endLength = data[-1] + 1 + self._readState.macContext.digest_size
data = data[:-endLength]
return data
def _macThenDecrypt(self, recordType, buf):
"""
Check MAC of data, then decrypt and remove padding
@raise TLSBadRecordMAC: when the mac value is invalid
@raise TLSDecryptionFailed: when the data to decrypt has invalid size
"""
if self._readState.macContext:
macLength = self._readState.macContext.digest_size
if len(buf) < macLength:
raise TLSBadRecordMAC("Truncated data")
checkBytes = buf[-macLength:]
buf = buf[:-macLength]
seqnumBytes = self._readState.getSeqNumBytes()
mac = self._readState.macContext.copy()
macBytes = self.calculateMAC(mac, seqnumBytes, recordType, buf)
if not ct_compare_digest(macBytes, checkBytes):
raise TLSBadRecordMAC("MAC mismatch")
if self._readState.encContext:
blockLength = self._readState.encContext.block_size
if len(buf) % blockLength != 0:
raise TLSDecryptionFailed("data length not multiple of "\
"block size")
buf = self._readState.encContext.decrypt(buf)
# remove explicit IV
if self.version >= (3, 2):
buf = buf[blockLength:]
if len(buf) == 0:
raise TLSBadRecordMAC("No data left after IV removal")
# check padding
paddingLength = buf[-1]
if paddingLength + 1 > len(buf):
raise TLSBadRecordMAC("Invalid padding length")
paddingGood = True
totalPaddingLength = paddingLength+1
if self.version != (3, 0):
paddingBytes = buf[-totalPaddingLength:-1]
for byte in paddingBytes:
if byte != paddingLength:
paddingGood = False
if not paddingGood:
raise TLSBadRecordMAC("Invalid padding byte values")
# remove padding
buf = buf[:-totalPaddingLength]
return buf
def _decryptAndUnseal(self, recordType, buf):
"""Decrypt AEAD encrypted data"""
seqnumBytes = self._readState.getSeqNumBytes()
        #AES-GCM and Speck-GCM have an explicit variable nonce.
        if "aes" in self._readState.encContext.name or \
                "speck" in self._readState.encContext.name:
explicitNonceLength = 8
if explicitNonceLength > len(buf):
#Publicly invalid.
raise TLSBadRecordMAC("Truncated nonce")
nonce = self._readState.fixedNonce + buf[:explicitNonceLength]
buf = buf[8:]
else:
nonce = self._readState.fixedNonce + seqnumBytes
if self._readState.encContext.tagLength > len(buf):
#Publicly invalid.
raise TLSBadRecordMAC("Truncated tag")
plaintextLen = len(buf) - self._readState.encContext.tagLength
authData = seqnumBytes + bytearray([recordType, self.version[0],
self.version[1],
plaintextLen//256,
plaintextLen%256])
buf = self._readState.encContext.open(nonce, buf, authData)
if buf is None:
raise TLSBadRecordMAC("Invalid tag, decryption failure")
return buf
def recvRecord(self):
"""
Read, decrypt and check integrity of a single record
@rtype: tuple
@return: message header and decrypted message payload
@raise TLSDecryptionFailed: when decryption of data failed
@raise TLSBadRecordMAC: when record has bad MAC or padding
@raise socket.error: when reading from socket was unsuccessful
"""
result = None
for result in self._recordSocket.recv():
if result in (0, 1):
yield result
else: break
assert result is not None
(header, data) = result
if self._readState and \
self._readState.encContext and \
self._readState.encContext.isAEAD:
data = self._decryptAndUnseal(header.type, data)
elif self.encryptThenMAC:
data = self._macThenDecrypt(header.type, data)
elif self._readState and \
self._readState.encContext and \
self._readState.encContext.isBlockCipher:
data = self._decryptThenMAC(header.type, data)
else:
data = self._decryptStreamThenMAC(header.type, data)
yield (header, Parser(data))
#
# cryptography state methods
#
def changeWriteState(self):
"""
Change the cipher state to the pending one for write operations.
This should be done only once after a call to L{calcPendingStates} was
performed and directly after sending a L{ChangeCipherSpec} message.
"""
self._writeState = self._pendingWriteState
self._pendingWriteState = ConnectionState()
def changeReadState(self):
"""
Change the cipher state to the pending one for read operations.
This should be done only once after a call to L{calcPendingStates} was
performed and directly after receiving a L{ChangeCipherSpec} message.
"""
self._readState = self._pendingReadState
self._pendingReadState = ConnectionState()
@staticmethod
def _getCipherSettings(cipherSuite):
"""Get the settings for cipher suite used"""
if cipherSuite in CipherSuite.aes256GcmSuites:
keyLength = 32
ivLength = 4
createCipherFunc = createAESGCM
elif cipherSuite in CipherSuite.aes128GcmSuites:
keyLength = 16
ivLength = 4
createCipherFunc = createAESGCM
elif cipherSuite in CipherSuite.chacha20Suites:
keyLength = 32
ivLength = 4
createCipherFunc = createCHACHA20
elif cipherSuite in CipherSuite.aes128Suites:
keyLength = 16
ivLength = 16
createCipherFunc = createAES
elif cipherSuite in CipherSuite.aes256Suites:
keyLength = 32
ivLength = 16
createCipherFunc = createAES
elif cipherSuite in CipherSuite.rc4Suites:
keyLength = 16
ivLength = 0
createCipherFunc = createRC4
elif cipherSuite in CipherSuite.tripleDESSuites:
keyLength = 24
ivLength = 8
createCipherFunc = createTripleDES
elif cipherSuite in CipherSuite.nullSuites:
keyLength = 0
ivLength = 0
createCipherFunc = None
elif cipherSuite in CipherSuite.speckSuites:
keyLength = 16
ivLength = 16
createCipherFunc = createSPECK
elif cipherSuite in CipherSuite.speck128GcmSuites:
keyLength = 16
ivLength = 4
createCipherFunc = createSPECK128GCM
elif cipherSuite in CipherSuite.speck192GcmSuites:
keyLength = 24
ivLength = 4
createCipherFunc = createSPECK192GCM
else:
raise AssertionError()
return (keyLength, ivLength, createCipherFunc)
@staticmethod
def _getMacSettings(cipherSuite):
"""Get settings for HMAC used"""
if cipherSuite in CipherSuite.aeadSuites:
macLength = 0
digestmod = None
elif cipherSuite in CipherSuite.shaSuites:
macLength = 20
digestmod = hashlib.sha1
elif cipherSuite in CipherSuite.sha256Suites:
macLength = 32
digestmod = hashlib.sha256
elif cipherSuite in CipherSuite.md5Suites:
macLength = 16
digestmod = hashlib.md5
else:
raise AssertionError()
return macLength, digestmod
@staticmethod
def _getHMACMethod(version):
"""Get the HMAC method"""
assert version in ((3, 0), (3, 1), (3, 2), (3, 3))
if version == (3, 0):
createMACFunc = createMAC_SSL
elif version in ((3, 1), (3, 2), (3, 3)):
createMACFunc = createHMAC
return createMACFunc
def _calcKeyBlock(self, cipherSuite, masterSecret, clientRandom,
serverRandom, outputLength):
"""Calculate the overall key to slice up"""
if self.version == (3, 0):
keyBlock = PRF_SSL(masterSecret,
serverRandom + clientRandom,
outputLength)
elif self.version in ((3, 1), (3, 2)):
keyBlock = PRF(masterSecret,
b"key expansion",
serverRandom + clientRandom,
outputLength)
elif self.version == (3, 3):
if cipherSuite in CipherSuite.sha384PrfSuites:
keyBlock = PRF_1_2_SHA384(masterSecret,
b"key expansion",
serverRandom + clientRandom,
outputLength)
else:
keyBlock = PRF_1_2(masterSecret,
b"key expansion",
serverRandom + clientRandom,
outputLength)
else:
raise AssertionError()
return keyBlock
def calcPendingStates(self, cipherSuite, masterSecret, clientRandom,
serverRandom, implementations):
"""Create pending states for encryption and decryption."""
keyLength, ivLength, createCipherFunc = \
self._getCipherSettings(cipherSuite)
macLength, digestmod = self._getMacSettings(cipherSuite)
if not digestmod:
createMACFunc = None
else:
createMACFunc = self._getHMACMethod(self.version)
outputLength = (macLength*2) + (keyLength*2) + (ivLength*2)
#Calculate Keying Material from Master Secret
keyBlock = self._calcKeyBlock(cipherSuite, masterSecret, clientRandom,
serverRandom, outputLength)
#Slice up Keying Material
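        #Key block layout (RFC 5246, section 6.3):
        #  client_MAC | server_MAC | client_key | server_key | client_IV | server_IV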
clientPendingState = ConnectionState()
serverPendingState = ConnectionState()
parser = Parser(keyBlock)
clientMACBlock = parser.getFixBytes(macLength)
serverMACBlock = parser.getFixBytes(macLength)
clientKeyBlock = parser.getFixBytes(keyLength)
serverKeyBlock = parser.getFixBytes(keyLength)
clientIVBlock = parser.getFixBytes(ivLength)
serverIVBlock = parser.getFixBytes(ivLength)
if digestmod:
# Legacy cipher
clientPendingState.macContext = createMACFunc(
compatHMAC(clientMACBlock), digestmod=digestmod)
serverPendingState.macContext = createMACFunc(
compatHMAC(serverMACBlock), digestmod=digestmod)
if createCipherFunc is not None:
clientPendingState.encContext = \
createCipherFunc(clientKeyBlock,
clientIVBlock,
implementations)
serverPendingState.encContext = \
createCipherFunc(serverKeyBlock,
serverIVBlock,
implementations)
else:
# AEAD
clientPendingState.macContext = None
serverPendingState.macContext = None
clientPendingState.encContext = createCipherFunc(clientKeyBlock,
implementations)
serverPendingState.encContext = createCipherFunc(serverKeyBlock,
implementations)
clientPendingState.fixedNonce = clientIVBlock
serverPendingState.fixedNonce = serverIVBlock
#Assign new connection states to pending states
if self.client:
self._pendingWriteState = clientPendingState
self._pendingReadState = serverPendingState
else:
self._pendingWriteState = serverPendingState
self._pendingReadState = clientPendingState
if self.version >= (3, 2) and ivLength:
#Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC
#residue to create the IV for each sent block)
self.fixedIVBlock = getRandomBytes(ivLength)
| ioef/tlslite-ng | tlslite/recordlayer.py | Python | lgpl-2.1 | 29,445 | 0.001834 |
from django.contrib import admin
from django.contrib.admin.filters import RelatedFieldListFilter
from .models import ClientLog, Client, Feedback
def client_id(obj):
return obj.client.externid
class AliveClientsRelatedFieldListFilter(RelatedFieldListFilter):
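    """Limit the related-client choices offered by the admin list filter to alive clients."""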
def __init__(self, field, request, *args, **kwargs):
field.rel.limit_choices_to = {'status': Client.STATUS_ALIVE }
super(AliveClientsRelatedFieldListFilter, self).__init__(field, request, *args, **kwargs)
class ClientLogAdmin(admin.ModelAdmin):
list_display = ('client', 'tag', 'log', 'updated')
list_filter = ('client', )
ordering = ('-updated',)
search_fields = ("client__ip", "client__externid", "log", "tag",)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "client":
kwargs["queryset"] = Client.objects.filter(status = Client.STATUS_ALIVE)
return super(ClientLogAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
admin.site.register(ClientLog, ClientLogAdmin)
class ClientAdmin(admin.ModelAdmin):
list_display = ("status", "externid", "ip", "updated", "created", "useragent")
list_filter = ("status", "useragent", "failures", "complets")
ordering = ("status", "-updated", "-created", )
search_fields = ("ip", "useragent", "externid", )
admin.site.register(Client, ClientAdmin)
class FeedbackAdmin(admin.ModelAdmin):
list_display = ("id", "useremail", "ip", "created")
ordering = ("-id",)
admin.site.register(Feedback, FeedbackAdmin)
| ddalex/p9 | sign/admin.py | Python | mit | 1,555 | 0.009646 |
from sqlalchemy import Column, String, BigInteger
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
import time
BaseModel = declarative_base()
class Video(BaseModel):
__tablename__ = 'video'
id = Column(BigInteger, primary_key=True, autoincrement=True)
name = Column(String(80), nullable=False)
image = Column(String(200))
desc = Column(String(100))
play_num = Column(String(50))
update_num = Column(String(50))
link = Column(String(200))
score = Column(String(10))
    platform = Column(String(10), nullable=False)  # source platform
    video_category = Column(String(10), nullable=False)  # top-level category: TV series, movie, variety show
    series_region = Column(String(20))  # TV series region: all trending, mainland, web drama, Korean, American
    movie_region = Column(String(20))  # movie region: all trending, in theaters, mainland, Hong Kong, USA
    veriety_region = Column(String(20))  # variety show category: trending
created_at = Column(BigInteger, default=time.time)
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/videoSpider?charset=utf8mb4')
BaseModel.metadata.create_all(engine)
"""
data = {
'name' : name.get_text(),
'image' : 'http:' + image.get('r-lazyload'),
'desc' : ' '.join(desc.get_text().strip().split()),
'play_number' : num.get_text(),
'update_status' : status,
'link' : link.get('href')
}
# Video category: TV series, movies, variety shows
Video_large_type = Enum('Video_large_type', ('Series', 'Movies', 'Variety'))
# TV series region: all trending, mainland, web dramas, Korean dramas, American dramas
Series_region = Enum('Series_region', ('All', 'Local', 'Net', 'SouthKorea', 'EuropeAndAmerica'))
# Movie region: all trending, in theaters, mainland, Hong Kong, USA
Movie_region = Enum('Movie_region', ('All', 'Cinemas', 'Local', 'HongKong', 'America'))
# Variety show category: all trending
Variety_type = Enum('Variety_type', ('Hot'))
"""
class RequestModel(object):
def __init__(self, source_url, platform, video_category, *, series_region=None, movie_region=None, veriety_region=None):
self.source_url = source_url
self.platform = platform
self.video_category = video_category
self.series_region = series_region
self.movie_region = movie_region
self.veriety_region = veriety_region
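if __name__ == '__main__':
    # Minimal usage sketch: persist one record through the engine defined above
    # (the field values are illustrative only, not real crawl output).
    from sqlalchemy.orm import sessionmaker
    Session = sessionmaker(bind=engine)
    session = Session()
    session.add(Video(name='example', platform='tencent', video_category='Series'))
    session.commit()
    session.close()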
| AcerFeng/videoSpider | spider/models.py | Python | apache-2.0 | 2,458 | 0.003211 |
#!/usr/bin/env python
#coding=utf-8
import sys
sys.path.append("..")
import urllib
import myjson
from datetime import datetime, date, timedelta
import time
from define import *
from data_interface.stock_dataset import stock_dataset
class turtle(object):
"""
turtle model
"""
def get_mean(self, data, end_index, k):
if end_index < k-1:
return 0
else:
sum = 0
for num in data[end_index-k+1 : end_index+1]:
sum += num
return float(sum / (k * 1.0))
def get_max(self, data, end_index, k):
if end_index < k:
return 0
else:
tmp = data[end_index-k : end_index]
max = tmp[0]
for num in tmp:
if num > max:
max = num
return max
def get_max_date(self, dataset, end_index, k):
if end_index < k:
return 0
else:
tmp = dataset.data[end_index-k : end_index]
max = tmp[0].close_price
date = tmp[0].date
for num in tmp:
if num.close_price > max:
max = num.close_price
date = num.date
return (max, date)
def get_min(self, data, end_index, k):
if end_index < k:
return 0
else:
tmp = data[end_index-k : end_index]
min = tmp[0]
for num in tmp:
if num < min:
min = num
return min
def get_min_date(self, dataset, end_index, k):
if end_index < k:
return 0
else:
tmp = dataset.data[end_index-k : end_index]
min = tmp[0].close_price
date = tmp[0].date
for num in tmp:
if num.close_price < min:
min = num.close_price
date = num.date
return (min, date)
def get_trading_plan(self, dataset, date_str):
"""
        get the trading plan of the 28 lundong model; returns an empty map when no decision can be made
choise:
-3: not enough data, do not trade
-2: date_str error
-1: unknown problem, do not trade
0: sell all
1: sell half
        2: close_price condition not satisfied, do not trade
        3: mean line condition not satisfied, do not trade
4: buy
        Strategy:
        1. I only use this trading model on hourly charts.
        2. It is generally only used on strongly trending broad-market indices, not on individual stocks.
        3. First plot the 10-hour and 100-hour moving averages, then the 50-hour highest/lowest curves, plus the 25-hour lowest curve (if you use TDX you can copy the indicator source code below; remember to set it as a chart overlay).
        4. Entry: go long only when the 10-hour MA is above the 100-hour MA and the hourly close breaks above the high of the previous 50 hours.
        5. Exit: close half the position when the hourly close falls below the low of the previous 25 hours, and close the rest when it falls below the low of the previous 50 hours.
"""
result = {}
result["choise"] = -1
# Get stock data by date_str. If not exist, return.
data = dataset.get_data(date_str)
if data == None:
result["choise"] = -2
return result
data_index = dataset.get_data_index(date_str)
close_prices = [ item.close_price for item in dataset.data ]
result["close_price"] = close_prices[data_index]
result["10_mean"] = self.get_mean(close_prices, data_index, 10)
result["100_mean"] = self.get_mean(close_prices, data_index, 100)
result["50_max"] = self.get_max(close_prices, data_index, 50)
result["50_min"] = self.get_min(close_prices, data_index, 50)
result["25_min"] = self.get_min(close_prices, data_index, 25)
if result["10_mean"] == 0 or result["100_mean"] == 0 or result["50_max"] == 0 or result["50_min"] == 0 or result["25_min"] == 0:
result["choise"] = -3
elif result["close_price"] < result["50_min"]:
result["choise"] = 0
elif result["close_price"] < result["25_min"]:
result["choise"] = 1
elif result["close_price"] > result["50_max"]:
if result["10_mean"] < result["100_mean"]:
result["choise"] = 3
else:
result["choise"] = 4
else:
result["choise"] = 2
return result
def get_trading_plan3(self, dataset, date_str):
"""
        Strategy (https://www.jisilu.cn/question/66127):
        1. Buy: the close exceeds the intraday high of the last 60 trading days (the intraday high, not the highest close).
        2. Sell: the close falls below the intraday low of the last 38 trading days.
        3. Otherwise keep the current position.
choise:
-3: not enough data, do not trade
-2: date_str error
-1: unknown problem, do not trade
0: sell all
1: sell half
        2: close_price condition not satisfied, do not trade
        3: mean line condition not satisfied, do not trade
4: buy
"""
# Get stock data by date_str. If not exist, return.
result = {}
result["file_end_date"] = dataset.data[-2].date #dataset.data[-1].date的数据为读取文件后加入的今天的数据
result["start_buy_date"] = dataset.data[0 - BUY_DAYS].date
result["start_sell_date"] = dataset.data[0 - SELL_DAYS].date
result["date"] = date_str
result["BUY_DAYS"] = str(BUY_DAYS)
result["SELL_DAYS"] = str(SELL_DAYS)
result["choise"] = -1
result["info"] = "unknown problem, do not trade"
data = dataset.get_data(date_str)
if data == None:
result["choise"] = -2
result["info"] = "date_str error"
return result
data_index = dataset.get_data_index(date_str)
result["close_price"] = dataset.data[data_index].close_price
result["max_date"] = self.get_max_date(dataset, data_index, BUY_DAYS)
result["min_date"] = self.get_min_date(dataset, data_index, SELL_DAYS)
result["BUY_DAYS"] = str(BUY_DAYS)
result["SELL_DAYS"] = str(SELL_DAYS)
if result["close_price"] > result["max_date"][0]:
result["choise"] = 4
result["info"] = "buy"
elif result["close_price"] < result["min_date"][0]:
result["choise"] = 0
result["info"] = "sell all"
elif result["close_price"] < result["max_date"][0] and result["close_price"] > result["min_date"][0]:
result["choise"] = 2
result["info"] = "hold on"
return result
if __name__ == '__main__':
pass
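    # Usage sketch (path and date are hypothetical; stock_dataset is provided by
    # data_interface.stock_dataset and its constructor is not shown here):
    #   dataset = stock_dataset(...)
    #   plan = turtle().get_trading_plan3(dataset, "2016-01-04")
    #   print plan["info"]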
| icemoon1987/stock_monitor | model/turtle.py | Python | gpl-2.0 | 6,761 | 0.00419 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import PythonLibrary, PythonTests
from pants.engine.target import BoolField
class SkipIsortField(BoolField):
alias = "skip_isort"
default = False
help = "If true, don't run isort on this target's code."
def rules():
return [
PythonLibrary.register_plugin_field(SkipIsortField),
PythonTests.register_plugin_field(SkipIsortField),
]
| benjyw/pants | src/python/pants/backend/python/lint/isort/skip_field.py | Python | apache-2.0 | 542 | 0 |
import requests
import csv
from configparser import ConfigParser
config = ConfigParser()
config.read("config.cfg")
token = config.get("auth", "token")
domain = config.get("instance", "domain")
headers = {"Authorization" : "Bearer %s" % token}
source_course_id = 311693
csv_file = ""
payload = {'migration_type': 'course_copy_importer', 'settings[source_course_id]': source_course_id}
with open(csv_file, 'rb') as courses:
coursesreader = csv.reader(courses)
    for row in coursesreader:
        # each row from csv.reader is a list; the SIS course id is expected in
        # the first column
        course = row[0]
        uri = domain + "/api/v1/courses/sis_course_id:%s/content_migrations" % course
        r = requests.post(uri, headers=headers, data=payload)
        print "%s %s" % (r.status_code, course)
from django_nose.tools import assert_equal
from pontoon.base.tests import TestCase
from pontoon.base.utils import NewlineEscapePlaceable, mark_placeables
class PlaceablesTests(TestCase):
def test_newline_escape_placeable(self):
"""Test detecting newline escape sequences"""
placeable = NewlineEscapePlaceable
assert_equal(placeable.parse(u'A string\\n')[1], placeable([u'\\n']))
assert_equal(placeable.parse(u'\\nA string')[0], placeable([u'\\n']))
assert_equal(placeable.parse(u'A\\nstring')[1], placeable([u'\\n']))
assert_equal(placeable.parse(u'A string'), None)
assert_equal(placeable.parse(u'A\nstring'), None)
def test_mark_newline_escape_placeables(self):
"""Test detecting newline escape sequences"""
assert_equal(
mark_placeables(u'A string\\n'),
u'A string<mark class="placeable" title="Escaped newline">\\n</mark>'
)
assert_equal(
mark_placeables(u'\\nA string'),
u'<mark class="placeable" title="Escaped newline">\\n</mark>A string'
)
assert_equal(
mark_placeables(u'A\\nstring'),
u'A<mark class="placeable" title="Escaped newline">\\n</mark>string'
)
assert_equal(
mark_placeables(u'A string'),
u'A string'
)
assert_equal(
mark_placeables(u'A\nstring'),
u'A\nstring'
)
| Osmose/pontoon | pontoon/base/tests/test_placeables.py | Python | bsd-3-clause | 1,460 | 0.002055 |
# Copyright (C) 2009 - TODAY Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
class AccountTax(models.Model):
_inherit = 'account.tax'
fiscal_tax_ids = fields.Many2many(
comodel_name='l10n_br_fiscal.tax',
relation='l10n_br_fiscal_account_tax_rel',
        column1='account_tax_id',
        column2='fiscal_tax_id',
string='Fiscal Taxes',
)
@api.multi
def compute_all(
self,
price_unit,
currency=None,
quantity=None,
product=None,
partner=None,
fiscal_taxes=None,
operation_line=False,
ncm=None,
nbm=None,
cest=None,
discount_value=None,
insurance_value=None,
other_costs_value=None,
freight_value=None,
fiscal_price=None,
fiscal_quantity=None,
uot=None,
icmssn_range=None
):
""" Returns all information required to apply taxes
        (in self + their children in case of a tax group).
        We consider the sequence of the parent for groups of taxes.
Eg. considering letters as taxes and alphabetic order
as sequence :
[G, B([A, D, F]), E, C] will be computed as [A, D, F, C, E, G]
RETURN: {
'total_excluded': 0.0, # Total without taxes
'total_included': 0.0, # Total with taxes
'taxes': [{ # One dict for each tax in self
# and their children
'id': int,
'name': str,
'amount': float,
'sequence': int,
'account_id': int,
'refund_account_id': int,
'analytic': boolean,
}]
} """
taxes_results = super().compute_all(
price_unit, currency, quantity, product, partner)
if not fiscal_taxes:
fiscal_taxes = self.env['l10n_br_fiscal.tax']
product = product or self.env['product.product']
# FIXME Should get company from document?
fiscal_taxes_results = fiscal_taxes.compute_taxes(
company=self.env.user.company_id,
partner=partner,
product=product,
price_unit=price_unit,
quantity=quantity,
uom_id=product.uom_id,
fiscal_price=fiscal_price or price_unit,
fiscal_quantity=fiscal_quantity or quantity,
uot_id=uot or product.uot_id,
ncm=ncm or product.ncm_id,
nbm=nbm or product.nbm_id,
cest=cest or product.cest_id,
discount_value=discount_value,
insurance_value=insurance_value,
other_costs_value=other_costs_value,
freight_value=freight_value,
operation_line=operation_line,
icmssn_range=icmssn_range)
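        # Map each account tax to the tax domain of its fiscal tax group, so the
        # fiscal results computed above can be matched back to the account taxes.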
account_taxes_by_domain = {}
for tax in self:
tax_domain = tax.tax_group_id.fiscal_tax_group_id.tax_domain
account_taxes_by_domain.update({tax.id: tax_domain})
for account_tax in taxes_results['taxes']:
fiscal_tax = fiscal_taxes_results.get(
account_taxes_by_domain.get(account_tax.get('id'))
)
if fiscal_tax:
tax = self.filtered(lambda t: t.id == account_tax.get('id'))
if not fiscal_tax.get('tax_include') and not tax.deductible:
taxes_results['total_included'] += fiscal_tax.get(
'tax_value')
account_tax.update({
'id': account_tax.get('id'),
'name': '{0} ({1})'.format(
account_tax.get('name'),
fiscal_tax.get('name')
),
'amount': fiscal_tax.get('tax_value'),
'base': fiscal_tax.get('base'),
'tax_include': fiscal_tax.get('tax_include'),
})
if tax.deductible:
account_tax.update({
'amount': fiscal_tax.get('tax_value', 0.0) * -1,
})
return taxes_results
| akretion/l10n-brazil | l10n_br_account/models/account_tax.py | Python | agpl-3.0 | 4,286 | 0 |
import struct
import hashlib
magic_number = 0xD9B4BEF9
block_prefix_format = 'I32s32sIII'
def read_uint1(stream):
return ord(stream.read(1))
def read_uint2(stream):
return struct.unpack('H', stream.read(2))[0]
def read_uint4(stream):
return struct.unpack('I', stream.read(4))[0]
def read_uint8(stream):
return struct.unpack('Q', stream.read(8))[0]
def read_hash32(stream):
return stream.read(32)[::-1] #reverse it since we are little endian
def read_merkle32(stream):
return stream.read(32)[::-1] #reverse it
def read_time(stream):
utctime = read_uint4(stream)
#Todo: convert to datetime object
return utctime
def read_varint(stream):
ret = read_uint1(stream)
if ret < 0xfd: #one byte int
return ret
    if ret == 0xfd: #uint16_t in next two bytes
return read_uint2(stream)
if ret == 0xfe: #uint32_t in next 4 bytes
return read_uint4(stream)
    if ret == 0xff: #uint64_t in next 8 bytes
return read_uint8(stream)
return -1
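# Bitcoin variable-length integer ("varint") encoding handled by read_varint above:
#   first byte < 0xfd -> the value itself
#   0xfd -> uint16 in the next 2 bytes (LE), e.g. fd 01 02 decodes to 0x0201
#   0xfe -> uint32 in the next 4 bytes (LE)
#   0xff -> uint64 in the next 8 bytes (LE)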
def get_hexstring(bytebuffer):
#return ''.join(('%x'%ord(a)) for a in bytebuffer)
return bytebuffer.encode('hex')
def find_magic_number(stream):
'''read byte stream until a magic number is found, returns None if end of stream is reached'''
while True:
byte = stream.read(1)
if not byte: return None # EOF
if (ord(byte) == 0xf9):
stream.seek(-1,1) # move back 1 byte and try to read all 4 bytes
magic = read_uint4(stream)
if (magic == 0xd9b4bef9):
return magic
class Tx_Input(object):
def __init__(self):
super(Tx_Input, self).__init__()
def parse(self, stream):
self.prevhash = read_hash32(stream)
self.prevtx_out_idx = read_uint4(stream)
self.txin_script_len = read_varint(stream)
# TODO in later modules we will convert scriptSig to its own class
self.scriptSig = stream.read(self.txin_script_len)
self.sequence_no = read_uint4(stream)
def updateTxDict(self,txDict):
'''txDict holds arrays of Tx_Input values'''
txDict['txIn_prevhash'] = txDict.get('txIn_prevhash', [])
txDict['txIn_prevhash'].append(get_hexstring(self.prevhash))
txDict['txIn_prevtx_out_idx'] = txDict.get('txIn_prevtx_out_idx', [])
txDict['txIn_prevtx_out_idx'].append(self.prevtx_out_idx)
txDict['txIn_txin_script_len'] = txDict.get('txIn_txin_script_len', [])
txDict['txIn_txin_script_len'] .append(self.txin_script_len)
txDict['txIn_scriptSig'] = txDict.get('txIn_scriptSig', [])
txDict['txIn_scriptSig'].append(get_hexstring(self.scriptSig))
txDict['txIn_sequence_no'] = txDict.get('txIn_sequence_no', [])
txDict['txIn_sequence_no'].append(self.sequence_no)
return txDict
def __str__(self):
return 'PrevHash: %s \nPrev Tx out index: %d \nTxin Script Len: %d \nscriptSig: %s \nSequence: %8x' % \
(get_hexstring(self.prevhash),
self.prevtx_out_idx,
self.txin_script_len,
get_hexstring(self.scriptSig),
self.sequence_no)
def __repr__(self):
        return self.__str__()
class Tx_Output(object):
def __init__(self):
super(Tx_Output, self).__init__()
pass
def parse(self, stream):
self.value = read_uint8(stream)
self.txout_script_len = read_varint(stream)
self.scriptPubKey = stream.read(self.txout_script_len)
def updateTxDict(self,txDict):
'''txDict holds arrays of Tx_Output values'''
txDict['txOut_value'] = txDict.get('txOut_value', [])
txDict['txOut_value'].append(self.value)
txDict['txOut_script_len'] = txDict.get('txOut_script_len', [])
txDict['txOut_script_len'].append(self.txout_script_len)
txDict['txOut_scriptPubKey'] = txDict.get('txOut_scriptPubKey', [])
txDict['txOut_scriptPubKey'].append(get_hexstring(self.scriptPubKey))
return txDict
def __str__(self):
return 'Value (satoshis): %d (%f btc)\nTxout Script Len: %d\nscriptPubKey: %s' %\
(self.value, (1.0*self.value)/100000000.00,
self.txout_script_len,
get_hexstring(self.scriptPubKey))
def __repr__(self):
        return self.__str__()
class Transaction(object):
"""Holds one Transaction as part of a block"""
def __init__(self):
super(Transaction, self).__init__()
self.version = None
self.in_cnt = None
self.inputs = None
self.out_cnt = None
self.outputs = None
self.lock_time = None
def parse(self,stream):
#TODO: error checking
self.version = read_uint4(stream)
self.in_cnt = read_varint(stream)
self.inputs = []
if self.in_cnt > 0:
for i in range(0, self.in_cnt):
input = Tx_Input()
input.parse(stream)
self.inputs.append(input)
self.out_cnt = read_varint(stream)
self.outputs = []
if self.out_cnt > 0:
for i in range(0, self.out_cnt):
output = Tx_Output()
output.parse(stream)
self.outputs.append(output)
self.lock_time = read_uint4(stream)
def updateTxDict(self,txDict):
txDict['tx_version'] = self.version
txDict['in_cnt'] = self.in_cnt
txDict['out_cnt'] = self.out_cnt
txDict['lock_time'] = self.lock_time
for i in range(self.in_cnt):
txDict = self.inputs[i].updateTxDict(txDict)
for i in range(self.out_cnt):
txDict = self.outputs[i].updateTxDict(txDict)
return txDict
def __str__(self):
s = 'Version: %d\nInputs count: %d\n---Inputs---\n%s\nOutputs count: %d\n---Outputs---\n%s\nLock_time:%8x' % (self.version, self.in_cnt,
'\n'.join(str(i) for i in self.inputs),
self.out_cnt,
'\n'.join(str(o) for o in self.outputs),
self.lock_time)
return s
class BlockHeader(object):
"""BlockHeader represents the header of the block"""
def __init__(self):
super( BlockHeader, self).__init__()
self.version = None
self.prevhash = None
self.merklehash = None
self.time = None
self.bits = None
self.nonce = None
self.blockprefix = None
self.blockhash = None
def parse(self, stream):
#TODO: error checking
self.version = read_uint4(stream)
self.prevhash = read_hash32(stream)
self.merklehash = read_merkle32(stream)
self.time = read_time(stream)
self.bits = read_uint4(stream)
self.nonce = read_uint4(stream)
# construct the prefix and hash
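        # The block hash is the double SHA-256 of the 80-byte serialized header
        # (version, prev hash, merkle root, time, bits, nonce), byte-reversed to
        # the conventional big-endian display order.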
self.blockprefix = ( struct.pack("<L", self.version) + self.prevhash[::-1] + \
self.merklehash[::-1] + struct.pack("<LLL", self.time, self.bits, self.nonce))
self.blockhash = hashlib.sha256(hashlib.sha256(self.blockprefix).digest()).digest()[::-1]
def updateTxDict(self, txDict):
txDict['version'] = self.version
txDict['prevhash'] = get_hexstring(self.prevhash)
txDict['merklehash'] = get_hexstring(self.merklehash)
txDict['time'] = self.time
txDict['bits'] = self.bits
txDict['nonce'] = self.nonce
txDict['blockprefix'] = get_hexstring(self.blockprefix)
txDict['blockhash'] = get_hexstring(self.blockhash)
return txDict
def __str__(self):
return "\n\t\tVersion: %d \n\t\tPreviousHash: %s \n\t\tMerkle: %s \n\t\tTime: %8x \n\t\tBits: %8x \n\t\tNonce: %8x \n\t\tPrefix: %s \n\t\tBlockHash: %s \n\t\t" % (self.version, \
get_hexstring(self.prevhash), \
get_hexstring(self.merklehash), \
self.time, \
self.bits, \
self.nonce, \
get_hexstring(self.blockprefix), \
get_hexstring(self.blockhash))
def __repr__(self):
        return self.__str__()
class Block(object):
"""A block to be parsed from file"""
def __init__(self):
self.magic_no = -1
self.blocksize = 0
self.blockheader = None
self.transaction_cnt = 0
self.transactions = None
def parseBlock(self, bf):
self.magic_no = find_magic_number(bf)
if self.magic_no != None:
self.blocksize = read_uint4(bf)
self.blockheader = BlockHeader()
self.blockheader.parse(bf)
self.transaction_cnt = read_varint(bf)
self.transactions = []
#print 'List of transactions'
for i in range(0, self.transaction_cnt):
tx = Transaction()
tx.parse(bf)
self.transactions.append(tx)
def printBlock(self):
print 'magic_no:\t0x%8x' % self.magic_no
print 'size: \t%u bytes' % self.blocksize
print 'Block header:\t%s' % self.blockheader
print 'Transactions: \t%d' % self.transaction_cnt
for i in range(0, self.transaction_cnt):
print '='*50
print ' TX NUMBER: %d' % (i+1)
print '='*50
print self.transactions[i]
print '\n'
def updateTxDict(self,idx,txDict):
'''Return data for a specific transaction as a dict'''
'''Each transaction record will also contain all information about the block as well'''
txDict['magic_no'] = self.magic_no
txDict['blocksize'] = self.blocksize
txDict['transaction_cnt'] = self.transaction_cnt
txDict = self.blockheader.updateTxDict(txDict)
txDict = self.transactions[idx].updateTxDict(txDict)
return txDict
def getBlockHash(self):
return get_hexstring(self.blockheader.blockhash)
def getBlockPrevHash(self):
return get_hexstring(self.blockheader.prevhash)
def getBlockDifficulty(self):
return self.blockheader.bits
def getNumTxs(self):
return self.transaction_cnt
def parseBlockBytes(bytestream):
blocks = []
count = 0;
while True:
curBlock = Block()
curBlock.parseBlock(bytestream)
if (curBlock.blocksize == 0):
break
else:
blocks.append(curBlock)
return blocks
def parseBlockFile(blockfile):
with open(blockfile, 'rb') as bf:
blocks = parseBlockBytes(bf)
return blocks
def printBlockFile(blockfile):
print 'Parsing block file: %s\n' % blockfile
blocks = parseBlockFile(blockfile)
count = 0;
for blk in blocks:
count = count + 1
print("Block Count: " + str(count))
blk.printBlock()
if __name__ == "__main__":
import sys
usage = "Usage: python {0} "
if len(sys.argv) < 2:
print usage.format(sys.argv[0])
else:
        printBlockFile(sys.argv[1])
| jkthompson/block-chain-analytics | block.py | Python | mit | 10,671 | 0.023803 |
import json
import logging
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from controllers.api.api_base_controller import ApiBaseController
from database.event_query import EventListQuery
from helpers.award_helper import AwardHelper
from helpers.district_helper import DistrictHelper
from helpers.event_insights_helper import EventInsightsHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
class ApiEventController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_controller_{}" # (event_key)
CACHE_VERSION = 4
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventController, self).__init__(*args, **kw)
self.event_key = self.request.route_kwargs["event_key"]
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
@property
def _validators(self):
return [("event_id_validator", self.event_key)]
def _set_event(self, event_key):
self.event = Event.get_by_id(event_key)
if self.event is None:
self._errors = json.dumps({"404": "%s event not found" % self.event_key})
self.abort(404)
def _track_call(self, event_key):
self._track_call_defer('event', event_key)
def _render(self, event_key):
self._set_event(event_key)
event_dict = ModelToDict.eventConverter(self.event)
return json.dumps(event_dict, ensure_ascii=True)
class ApiEventTeamsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_teams_controller_{}" # (event_key)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventTeamsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/teams', event_key)
def _render(self, event_key):
self._set_event(event_key)
teams = filter(None, self.event.teams)
team_dicts = [ModelToDict.teamConverter(team) for team in teams]
return json.dumps(team_dicts, ensure_ascii=True)
class ApiEventMatchesController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_matches_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventMatchesController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/matches', event_key)
def _render(self, event_key):
self._set_event(event_key)
matches = self.event.matches
match_dicts = [ModelToDict.matchConverter(match) for match in matches]
return json.dumps(match_dicts, ensure_ascii=True)
class ApiEventStatsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_stats_controller_{}" # (event_key)
CACHE_VERSION = 5
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventStatsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/stats', event_key)
def _render(self, event_key):
self._set_event(event_key)
stats = {}
matchstats = self.event.matchstats
if matchstats:
stats.update(matchstats)
year_specific = EventInsightsHelper.calculate_event_insights(self.event.matches, self.event.year)
if year_specific:
stats['year_specific'] = year_specific
return json.dumps(stats)
class ApiEventRankingsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_rankings_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventRankingsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/rankings', event_key)
def _render(self, event_key):
self._set_event(event_key)
ranks = json.dumps(Event.get_by_id(event_key).rankings)
if ranks is None or ranks == 'null':
return '[]'
else:
return ranks
class ApiEventAwardsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_awards_controller_{}" # (event_key)
CACHE_VERSION = 4
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventAwardsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/awards', event_key)
def _render(self,event_key):
self._set_event(event_key)
award_dicts = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.event.awards)]
return json.dumps(award_dicts, ensure_ascii=True)
class ApiEventDistrictPointsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_district_points_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventDistrictPointsController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/district_points', event_key)
def _render(self, event_key):
self._set_event(event_key)
points = DistrictHelper.calculate_event_points(self.event)
return json.dumps(points, ensure_ascii=True)
class ApiEventListController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_list_controller_{}" # (year)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventListController, self).__init__(*args, **kw)
self.year = int(self.request.route_kwargs.get("year") or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)
@property
def _validators(self):
return []
def _track_call(self, *args, **kw):
self._track_call_defer('event/list', self.year)
def _render(self, year=None):
if self.year < 1992 or self.year > datetime.now().year + 1:
self._errors = json.dumps({"404": "No events found for %s" % self.year})
self.abort(404)
events = EventListQuery(self.year).fetch()
event_list = [ModelToDict.eventConverter(event) for event in events]
return json.dumps(event_list, ensure_ascii=True)
| synth3tk/the-blue-alliance | controllers/api/api_event_controller.py | Python | mit | 6,883 | 0.001017 |
"""
Check the measured process sizes. If we are on a platform which supports
multiple measuring facilities (e.g. Linux), check if the reported sizes match.
This should help to protect against scaling errors (e.g. Byte vs KiB) or using
the wrong value for a different measure (e.g. resident in physical memory vs
virtual memory size).
"""
import sys
import unittest
from unittest import mock
from pympler import process
class ProcessMemoryTests(unittest.TestCase):
def _match_sizes(self, pi1, pi2, ignore=[]):
"""
Match sizes by comparing each set field. Process size may change
        in between two measurements.
"""
if pi1.available and pi2.available:
for arg in ('vsz', 'rss', 'data_segment', 'shared_segment',
'stack_segment', 'code_segment'):
if arg in ignore:
continue
size1 = getattr(pi1, arg)
size2 = getattr(pi2, arg)
if size1 and size2:
delta = abs(size1 - size2)
# Allow for a difference of the size of two pages or 5%
if delta > pi1.pagesize * 2 and delta > size1 * 0.05:
self.fail("%s mismatch: %d != %d" % (arg, size1, size2))
if pi1.pagefaults and pi2.pagefaults:
# If both records report pagefaults compare the reported
# number. If a pagefault happens after taking the first
# snapshot and before taking the second the latter will show a
# higher pagefault number. In that case take another snapshot
# with the first variant and check it's now reporting a higher
# number as well. We assume pagefaults statistics are
# monotonic.
if pi1.pagefaults < pi2.pagefaults:
pi1.update()
if pi1.pagefaults < pi2.pagefaults:
pf1 = pi1.pagefaults
pf2 = pi2.pagefaults
self.fail("Pagefault mismatch: %d != %d" % (pf1, pf2))
else:
self.assertEqual(pi1.pagefaults, pi2.pagefaults)
if pi1.pagesize and pi2.pagesize:
self.assertEqual(pi1.pagesize, pi2.pagesize)
def test_ps_vs_proc_sizes(self):
'''Test process sizes match: ps util vs /proc/self/stat
'''
psinfo = process._ProcessMemoryInfoPS()
procinfo = process._ProcessMemoryInfoProc()
self._match_sizes(psinfo, procinfo)
def test_ps_vs_getrusage(self):
'''Test process sizes match: ps util vs getrusage
'''
psinfo = process._ProcessMemoryInfoPS()
try:
resinfo = process._ProcessMemoryInfoResource()
except AttributeError:
pass
else:
self._match_sizes(psinfo, resinfo, ignore=['rss'])
if psinfo.available and resinfo.available:
self.assertTrue(resinfo.rss >= psinfo.rss)
def test_proc_vs_getrusage(self):
'''Test process sizes match: /proc/self/stat util vs getrusage
'''
procinfo = process._ProcessMemoryInfoProc()
try:
resinfo = process._ProcessMemoryInfoResource()
except AttributeError:
pass
else:
self._match_sizes(procinfo, resinfo, ignore=['rss'])
if procinfo.available and resinfo.available:
self.assertTrue(resinfo.rss >= procinfo.rss)
def test_get_current_threads(self):
'''Test thread info is extracted.'''
tinfos = process.get_current_threads()
for tinfo in tinfos:
self.assertEqual(type(tinfo.ident), int)
self.assertEqual(type(tinfo.name), type(''))
self.assertEqual(type(tinfo.daemon), type(True))
self.assertNotEqual(tinfo.ident, 0)
def test_proc(self):
'''Test reading proc stats with mock data.'''
mock_stat = mock.mock_open(read_data='22411 (cat) R 22301 22411 22301 34818 22411 4194304 82 0 0 0 0 0 0 0 20 0 1 0 709170 8155136 221 18446744073709551615 94052544688128 94052544719312 140729623469552 0 0 0 0 0 0 0 0 0 17 6 0 0 0 0 0 94052546816624 94052546818240 94052566347776 140729623473446 140729623473466 140729623473466 140729623478255 0')
mock_status = mock.mock_open(read_data='Name: cat\n\nVmData: 2 kB\nMultiple colons: 1:1')
with mock.patch('builtins.open', new_callable=mock.mock_open) as mock_file:
mock_file.side_effect = [mock_stat.return_value, mock_status.return_value]
procinfo = process._ProcessMemoryInfoProc()
self.assertTrue(procinfo.available)
self.assertEqual(procinfo.vsz, 8155136)
self.assertEqual(procinfo.data_segment, 2048)
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [ ProcessMemoryTests, ]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(map(tclass, names))
if not unittest.TextTestRunner().run(suite).wasSuccessful():
sys.exit(1)
| pympler/pympler | test/test_process.py | Python | apache-2.0 | 5,151 | 0.002136 |
"""Collection of fixtures and functions for the HomeKit tests."""
from unittest.mock import patch
def patch_debounce():
"""Return patch for debounce method."""
return patch(
"homeassistant.components.homekit.accessories.debounce",
lambda f: lambda *args, **kwargs: f(*args, **kwargs),
)
| fbradyirl/home-assistant | tests/components/homekit/common.py | Python | apache-2.0 | 317 | 0 |
# python-jinjatools
#
# Various tools for Jinja2,
# including new filters and tests based on python-moretools,
# a JinjaLoader class for Django,
# and a simple JinjaBuilder class for SCons.
#
# Copyright (C) 2011-2015 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# python-jinjatools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-jinjatools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-jinjatools. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Environment']
from itertools import chain
import jinja2
class Environment(jinja2.Environment):
def __init__(self, filters={}, tests={}, globals={}, **kwargs):
jinja2.Environment.__init__(self, **kwargs)
morefilters = __import__('jinjatools.filters').filters.filters
for name, func in chain(morefilters.items(), filters.items()):
self.filters[name] = func
for name, func in tests.items():
self.tests[name] = func
for name, value in globals.items():
self.globals[name] = value
# from .filters import filters as morefilters
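# Usage sketch (the filter/test names below are made up for illustration):
#   env = Environment(filters={'shout': lambda s: s.upper()},
#                     tests={'odd': lambda n: n % 2 == 1},
#                     globals={'project': 'demo'})
#   env.from_string('{{ project }}: {{ name|shout }}').render(name='jinja')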
| userzimmermann/python-jinjatools | jinjatools/env.py | Python | gpl-3.0 | 1,555 | 0.001286 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: consul_session
short_description: "manipulate consul sessions"
description:
- allows the addition, modification and deletion of sessions in a consul
cluster. These sessions can then be used in conjunction with key value pairs
to implement distributed locks. In depth documentation for working with
sessions can be found here http://www.consul.io/docs/internals/sessions.html
requirements:
- "python >= 2.6"
- python-consul
- requests
version_added: "2.0"
author: "Steve Gargan @sgargan"
options:
state:
description:
- whether the session should be present i.e. created if it doesn't
exist, or absent, removed if present. If created, the ID for the
session is returned in the output. If absent, the name or ID is
required to remove the session. Info for a single session, all the
sessions for a node or all available sessions can be retrieved by
specifying info, node or list for the state; for node or info, the
node name or session id is required as parameter.
required: false
choices: ['present', 'absent', 'info', 'node', 'list']
default: present
name:
description:
- the name that should be associated with the session. This is opaque
to Consul and not required.
required: false
default: None
delay:
description:
- the optional lock delay that can be attached to the session when it
          is created. Locks for invalidated sessions are blocked from being
acquired until this delay has expired. Durations are in seconds
default: 15
required: false
node:
description:
      - the name of the node with which the session will be associated.
by default this is the name of the agent.
required: false
default: None
datacenter:
description:
- name of the datacenter in which the session exists or should be
created.
required: false
default: None
checks:
description:
- a list of checks that will be used to verify the session health. If
all the checks fail, the session will be invalidated and any locks
associated with the session will be release and can be acquired once
the associated lock delay has expired.
required: false
default: None
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
required: false
default: True
version_added: "2.1"
behavior:
description:
- the optional behavior that can be attached to the session when it
is created. This can be set to either ‘release’ or ‘delete’. This
controls the behavior when a session is invalidated.
default: release
required: false
version_added: "2.2"
"""
EXAMPLES = '''
- name: register basic session with consul
consul_session:
name: session1
- name: register a session with an existing check
consul_session:
name: session_with_check
checks:
- existing_check_name
- name: register a session with lock_delay
consul_session:
name: session_with_delay
delay: 20s
- name: retrieve info about session by id
consul_session: id=session_id state=info
- name: retrieve active sessions
consul_session: state=list
'''
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
def execute(module):
state = module.params.get('state')
if state in ['info', 'list', 'node']:
lookup_sessions(module)
elif state == 'present':
update_session(module)
else:
remove_session(module)
def lookup_sessions(module):
datacenter = module.params.get('datacenter')
state = module.params.get('state')
consul_client = get_consul_api(module)
try:
if state == 'list':
sessions_list = consul_client.session.list(dc=datacenter)
#ditch the index, this can be grabbed from the results
if sessions_list and sessions_list[1]:
sessions_list = sessions_list[1]
module.exit_json(changed=True,
sessions=sessions_list)
elif state == 'node':
node = module.params.get('node')
if not node:
module.fail_json(
msg="node name is required to retrieve sessions for node")
sessions = consul_client.session.node(node, dc=datacenter)
module.exit_json(changed=True,
node=node,
sessions=sessions)
elif state == 'info':
session_id = module.params.get('id')
if not session_id:
module.fail_json(
msg="session_id is required to retrieve indvidual session info")
session_by_id = consul_client.session.info(session_id, dc=datacenter)
module.exit_json(changed=True,
session_id=session_id,
sessions=session_by_id)
except Exception as e:
module.fail_json(msg="Could not retrieve session info %s" % e)
def update_session(module):
name = module.params.get('name')
delay = module.params.get('delay')
checks = module.params.get('checks')
datacenter = module.params.get('datacenter')
node = module.params.get('node')
behavior = module.params.get('behavior')
consul_client = get_consul_api(module)
try:
session = consul_client.session.create(
name=name,
behavior=behavior,
node=node,
lock_delay=delay,
dc=datacenter,
checks=checks
)
module.exit_json(changed=True,
session_id=session,
name=name,
behavior=behavior,
delay=delay,
checks=checks,
node=node)
except Exception as e:
module.fail_json(msg="Could not create/update session %s" % e)
def remove_session(module):
session_id = module.params.get('id')
if not session_id:
module.fail_json(msg="""A session id must be supplied in order to
remove a session.""")
consul_client = get_consul_api(module)
try:
consul_client.session.destroy(session_id)
module.exit_json(changed=True,
session_id=session_id)
except Exception as e:
module.fail_json(msg="Could not remove session with id '%s' %s" % (
session_id, e))
def get_consul_api(module):
return consul.Consul(host=module.params.get('host'),
port=module.params.get('port'))
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. "\
"see http://python-consul.readthedocs.org/en/latest/#installation")
def main():
argument_spec = dict(
checks=dict(default=None, required=False, type='list'),
delay=dict(required=False,type='int', default='15'),
behavior=dict(required=False,type='str', default='release',
choices=['release', 'delete']),
host=dict(default='localhost'),
port=dict(default=8500, type='int'),
scheme=dict(required=False, default='http'),
validate_certs=dict(required=False, default=True),
id=dict(required=False),
name=dict(required=False),
node=dict(required=False),
state=dict(default='present',
choices=['present', 'absent', 'info', 'node', 'list']),
datacenter=dict(required=False)
)
module = AnsibleModule(argument_spec, supports_check_mode=False)
test_dependencies(module)
try:
execute(module)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), str(e)))
except Exception as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| CenturylinkTechnology/ansible-modules-extras | clustering/consul_session.py | Python | gpl-3.0 | 9,707 | 0.001959 |
"""Tests for asyncio/sslproto.py."""
try:
import ssl
except ImportError:
ssl = None
import trollius as asyncio
from trollius import ConnectionResetError
from trollius import sslproto
from trollius import test_utils
from trollius.test_utils import mock
from trollius.test_utils import unittest
@unittest.skipIf(ssl is None, 'No ssl module')
class SslProtoHandshakeTests(test_utils.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def ssl_protocol(self, waiter=None):
sslcontext = test_utils.dummy_ssl_context()
app_proto = asyncio.Protocol()
proto = sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter)
self.addCleanup(proto._app_transport.close)
return proto
def connection_made(self, ssl_proto, do_handshake=None):
transport = mock.Mock()
sslpipe = mock.Mock()
sslpipe.shutdown.return_value = b''
if do_handshake:
sslpipe.do_handshake.side_effect = do_handshake
else:
def mock_handshake(callback):
return []
sslpipe.do_handshake.side_effect = mock_handshake
with mock.patch('trollius.sslproto._SSLPipe', return_value=sslpipe):
ssl_proto.connection_made(transport)
def test_cancel_handshake(self):
        # Python issue #23197: cancelling a handshake must not raise an
# exception or log an error, even if the handshake failed
waiter = asyncio.Future(loop=self.loop)
ssl_proto = self.ssl_protocol(waiter)
handshake_fut = asyncio.Future(loop=self.loop)
def do_handshake(callback):
exc = Exception()
callback(exc)
handshake_fut.set_result(None)
return []
waiter.cancel()
self.connection_made(ssl_proto, do_handshake)
with test_utils.disable_logger():
self.loop.run_until_complete(handshake_fut)
def test_eof_received_waiter(self):
waiter = asyncio.Future(loop=self.loop)
ssl_proto = self.ssl_protocol(waiter)
self.connection_made(ssl_proto)
ssl_proto.eof_received()
test_utils.run_briefly(self.loop)
self.assertIsInstance(waiter.exception(), ConnectionResetError)
if __name__ == '__main__':
unittest.main()
| haypo/trollius | tests/test_sslproto.py | Python | apache-2.0 | 2,350 | 0 |
import tensorflow as tf
from tensorflow.contrib import slim as slim
from avb.ops import *
import math
def encoder(x, config, is_training=True):
df_dim = config['df_dim']
z_dim = config['z_dim']
a_dim = config['iaf_a_dim']
# Center x at 0
x = 2*x - 1
net = flatten_spatial(x)
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_0")
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_1")
zmean = slim.fully_connected(net, z_dim, activation_fn=None)
log_zstd = slim.fully_connected(net, z_dim, activation_fn=None)
a = slim.fully_connected(net, a_dim, activation_fn=None)
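    # zmean / log_zstd parameterize the diagonal Gaussian posterior; `a` is an
    # extra context vector (of size iaf_a_dim) passed on to the IAF flow.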
return zmean, log_zstd, a
| LMescheder/AdversarialVariationalBayes | avb/iaf/models/full0.py | Python | mit | 698 | 0.004298 |
"""
2015 gupon.jp
Connector for C4D Python Generator
"""
import c4d, math, itertools, random
from c4d.modules import mograph as mo
#userdata id
ID_SPLINE_TYPE = 2
ID_SPLINE_CLOSED = 4
ID_SPLINE_INTERPOLATION = 5
ID_SPLINE_SUB = 6
ID_SPLINE_ANGLE = 8
ID_SPLINE_MAXIMUMLENGTH = 9
ID_USE_SCREEN_DIST = 10
ID_USE_MAXSEG = 15
ID_MAXSEG_NUM = 13
ID_USE_CENTER = 19
ID_CENTER_OBJ = 18
class Point:
def __init__(self, p):
self.world = p
self.screen = c4d.Vector(0)
def calc2D(self, bd):
self.screen = bd.WS(self.world)
self.screen.z = 0
class PointGroup:
def __init__(self):
self.points = []
def AddPoint(self, point):
self.points.append(Point(point))
def Make2DPoints(self):
bd = doc.GetRenderBaseDraw()
for point in self.points:
point.calc2D(bd)
def MakeCombsWith(self, target):
combs = []
for pA in self.points:
for pB in target.points:
combs.append([pA, pB])
return combs
def MakeCombsInOrder(self):
combs = []
for i,pA in enumerate(self.points):
if i == len(self.points)-1:
combs.append([pA, self.points[0]])
else:
combs.append([pA, self.points[i+1]])
return combs
def GetPoint(self, index):
return self.points[index]
def GetAllPoints(self):
return self.points
def GetNumPoints(self):
return len(self.points)
def SetSplineGUI():
UD = op.GetUserDataContainer()
intermediatePoints = op[c4d.ID_USERDATA, ID_SPLINE_INTERPOLATION]
for id, bc in UD:
if id[1].id == ID_SPLINE_SUB:
if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_NATURAL \
or intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_UNIFORM:
bc[c4d.DESC_HIDE] = False
else:
bc[c4d.DESC_HIDE] = True
if id[1].id == ID_SPLINE_ANGLE:
if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_ADAPTIVE \
or intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_SUBDIV:
bc[c4d.DESC_HIDE] = False
else:
bc[c4d.DESC_HIDE] = True
if id[1].id == ID_SPLINE_MAXIMUMLENGTH:
if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_SUBDIV:
bc[c4d.DESC_HIDE] = False
else:
bc[c4d.DESC_HIDE] = True
if id[1].id == ID_MAXSEG_NUM:
bc[c4d.DESC_HIDE] = not op[c4d.ID_USERDATA, ID_USE_MAXSEG]
if id[1].id == ID_CENTER_OBJ:
bc[c4d.DESC_HIDE] = not op[c4d.ID_USERDATA, ID_USE_CENTER]
op.SetUserDataContainer(id, bc)
def SetSplineAttributes(obj):
obj[c4d.SPLINEOBJECT_TYPE] = op[c4d.ID_USERDATA, ID_SPLINE_TYPE]
obj[c4d.SPLINEOBJECT_CLOSED] = op[c4d.ID_USERDATA, ID_SPLINE_CLOSED]
obj[c4d.SPLINEOBJECT_INTERPOLATION] = op[c4d.ID_USERDATA, ID_SPLINE_INTERPOLATION]
obj[c4d.SPLINEOBJECT_SUB] = op[c4d.ID_USERDATA, ID_SPLINE_SUB]
obj[c4d.SPLINEOBJECT_ANGLE] = op[c4d.ID_USERDATA, ID_SPLINE_ANGLE]
obj[c4d.SPLINEOBJECT_MAXIMUMLENGTH] = op[c4d.ID_USERDATA, ID_SPLINE_MAXIMUMLENGTH]
obj.Message(c4d.MSG_UPDATE)
def GetPointsFromObjects(targetList):
step = op[c4d.ID_USERDATA, 12]
# add every points to list
pointGroups = []
baseMg = op.GetMg()
for target in targetList:
if target != None :
group = PointGroup()
moData = mo.GeGetMoData(target)
if moData==None:
group.AddPoint(target.GetMg().off * ~baseMg)
else:
if not moData.GetCount():
continue
moList = moData.GetArray(c4d.MODATA_MATRIX)
clonerMg = target.GetMg()
for i,data in enumerate(moList):
if i % step == 0:
group.AddPoint(data.off * clonerMg * ~baseMg)
pointGroups.append(group)
return pointGroups
def SetCombinations(pointGroups, obj):
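    """Pair up points that lie within the distance limit and write each pair
    as a two-point segment of the output spline."""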
bd = doc.GetRenderBaseDraw()
maxDist = op[c4d.ID_USERDATA, 1]
excludeSame = op[c4d.ID_USERDATA, 11]
maxSegNum = op[c4d.ID_USERDATA, 13]
useMaxSeg = op[c4d.ID_USERDATA, 15]
useCenter = op[c4d.ID_USERDATA, ID_USE_CENTER]
useScreenDist = op[c4d.ID_USERDATA, 10]
if useScreenDist:
for group in pointGroups:
group.Make2DPoints()
frame = bd.GetSafeFrame()
baseLength = frame["cr"] - frame["cl"]
maxDist = baseLength * maxDist/1000
_combs = []
inOrder = False
# if inOrder:
# for group in pointGroups:
# _combs = _combs + group.MakeCombsInOrder()
if useCenter:
target = op[c4d.ID_USERDATA, ID_CENTER_OBJ]
if target:
pA = Point(target.GetMg().off * ~op.GetMg())
for group in pointGroups:
for pB in group.GetAllPoints():
_combs.append([pA, pB])
else:
print "no target found"
return
else:
if excludeSame:
numGroups = len(pointGroups)
for i in range(numGroups-1):
groupA = pointGroups[i]
for j in range(i+1, numGroups):
groupB = pointGroups[j]
_combs = _combs + groupA.MakeCombsWith(groupB)
else:
allPoints = []
for group in pointGroups:
allPoints = allPoints + group.GetAllPoints()
numPoints = len(allPoints)
for i in range(numPoints-1):
for j in range(i+1, numPoints):
_combs.append([allPoints[i], allPoints[j]])
combs = []
for comb in _combs:
v0 = comb[0].screen if useScreenDist else comb[0].world
v1 = comb[1].screen if useScreenDist else comb[1].world
if c4d.Vector(v1 - v0).GetLength() < maxDist:
combs.append(comb)
random.shuffle(combs)
obj.ResizeObject(len(combs) * 2)
for i, comb in enumerate(combs):
a = comb[0].world
b = comb[1].world
addP = True
if useMaxSeg:
if maxSegNum:
acnt = 0
bcnt = 0
for p in obj.GetAllPoints():
if p == a: acnt += 1
if p == b: bcnt += 1
if acnt >= maxSegNum or bcnt >= maxSegNum:
addP = False
break
else:
addP = False
if addP:
obj.SetPoint(i * 2 + 0, a)
obj.SetPoint(i * 2 + 1, b)
obj.MakeVariableTag(c4d.Tsegment, len(combs))
for i in range(len(combs)):
obj.SetSegment(i, 2, False)
def main():
random.seed(100)
obj = c4d.BaseObject(c4d.Ospline)
targetListData = op[c4d.ID_USERDATA, 3]
numTargets = targetListData.GetObjectCount()
if numTargets < 1:
return obj
targetList = []
for i in range(numTargets):
targetList.append(targetListData.ObjectFromIndex(doc, i))
pointGroups = GetPointsFromObjects(targetList)
if len(pointGroups) < 1:
return obj
SetCombinations(pointGroups, obj)
SetSplineGUI()
SetSplineAttributes(obj)
return obj | gupon/ConnectorC4D | ConnectorC4D.py | Python | mit | 6,128 | 0.046671 |
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
#### Utilities
def get_first_visit_date(data_patient):
''' Determines the first visit for a given patient'''
#IDEA Could be parallelized in Dask
data_patient['first_visit_date'] = min(data_patient.visit_date)
return data_patient
def subset_analysis_data(data, date_analysis):
''' Function that subsets the full dataset to only the data available for a certain analysis date'''
if type(data.date_entered.iloc[0]) is str :
data.date_entered = pd.to_datetime(data.date_entered)
data = data[data.date_entered < date_analysis]
return data
def subset_cohort(data, horizon_date, horizon_time, bandwidth):
    ''' Function that subsets the data to the cohort that initiated care between horizon_time and horizon_time + bandwidth days before horizon_date'''
horizon_date = pd.to_datetime(horizon_date)
data['first_visit_date'] = pd.to_datetime(data['first_visit_date'])
cohort_data = data[(data['first_visit_date'] >= horizon_date - relativedelta(days=horizon_time + bandwidth)) &
(data['first_visit_date'] < horizon_date - relativedelta(days=horizon_time))]
return cohort_data
#### Standard reporting
def status_patient(data_patient, reference_date, grace_period):
''' Determines the status of a patient at a given reference_date, given the data available at a given analysis_date
TODO Also select the available data for Death and Transfer and other outcomes based on data entry time
'''
#IDEA Could be parallelized in Dask
data_patient = get_first_visit_date(data_patient)
date_out = pd.NaT
date_last_appointment = pd.to_datetime(max(data_patient.next_visit_date))
late_time = reference_date - date_last_appointment
if late_time.days > grace_period:
status = 'LTFU'
date_out = date_last_appointment
if late_time.days <= grace_period:
status = 'Followed'
if (data_patient.reasonDescEn.iloc[0] is not np.nan) & (pd.to_datetime(data_patient.discDate.iloc[0]) < reference_date):
status = data_patient.reasonDescEn.iloc[0]
date_out = pd.to_datetime(data_patient.discDate.iloc[0])
return pd.DataFrame([{'status': status,
'late_time': late_time,
'last_appointment': date_last_appointment,
'date_out':date_out ,
'first_visit_date':data_patient.first_visit_date.iloc[0],
'facility':data_patient.facility.iloc[0]}])
def horizon_outcome(data_cohort, reference_date, horizon_time):
# TODO Make sure dates are dates
data_cohort['first_visit_date'] = pd.to_datetime(data_cohort['first_visit_date']) #TODO This conversion should happen earlier
data_cohort.loc[:, 'horizon_date'] = data_cohort['first_visit_date'] + np.timedelta64(horizon_time, 'D')
data_cohort.loc[: , 'horizon_status'] = data_cohort['status']
# If the patient exited the cohort after his horizon date, still consider him followed
# BUG This is marginally invalid, for example if a patient was considered LTFU before he died
data_cohort.horizon_status[~(data_cohort['status'] == 'Followed') & (data_cohort['date_out'] > data_cohort['horizon_date'])] = 'Followed'
return data_cohort
## Transversal description only
def n_visits(data, month):
reporting_month = pd.to_datetime(data['visit_date']).dt.to_period('M')
n_vis = sum(reporting_month == month)
return n_vis
def make_report(data, reference_date, date_analysis, grace_period, horizon_time, cohort_width):
assert reference_date <= date_analysis, 'You should not analyze a period before you have the data (date of analysis is before reference date)'
if type(reference_date) is str :
reference_date = pd.to_datetime(reference_date)
if type(date_analysis) is str:
date_analysis = pd.to_datetime(date_analysis)
report_data = subset_analysis_data(data, date_analysis)
if len(report_data) > 0:
month = reference_date.to_period('M') - 1
n_visits_month = report_data.groupby('facility').apply(n_visits, month)
        df_status = report_data.groupby('patient_id').apply(status_patient, reference_date, grace_period)
cohort_data = subset_cohort(df_status, reference_date, horizon_time, cohort_width)
# print(df_status.head())
        horizon_outcome_data = horizon_outcome(cohort_data, month, horizon_time)
transversal_reports = df_status.groupby('facility').status.value_counts()
longitudinal_reports = horizon_outcome_data.groupby('facility').status.value_counts()
out_reports = {'transversal':transversal_reports,
'longitudinal':longitudinal_reports,
'n_visits':n_visits_month}
return out_reports
# QUESTION What are the form_types
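# Illustrative usage sketch (not part of the original module): builds a tiny
# visit-level DataFrame with the column names referenced above (patient_id,
# facility, visit_date, next_visit_date, date_entered, reasonDescEn, discDate).
# All identifiers and dates are hypothetical.
if __name__ == '__main__':
    example_visits = pd.DataFrame({
        'patient_id': [1, 1],
        'facility': ['clinic_a', 'clinic_a'],
        'visit_date': pd.to_datetime(['2016-02-10', '2016-03-10']),
        'next_visit_date': pd.to_datetime(['2016-03-10', '2016-04-10']),
        'date_entered': pd.to_datetime(['2016-02-11', '2016-03-11']),
        'reasonDescEn': [np.nan, np.nan],
        'discDate': [pd.NaT, pd.NaT],
    })
    example_reports = make_report(example_visits, reference_date='2017-03-01',
                                  date_analysis='2017-06-01', grace_period=90,
                                  horizon_time=365, cohort_width=30)
    print(example_reports)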
| grlurton/hiv_retention_metrics | src/models/cohort_analysis_function.py | Python | mit | 4,874 | 0.008822 |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
from shogun import RealFeatures, BinaryLabels
from shogun import Perceptron
from shogun import MSG_INFO
# 2D data
_DIM = 2
# To get the nice message that the perceptron has converged
dummy = BinaryLabels()
dummy.io.set_loglevel(MSG_INFO)
np.random.seed(seed)
# Produce some (probably) linearly separable training data by hand
# Two Gaussians at a far enough distance
X = np.array(np.random.randn(_DIM,n))+distance
Y = np.array(np.random.randn(_DIM,n))
label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
fm_train_real = np.hstack((X,Y))
feats_train = RealFeatures(fm_train_real)
labels = BinaryLabels(label_train_twoclass)
perceptron = Perceptron(feats_train, labels)
perceptron.set_learn_rate(learn_rate)
perceptron.set_max_iter(max_iter)
perceptron.set_initialize_hyperplane(False)
# Find limits for visualization
x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
y_max = max(np.max(X[1,:]), np.max(Y[1,:]))
    for i in range(nperceptrons):
# Initialize randomly weight vector and bias
perceptron.set_w(np.random.random(2))
perceptron.set_bias(np.random.random())
# Run the perceptron algorithm
perceptron.train()
# Construct the hyperplane for visualization
# Equation of the decision boundary is w^T x + b = 0
b = perceptron.get_bias()
w = perceptron.get_w()
hx = np.linspace(x_min-1,x_max+1)
        # Solve w[0]*x + w[1]*y + b = 0 for y to draw the decision boundary
        plt.plot(hx, -1/w[1]*(w[0]*hx+b))
# Plot the two-class data
plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')
# Customize the plot
plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
plt.title('Rosenblatt\'s Perceptron Algorithm')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
return perceptron
if __name__=='__main__':
print('Perceptron graphical')
classifier_perceptron_graphical(*parameter_list[0])
| MikeLing/shogun | examples/undocumented/python/graphical/classifier_perceptron_graphical.py | Python | gpl-3.0 | 2,302 | 0.032146 |
from typing import Optional
from thinc.api import Model
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import RussianLemmatizer
from ...language import Language
class RussianDefaults(Language.Defaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Russian(Language):
lang = "ru"
Defaults = RussianDefaults
@Russian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "pymorphy2", "overwrite": False},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
):
return RussianLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)
__all__ = ["Russian"]
| spacy-io/spaCy | spacy/lang/ru/__init__.py | Python | mit | 905 | 0.001105 |
#! /usr/bin/env python
# Python script to parse SMStext messages from a Windows 8.0 phone's store.vol file
# Author: cheeky4n6monkey@gmail.com (Adrian Leong)
#
# Special Thanks to Detective Cindy Murphy (@cindymurph) and the Madison, WI Police Department (MPD)
# for the test data and encouragement.
# Thanks also to JoAnn Gibb (Ohio Attorney Generals Office) and Brian McGarry (Garda) for providing testing
# data/feedback.
#
# WARNING: This program is provided "as-is" and has been tested with 2 types of Windows Phone 8.0
# (Nokia Lumia 520, HTC PM23300)
# See http://cheeky4n6monkey.blogspot.com/ for further details.
# Copyright (C) 2014, 2015 Adrian Leong (cheeky4n6monkey@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You can view the GNU General Public License at <http://www.gnu.org/licenses/>
"""
Data Notes:
===========
\\Users\WPCOMMSSERVICES\APPDATA\Local\Unistore\store.vol contains SMS text messages, contact and limited MMS
information.
\\Users\WPCOMMSSERVICES\APPDATA\Local\UserData\Phone contains call log information.
\SharedData\Comms\Unistore\data contains various .dat files for MMS messages
From analysis of MPD store.vol test data (Nokia 520 Windows 8 phone) there are two areas of focus (tables?) for SMS data
Area 1 = The "SMStext" content area. Each SMS message has its own record within this area.
Each content record seems to follow one of these structures:
[?][FILETIME1][?][FILETIME2][?][PHONE0][[1 byte]["IPM.SMStext" string][1 byte][PHONE1][1 byte][PHONE2][1 byte][PHONE3][1 byte][Received Message][?][FILETIME3][?][FILETIME4]
or
[?][FILETIME1][?][FILETIME2][?]["IPM.SMStext" string][1 byte][Sent Message][?][FILETIME3][?][FILETIME4]
? = unknown / varying number of bytes
All strings are Unicode UTF-16-LE and null terminated
FILETIMEs are 8 byte LE and record the number of 100 ns intervals since 1 JAN 1601 (ie MS FILETIME)
For MPD test data, there seems to consistently be:
0xBF bytes between FILETIME2 and "SMStext" for Sent SMS (0xB7 bytes between start of "IPM.SMStext" and start of
FILETIME2)
0xEA bytes between FILETIME2 and "SMStext" for Recvd SMS (subject to length of PHONE0)
For the supplied OHIO data, There seems to consistently be:
0xB4 bytes between FILETIME2 and "SMStext" for Sent SMS
0xDF bytes between FILETIME2 and "SMStext" for Recvd SMS (subject to length of PHONE0)
CHECK YOUR DATA OFFSETS! They will probably vary between phones / data sets.
Unfortunately, sent SMS does not record the destination phone number in Area 1 records.
For these, we need to check an area of store.vol we'll call Area 2. The records in Area 2 look like:
[?][FILETIMEX][0x1B bytes]["SMS" string][1 byte][PHONEX][?]
Note: the Area 2 record formats seemed consistent between the Nokia 520 and HTC phones.
FILETIMEX value seems to correspond exactly to an Area 1 record's FILETIME2 field.
So we might be able to find out the destination number of a sent SMS by doing a search of Area2 fields for a specific
FILETIMEX value.
This seems to work well with our MPD test data.
Program Notes:
==============
Given a specified input store.vol and output TSV filename, this script will
- Search for "SMStext" entries (in Area 1 ie "Message" table) and store the sent/recvd direction, FILETIME2, Text
message, Offset of the Text Message and PHONE1.
- For any sent SMS, it will also look up the destination phone number (in Area 2 ie "Recipient" table) using
FILETIME2 / FILETIMEX as a key.
- Print out results to a nominated Tab Separated Variable file format (screen output is not typically large enough)
Known Issues:
- Offsets might have to be adjusted between phones/datasets particularly between the start of FILETIME2 and the start
of "SMStext".
This script version tries an experimental method of calculating the offset so the user doesn't have to
(theoretically).
- There may be unprintable characters in null term string fields AFTER the NULL but before the 0x1 field marker. Added
goto_next_field function to handle these.
- If the script does not detect Unicode digits 0x11 bytes before the start of "SMStext", it ass-umes that the message is
a Sent SMS (ie no numbers). This also means that SMS with one/two digit phone numbers might not be identified
correctly as received.
Change history:
v2014-08-30:
- Revised for non-printable characters appearing after the null in nullterm unicode strings but before the 0x1.
- Assumes each field is demarcated by 0x01 bytes.
- Also adjusted the max offset range for Sent SMS FILETIME2 based on test data. Increased it to 0xEA (from 0xC4).
v2014-09-01:
- Changed logic so that if we don't see Unicode digits before "SMStext", the script assumes the message is a Sent SMS
(no numbers).
- Decreased Sent SMS "find_timestamp" min parameter based on 1SEP data to x7D (from 0xAF)
v2014-09-05:
- Added trace output for when the script skips record extractions (ie when it can't find/read fields)
- Adjusted minimum "find_timestamp" parameters based on MPD log data to 0x9B for received SMS
v2014-09-29:
- Modified read_nullterm_unistring so it returns whatever valid characters it has read on a bad read exception.
Previously, it was returning an empty string. This was done to handle emoticons ...
v2014-10-05:
- Renamed script from "win8sms-ex2.py" to "wp8-find_my_texts_wp8.py"
v2015-07-10:
- Changed script to search for hex strings in chunks of CHUNK_SIZE rather than in one big read
(makes it quicker when running against whole .bin files). Thanks to Boss Rob :)
v2015-07-12:
- Removed "all_indices" function which was commented out in previous version
- Adjusted some comments
"""
import struct
import sys
import string
import datetime
import codecs
from optparse import OptionParser
import os
__author__ = 'Adrian Leong'
__version__ = "wp8_sms_integrated.py v2015-07-12(modified)"
def read_nullterm_unistring(f):
"""
Read in unicode chars one at a time until a null char ie "0x00 0x00"
Returns empty string on error otherwise it filters out return/newlines and returns the string read
:rtype :
:param f:
:type f:
:return:
:rtype:
"""
readstrg = ""
terminated_flag = True
unprintablechars = False
begin = f.tell()
while (terminated_flag):
try:
# print "char at " + hex(f.tell()).rstrip("L")
readchar = f.read(1)
if (ord(readchar) == 0): # bailout if null char
terminated_flag = False
if (terminated_flag):
if (readchar in string.printable) and (readchar != "\r") and (readchar != "\n"):
readstrg += readchar
else:
readstrg += " "
unprintablechars = True
# print "unprintable at " + hex(f.tell()-1).rstrip("L")
except (IOError, ValueError):
print ("Warning ... bad unicode string at offset " + hex(begin).rstrip("L"))
exctype, value = sys.exc_info()[:2]
print ("Exception type = ", exctype, ", value = ", value)
# readstrg = ""
return readstrg # returns partial strings
if (unprintablechars):
print ("String substitution(s) due to unrecognized/unprintable characters at " + hex(begin).rstrip("L"))
return readstrg
def read_filetime(f):
"""
Author - Adrian Leong
Read in 8 byte MS FILETIME (number of 100 ns since 1 Jan 1601) and
Returns equivalent unix epoch offset or 0 on error
:param f:
:type f:
:return:
:rtype:
"""
begin = f.tell()
try:
# print "time at offset: " + str(begin)
mstime = struct.unpack('<Q', f.read(8))[0]
except struct.error:
print ("Bad FILETIME extraction at " + hex(begin).rstrip("L"))
exctype, value = sys.exc_info()[:2]
print ("Exception type = ", exctype, ", value = ", value)
return 0
# print (mstime)
# print hex(mstime)
# Date Range Sanity Check
# min = 0x01CD000000000000 ns = 03:27 12MAR2012 (Win8 released 29OCT2012)
# max = 0x01D9000000000000 ns = 12:26 24NOV2022 (give it 10 years?)
if (mstime < 0x01CD000000000000) or (mstime > 0x01D9000000000000):
# print "Bad filetime value!"
return 0
# From https://libforensics.googlecode.com/hg-history/a41c6dfb1fdbd12886849ea3ac91de6ad931c363/code/lf/utils/time.py
# Function filetime_to_unix_time(filetime)
unixtime = (mstime - 116444736000000000) // 10000000
return unixtime
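# Illustrative helper (not part of the original parser): repeats the same
# FILETIME -> unix epoch arithmetic as read_filetime() on a fixed sample value.
# 0x01CD000000000000 is the lower bound used by the sanity check above and
# corresponds to roughly 03:27 12 MAR 2012 UTC.
def _filetime_conversion_example():
    sample_filetime = 0x01CD000000000000
    unix_seconds = (sample_filetime - 116444736000000000) // 10000000
    return datetime.datetime.utcfromtimestamp(unix_seconds).isoformat()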
def find_timestamp(f, maxoffset, minoffset):
"""
Author - Adrian Leong
Searches backwards for a valid timestamp from a given file ptr and range
Returns 0 if error or not found otherwise returns unix timestamp value
:param f:
:type f:
:param maxoffset:
:type maxoffset:
:param minoffset:
:type minoffset:
:return:
:rtype:
"""
begin = f.tell()
# maxoffset is inclusive => need range from minoffset : maxoffset+1
for i in range(minoffset, maxoffset+1, 1):
if ((begin - i) < 0):
return 0 # FILETIME can't be before start of file
else:
f.seek(begin-i)
value = read_filetime(f)
if (value != 0):
return value
# otherwise keep searching until maxoffset
# if we get here, we haven't found a valid timestamp, so return 0
return 0
def find_flag(f):
"""
Author - Adrian Leong
Given a file ptr to "SMStext" field, looks for the 3rd last "PHONE0" digit value If we see a digit, we know its a
received SMS.
:param f:
:type f: file
:return:
:rtype:
Author: Adrian Leong
"""
# :type begin: long - position of cursor
begin = f.tell()
# fb.seek(begin - 0xD) # last digit offset
f.seek(begin - 0x11) # usually the 3rd last digit offset but can be the last digit eg 1SEP DUB data
byte_val = struct.unpack("B", f.read(1))[0]
if (0x30 <= byte_val <= 0x39):
val2 = struct.unpack("B", f.read(1))[0]
if (val2 == 0x00):
return byte_val # 0x30 0x00 to 0x39 0x00 (corresponds to Unicode for "0" to "9")
else:
return 0 # assume its a sent sms
else:
return byte_val # means no number present (ie sent SMS)
def goto_next_field(f, offset, maxbytes):
"""
Takes a binary file ptr, a starting offset and reads bytes until it finds 0x1 or the maxbytes limit.
Returns True if 0x1 found, False otherwise
Used to get to the next field offset (assuming they are separated by a byte value of 0x1.
:param f:
:type f:
:param offset:
:type offset:
:param maxbytes:
:type maxbytes:
:return:
:rtype:
"""
for i in range(maxbytes+1): # 0 ... maxbytes
f.seek(offset+i)
next_field_val = struct.unpack("B", f.read(1))[0]
if (next_field_val == 0x1):
# print "Found next field marker at " + hex(f.tell()-1).rstrip("L")
return True
return False
def adrians_script(hits, smshits, fb, funi):
# Filter smshits further (the hits above will include some false positives eg "SMStext")
smslogdict = {}
# for each valid "SMS" log hit, grab the filetime and phone number for later use
for smshit in smshits:
# go back 2 bytes and check for "@" (0x40) and process as sms log entry if required
fb.seek(smshit)
val = struct.unpack("B", fb.read(1))[0]
if (val == 0x40):
# print "sms log hit = " + hex(smshit - 2).rstrip("L")
# Get sms log filetime associated with this SMS (ASS-UME it matches with FILETIME2 retrieved later)
fb.seek(smshit - 0x23) # seek to 1st byte of FILETIMEX
smstimeval = read_filetime(fb)
smstimestring = ""
if (smstimeval != 0):
try:
# returns UTC time
smstimestring = datetime.datetime.utcfromtimestamp(smstimeval).isoformat()
except ValueError as e:
smstimestring = "%r" % e # if we get here, the hit is a false one. The date at this offset is not valid
continue
# print "SMS log Time2 (UTC) = " + smstimestring
else:
# must be wrong offset / read error so ignore this hit
continue
# Retrieve phone number string (PHONEX) from sms log
funi.seek(smshit + 0x9) # seek to 1st byte of phone num
smsnumstring = read_nullterm_unistring(funi)
# print "SMS log # = " + smsnumstring + "\n"
if (smstimestring not in smslogdict.keys() and smsnumstring != ""):
# If not already there and not an empty string, store phone number in dictionary keyed by time
smslogdict[smstimestring] = smsnumstring
# print "smslogdict = " + str(len(smslogdict.keys()))
# storage variable for printing parsed data to TSV later
sms_entries = {}
# for each "SMStext" hit
for hit in hits:
nums_listed = -1
string_offset = 0
unistring = ""
phonestring = "Not parsed"
sentflag = "Unknown"
# print "Text Offset = " + hex(hit) # offset to "SMStext"
fb.seek(hit)
# Look for "PHONE0"s 3rd last digit value
flagvalue = find_flag(fb)
# print "flag = " + hex(flagvalue)
# Changed logic for 1SEP DUB data. Assume its a Sent message if no number detected at offset
if (0x30 <= flagvalue <= 0x39):
nums_listed = 1
sentflag = "Recvd" # digit was detected, must be a received SMS
else:
nums_listed = 0
sentflag = "Sent"
# print "Direction: " + sentflag
# Jump forward from start of "SMStext" to get to first unicode text (either number or text message)
funi.seek(hit)
IPMSMStext = read_nullterm_unistring(funi)
offset_after_IPMSMStext = funi.tell()
# Look for next 0x1 value marking the next field. If we don't find it after 3 bytes, skip this hit
found_next_field = goto_next_field(fb, offset_after_IPMSMStext, 3)
if (not found_next_field):
print ("Skipping hit at " + hex(hit) + " - cannot find next field after SMStext")
continue # can't find next field so skip this hit
# print "found next string after IPM.SMStext at " + hex(fb.tell()).rstrip("L")
# we are either at beginning of sms string (sent) or at beginning of list of 3 null terminated phone numbers
# (each *usually* separated by 1 byte ... for recvd)
if (nums_listed == 0):
# Sent sms only has text
string_offset = fb.tell()
funi.seek(string_offset)
unistring = read_nullterm_unistring(funi)
# print "Text (" + hex(string_offset).rstrip("L") +"): " + unistring
if (nums_listed == 1):
# At the beginning of phone numbers
funi.seek(fb.tell())
# print "Recvd at " + hex(funi.tell())
phonestring1 = read_nullterm_unistring(funi)
if (phonestring1 == ""):
print ("Skipping hit at " + hex(hit) + " - cannot read PHONE1 field")
continue # skip this hit if empty string
phonestring = phonestring1 # just collect the first phone string for printing at this time
# print phonestring1
offset_after_string = funi.tell()
found_next_field = goto_next_field(fb, offset_after_string, 3)
if (not found_next_field):
print ("Skipping hit at " + hex(hit) + " - cannot find PHONE2 field")
continue # can't find next field so skip this hit
funi.seek(fb.tell())
phonestring2 = read_nullterm_unistring(funi)
if (phonestring2 == ""):
print ("Skipping hit at " + hex(hit) + " - cannot read PHONE2 field")
continue # skip this hit if empty string
# print phonestring2
offset_after_string = funi.tell()
found_next_field = goto_next_field(fb, offset_after_string, 3)
if (not found_next_field):
print ("Skipping hit at " + hex(hit) + " - cannot find PHONE3 field")
continue # can't find next field so skip this hit
funi.seek(fb.tell())
phonestring3 = read_nullterm_unistring(funi)
if (phonestring3 == ""):
print ("Skipping hit at " + hex(hit) + " - cannot read PHONE3 field")
continue # skip this hit if empty string
# print phonestring3
# print "Number(s): " + phonestring1 + ", " + phonestring2 + ", " + phonestring3
offset_after_string = funi.tell()
found_next_field = goto_next_field(fb, offset_after_string, 3)
if (not found_next_field):
print ("Skipping hit at " + hex(hit) + " - cannot find Received text field")
continue # can't find next field so skip this hit
string_offset = fb.tell()
funi.seek(string_offset)
unistring = read_nullterm_unistring(funi)
# print "Text (" + hex(string_offset).rstrip("L") +"): " + unistring
timeval = 0
if (nums_listed == 0):
# Original method: Manual adjustment of FILETIME2 offset value
# Offsets between begin of FILETIME2 and begin of "SMStext" string for Sent SMS
# MAD: 0xBF | OH: 0xB4 | DUB1: 0xBF | DUB2: 0xB4 bytes
# WARNING: Might need to adjust the 0xBF value to suit your data ...
# Note: Remember there's no PHONE0 field to account for in Sent SMS.
# filetime2_offset = 0xBF
# fb.seek(hit - filetime2_offset)
# timeval = read_filetime(fb)
# Experimental method. Use test data offsets +/-5
# From test data, minimum offset was 0xB4.
# Allowing for some tolerance => 0xB4 - 5 = 0xAF as min offset
# From test data, maximum offset was 0xBF.
# Allowing for some tolerance => 0xBF + 5 = 0xC4 as max offset
# Some adjustment may be required for other data sets
fb.seek(hit)
# timeval = find_timestamp(fb, 0xC4, 0xAF)
timeval = find_timestamp(fb, 0xEA+0x5, 0x7D) # Based on 30AUG DUB data, change the max offset to 0xEA + 5,
# Based on 1SEP data change min to x7D (from 0xAF)
if (nums_listed == 1):
# Old method: This doesnt handle variable length phone numbers
# Offsets between begin of FILETIME2 and begin of "SMStext" string for Recvd SMS
# MAD: 0xEA | OH: 0xDF | DUB1: 0xEC | DUB2: 0xDF bytes
# fb.seek(hit - 0xEA)
# timeval = read_filetime(fb)
#
# Updated method of calculating FILETIME2 offset using the "PHONE0" field length.
# This means the script can handle received SMS with variable length phone numbers
# offset = length of string in bytes + (NULL bytes + "IPM." + 0x01 byte = 0xB) + offset from beginning of
# FILETIME2 to start of phonestring (=0xC7)
# This assumes "PHONE0" is same length as "PHONE1" (phonestring)
# WARNING: Might need to adjust the 0xC7 value to suit your data ...
# 0xEA = 12 digit phone number (0x18 bytes) + 0xB + 0xC7
# 0xEC = 13 digit phone number (0x1A bytes) + 0xB + 0xC7
# filetime2_offset = len(phonestring)*2 + (0xB) + 0xC7
# print "filetime2_offset = " + hex(filetime2_offset)
# fb.seek(hit - filetime2_offset)
# timeval = read_filetime(fb)
# Experimental method: Use projected min/max from test data
# From the test data, we can see a maximum offset of 0xEC (236 dec) for 13 digits (ie DUB1).
# So for the theoretical maximum of 15 digits, this projects to 0xD4 (240 dec) for 15 digits.
# Add in some tolerance and we will use 0xFA (250 dec) for our max offset between FILETIME2 and "SMStext"
# From the test data, we can see a minimum offset of 0xDF (223 dec) for 13 digits (ie DUB2).
# So for the theoretical minimum of 1 digit, this projects to 0xC7 (199 dec).
# Add in some tolerance and we will use 0xBD (189 dec) for our min offset between FILETIME2 and "SMStext"
fb.seek(hit)
# timeval = find_timestamp(fb, 0xFA, 0xBD)
timeval = find_timestamp(fb, 0xFA, 0x9B) # Based on 30AUG DUB data, change the min offset to 0xB8 - 5
# Based on MPD log file data, changed min offset to 0x9B
timestring = ""
if (timeval != 0):
# print "timeval = " + hex(timeval)
try:
# returns time referenced to local system timezone
# timestring = datetime_advanced.datetime_advanced.fromtimestamp(timeval).isoformat()
# returns UTC time
timestring = datetime.datetime.utcfromtimestamp(timeval).isoformat()
except (ValueError):
timestring = "Error"
else:
# something bad happened reading time
timestring = "Error"
# print "Time2 (UTC) = " + timestring + "\n"
# If no number listed (ie sent SMS), try grabbing the PHONEX phone number based on the FILETIME2 timestamp retrieved
if (nums_listed == 0 and timestring != "Error"):
phonestring = "Unknown"
if (timestring in smslogdict.keys()):
phonestring = smslogdict[timestring]
# Store parsed data in dictionary keyed by SMS string offset
sms_entries[string_offset] = (timestring, sentflag, phonestring, unistring)
# ends for hits loop
print ("\nProcessed " + str(len(hits)) + " SMStext hits\n")
# sort by filetime
sorted_messages_keys = sorted(sms_entries, key=lambda x: (sms_entries[x][0]))
# print to TSV
    # open the TSV output file (the output name is fixed to "sms.tsv" in this version)
    output_tsv = "sms.tsv"
    if (output_tsv is not None):
tsvof = None
try:
tsvof = open("sms.tsv", "w")
except (IOError, ValueError):
print ("Trouble Opening TSV Output File")
exit(-1)
tsvof.write("Text_Offset\tUTC_Time2\tDirection\tPhone_No\tText\n")
for key in sorted_messages_keys:
tsvof.write(
hex(key).rstrip("L") + "\t" +
sms_entries[key][0] + "\t" +
sms_entries[key][1] + "\t" +
sms_entries[key][2] + "\t" +
sms_entries[key][3] + "\n")
print ("\nFinished writing out " + str(len(sorted_messages_keys)) + " TSV entries\n")
tsvof.close()
funi.close()
fb.close()
# print("Running " + __version__ + "\n")
usage = " %prog -f dump -o database -d directory"
# Handle command line args
parser = OptionParser(usage=usage)
parser.add_option("-f", dest="dump",
action="store", type="string",
help="Input File To Be Searched")
parser.add_option("-o", dest="database",
action="store", type="string",
help="sqlite3 database holding processed phone data")
parser.add_option("-d", dest="directory",
action="store", type="string",
help="base directory for other arguments (default current directory)")
(options, cmd_args) = parser.parse_args()
fb = None
# Check if no arguments given by user, exit
if len(sys.argv) == 1:
parser.print_help()
exit(-1)
if (options.dump is None):
parser.print_help()
print ("\nInput filename incorrectly specified!")
exit(-1)
if (options.directory is not None):
try:
os.chdir('%s' % options.directory)
    except OSError:
options.directory = options.directory[0:-1]
os.chdir('%s' % options.directory)
if (options.database is None):
options.database = r"data\mydb"
# Open store.vol for unicode encoded text reads
try:
funi = codecs.open(options.dump, encoding="utf-16-le", mode="r")
except IOError:
print ("Input File Not Found (unicode attempt)")
exit(-1)
# Open store.vol for binary byte ops (eg timestamps)
try:
fb = open(options.dump, "rb")
except IOError:
print ("Input File Not Found (binary attempt)")
exit(-1)
| WindowsPhoneForensics/find_my_texts_wp8 | find_my_texts_wp8/wp8_sms_integrated.py | Python | gpl-3.0 | 24,866 | 0.004987 |
"""Support for tracking consumption over given periods of time."""
from datetime import timedelta
import logging
from croniter import croniter
import voluptuous as vol
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import CONF_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_TARIFF,
CONF_CRON_PATTERN,
CONF_METER,
CONF_METER_DELTA_VALUES,
CONF_METER_NET_CONSUMPTION,
CONF_METER_OFFSET,
CONF_METER_TYPE,
CONF_SOURCE_SENSOR,
CONF_TARIFF,
CONF_TARIFF_ENTITY,
CONF_TARIFFS,
DATA_TARIFF_SENSORS,
DATA_UTILITY,
DOMAIN,
METER_TYPES,
SERVICE_RESET,
SERVICE_SELECT_NEXT_TARIFF,
SERVICE_SELECT_TARIFF,
SIGNAL_RESET_METER,
)
_LOGGER = logging.getLogger(__name__)
TARIFF_ICON = "mdi:clock-outline"
ATTR_TARIFFS = "tariffs"
DEFAULT_OFFSET = timedelta(hours=0)
def validate_cron_pattern(pattern):
"""Check that the pattern is well-formed."""
if croniter.is_valid(pattern):
return pattern
raise vol.Invalid("Invalid pattern")
def period_or_cron(config):
"""Check that if cron pattern is used, then meter type and offsite must be removed."""
if CONF_CRON_PATTERN in config and CONF_METER_TYPE in config:
raise vol.Invalid(f"Use <{CONF_CRON_PATTERN}> or <{CONF_METER_TYPE}>")
if (
CONF_CRON_PATTERN in config
and CONF_METER_OFFSET in config
and config[CONF_METER_OFFSET] != DEFAULT_OFFSET
):
raise vol.Invalid(
f"When <{CONF_CRON_PATTERN}> is used <{CONF_METER_OFFSET}> has no meaning"
)
return config
def max_28_days(config):
"""Check that time period does not include more then 28 days."""
if config.days >= 28:
raise vol.Invalid(
"Unsupported offset of more then 28 days, please use a cron pattern."
)
return config
METER_CONFIG_SCHEMA = vol.Schema(
vol.All(
{
vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_METER_TYPE): vol.In(METER_TYPES),
vol.Optional(CONF_METER_OFFSET, default=DEFAULT_OFFSET): vol.All(
cv.time_period, cv.positive_timedelta, max_28_days
),
vol.Optional(CONF_METER_DELTA_VALUES, default=False): cv.boolean,
vol.Optional(CONF_METER_NET_CONSUMPTION, default=False): cv.boolean,
vol.Optional(CONF_TARIFFS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_CRON_PATTERN): validate_cron_pattern,
},
period_or_cron,
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.slug: METER_CONFIG_SCHEMA})}, extra=vol.ALLOW_EXTRA
)
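# Illustrative YAML configuration accepted by CONFIG_SCHEMA above. Entity ids and
# tariff names are made up, and the YAML key names are assumed to match the
# constants imported from .const (e.g. CONF_SOURCE_SENSOR -> "source",
# CONF_METER_TYPE -> "cycle", CONF_TARIFFS -> "tariffs"):
#
#   utility_meter:
#     daily_energy:
#       source: sensor.energy_total
#       cycle: daily
#       tariffs:
#         - peak
#         - offpeak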
async def async_setup(hass, config):
"""Set up an Utility Meter."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[DATA_UTILITY] = {}
register_services = False
for meter, conf in config.get(DOMAIN).items():
_LOGGER.debug("Setup %s.%s", DOMAIN, meter)
hass.data[DATA_UTILITY][meter] = conf
hass.data[DATA_UTILITY][meter][DATA_TARIFF_SENSORS] = []
if not conf[CONF_TARIFFS]:
# only one entity is required
hass.async_create_task(
discovery.async_load_platform(
hass,
SENSOR_DOMAIN,
DOMAIN,
[{CONF_METER: meter, CONF_NAME: conf.get(CONF_NAME, meter)}],
config,
)
)
else:
# create tariff selection
await component.async_add_entities(
[TariffSelect(meter, list(conf[CONF_TARIFFS]))]
)
hass.data[DATA_UTILITY][meter][CONF_TARIFF_ENTITY] = "{}.{}".format(
DOMAIN, meter
)
# add one meter for each tariff
tariff_confs = []
for tariff in conf[CONF_TARIFFS]:
tariff_confs.append(
{
CONF_METER: meter,
CONF_NAME: f"{meter} {tariff}",
CONF_TARIFF: tariff,
}
)
hass.async_create_task(
discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, tariff_confs, config
)
)
register_services = True
if register_services:
component.async_register_entity_service(SERVICE_RESET, {}, "async_reset_meters")
component.async_register_entity_service(
SERVICE_SELECT_TARIFF,
{vol.Required(ATTR_TARIFF): cv.string},
"async_select_tariff",
)
component.async_register_entity_service(
SERVICE_SELECT_NEXT_TARIFF, {}, "async_next_tariff"
)
return True
class TariffSelect(RestoreEntity):
"""Representation of a Tariff selector."""
def __init__(self, name, tariffs):
"""Initialize a tariff selector."""
self._name = name
self._current_tariff = None
self._tariffs = tariffs
self._icon = TARIFF_ICON
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
if self._current_tariff is not None:
return
state = await self.async_get_last_state()
if not state or state.state not in self._tariffs:
self._current_tariff = self._tariffs[0]
else:
self._current_tariff = state.state
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
return self._current_tariff
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {ATTR_TARIFFS: self._tariffs}
async def async_reset_meters(self):
"""Reset all sensors of this meter."""
_LOGGER.debug("reset meter %s", self.entity_id)
async_dispatcher_send(self.hass, SIGNAL_RESET_METER, self.entity_id)
async def async_select_tariff(self, tariff):
"""Select new option."""
if tariff not in self._tariffs:
_LOGGER.warning(
"Invalid tariff: %s (possible tariffs: %s)",
tariff,
", ".join(self._tariffs),
)
return
self._current_tariff = tariff
self.async_write_ha_state()
async def async_next_tariff(self):
"""Offset current index."""
current_index = self._tariffs.index(self._current_tariff)
new_index = (current_index + 1) % len(self._tariffs)
self._current_tariff = self._tariffs[new_index]
self.async_write_ha_state()
| jawilson/home-assistant | homeassistant/components/utility_meter/__init__.py | Python | apache-2.0 | 7,390 | 0.000947 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import collections
import logging
import os
import platform
import re
import subprocess
import types
import util
import json
from ebstall.versions import Version
from ebstall.util import normalize_string
logger = logging.getLogger(__name__)
CLI_DEFAULTS_DEFAULT = dict(
packager='source'
)
CLI_DEFAULTS_DEBIAN = dict(
packager='apt-get'
)
CLI_DEFAULTS_CENTOS = dict(
packager='yum'
)
CLI_DEFAULTS_DARWIN = dict(
packager='source'
)
FLAVORS = {
'debian': 'debian',
'ubuntu': 'debian',
'kubuntu': 'debian',
'kali': 'debian',
'centos': 'redhat',
'centos linux': 'redhat',
'fedora': 'redhat',
'red hat enterprise linux server': 'redhat',
'rhel': 'redhat',
'amazon': 'redhat',
'amzn': 'redhat',
'gentoo': 'gentoo',
'gentoo base system': 'gentoo',
'darwin': 'darwin',
'opensuse': 'suse',
'suse': 'suse',
}
CLI_DEFAULTS = {
"default": CLI_DEFAULTS_DEFAULT,
"debian": CLI_DEFAULTS_DEBIAN,
"ubuntu": CLI_DEFAULTS_DEBIAN,
"centos": CLI_DEFAULTS_CENTOS,
"centos linux": CLI_DEFAULTS_CENTOS,
"fedora": CLI_DEFAULTS_CENTOS,
"red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
"rhel": CLI_DEFAULTS_CENTOS,
"amazon": CLI_DEFAULTS_CENTOS,
"amzn": CLI_DEFAULTS_CENTOS,
"gentoo": CLI_DEFAULTS_DEFAULT,
"gentoo base system": CLI_DEFAULTS_DEFAULT,
"darwin": CLI_DEFAULTS_DARWIN,
"opensuse": CLI_DEFAULTS_DEFAULT,
"suse": CLI_DEFAULTS_DEFAULT,
}
"""CLI defaults."""
# Start system
START_INITD = 'init.d'
START_SYSTEMD = 'systemd'
# Pkg manager
PKG_YUM = 'yum'
PKG_APT = 'apt-get'
FAMILY_REDHAT = 'redhat'
FAMILY_DEBIAN = 'debian'
# redhat / debian
YUMS = ['redhat', 'fedora', 'centos', 'rhel', 'amzn', 'amazon']
DEBS = ['debian', 'ubuntu', 'kali']
class OSInfo(object):
"""OS information, name, version, like - similarity"""
def __init__(self, name=None, version=None, version_major=None, like=None, family=None,
packager=None, start_system=None, has_os_release=False, fallback_detection=False, long_name=None,
*args, **kwargs):
self.name = name
self.long_name = long_name
self.version_major = version_major
self.version = version
self.like = like
self.family = family
self.packager = packager
self.start_system = start_system
self.has_os_release = has_os_release
self.fallback_detection = fallback_detection
def __str__(self):
return 'OSInfo(%r)' % json.dumps(self.to_json())
def __repr__(self):
return 'OSInfo(%r)' % json.dumps(self.to_json())
def to_json(self):
"""
Converts to the JSON
:return:
"""
js = collections.OrderedDict()
js['name'] = self.name
js['long_name'] = self.long_name
js['version_major'] = self.version_major
js['version'] = self.version
js['like'] = self.like
js['family'] = self.family
js['packager'] = self.packager
js['start_system'] = self.start_system
js['has_os_release'] = self.has_os_release
js['fallback_detection'] = self.fallback_detection
return js
class PackageInfo(object):
"""
Basic information about particular package
"""
def __init__(self, name, version, arch, repo, size=None, section=None):
self._version = None
self.name = name
self.version = version
self.arch = arch
self.repo = repo
self.size = size
self.section = section
@property
def version(self):
return self._version
@version.setter
def version(self, val):
self._version = Version(val)
def __str__(self):
return '%s-%s.%s' % (self.name, self.version, self.arch)
def __repr__(self):
return 'PackageInfo(name=%r, version=%r, arch=%r, repo=%r, size=%r, section=%r)' \
% (self.name, self.version, self.arch, self.repo, self.size, self.section)
def to_json(self):
"""
Converts to the JSON
:return:
"""
js = collections.OrderedDict()
js['name'] = self.name
js['version'] = str(self.version)
js['arch'] = self.arch
js['repo'] = self.repo
if self.size is not None:
js['size'] = self.size
if self.section is not None:
js['section'] = self.section
return js
@classmethod
def from_json(cls, js):
"""
Converts json dict to the object
:param js:
:return:
"""
obj = cls(name=js['name'], version=js['version'], arch=js['arch'], repo=js['repo'])
if 'size' in js:
obj.size = js['size']
if 'section' in js:
obj.section = js['section']
return obj
def get_os():
"""
Returns basic information about the OS.
:return: OSInfo
"""
# At first - parse os-release
ros = OSInfo()
os_release_path = '/etc/os-release'
if os.path.isfile(os_release_path):
ros.name = _get_systemd_os_release_var("ID", filepath=os_release_path)
ros.version = _get_systemd_os_release_var("VERSION_ID", filepath=os_release_path)
ros.like = _get_systemd_os_release_var("ID_LIKE", os_release_path).split(" ")
ros.long_name = _get_systemd_os_release_var("PRETTY_NAME", filepath=os_release_path)
ros.has_os_release = True
if not ros.long_name:
ros.long_name = _get_systemd_os_release_var("NAME", filepath=os_release_path)
# Try /etc/redhat-release and /etc/debian_version
if not ros.has_os_release or ros.like is None or ros.version is None or ros.name is None:
os_redhat_release(ros)
os_debian_version(ros)
os_issue(ros)
# like detection
os_like_detect(ros)
os_family_detect(ros)
# Major version
os_major_version(ros)
# Packager detection - yum / apt-get
os_packager(ros)
# Start system - init.d / systemd
os_start_system(ros)
return ros
def os_family_detect(ros):
"""
OS Family (redhat, debian, ...)
:param ros:
:return:
"""
if util.startswith(ros.like, YUMS):
ros.family = FAMILY_REDHAT
if util.startswith(ros.like, DEBS):
ros.family = FAMILY_DEBIAN
if ros.family is not None:
if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
ros.family = FAMILY_REDHAT
if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
ros.family = FAMILY_DEBIAN
return
def os_packager(ros):
if ros.like is not None:
if util.startswith(ros.like, YUMS):
ros.packager = PKG_YUM
if util.startswith(ros.like, DEBS):
ros.packager = PKG_APT
return ros
if ros.name is not None:
if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
ros.packager = PKG_YUM
if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
ros.packager = PKG_APT
return
if os.path.exists('/etc/yum'):
ros.packager = PKG_YUM
if os.path.exists('/etc/apt/sources.list'):
ros.packager = PKG_APT
def os_start_system(ros):
if os.path.exists('/etc/systemd'):
ros.start_system = START_SYSTEMD
else:
ros.start_system = START_INITD
return ros
def os_issue(ros):
if os.path.exists('/etc/issue'):
with open('/etc/issue', 'r') as fh:
issue = fh.readline().strip()
issue = re.sub(r'\\[a-z]', '', issue).strip()
match1 = re.match(r'^(.+?)\s+release\s+(.+?)$', issue, re.IGNORECASE)
match2 = re.match(r'^(.+?)\s+([0-9.]+)\s*(LTS)?$', issue, re.IGNORECASE)
if match1:
ros.long_name = match1.group(1).strip()
ros.version = match1.group(2).strip()
elif match2:
ros.long_name = match2.group(1).strip()
ros.version = match2.group(2).strip()
else:
ros.long_name = issue
return ros
def os_debian_version(ros):
if os.path.exists('/etc/debian_version'):
with open('/etc/debian_version', 'r') as fh:
debver = fh.readline().strip()
ros.like = 'debian'
ros.family = FAMILY_DEBIAN
if ros.version is None:
ros.version = debver.strip()
return ros
def os_redhat_release(ros):
if os.path.exists('/etc/redhat-release'):
with open('/etc/redhat-release', 'r') as fh:
redhatrel = fh.readline().strip()
ros.like = 'redhat'
ros.family = FAMILY_REDHAT
match = re.match(r'^(.+?)\s+release\s+(.+?)$', redhatrel, re.IGNORECASE)
if match is not None:
ros.long_name = match.group(1).strip()
ros.version = match.group(2).strip()
else:
ros.long_name = redhatrel
return ros
def os_like_detect(ros):
if not ros.like and ros.name is not None:
try:
ros.like = FLAVORS[ros.name.lower()]
except:
pass
if not ros.like and ros.long_name is not None:
try:
ros.like = FLAVORS[ros.long_name.lower()]
except:
pass
return ros
def os_major_version(ros):
if ros.version is not None:
match = re.match(r'(.+?)[/.]', ros.version)
if match:
ros.version_major = match.group(1)
return ros
def get_os_info(filepath="/etc/os-release"):
"""
Get OS name and version
:param str filepath: File path of os-release file
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
if os.path.isfile(filepath):
# Systemd os-release parsing might be viable
os_name, os_version = get_systemd_os_info(filepath=filepath)
if os_name:
return (os_name, os_version)
# Fallback to platform module
return get_python_os_info()
def get_os_info_ua(filepath="/etc/os-release"):
"""
Get OS name and version string for User Agent
:param str filepath: File path of os-release file
:returns: os_ua
:rtype: `str`
"""
if os.path.isfile(filepath):
os_ua = _get_systemd_os_release_var("PRETTY_NAME", filepath=filepath)
if not os_ua:
os_ua = _get_systemd_os_release_var("NAME", filepath=filepath)
if os_ua:
return os_ua
# Fallback
return " ".join(get_python_os_info())
def get_systemd_os_info(filepath="/etc/os-release"):
"""
Parse systemd /etc/os-release for distribution information
:param str filepath: File path of os-release file
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
os_name = _get_systemd_os_release_var("ID", filepath=filepath)
os_version = _get_systemd_os_release_var("VERSION_ID", filepath=filepath)
return (os_name, os_version)
def get_systemd_os_like(filepath="/etc/os-release"):
"""
Get a list of strings that indicate the distribution likeness to
other distributions.
:param str filepath: File path of os-release file
:returns: List of distribution acronyms
:rtype: `list` of `str`
"""
return _get_systemd_os_release_var("ID_LIKE", filepath).split(" ")
def _get_systemd_os_release_var(varname, filepath="/etc/os-release"):
"""
Get single value from systemd /etc/os-release
:param str varname: Name of variable to fetch
:param str filepath: File path of os-release file
:returns: requested value
:rtype: `str`
"""
var_string = varname+"="
if not os.path.isfile(filepath):
return ""
with open(filepath, 'r') as fh:
contents = fh.readlines()
for line in contents:
if line.strip().startswith(var_string):
# Return the value of var, normalized
return normalize_string(line.strip()[len(var_string):])
return ""
def get_python_os_info():
"""
Get Operating System type/distribution and major version
using python platform module
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
info = platform.system_alias(
platform.system(),
platform.release(),
platform.version()
)
os_type, os_ver, _ = info
os_type = os_type.lower()
if os_type.startswith('linux'):
info = platform.linux_distribution()
# On arch, platform.linux_distribution() is reportedly ('','',''),
# so handle it defensively
if info[0]:
os_type = info[0]
if info[1]:
os_ver = info[1]
elif os_type.startswith('darwin'):
os_ver = subprocess.Popen(
["sw_vers", "-productVersion"],
stdout=subprocess.PIPE
).communicate()[0].rstrip('\n')
elif os_type.startswith('freebsd'):
# eg "9.3-RC3-p1"
os_ver = os_ver.partition("-")[0]
os_ver = os_ver.partition(".")[0]
elif platform.win32_ver()[1]:
os_ver = platform.win32_ver()[1]
else:
# Cases known to fall here: Cygwin python
os_ver = ''
return os_type, os_ver
def os_like(key):
"""
Tries to transform OS ID to LIKE_ID
:param key:
:return: string or None
"""
try:
return FLAVORS[key.lower()]
except KeyError:
return None
def os_constant(key):
"""
Get a constant value for operating system
:param key: name of cli constant
:return: value of constant for active os
"""
os_info = get_os_info()
try:
constants = CLI_DEFAULTS[os_info[0].lower()]
except KeyError:
constants = os_like_constants()
if not constants:
constants = CLI_DEFAULTS["default"]
return constants[key]
def os_like_constants():
"""
Try to get constants for distribution with
similar layout and configuration, indicated by
/etc/os-release variable "LIKE"
:returns: Constants dictionary
:rtype: `dict`
"""
os_like = get_systemd_os_like()
if os_like:
for os_name in os_like:
if os_name in CLI_DEFAULTS.keys():
return CLI_DEFAULTS[os_name]
return {}
def get_yum_packages(out):
"""
List of all packages parsing
:param out:
:return:
"""
ret = []
lines = out if isinstance(out, types.ListType) else out.split('\n')
for line in lines:
line = line.strip()
match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)[\s\t]+([@a-zA-Z0-9.\-_]+)$', line)
if match is None:
continue
package = match.group(1).strip()
version = match.group(2).strip()
repo = match.group(3).strip()
arch = None
# Architecture extract
match_arch = re.match(r'^(.+?)\.([^.]+)$', package)
if match_arch:
package = match_arch.group(1).strip()
arch = match_arch.group(2).strip()
pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo)
ret.append(pkg)
return ret
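# Illustrative usage (hypothetical "yum list"-style line; the package, version and
# repository values are made up, and Version() is assumed to accept epoch-prefixed
# version strings):
def _yum_list_parsing_example():
    sample_line = "openssl.x86_64    1:1.0.1e-60.el7    @base"
    return get_yum_packages([sample_line])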
def get_yum_packages_update(out):
"""
    Parses the package table from "yum update"-style output (sections between ===== separators)
:param out:
:return:
"""
ret = []
eqline = 0
cur_section = None
lines = out if isinstance(out, types.ListType) else out.split('\n')
for line in lines:
line = line.strip()
if line.startswith('====='):
eqline += 1
continue
# Process lines only after 2nd ====== line - should be the package list.
if eqline != 2:
continue
lmatch = re.match(r'^([a-zA-Z\s]+):$', line)
if lmatch is not None:
cur_section = lmatch.group(1)
continue
match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)'
r'[\s\t]+([@a-zA-Z0-9.:\-_]+)[\s\t]+([a-zA-Z0-9.\-_\s]+?)$', line)
if match is None:
continue
package = match.group(1).strip()
version = match.group(3).strip()
repo = match.group(4).strip()
arch = match.group(2).strip()
size = match.group(5).strip()
pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo, size=size, section=cur_section)
ret.append(pkg)
return ret
def check_package_restrictions(yum_output_packages, allowed_packages):
"""
    Checks the list of yum output packages against the allowed packages
    :param yum_output_packages:
    :param allowed_packages:
:return: (conflicting packages, new packages)
"""
new_packages = []
conflicting_packages = []
for out_package in yum_output_packages:
allowed_list = [x for x in allowed_packages if x.name == out_package.name]
if len(allowed_list) == 0:
new_packages.append(out_package)
continue
# Sort packages based on the version, highest first.
if len(allowed_list) > 1:
allowed_list.sort(key=lambda x: x.version, reverse=True)
allowed = allowed_list[0]
if out_package.version > allowed.version:
conflicting_packages.append(out_package)
return conflicting_packages, new_packages
def package_diff(a, b, only_in_b=False):
"""
Package diff a - b
package x \in a is removed from a if the same package (or higher version) is in b.
If there are more packages in b, the one with higher version is taken
Used for removing already installed packages (b) from the packages to install (a).
:param a:
:param b:
:param only_in_b: if True the element in a has to be in the b in the lower version.
:return:
"""
res = []
for pkg in a:
b_filtered = [x for x in b if x.name == pkg.name and x.arch == pkg.arch]
# New package, not in b
if len(b_filtered) == 0:
if not only_in_b:
res.append(pkg)
continue
# Sort packages based on the version, highest first.
if len(b_filtered) > 1:
b_filtered.sort(key=lambda x: x.version, reverse=True)
# b contains smaller version of the package, add to the result
if b_filtered[0].version < pkg.version:
res.append(pkg)
return res
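# Illustrative usage (hypothetical package data, assuming Version comparisons
# order these strings as expected): the openssl candidate survives the diff
# because only an older openssl is installed, while the bash candidate is
# dropped because an equal version is already present.
def _package_diff_example():
    to_install = [PackageInfo('openssl', '1.0.2', 'x86_64', 'updates'),
                  PackageInfo('bash', '4.2.46', 'x86_64', 'base')]
    installed = [PackageInfo('openssl', '1.0.1', 'x86_64', '@base'),
                 PackageInfo('bash', '4.2.46', 'x86_64', '@base')]
    return package_diff(to_install, installed)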
| EnigmaBridge/ebstall.py | ebstall/osutil.py | Python | mit | 18,396 | 0.002174 |
#!/Users/wuga/Documents/website/wuga/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="white")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| wuga214/Django-Wuga | env/bin/viewer.py | Python | apache-2.0 | 1,056 | 0.000947 |