# Dataset-extraction artifact (table header), preserved as a comment:
# section stringlengths 2 30 | filename stringlengths 1 82 | text stringlengths 783 28M |
# |---|---|---|
# bs4 | element
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__license__ = "MIT"
import collections
import re
import shlex
import sys
import warnings
from bs4.dammit import EntitySubstitution
# Encoding used when a tag tree is rendered to bytes and no other
# encoding is specified.
DEFAULT_OUTPUT_ENCODING = "utf-8"

# True when running under Python 3; used to choose str/unicode behavior.
PY3K = (sys.version_info[0] > 2)

# Matches one or more whitespace characters. A raw string avoids the
# invalid-escape-sequence DeprecationWarning for "\s" on Python 3.6+.
whitespace_re = re.compile(r"\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
    """An attribute name that may carry a namespace prefix, e.g. "xml:lang"."""

    def __new__(cls, prefix, name, namespace=None):
        # Build the display string from whichever parts are present.
        if name is None:
            text = prefix
        elif prefix is None:
            # Not really namespaced.
            text = name
        else:
            text = prefix + ":" + name
        obj = unicode.__new__(cls, text)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
class AttributeValueWithCharsetSubstitution(unicode):
    """A stand-in object for a character encoding specified in HTML.

    Subclasses override encode() so that when a document is re-encoded,
    the charset named in the attribute value can be rewritten to match
    the new encoding.
    """
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.

    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """

    def __new__(cls, original_value):
        # Remember the value as originally parsed.
        instance = unicode.__new__(cls, original_value)
        instance.original_value = original_value
        return instance

    def encode(self, encoding):
        # The entire attribute value is the charset name, so "encoding"
        # it simply means substituting the target encoding's name.
        return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.

    When Beautiful Soup parses the markup:
     <meta http-equiv="content-type" content="text/html; charset=utf8">

    The value of the 'content' attribute will be one of these objects.
    """

    # Matches the "charset=..." clause of a content-type value. The raw
    # string avoids the invalid "\s" escape DeprecationWarning on
    # Python 3.6+ (the pattern itself is unchanged).
    CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)

    def __new__(cls, original_value):
        match = cls.CHARSET_RE.search(original_value)
        if match is None:
            # No charset clause, so no substitution necessary; return a
            # plain unicode string (deliberately unicode, not cls).
            return unicode.__new__(unicode, original_value)

        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj

    def encode(self, encoding):
        def rewrite(match):
            # Keep the "charset=" prefix, swap in the new encoding name.
            return match.group(1) + encoding
        return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
    """Entity substitution rules that are aware of some HTML quirks.

    Specifically, the contents of <script> and <style> tags should not
    undergo entity substitution.

    Incoming NavigableString objects are checked to see if they're the
    direct children of a <script> or <style> tag.
    """

    cdata_containing_tags = set(["script", "style"])
    preformatted_tags = set(["pre"])
    preserve_whitespace_tags = set(['pre', 'textarea'])

    @classmethod
    def _substitute_if_appropriate(cls, ns, f):
        # Strings living directly inside <script>/<style> are
        # CDATA-like and must pass through untouched.
        is_cdata_child = (
            isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in cls.cdata_containing_tags)
        if is_cdata_child:
            return ns
        return f(ns)

    @classmethod
    def substitute_html(cls, ns):
        """HTML entity substitution, skipping <script>/<style> content."""
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_html)

    @classmethod
    def substitute_xml(cls, ns):
        """XML entity substitution, skipping <script>/<style> content."""
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_xml)
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    # There are four possible values for the "formatter" argument passed in
    # to methods like encode() and prettify():
    #
    # "html" - All Unicode characters with corresponding HTML entities
    # are converted to those entities on output.
    # "minimal" - Bare ampersands and angle brackets are converted to
    # XML entities: & < >
    # None - The null formatter. Unicode characters are never
    # converted to entities. This is not recommended, but it's
    # faster than "minimal".
    # A function - This function will be called on every string that
    # needs to undergo entity substitution.
    #
    # In an HTML document, the default "html" and "minimal" functions
    # will leave the contents of <script> and <style> tags alone. For
    # an XML document, all tags will be given the same treatment.

    # Formatter-name -> function lookup table for HTML trees.
    HTML_FORMATTERS = {
        "html" : HTMLAwareEntitySubstitution.substitute_html,
        "minimal" : HTMLAwareEntitySubstitution.substitute_xml,
        None : None
        }

    # Formatter-name -> function lookup table for XML trees.
    XML_FORMATTERS = {
        "html" : EntitySubstitution.substitute_html,
        "minimal" : EntitySubstitution.substitute_xml,
        None : None
        }
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
    @property
    def _is_xml(self):
        """Is this element part of an XML tree or an HTML tree?

        This is used when mapping a formatter name ("minimal") to an
        appropriate function (one that performs entity-substitution on
        the contents of <script> and <style> tags, or not). It can be
        inefficient, but it should be called very rarely.
        """
        if self.known_xml is not None:
            # Most of the time we will have determined this when the
            # document is parsed.
            return self.known_xml

        # Otherwise, it's likely that this element was created by
        # direct invocation of the constructor from within the user's
        # Python code.
        if self.parent is None:
            # This is the top-level object. It should have .known_xml set
            # from tree creation. If not, take a guess--BS is usually
            # used on HTML markup.
            return getattr(self, 'is_xml', False)
        # Walk up: the answer for the whole tree applies to this node.
        return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
    def setup(self, parent=None, previous_element=None, next_element=None,
              previous_sibling=None, next_sibling=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent

        self.previous_element = previous_element
        if previous_element is not None:
            # Back-link from the element parsed immediately before this one.
            self.previous_element.next_element = self

        self.next_element = next_element
        if self.next_element:
            self.next_element.previous_element = self

        self.next_sibling = next_sibling
        if self.next_sibling:
            self.next_sibling.previous_sibling = self

        if (not previous_sibling
            and self.parent is not None and self.parent.contents):
            # No explicit previous sibling was given: the parent's current
            # last child becomes this element's previous sibling.
            previous_sibling = self.parent.contents[-1]

        self.previous_sibling = previous_sibling
        if previous_sibling:
            self.previous_sibling.next_sibling = self

    nextSibling = _alias("next_sibling")  # BS3
    previousSibling = _alias("previous_sibling")  # BS3
def replace_with(self, replace_with):
if not self.parent:
raise ValueError(
"Cannot replace one element with another when the"
"element to be replaced is not part of a tree.")
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
if not self.parent:
raise ValueError(
"Cannot replace an element with its contents when that"
"element is not part of a tree.")
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
    def extract(self):
        """Destructively rips this element out of the tree.

        Returns self, now detached: parent and sibling/element pointers
        are cleared, though this element's own children stay attached
        to it.
        """
        if self.parent is not None:
            del self.parent.contents[self.parent.index(self)]

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element

        # The "is not" cross-checks guard against corrupting pointers
        # when the neighbors on both sides are the same object.
        if (self.previous_element is not None and
            self.previous_element is not next_element):
            self.previous_element.next_element = next_element
        if next_element is not None and next_element is not self.previous_element:
            next_element.previous_element = self.previous_element
        self.previous_element = None
        last_child.next_element = None

        self.parent = None
        if (self.previous_sibling is not None
            and self.previous_sibling is not self.next_sibling):
            self.previous_sibling.next_sibling = self.next_sibling
        if (self.next_sibling is not None
            and self.next_sibling is not self.previous_sibling):
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self
    def _last_descendant(self, is_initialized=True, accept_self=True):
        """Finds the last element beneath this object to be parsed.

        :param is_initialized: if True, assume sibling pointers are
            already wired up, enabling the O(1) shortcut through
            next_sibling.previous_element.
        :param accept_self: if False, return None instead of self when
            this element has no descendants.
        """
        if is_initialized and self.next_sibling:
            # Shortcut: the element parsed just before our next sibling
            # is our last descendant.
            last_child = self.next_sibling.previous_element
        else:
            # Walk down the chain of last children.
            last_child = self
            while isinstance(last_child, Tag) and last_child.contents:
                last_child = last_child.contents[-1]
        if not accept_self and last_child is self:
            last_child = None
        return last_child
    # BS3: Not part of the API!
    _lastRecursiveChild = _last_descendant
    def insert(self, position, new_child):
        """Insert new_child into this tag's contents at the given index,
        splicing all element/sibling pointers so document order stays
        consistent.

        :raises ValueError: if new_child is None or is this tag itself.
        """
        if new_child is None:
            raise ValueError("Cannot insert None into a tag.")
        if new_child is self:
            raise ValueError("Cannot insert a tag into itself.")
        if (isinstance(new_child, basestring)
            and not isinstance(new_child, NavigableString)):
            # Promote plain strings so they can live in the tree.
            new_child = NavigableString(new_child)

        # Clamp the target index to the end of the child list.
        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                current_index = self.index(new_child)
                if current_index < position:
                    # We're moving this element further down the list
                    # of this object's children. That means that when
                    # we extract this element, our target index will
                    # jump down one.
                    position -= 1
            new_child.extract()

        new_child.parent = self
        previous_child = None
        if position == 0:
            # First child: the element parsed before it is this tag.
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            # The element parsed immediately before new_child is the
            # deepest last descendant of the preceding sibling.
            new_child.previous_element = previous_child._last_descendant(False)
        if new_child.previous_element is not None:
            new_child.previous_element.next_element = new_child

        new_childs_last_element = new_child._last_descendant(False)

        if position >= len(self.contents):
            # Appending at the end: the next element in parse order lives
            # outside this tag, found by walking up through ancestors.
            new_child.next_sibling = None

            parent = self
            parents_next_sibling = None
            while parents_next_sibling is None and parent is not None:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if parents_next_sibling is not None:
                    # We found the element that comes next in the document.
                    break
            if parents_next_sibling is not None:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                # The last element of this tag is the last element in
                # the document.
                new_childs_last_element.next_element = None
        else:
            # Inserting in the middle: link to the displaced child.
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling is not None:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child

        if new_childs_last_element.next_element is not None:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
    def find_next(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
    findNext = find_next  # BS3

    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
                      **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.next_elements,
                              **kwargs)
    findAllNext = find_all_next  # BS3

    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._find_one(self.find_next_siblings, name, attrs, text,
                              **kwargs)
    findNextSibling = find_next_sibling  # BS3

    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
                           **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.next_siblings, **kwargs)
    findNextSiblings = find_next_siblings  # BS3
    fetchNextSiblings = find_next_siblings  # BS2

    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._find_one(
            self.find_all_previous, name, attrs, text, **kwargs)
    findPrevious = find_previous  # BS3

    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
                          **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.previous_elements,
                              **kwargs)
    findAllPrevious = find_all_previous  # BS3
    fetchPrevious = find_all_previous  # BS2

    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._find_one(self.find_previous_siblings, name, attrs, text,
                              **kwargs)
    findPreviousSibling = find_previous_sibling  # BS3

    def find_previous_siblings(self, name=None, attrs={}, text=None,
                               limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.previous_siblings, **kwargs)
    findPreviousSiblings = find_previous_siblings  # BS3
    fetchPreviousSiblings = find_previous_siblings  # BS2

    def find_parent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _find_one because findParents takes a different
        # set of arguments.
        r = None
        l = self.find_parents(name, attrs, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findParent = find_parent  # BS3

    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._find_all(name, attrs, None, limit, self.parents,
                              **kwargs)
    findParents = find_parents  # BS3
    fetchParents = find_parents  # BS2

    @property
    def next(self):
        # BS3-style shorthand for next_element.
        return self.next_element

    @property
    def previous(self):
        # BS3-style shorthand for previous_element.
        return self.previous_element
    #These methods do the real heavy lifting.

    def _find_one(self, method, name, attrs, text, **kwargs):
        # Run the corresponding find_all-style method with limit=1 and
        # return the single result, or None if nothing matched.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."

        if text is None and 'string' in kwargs:
            # 'string' is an alias for the 'text' argument.
            text = kwargs['string']
            del kwargs['string']

        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)

        if text is None and not limit and not attrs and not kwargs:
            if name is True or name is None:
                # Optimization to find all tags.
                result = (element for element in generator
                          if isinstance(element, Tag))
                return ResultSet(strainer, result)
            elif isinstance(name, basestring):
                # Optimization to find all tags with a given name.
                result = (element for element in generator
                          if isinstance(element, Tag)
                          and element.name == name)
                return ResultSet(strainer, result)
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-zA-Z0-9][-.a-zA-Z0-9:_]*$')
# /^([a-zA-Z0-9][-.a-zA-Z0-9:_]*)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---------------------------/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>[a-zA-Z0-9][-.a-zA-Z0-9:_]*)?\[(?P<attribute>[\w-]+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated stirng.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
    # Old non-property versions of the generators, for backwards
    # compatibility with BS3.
    def nextGenerator(self):
        # BS3 name for the next_elements property.
        return self.next_elements

    def nextSiblingGenerator(self):
        # BS3 name for the next_siblings property.
        return self.next_siblings

    def previousGenerator(self):
        # BS3 name for the previous_elements property.
        return self.previous_elements

    def previousSiblingGenerator(self):
        # BS3 name for the previous_siblings property.
        return self.previous_siblings

    def parentGenerator(self):
        # BS3 name for the parents property.
        return self.parents
class NavigableString(unicode, PageElement):
    """A Unicode string that is also a node in the parse tree."""

    # Markers wrapped around the string on output; empty for plain text.
    PREFIX = ''
    SUFFIX = ''

    # We can't tell just by looking at a string whether it's contained
    # in an XML document or an HTML document.
    known_xml = None

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            u = unicode.__new__(cls, value)
        else:
            u = unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
        # Initialize the PageElement side (parent/sibling pointers).
        u.setup()
        return u

    def __copy__(self):
        """A copy of a NavigableString has the same contents and class
        as the original, but it is not connected to the parse tree.
        """
        return type(self)(self)

    def __getnewargs__(self):
        # Pickling support: reconstruct from the plain string value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))

    def output_ready(self, formatter="minimal"):
        # Entity-substitute the text, then wrap it in the class's
        # PREFIX/SUFFIX markers.
        output = self.format_string(self, formatter)
        return self.PREFIX + output + self.SUFFIX

    @property
    def name(self):
        # Strings have no tag name.
        return None

    @name.setter
    def name(self, name):
        raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.

    The string will be passed into the formatter (to trigger side effects),
    but the return value will be ignored.
    """

    def output_ready(self, formatter="minimal"):
        """CData strings are passed into the formatter.
        But the return value is ignored."""
        # Run the formatter only for its side effects; emit the raw
        # string wrapped in the subclass's PREFIX/SUFFIX.
        self.format_string(self, formatter)
        return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
    """A CDATA section, rendered as <![CDATA[...]]>."""
    PREFIX = u'<![CDATA['
    SUFFIX = u']]>'

class ProcessingInstruction(PreformattedString):
    """A SGML processing instruction."""
    PREFIX = u'<?'
    SUFFIX = u'>'

class XMLProcessingInstruction(ProcessingInstruction):
    """An XML processing instruction."""
    PREFIX = u'<?'
    SUFFIX = u'?>'

class Comment(PreformattedString):
    """An HTML or XML comment."""
    PREFIX = u'<!--'
    SUFFIX = u'-->'

class Declaration(PreformattedString):
    """An XML declaration."""
    PREFIX = u'<?'
    SUFFIX = u'?>'
class Doctype(PreformattedString):
    """A <!DOCTYPE ...> declaration."""

    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype from a document-type name plus optional
        public and system identifiers."""
        value = name or ''
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            if system_id is not None:
                # A system ID following a public ID has no keyword.
                value += ' "%s"' % system_id
        elif system_id is not None:
            value += ' SYSTEM "%s"' % system_id
        return Doctype(value)

    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>\n'
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    def __init__(self, parser=None, builder=None, name=None, namespace=None,
                 prefix=None, attrs=None, parent=None, previous=None,
                 is_xml=None):
        "Basic constructor."

        if parser is None:
            self.parser_class = None
        else:
            # We don't actually store the parser object: that lets extracted
            # chunks be garbage-collected.
            self.parser_class = parser.__class__
        if name is None:
            raise ValueError("No value provided for new tag's name.")
        self.name = name
        self.namespace = namespace
        self.prefix = prefix
        if builder is not None:
            preserve_whitespace_tags = builder.preserve_whitespace_tags
        else:
            if is_xml:
                # No whitespace-significant tags in XML.
                preserve_whitespace_tags = []
            else:
                preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags
        self.preserve_whitespace_tags = preserve_whitespace_tags
        if attrs is None:
            attrs = {}
        elif attrs:
            if builder is not None and builder.cdata_list_attributes:
                # Let the builder convert multi-valued attributes
                # (e.g. class) into lists.
                attrs = builder._replace_cdata_list_attribute_values(
                    self.name, attrs)
            else:
                # Copy so the caller's dict isn't shared with the tag.
                attrs = dict(attrs)
        else:
            attrs = dict(attrs)

        # If possible, determine ahead of time whether this tag is an
        # XML tag.
        if builder:
            self.known_xml = builder.is_xml
        else:
            self.known_xml = is_xml
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False

        # Set up any substitutions, such as the charset in a META tag.
        if builder is not None:
            builder.set_up_substitutions(self)
            self.can_be_empty_element = builder.can_be_empty_element(name)
        else:
            self.can_be_empty_element = False

    parserClass = _alias("parser_class")  # BS3
def __copy__(self):
"""A copy of a Tag is a new Tag, unconnected to the parse tree.
Its contents are a copy of the old Tag's contents.
"""
clone = type(self)(None, self.builder, self.name, self.namespace,
self.nsprefix, self.attrs, is_xml=self._is_xml)
for attr in ('can_be_empty_element', 'hidden'):
setattr(clone, attr, getattr(self, attr))
for child in self.contents:
clone.append(child.__copy__())
return clone
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
    def _all_strings(self, strip=False, types=(NavigableString, CData)):
        """Yield all strings of certain classes, possibly stripping them.

        By default, yields only NavigableString and CData objects. So
        no comments, processing instructions, etc.
        """
        for descendant in self.descendants:
            if (
                (types is None and not isinstance(descendant, NavigableString))
                or
                (types is not None and type(descendant) not in types)):
                # types=None means "only plain NavigableStrings";
                # otherwise the descendant's exact class must be listed.
                continue
            if strip:
                descendant = descendant.strip()
                if len(descendant) == 0:
                    # Skip strings that were nothing but whitespace.
                    continue
            yield descendant
    strings = property(_all_strings)

    @property
    def stripped_strings(self):
        # Same as .strings, but whitespace-stripped and with
        # whitespace-only strings skipped.
        for string in self._all_strings(True):
            yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        i = self
        while i is not None:
            next = i.next_element
            # Wipe the element's attributes and children so nothing
            # keeps the subtree alive.
            i.__dict__.clear()
            i.contents = []
            i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self.attrs.get(key, default)

    def has_attr(self, key):
        # True if the tag has an attribute with this name.
        return key in self.attrs
    def __hash__(self):
        # Hash on the rendered markup, so equal trees hash equal.
        return str(self).__hash__()

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self.attrs[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        # Membership checks against direct children only.
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self.attrs[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        # pop() with a default makes deleting a missing attribute a no-op.
        self.attrs.pop(key, None)

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        find_all() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return self.find_all(*args, **kwargs)
    def __getattr__(self, tag):
        """Unknown attribute access is treated as a search for a child
        tag: soup.title behaves like soup.find("title")."""
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.endswith('Tag'):
            # BS3: soup.aTag -> "soup.find("a")
            tag_name = tag[:-3]
            warnings.warn(
                '.%sTag is deprecated, use .find("%s") instead.' % (
                    tag_name, tag_name))
            return self.find(tag_name)
        # We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag == "contents":
            return self.find(tag)
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
    def __repr__(self, encoding="unicode-escape"):
        """Renders this tag as a string."""
        if PY3K:
            # "The return value must be a string object", i.e. Unicode
            return self.decode()
        else:
            # "The return value must be a string object", i.e. a bytestring.
            # By convention, the return value of __repr__ should also be
            # an ASCII string.
            return self.encode(encoding)

    def __unicode__(self):
        # Unicode rendering of the whole subtree.
        return self.decode()

    def __str__(self):
        if PY3K:
            return self.decode()
        else:
            return self.encode()

    if PY3K:
        # On Python 3 all three renderings are the same Unicode string.
        __str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None
and self.name not in self.preserve_whitespace_tags
)
def decode(self, indent_level=None,
           eventual_encoding=DEFAULT_OUTPUT_ENCODING,
           formatter="minimal"):
    """Returns a Unicode representation of this tag and its contents.

    :param indent_level: When not None, the rendering is pretty-printed
        and indented this many levels.
    :param eventual_encoding: The tag is destined to be
        encoded into this encoding. This method is _not_
        responsible for performing that encoding. This information
        is passed in so that it can be substituted in if the
        document contains a <META> tag that mentions the document's
        encoding.
    :param formatter: Name of (or callable implementing) the output
        formatter used to process attribute values and text.
    """
    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    # Render the attributes, sorted by name for deterministic output.
    attrs = []
    if self.attrs:
        for key, val in sorted(self.attrs.items()):
            if val is None:
                # Valueless attribute, e.g. <option selected>.
                decoded = key
            else:
                if isinstance(val, list) or isinstance(val, tuple):
                    # Multi-valued attribute (e.g. class): join with spaces.
                    val = ' '.join(val)
                elif not isinstance(val, basestring):
                    val = unicode(val)
                elif (
                    # AttributeValueWithCharsetSubstitution is itself a
                    # unicode subclass, so this branch is only reached for
                    # string values; its encode() substitutes in the
                    # eventual encoding (see CharsetMetaAttributeValue).
                    isinstance(val, AttributeValueWithCharsetSubstitution)
                    and eventual_encoding is not None):
                    val = val.encode(eventual_encoding)

                text = self.format_string(val, formatter)
                decoded = (
                    unicode(key) + '='
                    + EntitySubstitution.quoted_attribute_value(text))
            attrs.append(decoded)
    close = ''
    closeTag = ''

    prefix = ''
    if self.prefix:
        prefix = self.prefix + ":"

    if self.is_empty_element:
        # Empty elements render as <foo/> with no closing tag.
        close = '/'
    else:
        closeTag = '</%s%s>' % (prefix, self.name)

    pretty_print = self._should_pretty_print(indent_level)
    space = ''
    indent_space = ''
    if indent_level is not None:
        indent_space = (' ' * (indent_level - 1))
    if pretty_print:
        space = indent_space
        indent_contents = indent_level + 1
    else:
        indent_contents = None
    contents = self.decode_contents(
        indent_contents, eventual_encoding, formatter)

    if self.hidden:
        # This is the 'document root' object: render contents only,
        # with no enclosing tag.
        s = contents
    else:
        s = []
        attribute_string = ''
        if attrs:
            attribute_string = ' ' + ' '.join(attrs)
        if indent_level is not None:
            # Even if this particular tag is not pretty-printed,
            # we should indent up to the start of the tag.
            s.append(indent_space)
        s.append('<%s%s%s%s>' % (
            prefix, self.name, attribute_string, close))
        if pretty_print:
            s.append("\n")
        s.append(contents)
        if pretty_print and contents and contents[-1] != "\n":
            s.append("\n")
        if pretty_print and closeTag:
            s.append(space)
        s.append(closeTag)
        if indent_level is not None and closeTag and self.next_sibling:
            # Even if this particular tag is not pretty-printed,
            # we're now done with the tag, and we should add a
            # newline if appropriate.
            s.append("\n")
        s = ''.join(s)
    return s
def prettify(self, encoding=None, formatter="minimal"):
    """Pretty-print this tag: Unicode when no encoding is given,
    otherwise a bytestring in that encoding."""
    if encoding is not None:
        return self.encode(encoding, True, formatter=formatter)
    return self.decode(True, formatter=formatter)
def decode_contents(self, indent_level=None,
                    eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                    formatter="minimal"):
    """Renders the contents of this tag as a Unicode string.

    :param indent_level: Each line of the rendering will be
        indented this many spaces.

    :param eventual_encoding: The tag is destined to be
        encoded into this encoding. This method is _not_
        responsible for performing that encoding. This information
        is passed in so that it can be substituted in if the
        document contains a <META> tag that mentions the document's
        encoding.

    :param formatter: The output formatter responsible for converting
        entities to Unicode characters.
    """
    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    pretty_print = (indent_level is not None)
    s = []
    for c in self:
        text = None
        if isinstance(c, NavigableString):
            text = c.output_ready(formatter)
        elif isinstance(c, Tag):
            # Child tags render themselves recursively.
            s.append(c.decode(indent_level, eventual_encoding,
                              formatter))
        # NOTE(review): only 'pre' is special-cased here, whereas
        # _should_pretty_print consults self.preserve_whitespace_tags --
        # possibly an inconsistency worth confirming upstream.
        if text and indent_level and not self.name == 'pre':
            text = text.strip()
        if text:
            if pretty_print and not self.name == 'pre':
                s.append(" " * (indent_level - 1))
            s.append(text)
            if pretty_print and not self.name == 'pre':
                s.append("\n")
    return ''.join(s)
def encode_contents(
    self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
    formatter="minimal"):
    """Renders the contents of this tag as a bytestring.

    :param indent_level: Each line of the rendering will be
        indented this many spaces.
    :param encoding: The bytestring will be in this encoding.
    :param formatter: The output formatter responsible for converting
        entities to Unicode characters.
    """
    rendered = self.decode_contents(indent_level, encoding, formatter)
    return rendered.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    """Deprecated BS3 name for encode_contents()."""
    # An indent level is only meaningful when pretty-printing.
    level = indentLevel if prettyPrint else None
    return self.encode_contents(indent_level=level, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
         **kwargs):
    """Return only the first child of this Tag matching the given
    criteria, or None when nothing matches."""
    results = self.find_all(name, attrs, recursive, text, 1, **kwargs)
    if results:
        return results[0]
    return None
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
             limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria.  You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'.  The
    same is true of the tag name.
    """
    # Recursive searches walk all descendants; otherwise only
    # direct children are considered.
    if recursive:
        generator = self.descendants
    else:
        generator = self.children
    return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all       # BS3
findChildren = find_all  # BS2
#Generator methods
@property
def children(self):
    """An iterator over this tag's direct children."""
    # iter() makes the read-only, one-pass intent explicit.
    return iter(self.contents)  # XXX This seems to be untested.
@property
def descendants(self):
    """Yields every element below this tag, in document order."""
    if not len(self.contents):
        return
    # Walk the next_element chain, stopping once we pass the last
    # descendant of this tag.
    stop_node = self._last_descendant().next_element
    node = self.contents[0]
    while node is not stop_node:
        yield node
        node = node.next_element
# CSS selector code

# Combinator tokens that relate two simple selectors in select().
_selector_combinators = ['>', '+', '~']
# When True, select() prints a step-by-step trace of its matching.
_select_debug = False
# Matches a colon inside a double-quoted string, so pseudo-class
# detection can ignore quoted colons like [attr="a:b"].
quoted_colon = re.compile('"[^"]*:[^"]*"')
def select_one(self, selector):
    """Perform a CSS selection operation on the current element and
    return the first match, or None when nothing matches."""
    matches = self.select(selector, limit=1)
    return matches[0] if matches else None
def select(self, selector, _candidate_generator=None, limit=None):
    """Perform a CSS selection operation on the current element.

    :param selector: A CSS selector string (tag names, #id, .class,
        [attr] selectors, nth-of-type, and the >, ~, + combinators).
    :param _candidate_generator: Internal -- used by recursive calls to
        supply candidates instead of the default descendant walk.
    :param limit: Stop after this many matches.
    :return: A list of matching Tags, without duplicates.
    """
    # Handle grouping selectors if ',' exists, ie: p,a
    if ',' in selector:
        context = []
        for partial_selector in selector.split(','):
            partial_selector = partial_selector.strip()
            if partial_selector == '':
                raise ValueError('Invalid group selection syntax: %s' % selector)
            candidates = self.select(partial_selector, limit=limit)
            for candidate in candidates:
                if candidate not in context:
                    context.append(candidate)

            if limit and len(context) >= limit:
                break
        return context

    # shlex.split handles quoted attribute values containing spaces.
    tokens = shlex.split(selector)
    current_context = [self]

    if tokens[-1] in self._selector_combinators:
        raise ValueError(
            'Final combinator "%s" is missing an argument.' % tokens[-1])

    if self._select_debug:
        print 'Running CSS selector "%s"' % selector

    for index, token in enumerate(tokens):
        new_context = []
        new_context_ids = set([])

        if tokens[index-1] in self._selector_combinators:
            # This token was consumed by the previous combinator. Skip it.
            if self._select_debug:
                print ' Token was consumed by the previous combinator.'
            continue

        if self._select_debug:
            print ' Considering token "%s"' % token
        recursive_candidate_generator = None
        tag_name = None

        # Each operation corresponds to a checker function, a rule
        # for determining whether a candidate matches the
        # selector. Candidates are generated by the active
        # iterator.
        checker = None

        m = self.attribselect_re.match(token)
        if m is not None:
            # Attribute selector
            tag_name, attribute, operator, value = m.groups()
            checker = self._attribute_checker(operator, attribute, value)

        elif '#' in token:
            # ID selector
            tag_name, tag_id = token.split('#', 1)
            def id_matches(tag):
                return tag.get('id', None) == tag_id
            checker = id_matches

        elif '.' in token:
            # Class selector: every listed class must be present.
            tag_name, klass = token.split('.', 1)
            classes = set(klass.split('.'))
            def classes_match(candidate):
                return classes.issubset(candidate.get('class', []))
            checker = classes_match

        elif ':' in token and not self.quoted_colon.search(token):
            # Pseudo-class
            tag_name, pseudo = token.split(':', 1)
            if tag_name == '':
                raise ValueError(
                    "A pseudo-class must be prefixed with a tag name.")
            pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
            # NOTE(review): `found` is assigned but never used below.
            found = []
            if pseudo_attributes is None:
                pseudo_type = pseudo
                pseudo_value = None
            else:
                pseudo_type, pseudo_value = pseudo_attributes.groups()
            if pseudo_type == 'nth-of-type':
                try:
                    pseudo_value = int(pseudo_value)
                except:
                    raise NotImplementedError(
                        'Only numeric values are currently supported for the nth-of-type pseudo-class.')
                if pseudo_value < 1:
                    raise ValueError(
                        'nth-of-type pseudo-class value must be at least 1.')
                # Stateful checker: returns True for the Nth candidate seen.
                class Counter(object):
                    def __init__(self, destination):
                        self.count = 0
                        self.destination = destination

                    def nth_child_of_type(self, tag):
                        self.count += 1
                        if self.count == self.destination:
                            return True
                        else:
                            return False
                checker = Counter(pseudo_value).nth_child_of_type
            else:
                raise NotImplementedError(
                    'Only the following pseudo-classes are implemented: nth-of-type.')

        elif token == '*':
            # Star selector -- matches everything
            pass

        elif token == '>':
            # Run the next token as a CSS selector against the
            # direct children of each tag in the current context.
            recursive_candidate_generator = lambda tag: tag.children
        elif token == '~':
            # Run the next token as a CSS selector against the
            # siblings of each tag in the current context.
            recursive_candidate_generator = lambda tag: tag.next_siblings
        elif token == '+':
            # For each tag in the current context, run the next
            # token as a CSS selector against the tag's next
            # sibling that's a tag.
            def next_tag_sibling(tag):
                yield tag.find_next_sibling(True)
            recursive_candidate_generator = next_tag_sibling

        elif self.tag_name_re.match(token):
            # Just a tag name.
            tag_name = token

        else:
            raise ValueError(
                'Unsupported or invalid CSS selector: "%s"' % token)
        if recursive_candidate_generator:
            # This happens when the selector looks like "> foo".
            #
            # The generator calls select() recursively on every
            # member of the current context, passing in a different
            # candidate generator and a different selector.
            #
            # In the case of "> foo", the candidate generator is
            # one that yields a tag's direct children (">"), and
            # the selector is "foo".
            next_token = tokens[index+1]
            def recursive_select(tag):
                if self._select_debug:
                    print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
                    print '-' * 40
                for i in tag.select(next_token, recursive_candidate_generator):
                    if self._select_debug:
                        print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
                    yield i
                if self._select_debug:
                    print '-' * 40
            _use_candidate_generator = recursive_select
        elif _candidate_generator is None:
            # By default, a tag's candidates are all of its
            # children. If tag_name is defined, only yield tags
            # with that name.
            if self._select_debug:
                # NOTE(review): these branches look inverted -- a truthy
                # tag_name prints "[any]" and a falsy one prints None.
                # Debug-output only, so behavior is unaffected.
                if tag_name:
                    check = "[any]"
                else:
                    check = tag_name
                print ' Default candidate generator, tag name="%s"' % check
            if self._select_debug:
                # This is redundant with later code, but it stops
                # a bunch of bogus tags from cluttering up the
                # debug log.
                def default_candidate_generator(tag):
                    for child in tag.descendants:
                        if not isinstance(child, Tag):
                            continue
                        if tag_name and not child.name == tag_name:
                            continue
                        yield child
                _use_candidate_generator = default_candidate_generator
            else:
                _use_candidate_generator = lambda tag: tag.descendants
        else:
            _use_candidate_generator = _candidate_generator

        # NOTE(review): `count` is assigned but never used below.
        count = 0
        for tag in current_context:
            if self._select_debug:
                print " Running candidate generator on %s %s" % (
                    tag.name, repr(tag.attrs))
            for candidate in _use_candidate_generator(tag):
                if not isinstance(candidate, Tag):
                    continue
                if tag_name and candidate.name != tag_name:
                    continue
                if checker is not None:
                    try:
                        result = checker(candidate)
                    except StopIteration:
                        # The checker has decided we should no longer
                        # run the generator.
                        break
                if checker is None or result:
                    if self._select_debug:
                        print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
                    if id(candidate) not in new_context_ids:
                        # If a tag matches a selector more than once,
                        # don't include it in the context more than once.
                        new_context.append(candidate)
                        new_context_ids.add(id(candidate))
                elif self._select_debug:
                    print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))

        current_context = new_context
        if limit and len(current_context) >= limit:
            current_context = current_context[:limit]

    if self._select_debug:
        print "Final verdict:"
        for i in current_context:
            print " %s %s" % (i.name, i.attrs)
    return current_context
# Old names for backwards compatibility
def childGenerator(self):
    """Deprecated BS3 name for the `children` property."""
    return self.children
def recursiveChildGenerator(self):
    """Deprecated BS3 name for the `descendants` property."""
    return self.descendants
def has_key(self, key):
    """Deprecated alias for has_attr().

    has_key() was misleading because it tested attributes while
    __in__ tested contents; has_key() is also gone from dicts in
    Python 3.
    """
    message = 'has_key is deprecated. Use has_attr("%s") instead.' % key
    warnings.warn(message)
    return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    Each criterion (name, attribute values, text) may be a string,
    bytestring, regular expression, boolean, callable, or iterable of
    these; see _normalize_search_value for how they are canonicalized.
    """

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        # :param name: Criterion for the tag name.
        # :param attrs: Dict mapping attribute names to criteria.  A
        #     non-dict value is treated as a 'class' criterion.
        # :param text: Criterion for the tag's string contents.
        # :param kwargs: Additional attribute criteria; 'class_' is an
        #     alias for the reserved word 'class'.
        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None

        if 'class_' in kwargs:
            # Treat class_="foo" as a search for the 'class'
            # attribute, overriding any non-dict value for attrs.
            kwargs['class'] = kwargs['class_']
            del kwargs['class_']

        if kwargs:
            if attrs:
                # Copy before merging so the caller's dict isn't mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        normalized_attrs = {}
        for key, value in attrs.items():
            normalized_attrs[key] = self._normalize_search_value(value)

        self.attrs = normalized_attrs
        self.text = self._normalize_search_value(text)

    def _normalize_search_value(self, value):
        """Canonicalize a search criterion into Unicode/callable/regex/
        bool/None, or a list of those."""
        # Leave it alone if it's a Unicode string, a callable, a
        # regular expression, a boolean, or None.
        if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
            or isinstance(value, bool) or value is None):
            return value

        # If it's a bytestring, convert it to Unicode, treating it as UTF-8.
        if isinstance(value, bytes):
            return value.decode("utf8")

        # If it's listlike, convert it into a list of strings.
        if hasattr(value, '__iter__'):
            new_value = []
            for v in value:
                if (hasattr(v, '__iter__') and not isinstance(v, bytes)
                    and not isinstance(v, unicode)):
                    # This is almost certainly the user's mistake. In the
                    # interests of avoiding infinite loops, we'll let
                    # it through as-is rather than doing a recursive call.
                    new_value.append(v)
                else:
                    new_value.append(self._normalize_search_value(v))
            return new_value

        # Otherwise, convert it into a Unicode string.
        # The unicode(str()) thing is so this will do the same thing on Python 2
        # and Python 3.
        return unicode(str(value))

    def __str__(self):
        """Render the criteria, mainly for debugging."""
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def search_tag(self, markup_name=None, markup_attrs={}):
        """Check whether a tag (or a name/attrs pair describing one)
        matches this strainer; returns the match or None."""
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            markup_attrs = markup
        # A callable name criterion receives (name, attrs) rather than
        # being matched against the name string -- but only when we were
        # given a name/attrs pair, not a real Tag.
        call_function_with_tag_data = (
            isinstance(self.name, collections.Callable)
            and not isinstance(markup_name, Tag))

        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                # Lazily build a dict view of the attributes; markup_attrs
                # may be a mapping or an iterable of (key, value) pairs.
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        if found and self.text and not self._matches(found.string, self.text):
            found = None
        return found
    searchTag = search_tag

    def search(self, markup):
        """Match this strainer against a single piece of markup (Tag,
        string, or a list of those); returns the match or None."""
        # print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
            for element in markup:
                if isinstance(element, NavigableString) \
                        and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text or self.name or self.attrs:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                isinstance(markup, basestring):
            if not self.name and not self.attrs and self._matches(markup, self.text):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found

    def _matches(self, markup, match_against):
        """Does `markup` satisfy the criterion `match_against`?

        NOTE(review): falls off the end (returning None, i.e. falsy)
        when match_against is a non-string, non-regex, non-iterable --
        presumably never reached after normalization; confirm.
        """
        # print u"Matching %s against %s" % (markup, match_against)
        result = False
        if isinstance(markup, list) or isinstance(markup, tuple):
            # This should only happen when searching a multi-valued attribute
            # like 'class'.
            for item in markup:
                if self._matches(item, match_against):
                    return True
            # We didn't match any particular value of the multivalue
            # attribute, but maybe we match the attribute value when
            # considered as a string.
            if self._matches(' '.join(markup), match_against):
                return True
            return False

        if match_against is True:
            # True matches any non-None value.
            return markup is not None

        if isinstance(match_against, collections.Callable):
            return match_against(markup)

        # Custom callables take the tag as an argument, but all
        # other ways of matching match the tag name as a string.
        if isinstance(markup, Tag):
            markup = markup.name

        # Ensure that `markup` is either a Unicode string, or None.
        markup = self._normalize_search_value(markup)

        if markup is None:
            # None matches None, False, an empty string, an empty list, and so on.
            return not match_against

        if isinstance(match_against, unicode):
            # Exact string match
            return markup == match_against

        if hasattr(match_against, 'match'):
            # Regexp match
            return match_against.search(markup)

        if hasattr(match_against, '__iter__'):
            # The markup must be an exact match against something
            # in the iterable.
            return markup in match_against
class ResultSet(list):
    """A plain list that also remembers the SoupStrainer whose search
    produced it."""

    def __init__(self, source, result=()):
        list.__init__(self, result)
        # The SoupStrainer responsible for these results.
        self.source = source
|
plotting | canvasBackground | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
from PyQt5 import QtCore, QtGui
class CanvasBackground:
    """Paints a widget background (light vertical gradient) and caches the
    rendering in a QPixmap so it is only redrawn when the canvas size
    changes."""

    def __init__(self, *args):
        # Last canvas dimensions for which the cache was rendered.
        self.canvas_width = 2
        self.canvas_height = 2
        self.need_redraw = False
        # Cached rendering of the background.
        self.cache_pixmap = QtGui.QPixmap()

    def drawToCache(self, painter, rect):
        # Re-render the background into a fresh cache pixmap sized to rect.
        # NOTE(review): the `painter` parameter is immediately shadowed by
        # a new QPainter bound to the cache pixmap, so the caller's painter
        # is never used here -- presumably intentional; confirm.
        w = rect.width()
        h = rect.height()
        self.cache_pixmap = QtGui.QPixmap(w, h)
        painter = QtGui.QPainter(self.cache_pixmap)
        self.directDraw(painter, rect)

    def directDraw(self, painter, rect):
        # vertical gradient from top to bottom
        gradient = QtGui.QLinearGradient(rect.topLeft(), rect.bottomLeft())
        gradient.setColorAt(0, QtGui.QColor("#E0E0E0"))
        gradient.setColorAt(0.5, QtCore.Qt.white)
        painter.fillRect(rect, gradient)

    def draw(self, painter, rect):
        # update the cache according to possibly new canvas dimensions
        h = rect.height()
        w = rect.width()
        if w != self.canvas_width:
            self.canvas_width = w
            self.need_redraw = True
        if h != self.canvas_height:
            self.canvas_height = h
            self.need_redraw = True

        if self.need_redraw:
            self.drawToCache(painter, rect)
            self.need_redraw = False

        painter.drawPixmap(0, 0, self.cache_pixmap)
|
cmd | save | from __future__ import absolute_import, print_function
import math
import os
import stat
import sys
import time
from binascii import hexlify
from errno import ENOENT
from bup import client, git, hashsplit, hlinkdb, index, metadata, options
from bup.compat import argv_bytes, environ, nullcontext
from bup.hashsplit import GIT_MODE_FILE, GIT_MODE_SYMLINK, GIT_MODE_TREE
from bup.helpers import (
add_error,
grafted_path_components,
handle_ctrl_c,
hostname,
istty2,
log,
parse_date_or_fatal,
parse_num,
path_components,
progress,
qprogress,
resolve_parent,
saved_errors,
stripped_path_components,
valid_save_name,
)
from bup.io import byte_stream, path_msg
from bup.pwdgrp import userfullname, username
from bup.tree import Stack
# Option specification parsed by bup.options.Options: a usage line,
# then "--", then one option definition per line.
optspec = """
bup save [-tc] [-n name] <filenames...>
--
r,remote= hostname:/path/to/repo of remote repository
t,tree output a tree id
c,commit output a commit id
n,name= name of backup set to update (if any)
d,date= date for the commit (seconds since the epoch)
v,verbose increase log output (can be used more than once)
q,quiet don't show progress meter
smaller= only back up files smaller than n bytes
bwlimit= maximum bytes/sec to transmit to server
f,indexfile= the name of the index file (normally BUP_DIR/bupindex)
strip strips the path to every filename given
strip-path= path-prefix to be stripped when saving
graft= a graft point *old_path*=*new_path* (can be used more than once)
#,compress= set compression level to # (0-9, 9 is highest) [1]
"""
### Test hooks

# When set by the test suite, passed along as metadata.from_path()'s
# after_stat callback for non-directory entries (see save_tree below).
after_nondir_metadata_stat = None

def before_saving_regular_file(name):
    # Test hook: called with the file's name just before its contents
    # are read and hashsplit.  Intentionally a no-op in production.
    return
def opts_from_cmdline(argv):
    """Parse `argv` per optspec and return the resulting options object,
    normalizing values to bytes and validating option combinations
    (calls o.fatal() on invalid usage)."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    # Normalize string-valued options to bytes.
    if opt.indexfile:
        opt.indexfile = argv_bytes(opt.indexfile)
    if opt.name:
        opt.name = argv_bytes(opt.name)
    if opt.remote:
        opt.remote = argv_bytes(opt.remote)
    if opt.strip_path:
        opt.strip_path = argv_bytes(opt.strip_path)

    if not (opt.tree or opt.commit or opt.name):
        o.fatal("use one or more of -t, -c, -n")
    if not extra:
        o.fatal("no filenames given")
    if opt.date:
        opt.date = parse_date_or_fatal(opt.date, o.fatal)
    else:
        opt.date = time.time()

    # Only show a progress meter on an interactive stderr.
    opt.progress = istty2 and not opt.quiet
    opt.smaller = parse_num(opt.smaller or 0)

    if opt.bwlimit:
        opt.bwlimit = parse_num(opt.bwlimit)

    if opt.strip and opt.strip_path:
        o.fatal("--strip is incompatible with --strip-path")

    opt.sources = [argv_bytes(x) for x in extra]

    # Collect --graft old=new pairs; incompatible with --strip options.
    grafts = []
    if opt.graft:
        if opt.strip:
            o.fatal("--strip is incompatible with --graft")
        if opt.strip_path:
            o.fatal("--strip-path is incompatible with --graft")
        for option, parameter in flags:
            if option == "--graft":
                parameter = argv_bytes(parameter)
                splitted_parameter = parameter.split(b"=")
                if len(splitted_parameter) != 2:
                    o.fatal("a graft point must be of the form old_path=new_path")
                old_path, new_path = splitted_parameter
                if not (old_path and new_path):
                    o.fatal("a graft point cannot be empty")
                grafts.append((resolve_parent(old_path), resolve_parent(new_path)))
    opt.grafts = grafts

    opt.is_reverse = environ.get(b"BUP_SERVER_REVERSE")
    if opt.is_reverse and opt.remote:
        o.fatal("don't use -r in reverse mode; it's automatic")

    if opt.name and not valid_save_name(opt.name):
        o.fatal("'%s' is not a valid branch name" % path_msg(opt.name))
    return opt
def save_tree(opt, reader, hlink_db, msr, w):
    """Walk the index via `reader`, writing blob/tree objects through the
    pack writer `w`, and return the id of the resulting root tree.

    :param opt: parsed options (see opts_from_cmdline).
    :param reader: index reader yielding (transname, entry) pairs.
    :param hlink_db: hardlink database used to resolve link targets.
    :param msr: metadata store reader for entries that are already saved.
    :param w: pack writer with new_blob/new_tree/exists methods.
    """
    # Metadata is stored in a file named .bupm in each directory. The
    # first metadata entry will be the metadata for the current directory.
    # The remaining entries will be for each of the other directory
    # elements, in the order they're listed in the index.
    #
    # Since the git tree elements are sorted according to
    # git.shalist_item_sort_key, the metalist items are accumulated as
    # (sort_key, metadata) tuples, and then sorted when the .bupm file is
    # created. The sort_key should have been computed using the element's
    # mangled name and git mode (after hashsplitting), but the code isn't
    # actually doing that but rather uses the element's real name and mode.
    # This makes things a bit more difficult when reading it back, see
    # vfs.ordered_tree_entries().

    # Maintain a stack of information representing the current location
    # in the tree being built.
    stack = Stack()

    # Hack around lack of nonlocal vars in python 2
    _nonlocal = {}
    _nonlocal["count"] = 0
    _nonlocal["subcount"] = 0
    _nonlocal["lastremain"] = None

    def progress_report(n):
        # Update and print the "Saving: ..." progress line after n more
        # bytes have been processed.
        _nonlocal["subcount"] += n
        cc = _nonlocal["count"] + _nonlocal["subcount"]
        pct = total and (cc * 100.0 / total) or 0
        now = time.time()
        elapsed = now - tstart
        kps = elapsed and int(cc / 1024.0 / elapsed)
        # Round the rate to one significant-ish digit to keep it stable.
        kps_frac = 10 ** int(math.log(kps + 1, 10) - 1)
        kps = int(kps / kps_frac) * kps_frac
        if cc:
            remain = elapsed * 1.0 / cc * (total - cc)
        else:
            remain = 0.0
        # Dampen small upward jitter (<5%) in the remaining-time estimate.
        if (
            _nonlocal["lastremain"]
            and (remain > _nonlocal["lastremain"])
            and ((remain - _nonlocal["lastremain"]) / _nonlocal["lastremain"] < 0.05)
        ):
            remain = _nonlocal["lastremain"]
        else:
            _nonlocal["lastremain"] = remain
        hours = int(remain / 60 / 60)
        mins = int(remain / 60 - hours * 60)
        secs = int(remain - hours * 60 * 60 - mins * 60)
        if elapsed < 30:
            # Too early for a meaningful rate/ETA.
            remainstr = ""
            kpsstr = ""
        else:
            kpsstr = "%dk/s" % kps
            if hours:
                remainstr = "%dh%dm" % (hours, mins)
            elif mins:
                remainstr = "%dm%d" % (mins, secs)
            else:
                remainstr = "%ds" % secs
        qprogress(
            "Saving: %.2f%% (%d/%dk, %d/%d files) %s %s\r"
            % (pct, cc / 1024, total / 1024, fcount, ftotal, remainstr, kpsstr)
        )

    def already_saved(ent):
        # Truthy (the sha) iff the entry is valid and its object already
        # exists in the destination.
        return ent.is_valid() and w.exists(ent.sha) and ent.sha

    def wantrecurse_pre(ent):
        return not already_saved(ent)

    def wantrecurse_during(ent):
        return not already_saved(ent) or ent.sha_missing()

    def find_hardlink_target(hlink_db, ent):
        # Resolve the canonical path for a multiply-linked non-directory,
        # if one is recorded; otherwise None.
        if hlink_db and not stat.S_ISDIR(ent.mode) and ent.nlink > 1:
            link_paths = hlink_db.node_paths(ent.dev, ent.ino)
            if link_paths:
                return link_paths[0]
        return None

    # First pass (progress mode only): count total bytes/files to save.
    total = ftotal = 0
    if opt.progress:
        for transname, ent in reader.filter(opt.sources, wantrecurse=wantrecurse_pre):
            if not (ftotal % 10024):
                qprogress("Reading index: %d\r" % ftotal)
            exists = ent.exists()
            hashvalid = already_saved(ent)
            ent.set_sha_missing(not hashvalid)
            if not opt.smaller or ent.size < opt.smaller:
                if exists and not hashvalid:
                    total += ent.size
            ftotal += 1
        progress("Reading index: %d, done.\n" % ftotal)

    hashsplit.progress_callback = progress_report

    # Root collisions occur when strip or graft options map more than one
    # path to the same directory (paths which originally had separate
    # parents). When that situation is detected, use empty metadata for
    # the parent. Otherwise, use the metadata for the common parent.
    # Collision example: "bup save ... --strip /foo /foo/bar /bar".

    # FIXME: Add collision tests, or handle collisions some other way.

    # FIXME: Detect/handle strip/graft name collisions (other than root),
    # i.e. if '/foo/bar' and '/bar' both map to '/'.
    first_root = None
    root_collision = None
    tstart = time.time()
    fcount = 0
    lastskip_name = None
    lastdir = b""
    for transname, ent in reader.filter(opt.sources, wantrecurse=wantrecurse_during):
        (dir, file) = os.path.split(ent.name)
        exists = ent.flags & index.IX_EXISTS
        hashvalid = already_saved(ent)
        wasmissing = ent.sha_missing()
        oldsize = ent.size
        if opt.verbose:
            # Status letter: D=deleted, A=added, M=modified, ' '=unchanged.
            if not exists:
                status = "D"
            elif not hashvalid:
                if ent.sha == index.EMPTY_SHA:
                    status = "A"
                else:
                    status = "M"
            else:
                status = " "
            if opt.verbose >= 2:
                log("%s %-70s\n" % (status, path_msg(ent.name)))
            elif not stat.S_ISDIR(ent.mode) and lastdir != dir:
                if not lastdir.startswith(dir):
                    log("%s %-70s\n" % (status, path_msg(os.path.join(dir, b""))))
                lastdir = dir

        if opt.progress:
            progress_report(0)
        fcount += 1

        if not exists:
            continue
        if opt.smaller and ent.size >= opt.smaller:
            if exists and not hashvalid:
                if opt.verbose:
                    log('skipping large file "%s"\n' % path_msg(ent.name))
                lastskip_name = ent.name
            continue

        assert dir.startswith(b"/")
        if opt.strip:
            dirp = stripped_path_components(dir, opt.sources)
        elif opt.strip_path:
            dirp = stripped_path_components(dir, [opt.strip_path])
        elif opt.grafts:
            dirp = grafted_path_components(opt.grafts, dir)
        else:
            dirp = path_components(dir)

        # At this point, dirp contains a representation of the archive
        # path that looks like [(archive_dir_name, real_fs_path), ...].
        # So given "bup save ... --strip /foo/bar /foo/bar/baz", dirp
        # might look like this at some point:
        # [('', '/foo/bar'), ('baz', '/foo/bar/baz'), ...].

        # This dual representation supports stripping/grafting, where the
        # archive path may not have a direct correspondence with the
        # filesystem. The root directory is represented by an initial
        # component named '', and any component that doesn't have a
        # corresponding filesystem directory (due to grafting, for
        # example) will have a real_fs_path of None, i.e. [('', None),
        # ...].

        if first_root == None:
            first_root = dirp[0]
        elif first_root != dirp[0]:
            root_collision = True

        # If switching to a new sub-tree, finish the current sub-tree.
        while stack.path() > [x[0] for x in dirp]:
            _ = stack.pop(w)

        # If switching to a new sub-tree, start a new sub-tree.
        for path_component in dirp[len(stack) :]:
            dir_name, fs_path = path_component
            # Not indexed, so just grab the FS metadata or use empty metadata.
            try:
                meta = (
                    metadata.from_path(fs_path, normalized=True)
                    if fs_path
                    else metadata.Metadata()
                )
            except (OSError, IOError) as e:
                add_error(e)
                lastskip_name = dir_name
                meta = metadata.Metadata()
            stack.push(dir_name, meta)

        if not file:
            if len(stack) == 1:
                continue  # We're at the top level -- keep the current root dir
            # Since there's no filename, this is a subdir -- finish it.
            oldtree = already_saved(ent)  # may be None
            newtree = stack.pop(w, override_tree=oldtree)
            if not oldtree:
                if lastskip_name and lastskip_name.startswith(ent.name):
                    ent.invalidate()
                else:
                    ent.validate(GIT_MODE_TREE, newtree)
                ent.repack()
            if exists and wasmissing:
                _nonlocal["count"] += oldsize
            continue

        # it's not a directory
        if hashvalid:
            # Already saved: reuse the stored metadata and sha.
            meta = msr.metadata_at(ent.meta_ofs)
            meta.hardlink_target = find_hardlink_target(hlink_db, ent)
            # Restore the times that were cleared to 0 in the metastore.
            (meta.atime, meta.mtime, meta.ctime) = (ent.atime, ent.mtime, ent.ctime)
            stack.append_to_current(file, ent.mode, ent.gitmode, ent.sha, meta)
        else:
            id = None
            hlink = find_hardlink_target(hlink_db, ent)
            try:
                meta = metadata.from_path(
                    ent.name,
                    hardlink_target=hlink,
                    normalized=True,
                    after_stat=after_nondir_metadata_stat,
                )
            except (OSError, IOError) as e:
                add_error(e)
                lastskip_name = ent.name
                continue
            if stat.S_IFMT(ent.mode) != stat.S_IFMT(meta.mode):
                # The mode changed since we indexed the file, this is bad.
                # This can cause two issues:
                # 1) We e.g. think the file is a regular file, but now it's
                #    something else (a device, socket, FIFO or symlink, etc.)
                #    and _read_ from it when we shouldn't.
                # 2) We then record it as valid, but don't update the index
                #    metadata, and on a subsequent save it has 'hashvalid'
                #    but is recorded as the file type from the index, when
                #    the content is something else ...
                # Avoid all of these consistency issues by just skipping such
                # things - it really ought to not happen anyway.
                add_error(
                    "%s: mode changed since indexing, skipping." % path_msg(ent.name)
                )
                lastskip_name = ent.name
                continue
            if stat.S_ISREG(ent.mode):
                try:
                    # If the file changes while we're reading it, then our reading
                    # may stop at some point, but the stat() above may have gotten
                    # a different size already. Recalculate the meta size so that
                    # the repository records the accurate size in the metadata, even
                    # if the other stat() data might be slightly older than the file
                    # content (which we can't fix, this is inherently racy, but we
                    # can prevent the size mismatch.)
                    meta.size = 0

                    def new_blob(data):
                        meta.size += len(data)
                        return w.new_blob(data)

                    before_saving_regular_file(ent.name)
                    with hashsplit.open_noatime(ent.name) as f:
                        (mode, id) = hashsplit.split_to_blob_or_tree(
                            new_blob, w.new_tree, [f], keep_boundaries=False
                        )
                except (IOError, OSError) as e:
                    add_error("%s: %s" % (ent.name, e))
                    lastskip_name = ent.name
            elif stat.S_ISDIR(ent.mode):
                assert 0  # handled above
            elif stat.S_ISLNK(ent.mode):
                mode, id = (GIT_MODE_SYMLINK, w.new_blob(meta.symlink_target))
            else:
                # Everything else should be fully described by its
                # metadata, so just record an empty blob, so the paths
                # in the tree and .bupm will match up.
                (mode, id) = (GIT_MODE_FILE, w.new_blob(b""))

            if id:
                ent.validate(mode, id)
                ent.repack()
                stack.append_to_current(file, ent.mode, ent.gitmode, id, meta)

        if exists and wasmissing:
            _nonlocal["count"] += oldsize
            _nonlocal["subcount"] = 0

    if opt.progress:
        pct = total and _nonlocal["count"] * 100.0 / total or 100
        progress(
            "Saving: %.2f%% (%d/%dk, %d/%d files), done. \n"
            % (pct, _nonlocal["count"] / 1024, total / 1024, fcount, ftotal)
        )

    # pop all parts above the root folder
    while len(stack) > 1:
        stack.pop(w)

    # Finish the root directory.
    # When there's a collision, use empty metadata for the root.
    root_meta = metadata.Metadata() if root_collision else None
    tree = stack.pop(w, override_meta=root_meta)

    return tree
def commit_tree(tree, parent, date, argv, writer):
    """Create a commit for *tree* via *writer* and return its id.

    The commit message records the command line that produced the save;
    the current user/host is used for both author and committer, stamped
    with *date*.
    """
    # Strip b prefix from python 3 bytes reprs to preserve previous format
    quoted_args = (repr(argv_bytes(arg))[1:].encode("ascii") for arg in argv)
    msgcmd = b"[%s]" % b", ".join(quoted_args)
    msg = b"bup save\n\nGenerated by command:\n%s\n" % msgcmd
    userline = b"%s <%s@%s>" % (userfullname(), username(), hostname())
    return writer.new_commit(
        tree, parent, userline, date, None, userline, date, None, msg
    )
def main(argv):
    """Entry point for the save command.

    Parses the command line, saves the indexed files as a git tree
    (locally or to a remote destination), optionally prints the tree
    and/or commit hashes, and updates the branch ref when a name was
    given.  Exits nonzero on client errors, a missing index, or if any
    errors were recorded while saving.
    """
    handle_ctrl_c()
    opt = opts_from_cmdline(argv)
    client.bwlimit = opt.bwlimit
    git.check_repo_or_die()
    remote_dest = opt.remote or opt.is_reverse
    if not remote_dest:
        # Local save: talk to the git layer directly; there is no client
        # connection to manage, so use a no-op context manager.
        repo = git
        cli = nullcontext()
    else:
        try:
            cli = repo = client.Client(opt.remote)
        except client.ClientError as e:
            log("error: %s" % e)
            sys.exit(1)
    # cli creation must be last nontrivial command in each if clause above
    with cli:
        if not remote_dest:
            w = git.PackWriter(compression_level=opt.compress)
        else:
            w = cli.new_packwriter(compression_level=opt.compress)
        with w:
            # Hashes are written as raw bytes; flush any buffered text
            # output first so the two streams don't interleave badly.
            sys.stdout.flush()
            out = byte_stream(sys.stdout)
            if opt.name:
                refname = b"refs/heads/%s" % opt.name
                parent = repo.read_ref(refname)
            else:
                refname = parent = None
            # Fall back to the repository's default index location.
            indexfile = opt.indexfile or git.repo(b"bupindex")
            try:
                msr = index.MetaStoreReader(indexfile + b".meta")
            except IOError as ex:
                # ENOENT just means "bup index" hasn't been run yet;
                # anything else is unexpected and re-raised.
                if ex.errno != ENOENT:
                    raise
                log(
                    "error: cannot access %r; have you run bup index?"
                    % path_msg(indexfile)
                )
                sys.exit(1)
            with msr, hlinkdb.HLinkDB(indexfile + b".hlink") as hlink_db, index.Reader(
                indexfile
            ) as reader:
                tree = save_tree(opt, reader, hlink_db, msr, w)
            if opt.tree:
                out.write(hexlify(tree))
                out.write(b"\n")
            if opt.commit or opt.name:
                commit = commit_tree(tree, parent, opt.date, argv, w)
                if opt.commit:
                    out.write(hexlify(commit))
                    out.write(b"\n")
        # packwriter must be closed before we can update the ref
        if opt.name:
            repo.update_ref(refname, commit, parent)
    if saved_errors:
        log("WARNING: %d errors encountered while saving.\n" % len(saved_errors))
        sys.exit(1)
|
meshes | mesh_multibodybeam_tria6 | def create_nodes(femmesh):
# nodes
femmesh.addNode(100.0, 10.0, 0.0, 1)
femmesh.addNode(100.0, 0.0, 0.0, 2)
femmesh.addNode(80.0, 10.0, 0.0, 3)
femmesh.addNode(80.0, 0.0, 0.0, 4)
femmesh.addNode(60.0, 10.0, 0.0, 5)
femmesh.addNode(60.0, 0.0, 0.0, 6)
femmesh.addNode(40.0, 10.0, 0.0, 7)
femmesh.addNode(40.0, 0.0, 0.0, 8)
femmesh.addNode(20.0, 10.0, 0.0, 9)
femmesh.addNode(20.0, 0.0, 0.0, 10)
femmesh.addNode(0.0, 10.0, 0.0, 11)
femmesh.addNode(0.0, 0.0, 0.0, 12)
femmesh.addNode(80.0, 2.0, 0.0, 13)
femmesh.addNode(80.0, 4.0, 0.0, 14)
femmesh.addNode(80.0, 6.0, 0.0, 15)
femmesh.addNode(80.0, 8.0, 0.0, 16)
femmesh.addNode(82.0, 0.0, 0.0, 17)
femmesh.addNode(84.0, 0.0, 0.0, 18)
femmesh.addNode(86.0, 0.0, 0.0, 19)
femmesh.addNode(88.0, 0.0, 0.0, 20)
femmesh.addNode(90.0, 0.0, 0.0, 21)
femmesh.addNode(92.0, 0.0, 0.0, 22)
femmesh.addNode(94.0, 0.0, 0.0, 23)
femmesh.addNode(96.0, 0.0, 0.0, 24)
femmesh.addNode(98.0, 0.0, 0.0, 25)
femmesh.addNode(82.0, 10.0, 0.0, 26)
femmesh.addNode(84.0, 10.0, 0.0, 27)
femmesh.addNode(86.0, 10.0, 0.0, 28)
femmesh.addNode(88.0, 10.0, 0.0, 29)
femmesh.addNode(90.0, 10.0, 0.0, 30)
femmesh.addNode(92.0, 10.0, 0.0, 31)
femmesh.addNode(94.0, 10.0, 0.0, 32)
femmesh.addNode(96.0, 10.0, 0.0, 33)
femmesh.addNode(98.0, 10.0, 0.0, 34)
femmesh.addNode(100.0, 2.0, 0.0, 35)
femmesh.addNode(100.0, 4.0, 0.0, 36)
femmesh.addNode(100.0, 6.0, 0.0, 37)
femmesh.addNode(100.0, 8.0, 0.0, 38)
femmesh.addNode(60.0, 2.0, 0.0, 39)
femmesh.addNode(60.0, 4.0, 0.0, 40)
femmesh.addNode(60.0, 6.0, 0.0, 41)
femmesh.addNode(60.0, 8.0, 0.0, 42)
femmesh.addNode(62.0, 0.0, 0.0, 43)
femmesh.addNode(64.0, 0.0, 0.0, 44)
femmesh.addNode(66.0, 0.0, 0.0, 45)
femmesh.addNode(68.0, 0.0, 0.0, 46)
femmesh.addNode(70.0, 0.0, 0.0, 47)
femmesh.addNode(72.0, 0.0, 0.0, 48)
femmesh.addNode(74.0, 0.0, 0.0, 49)
femmesh.addNode(76.0, 0.0, 0.0, 50)
femmesh.addNode(78.0, 0.0, 0.0, 51)
femmesh.addNode(62.0, 10.0, 0.0, 52)
femmesh.addNode(64.0, 10.0, 0.0, 53)
femmesh.addNode(66.0, 10.0, 0.0, 54)
femmesh.addNode(68.0, 10.0, 0.0, 55)
femmesh.addNode(70.0, 10.0, 0.0, 56)
femmesh.addNode(72.0, 10.0, 0.0, 57)
femmesh.addNode(74.0, 10.0, 0.0, 58)
femmesh.addNode(76.0, 10.0, 0.0, 59)
femmesh.addNode(78.0, 10.0, 0.0, 60)
femmesh.addNode(40.0, 2.0, 0.0, 61)
femmesh.addNode(40.0, 4.0, 0.0, 62)
femmesh.addNode(40.0, 6.0, 0.0, 63)
femmesh.addNode(40.0, 8.0, 0.0, 64)
femmesh.addNode(42.0, 0.0, 0.0, 65)
femmesh.addNode(44.0, 0.0, 0.0, 66)
femmesh.addNode(46.0, 0.0, 0.0, 67)
femmesh.addNode(48.0, 0.0, 0.0, 68)
femmesh.addNode(50.0, 0.0, 0.0, 69)
femmesh.addNode(52.0, 0.0, 0.0, 70)
femmesh.addNode(54.0, 0.0, 0.0, 71)
femmesh.addNode(56.0, 0.0, 0.0, 72)
femmesh.addNode(58.0, 0.0, 0.0, 73)
femmesh.addNode(42.0, 10.0, 0.0, 74)
femmesh.addNode(44.0, 10.0, 0.0, 75)
femmesh.addNode(46.0, 10.0, 0.0, 76)
femmesh.addNode(48.0, 10.0, 0.0, 77)
femmesh.addNode(50.0, 10.0, 0.0, 78)
femmesh.addNode(52.0, 10.0, 0.0, 79)
femmesh.addNode(54.0, 10.0, 0.0, 80)
femmesh.addNode(56.0, 10.0, 0.0, 81)
femmesh.addNode(58.0, 10.0, 0.0, 82)
femmesh.addNode(20.0, 2.0, 0.0, 83)
femmesh.addNode(20.0, 4.0, 0.0, 84)
femmesh.addNode(20.0, 6.0, 0.0, 85)
femmesh.addNode(20.0, 8.0, 0.0, 86)
femmesh.addNode(22.0, 0.0, 0.0, 87)
femmesh.addNode(24.0, 0.0, 0.0, 88)
femmesh.addNode(26.0, 0.0, 0.0, 89)
femmesh.addNode(28.0, 0.0, 0.0, 90)
femmesh.addNode(30.0, 0.0, 0.0, 91)
femmesh.addNode(32.0, 0.0, 0.0, 92)
femmesh.addNode(34.0, 0.0, 0.0, 93)
femmesh.addNode(36.0, 0.0, 0.0, 94)
femmesh.addNode(38.0, 0.0, 0.0, 95)
femmesh.addNode(22.0, 10.0, 0.0, 96)
femmesh.addNode(24.0, 10.0, 0.0, 97)
femmesh.addNode(26.0, 10.0, 0.0, 98)
femmesh.addNode(28.0, 10.0, 0.0, 99)
femmesh.addNode(30.0, 10.0, 0.0, 100)
femmesh.addNode(32.0, 10.0, 0.0, 101)
femmesh.addNode(34.0, 10.0, 0.0, 102)
femmesh.addNode(36.0, 10.0, 0.0, 103)
femmesh.addNode(38.0, 10.0, 0.0, 104)
femmesh.addNode(0.0, 2.0, 0.0, 105)
femmesh.addNode(0.0, 4.0, 0.0, 106)
femmesh.addNode(0.0, 6.0, 0.0, 107)
femmesh.addNode(0.0, 8.0, 0.0, 108)
femmesh.addNode(2.0, 0.0, 0.0, 109)
femmesh.addNode(4.0, 0.0, 0.0, 110)
femmesh.addNode(6.0, 0.0, 0.0, 111)
femmesh.addNode(8.0, 0.0, 0.0, 112)
femmesh.addNode(10.0, 0.0, 0.0, 113)
femmesh.addNode(12.0, 0.0, 0.0, 114)
femmesh.addNode(14.0, 0.0, 0.0, 115)
femmesh.addNode(16.0, 0.0, 0.0, 116)
femmesh.addNode(18.0, 0.0, 0.0, 117)
femmesh.addNode(2.0, 10.0, 0.0, 118)
femmesh.addNode(4.0, 10.0, 0.0, 119)
femmesh.addNode(6.0, 10.0, 0.0, 120)
femmesh.addNode(8.0, 10.0, 0.0, 121)
femmesh.addNode(10.0, 10.0, 0.0, 122)
femmesh.addNode(12.0, 10.0, 0.0, 123)
femmesh.addNode(14.0, 10.0, 0.0, 124)
femmesh.addNode(16.0, 10.0, 0.0, 125)
femmesh.addNode(18.0, 10.0, 0.0, 126)
femmesh.addNode(81.5392197661, 3.15476964347, 0.0, 127)
femmesh.addNode(81.5180910518, 4.78237613921, 0.0, 128)
femmesh.addNode(81.5662074653, 6.5724608991, 0.0, 129)
femmesh.addNode(81.7599576087, 8.30022931513, 0.0, 130)
femmesh.addNode(82.6716087772, 1.68867528699, 0.0, 131)
femmesh.addNode(86.8424063084, 1.7505519231, 0.0, 132)
femmesh.addNode(90.9337698074, 1.81058108381, 0.0, 133)
femmesh.addNode(95.0021530838, 1.84678868604, 0.0, 134)
femmesh.addNode(83.6543829612, 8.41370661197, 0.0, 135)
femmesh.addNode(85.5913872899, 8.4145034167, 0.0, 136)
femmesh.addNode(87.542122618, 8.41922038693, 0.0, 137)
femmesh.addNode(89.4732015205, 8.44983847147, 0.0, 138)
femmesh.addNode(91.3583903926, 8.52850937991, 0.0, 139)
femmesh.addNode(93.3029865146, 8.48448880356, 0.0, 140)
femmesh.addNode(95.1639533281, 8.42042019762, 0.0, 141)
femmesh.addNode(97.2474140121, 8.38478217035, 0.0, 142)
femmesh.addNode(98.2900931755, 2.63964686733, 0.0, 143)
femmesh.addNode(97.999287578, 6.57227423373, 0.0, 144)
femmesh.addNode(84.7606532388, 1.76672278431, 0.0, 145)
femmesh.addNode(88.895020277, 1.76168886648, 0.0, 146)
femmesh.addNode(93.0047606408, 1.90889561608, 0.0, 147)
femmesh.addNode(96.7744342217, 1.58124689867, 0.0, 148)
femmesh.addNode(98.1488197125, 4.65182661547, 0.0, 149)
femmesh.addNode(82.9199994949, 5.38509185329, 0.0, 150)
femmesh.addNode(83.3204951963, 6.84341771921, 0.0, 151)
femmesh.addNode(96.1929644973, 5.30378297199, 0.0, 152)
femmesh.addNode(91.8553990232, 3.66067934457, 0.0, 153)
femmesh.addNode(89.7699972769, 3.52313457422, 0.0, 154)
femmesh.addNode(96.4093344424, 3.38843043534, 0.0, 155)
femmesh.addNode(85.645954722, 3.48402622746, 0.0, 156)
femmesh.addNode(83.420207809, 3.60212852561, 0.0, 157)
femmesh.addNode(85.1673196297, 6.81843018946, 0.0, 158)
femmesh.addNode(87.0998428052, 6.80994755275, 0.0, 159)
femmesh.addNode(89.0145656138, 6.8362902391, 0.0, 160)
femmesh.addNode(90.8650584289, 6.92970475583, 0.0, 161)
femmesh.addNode(92.5068126228, 7.17021674032, 0.0, 162)
femmesh.addNode(87.7263576556, 3.4786736161, 0.0, 163)
femmesh.addNode(96.1925323214, 7.02335670706, 0.0, 164)
femmesh.addNode(94.1972202057, 4.03486740509, 0.0, 165)
femmesh.addNode(94.3378881048, 6.51819092042, 0.0, 166)
femmesh.addNode(90.4562016497, 5.25107562191, 0.0, 167)
femmesh.addNode(84.5945917648, 5.20830901224, 0.0, 168)
femmesh.addNode(86.5147139128, 5.1713602248, 0.0, 169)
femmesh.addNode(92.4420536966, 5.50235414588, 0.0, 170)
femmesh.addNode(88.5036576232, 5.18447404951, 0.0, 171)
femmesh.addNode(61.5392197661, 3.15476964347, 0.0, 172)
femmesh.addNode(61.5180910518, 4.78237613921, 0.0, 173)
femmesh.addNode(61.5662074653, 6.5724608991, 0.0, 174)
femmesh.addNode(61.7599576087, 8.30022931513, 0.0, 175)
femmesh.addNode(62.6716087772, 1.68867528699, 0.0, 176)
femmesh.addNode(66.8424063084, 1.7505519231, 0.0, 177)
femmesh.addNode(70.9337698074, 1.81058108381, 0.0, 178)
femmesh.addNode(75.0021530838, 1.84678868604, 0.0, 179)
femmesh.addNode(63.6543829612, 8.41370661197, 0.0, 180)
femmesh.addNode(65.5913872899, 8.4145034167, 0.0, 181)
femmesh.addNode(67.542122618, 8.41922038693, 0.0, 182)
femmesh.addNode(69.4732015205, 8.44983847147, 0.0, 183)
femmesh.addNode(71.3583903926, 8.52850937991, 0.0, 184)
femmesh.addNode(73.3029865146, 8.48448880356, 0.0, 185)
femmesh.addNode(75.1639533281, 8.42042019762, 0.0, 186)
femmesh.addNode(77.2474140121, 8.38478217035, 0.0, 187)
femmesh.addNode(78.2900931755, 2.63964686733, 0.0, 188)
femmesh.addNode(77.999287578, 6.57227423373, 0.0, 189)
femmesh.addNode(64.7606532388, 1.76672278431, 0.0, 190)
femmesh.addNode(68.895020277, 1.76168886648, 0.0, 191)
femmesh.addNode(73.0047606408, 1.90889561608, 0.0, 192)
femmesh.addNode(76.7744342217, 1.58124689867, 0.0, 193)
femmesh.addNode(78.1488197125, 4.65182661547, 0.0, 194)
femmesh.addNode(62.9199994949, 5.38509185329, 0.0, 195)
femmesh.addNode(63.3204951963, 6.84341771921, 0.0, 196)
femmesh.addNode(76.1929644973, 5.30378297199, 0.0, 197)
femmesh.addNode(71.8553990232, 3.66067934457, 0.0, 198)
femmesh.addNode(69.7699972769, 3.52313457422, 0.0, 199)
femmesh.addNode(76.4093344424, 3.38843043534, 0.0, 200)
femmesh.addNode(65.645954722, 3.48402622746, 0.0, 201)
femmesh.addNode(63.420207809, 3.60212852561, 0.0, 202)
femmesh.addNode(65.1673196297, 6.81843018946, 0.0, 203)
femmesh.addNode(67.0998428052, 6.80994755275, 0.0, 204)
femmesh.addNode(69.0145656138, 6.8362902391, 0.0, 205)
femmesh.addNode(70.8650584289, 6.92970475583, 0.0, 206)
femmesh.addNode(72.5068126228, 7.17021674032, 0.0, 207)
femmesh.addNode(67.7263576556, 3.4786736161, 0.0, 208)
femmesh.addNode(76.1925323214, 7.02335670706, 0.0, 209)
femmesh.addNode(74.1972202057, 4.03486740509, 0.0, 210)
femmesh.addNode(74.3378881048, 6.51819092042, 0.0, 211)
femmesh.addNode(70.4562016497, 5.25107562191, 0.0, 212)
femmesh.addNode(64.5945917648, 5.20830901224, 0.0, 213)
femmesh.addNode(66.5147139128, 5.1713602248, 0.0, 214)
femmesh.addNode(72.4420536966, 5.50235414588, 0.0, 215)
femmesh.addNode(68.5036576232, 5.18447404951, 0.0, 216)
femmesh.addNode(41.5392197661, 3.15476964347, 0.0, 217)
femmesh.addNode(41.5180910518, 4.78237613921, 0.0, 218)
femmesh.addNode(41.5662074653, 6.5724608991, 0.0, 219)
femmesh.addNode(41.7599576087, 8.30022931513, 0.0, 220)
femmesh.addNode(42.6716087772, 1.68867528699, 0.0, 221)
femmesh.addNode(46.8424063084, 1.7505519231, 0.0, 222)
femmesh.addNode(50.9337698074, 1.81058108381, 0.0, 223)
femmesh.addNode(55.0021530838, 1.84678868604, 0.0, 224)
femmesh.addNode(43.6543829612, 8.41370661197, 0.0, 225)
femmesh.addNode(45.5913872899, 8.4145034167, 0.0, 226)
femmesh.addNode(47.542122618, 8.41922038693, 0.0, 227)
femmesh.addNode(49.4732015205, 8.44983847147, 0.0, 228)
femmesh.addNode(51.3583903926, 8.52850937991, 0.0, 229)
femmesh.addNode(53.3029865146, 8.48448880356, 0.0, 230)
femmesh.addNode(55.1639533281, 8.42042019762, 0.0, 231)
femmesh.addNode(57.2474140121, 8.38478217035, 0.0, 232)
femmesh.addNode(58.2900931755, 2.63964686733, 0.0, 233)
femmesh.addNode(57.999287578, 6.57227423373, 0.0, 234)
femmesh.addNode(44.7606532388, 1.76672278431, 0.0, 235)
femmesh.addNode(48.895020277, 1.76168886648, 0.0, 236)
femmesh.addNode(53.0047606408, 1.90889561608, 0.0, 237)
femmesh.addNode(56.7744342217, 1.58124689867, 0.0, 238)
femmesh.addNode(58.1488197125, 4.65182661547, 0.0, 239)
femmesh.addNode(42.9199994949, 5.38509185329, 0.0, 240)
femmesh.addNode(43.3204951963, 6.84341771921, 0.0, 241)
femmesh.addNode(56.1929644973, 5.30378297199, 0.0, 242)
femmesh.addNode(51.8553990232, 3.66067934457, 0.0, 243)
femmesh.addNode(49.7699972769, 3.52313457422, 0.0, 244)
femmesh.addNode(56.4093344424, 3.38843043534, 0.0, 245)
femmesh.addNode(45.645954722, 3.48402622746, 0.0, 246)
femmesh.addNode(43.420207809, 3.60212852561, 0.0, 247)
femmesh.addNode(45.1673196297, 6.81843018946, 0.0, 248)
femmesh.addNode(47.0998428052, 6.80994755275, 0.0, 249)
femmesh.addNode(49.0145656138, 6.8362902391, 0.0, 250)
femmesh.addNode(50.8650584289, 6.92970475583, 0.0, 251)
femmesh.addNode(52.5068126228, 7.17021674032, 0.0, 252)
femmesh.addNode(47.7263576556, 3.4786736161, 0.0, 253)
femmesh.addNode(56.1925323214, 7.02335670706, 0.0, 254)
femmesh.addNode(54.1972202057, 4.03486740509, 0.0, 255)
femmesh.addNode(54.3378881048, 6.51819092042, 0.0, 256)
femmesh.addNode(50.4562016497, 5.25107562191, 0.0, 257)
femmesh.addNode(44.5945917648, 5.20830901224, 0.0, 258)
femmesh.addNode(46.5147139128, 5.1713602248, 0.0, 259)
femmesh.addNode(52.4420536966, 5.50235414588, 0.0, 260)
femmesh.addNode(48.5036576232, 5.18447404951, 0.0, 261)
femmesh.addNode(21.5392197661, 3.15476964347, 0.0, 262)
femmesh.addNode(21.5180910518, 4.78237613921, 0.0, 263)
femmesh.addNode(21.5662074653, 6.5724608991, 0.0, 264)
femmesh.addNode(21.7599576087, 8.30022931513, 0.0, 265)
femmesh.addNode(22.6716087772, 1.68867528699, 0.0, 266)
femmesh.addNode(26.8424063084, 1.7505519231, 0.0, 267)
femmesh.addNode(30.9337698074, 1.81058108381, 0.0, 268)
femmesh.addNode(35.0021530838, 1.84678868604, 0.0, 269)
femmesh.addNode(23.6543829612, 8.41370661197, 0.0, 270)
femmesh.addNode(25.5913872899, 8.4145034167, 0.0, 271)
femmesh.addNode(27.542122618, 8.41922038693, 0.0, 272)
femmesh.addNode(29.4732015205, 8.44983847147, 0.0, 273)
femmesh.addNode(31.3583903926, 8.52850937991, 0.0, 274)
femmesh.addNode(33.3029865146, 8.48448880356, 0.0, 275)
femmesh.addNode(35.1639533281, 8.42042019762, 0.0, 276)
femmesh.addNode(37.2474140121, 8.38478217035, 0.0, 277)
femmesh.addNode(38.2900931755, 2.63964686733, 0.0, 278)
femmesh.addNode(37.999287578, 6.57227423373, 0.0, 279)
femmesh.addNode(24.7606532388, 1.76672278431, 0.0, 280)
femmesh.addNode(28.895020277, 1.76168886648, 0.0, 281)
femmesh.addNode(33.0047606408, 1.90889561608, 0.0, 282)
femmesh.addNode(36.7744342217, 1.58124689867, 0.0, 283)
femmesh.addNode(38.1488197125, 4.65182661547, 0.0, 284)
femmesh.addNode(22.9199994949, 5.38509185329, 0.0, 285)
femmesh.addNode(23.3204951963, 6.84341771921, 0.0, 286)
femmesh.addNode(36.1929644973, 5.30378297199, 0.0, 287)
femmesh.addNode(31.8553990232, 3.66067934457, 0.0, 288)
femmesh.addNode(29.7699972769, 3.52313457422, 0.0, 289)
femmesh.addNode(36.4093344424, 3.38843043534, 0.0, 290)
femmesh.addNode(25.645954722, 3.48402622746, 0.0, 291)
femmesh.addNode(23.420207809, 3.60212852561, 0.0, 292)
femmesh.addNode(25.1673196297, 6.81843018946, 0.0, 293)
femmesh.addNode(27.0998428052, 6.80994755275, 0.0, 294)
femmesh.addNode(29.0145656138, 6.8362902391, 0.0, 295)
femmesh.addNode(30.8650584289, 6.92970475583, 0.0, 296)
femmesh.addNode(32.5068126228, 7.17021674032, 0.0, 297)
femmesh.addNode(27.7263576556, 3.4786736161, 0.0, 298)
femmesh.addNode(36.1925323214, 7.02335670706, 0.0, 299)
femmesh.addNode(34.1972202057, 4.03486740509, 0.0, 300)
femmesh.addNode(34.3378881048, 6.51819092042, 0.0, 301)
femmesh.addNode(30.4562016497, 5.25107562191, 0.0, 302)
femmesh.addNode(24.5945917648, 5.20830901224, 0.0, 303)
femmesh.addNode(26.5147139128, 5.1713602248, 0.0, 304)
femmesh.addNode(32.4420536966, 5.50235414588, 0.0, 305)
femmesh.addNode(28.5036576232, 5.18447404951, 0.0, 306)
femmesh.addNode(1.53921976611, 3.15476964347, 0.0, 307)
femmesh.addNode(1.51809105178, 4.78237613921, 0.0, 308)
femmesh.addNode(1.56620746532, 6.5724608991, 0.0, 309)
femmesh.addNode(1.75995760869, 8.30022931513, 0.0, 310)
femmesh.addNode(2.6716087772, 1.68867528699, 0.0, 311)
femmesh.addNode(6.84240630842, 1.7505519231, 0.0, 312)
femmesh.addNode(10.9337698074, 1.81058108381, 0.0, 313)
femmesh.addNode(15.0021530838, 1.84678868604, 0.0, 314)
femmesh.addNode(3.65438296115, 8.41370661197, 0.0, 315)
femmesh.addNode(5.59138728988, 8.4145034167, 0.0, 316)
femmesh.addNode(7.54212261796, 8.41922038693, 0.0, 317)
femmesh.addNode(9.47320152048, 8.44983847147, 0.0, 318)
femmesh.addNode(11.3583903926, 8.52850937991, 0.0, 319)
femmesh.addNode(13.3029865146, 8.48448880356, 0.0, 320)
femmesh.addNode(15.1639533281, 8.42042019762, 0.0, 321)
femmesh.addNode(17.2474140121, 8.38478217035, 0.0, 322)
femmesh.addNode(18.2900931755, 2.63964686733, 0.0, 323)
femmesh.addNode(17.999287578, 6.57227423373, 0.0, 324)
femmesh.addNode(4.76065323884, 1.76672278431, 0.0, 325)
femmesh.addNode(8.89502027703, 1.76168886648, 0.0, 326)
femmesh.addNode(13.0047606408, 1.90889561608, 0.0, 327)
femmesh.addNode(16.7744342217, 1.58124689867, 0.0, 328)
femmesh.addNode(18.1488197125, 4.65182661547, 0.0, 329)
femmesh.addNode(2.91999949488, 5.38509185329, 0.0, 330)
femmesh.addNode(3.32049519627, 6.84341771921, 0.0, 331)
femmesh.addNode(16.1929644973, 5.30378297199, 0.0, 332)
femmesh.addNode(11.8553990232, 3.66067934457, 0.0, 333)
femmesh.addNode(9.76999727687, 3.52313457422, 0.0, 334)
femmesh.addNode(16.4093344424, 3.38843043534, 0.0, 335)
femmesh.addNode(5.64595472198, 3.48402622746, 0.0, 336)
femmesh.addNode(3.42020780903, 3.60212852561, 0.0, 337)
femmesh.addNode(5.1673196297, 6.81843018946, 0.0, 338)
femmesh.addNode(7.09984280524, 6.80994755275, 0.0, 339)
femmesh.addNode(9.01456561376, 6.8362902391, 0.0, 340)
femmesh.addNode(10.8650584289, 6.92970475583, 0.0, 341)
femmesh.addNode(12.5068126228, 7.17021674032, 0.0, 342)
femmesh.addNode(7.7263576556, 3.4786736161, 0.0, 343)
femmesh.addNode(16.1925323214, 7.02335670706, 0.0, 344)
femmesh.addNode(14.1972202057, 4.03486740509, 0.0, 345)
femmesh.addNode(14.3378881048, 6.51819092042, 0.0, 346)
femmesh.addNode(10.4562016497, 5.25107562191, 0.0, 347)
femmesh.addNode(4.59459176476, 5.20830901224, 0.0, 348)
femmesh.addNode(6.51471391276, 5.1713602248, 0.0, 349)
femmesh.addNode(12.4420536966, 5.50235414588, 0.0, 350)
femmesh.addNode(8.50365762316, 5.18447404951, 0.0, 351)
femmesh.addNode(80.0, 1.0, 0.0, 352)
femmesh.addNode(80.0, 3.0, 0.0, 353)
femmesh.addNode(80.0, 5.0, 0.0, 354)
femmesh.addNode(80.0, 7.0, 0.0, 355)
femmesh.addNode(80.0, 9.0, 0.0, 356)
femmesh.addNode(81.0, 0.0, 0.0, 357)
femmesh.addNode(83.0, 0.0, 0.0, 358)
femmesh.addNode(85.0, 0.0, 0.0, 359)
femmesh.addNode(87.0, 0.0, 0.0, 360)
femmesh.addNode(89.0, 0.0, 0.0, 361)
femmesh.addNode(91.0, 0.0, 0.0, 362)
femmesh.addNode(93.0, 0.0, 0.0, 363)
femmesh.addNode(95.0, 0.0, 0.0, 364)
femmesh.addNode(97.0, 0.0, 0.0, 365)
femmesh.addNode(99.0, 0.0, 0.0, 366)
femmesh.addNode(81.0, 10.0, 0.0, 367)
femmesh.addNode(83.0, 10.0, 0.0, 368)
femmesh.addNode(85.0, 10.0, 0.0, 369)
femmesh.addNode(87.0, 10.0, 0.0, 370)
femmesh.addNode(89.0, 10.0, 0.0, 371)
femmesh.addNode(91.0, 10.0, 0.0, 372)
femmesh.addNode(93.0, 10.0, 0.0, 373)
femmesh.addNode(95.0, 10.0, 0.0, 374)
femmesh.addNode(97.0, 10.0, 0.0, 375)
femmesh.addNode(99.0, 10.0, 0.0, 376)
femmesh.addNode(100.0, 1.0, 0.0, 377)
femmesh.addNode(100.0, 3.0, 0.0, 378)
femmesh.addNode(100.0, 5.0, 0.0, 379)
femmesh.addNode(100.0, 7.0, 0.0, 380)
femmesh.addNode(100.0, 9.0, 0.0, 381)
femmesh.addNode(60.0, 1.0, 0.0, 382)
femmesh.addNode(60.0, 3.0, 0.0, 383)
femmesh.addNode(60.0, 5.0, 0.0, 384)
femmesh.addNode(60.0, 7.0, 0.0, 385)
femmesh.addNode(60.0, 9.0, 0.0, 386)
femmesh.addNode(61.0, 0.0, 0.0, 387)
femmesh.addNode(63.0, 0.0, 0.0, 388)
femmesh.addNode(65.0, 0.0, 0.0, 389)
femmesh.addNode(67.0, 0.0, 0.0, 390)
femmesh.addNode(69.0, 0.0, 0.0, 391)
femmesh.addNode(71.0, 0.0, 0.0, 392)
femmesh.addNode(73.0, 0.0, 0.0, 393)
femmesh.addNode(75.0, 0.0, 0.0, 394)
femmesh.addNode(77.0, 0.0, 0.0, 395)
femmesh.addNode(79.0, 0.0, 0.0, 396)
femmesh.addNode(61.0, 10.0, 0.0, 397)
femmesh.addNode(63.0, 10.0, 0.0, 398)
femmesh.addNode(65.0, 10.0, 0.0, 399)
femmesh.addNode(67.0, 10.0, 0.0, 400)
femmesh.addNode(69.0, 10.0, 0.0, 401)
femmesh.addNode(71.0, 10.0, 0.0, 402)
femmesh.addNode(73.0, 10.0, 0.0, 403)
femmesh.addNode(75.0, 10.0, 0.0, 404)
femmesh.addNode(77.0, 10.0, 0.0, 405)
femmesh.addNode(79.0, 10.0, 0.0, 406)
femmesh.addNode(40.0, 1.0, 0.0, 407)
femmesh.addNode(40.0, 3.0, 0.0, 408)
femmesh.addNode(40.0, 5.0, 0.0, 409)
femmesh.addNode(40.0, 7.0, 0.0, 410)
femmesh.addNode(40.0, 9.0, 0.0, 411)
femmesh.addNode(41.0, 0.0, 0.0, 412)
femmesh.addNode(43.0, 0.0, 0.0, 413)
femmesh.addNode(45.0, 0.0, 0.0, 414)
femmesh.addNode(47.0, 0.0, 0.0, 415)
femmesh.addNode(49.0, 0.0, 0.0, 416)
femmesh.addNode(51.0, 0.0, 0.0, 417)
femmesh.addNode(53.0, 0.0, 0.0, 418)
femmesh.addNode(55.0, 0.0, 0.0, 419)
femmesh.addNode(57.0, 0.0, 0.0, 420)
femmesh.addNode(59.0, 0.0, 0.0, 421)
femmesh.addNode(41.0, 10.0, 0.0, 422)
femmesh.addNode(43.0, 10.0, 0.0, 423)
femmesh.addNode(45.0, 10.0, 0.0, 424)
femmesh.addNode(47.0, 10.0, 0.0, 425)
femmesh.addNode(49.0, 10.0, 0.0, 426)
femmesh.addNode(51.0, 10.0, 0.0, 427)
femmesh.addNode(53.0, 10.0, 0.0, 428)
femmesh.addNode(55.0, 10.0, 0.0, 429)
femmesh.addNode(57.0, 10.0, 0.0, 430)
femmesh.addNode(59.0, 10.0, 0.0, 431)
femmesh.addNode(20.0, 1.0, 0.0, 432)
femmesh.addNode(20.0, 3.0, 0.0, 433)
femmesh.addNode(20.0, 5.0, 0.0, 434)
femmesh.addNode(20.0, 7.0, 0.0, 435)
femmesh.addNode(20.0, 9.0, 0.0, 436)
femmesh.addNode(21.0, 0.0, 0.0, 437)
femmesh.addNode(23.0, 0.0, 0.0, 438)
femmesh.addNode(25.0, 0.0, 0.0, 439)
femmesh.addNode(27.0, 0.0, 0.0, 440)
femmesh.addNode(29.0, 0.0, 0.0, 441)
femmesh.addNode(31.0, 0.0, 0.0, 442)
femmesh.addNode(33.0, 0.0, 0.0, 443)
femmesh.addNode(35.0, 0.0, 0.0, 444)
femmesh.addNode(37.0, 0.0, 0.0, 445)
femmesh.addNode(39.0, 0.0, 0.0, 446)
femmesh.addNode(21.0, 10.0, 0.0, 447)
femmesh.addNode(23.0, 10.0, 0.0, 448)
femmesh.addNode(25.0, 10.0, 0.0, 449)
femmesh.addNode(27.0, 10.0, 0.0, 450)
femmesh.addNode(29.0, 10.0, 0.0, 451)
femmesh.addNode(31.0, 10.0, 0.0, 452)
femmesh.addNode(33.0, 10.0, 0.0, 453)
femmesh.addNode(35.0, 10.0, 0.0, 454)
femmesh.addNode(37.0, 10.0, 0.0, 455)
femmesh.addNode(39.0, 10.0, 0.0, 456)
femmesh.addNode(0.0, 1.0, 0.0, 457)
femmesh.addNode(0.0, 3.0, 0.0, 458)
femmesh.addNode(0.0, 5.0, 0.0, 459)
femmesh.addNode(0.0, 7.0, 0.0, 460)
femmesh.addNode(0.0, 9.0, 0.0, 461)
femmesh.addNode(1.0, 0.0, 0.0, 462)
femmesh.addNode(3.0, 0.0, 0.0, 463)
femmesh.addNode(5.0, 0.0, 0.0, 464)
femmesh.addNode(7.0, 0.0, 0.0, 465)
femmesh.addNode(9.0, 0.0, 0.0, 466)
femmesh.addNode(11.0, 0.0, 0.0, 467)
femmesh.addNode(13.0, 0.0, 0.0, 468)
femmesh.addNode(15.0, 0.0, 0.0, 469)
femmesh.addNode(17.0, 0.0, 0.0, 470)
femmesh.addNode(19.0, 0.0, 0.0, 471)
femmesh.addNode(1.0, 10.0, 0.0, 472)
femmesh.addNode(3.0, 10.0, 0.0, 473)
femmesh.addNode(5.0, 10.0, 0.0, 474)
femmesh.addNode(7.0, 10.0, 0.0, 475)
femmesh.addNode(9.0, 10.0, 0.0, 476)
femmesh.addNode(11.0, 10.0, 0.0, 477)
femmesh.addNode(13.0, 10.0, 0.0, 478)
femmesh.addNode(15.0, 10.0, 0.0, 479)
femmesh.addNode(17.0, 10.0, 0.0, 480)
femmesh.addNode(19.0, 10.0, 0.0, 481)
femmesh.addNode(81.0, 1.0, 0.0, 482)
femmesh.addNode(80.7696098831, 2.57738482174, 0.0, 483)
femmesh.addNode(80.7696098831, 3.57738482174, 0.0, 484)
femmesh.addNode(80.7590455259, 4.3911880696, 0.0, 485)
femmesh.addNode(80.7590455259, 5.3911880696, 0.0, 486)
femmesh.addNode(81.5286554089, 3.96857289134, 0.0, 487)
femmesh.addNode(80.7831037327, 6.28623044955, 0.0, 488)
femmesh.addNode(80.7831037327, 7.28623044955, 0.0, 489)
femmesh.addNode(81.5421492586, 5.67741851915, 0.0, 490)
femmesh.addNode(80.8799788043, 8.15011465756, 0.0, 491)
femmesh.addNode(80.8799788043, 9.15011465756, 0.0, 492)
femmesh.addNode(81.8799788043, 9.15011465756, 0.0, 493)
femmesh.addNode(81.663082537, 7.43634510711, 0.0, 494)
femmesh.addNode(83.3358043886, 0.844337643495, 0.0, 495)
femmesh.addNode(82.3358043886, 0.844337643495, 0.0, 496)
femmesh.addNode(87.4212031542, 0.875275961552, 0.0, 497)
femmesh.addNode(86.4212031542, 0.875275961552, 0.0, 498)
femmesh.addNode(91.4668849037, 0.905290541907, 0.0, 499)
femmesh.addNode(90.4668849037, 0.905290541907, 0.0, 500)
femmesh.addNode(95.5010765419, 0.923394343021, 0.0, 501)
femmesh.addNode(94.5010765419, 0.923394343021, 0.0, 502)
femmesh.addNode(99.0, 1.0, 0.0, 503)
femmesh.addNode(82.8271914806, 9.20685330599, 0.0, 504)
femmesh.addNode(83.8271914806, 9.20685330599, 0.0, 505)
femmesh.addNode(82.7071702849, 8.35696796355, 0.0, 506)
femmesh.addNode(84.7956936449, 9.20725170835, 0.0, 507)
femmesh.addNode(85.7956936449, 9.20725170835, 0.0, 508)
femmesh.addNode(84.6228851255, 8.41410501433, 0.0, 509)
femmesh.addNode(86.771061309, 9.20961019347, 0.0, 510)
femmesh.addNode(87.771061309, 9.20961019347, 0.0, 511)
femmesh.addNode(86.5667549539, 8.41686190181, 0.0, 512)
femmesh.addNode(88.7366007602, 9.22491923573, 0.0, 513)
femmesh.addNode(89.7366007602, 9.22491923573, 0.0, 514)
femmesh.addNode(88.5076620692, 8.4345294292, 0.0, 515)
femmesh.addNode(90.6791951963, 9.26425468996, 0.0, 516)
femmesh.addNode(91.6791951963, 9.26425468996, 0.0, 517)
femmesh.addNode(90.4157959565, 8.48917392569, 0.0, 518)
femmesh.addNode(92.6514932573, 9.24224440178, 0.0, 519)
femmesh.addNode(93.6514932573, 9.24224440178, 0.0, 520)
femmesh.addNode(92.3306884536, 8.50649909174, 0.0, 521)
femmesh.addNode(94.581976664, 9.21021009881, 0.0, 522)
femmesh.addNode(95.581976664, 9.21021009881, 0.0, 523)
femmesh.addNode(94.2334699214, 8.45245450059, 0.0, 524)
femmesh.addNode(96.623707006, 9.19239108518, 0.0, 525)
femmesh.addNode(97.623707006, 9.19239108518, 0.0, 526)
femmesh.addNode(96.2056836701, 8.40260118399, 0.0, 527)
femmesh.addNode(99.0, 9.0, 0.0, 528)
femmesh.addNode(99.1450465877, 3.31982343367, 0.0, 529)
femmesh.addNode(99.1450465877, 2.31982343367, 0.0, 530)
femmesh.addNode(98.999643789, 7.28613711686, 0.0, 531)
femmesh.addNode(98.999643789, 6.28613711686, 0.0, 532)
femmesh.addNode(98.1450465877, 1.31982343367, 0.0, 533)
femmesh.addNode(85.3803266194, 0.883361392153, 0.0, 534)
femmesh.addNode(84.3803266194, 0.883361392153, 0.0, 535)
femmesh.addNode(89.4475101385, 0.880844433239, 0.0, 536)
femmesh.addNode(88.4475101385, 0.880844433239, 0.0, 537)
femmesh.addNode(93.5023803204, 0.95444780804, 0.0, 538)
femmesh.addNode(92.5023803204, 0.95444780804, 0.0, 539)
femmesh.addNode(85.8015297736, 1.7586373537, 0.0, 540)
femmesh.addNode(97.3872171108, 0.790623449336, 0.0, 541)
femmesh.addNode(96.3872171108, 0.790623449336, 0.0, 542)
femmesh.addNode(83.716131008, 1.72769903565, 0.0, 543)
femmesh.addNode(98.623707006, 8.19239108518, 0.0, 544)
femmesh.addNode(99.0744098563, 5.32591330773, 0.0, 545)
femmesh.addNode(99.0744098563, 4.32591330773, 0.0, 546)
femmesh.addNode(97.623350795, 7.47852820204, 0.0, 547)
femmesh.addNode(81.3358043886, 1.8443376435, 0.0, 548)
femmesh.addNode(91.9692652241, 1.85973834995, 0.0, 549)
femmesh.addNode(95.8882936527, 1.71401779236, 0.0, 550)
femmesh.addNode(89.9143950422, 1.78613497515, 0.0, 551)
femmesh.addNode(94.0034568623, 1.87784215106, 0.0, 552)
femmesh.addNode(98.219456444, 3.6457367414, 0.0, 553)
femmesh.addNode(87.8687132927, 1.75612039479, 0.0, 554)
femmesh.addNode(98.0740536453, 5.6120504246, 0.0, 555)
femmesh.addNode(82.1054142717, 2.42172246523, 0.0, 556)
femmesh.addNode(82.2190452733, 5.08373399625, 0.0, 557)
femmesh.addNode(82.2431034801, 5.9787763762, 0.0, 558)
femmesh.addNode(82.4433513308, 6.70793930915, 0.0, 559)
femmesh.addNode(82.5402264025, 7.57182351717, 0.0, 560)
femmesh.addNode(83.4874390787, 7.62856216559, 0.0, 561)
femmesh.addNode(83.1202473456, 6.11425478625, 0.0, 562)
femmesh.addNode(97.0961260376, 5.93802860286, 0.0, 563)
femmesh.addNode(97.1708921049, 4.97780479373, 0.0, 564)
femmesh.addNode(92.430079832, 2.78478748033, 0.0, 565)
femmesh.addNode(91.3945844153, 2.73563021419, 0.0, 566)
# NOTE(review): this section appears to be machine-generated FEM mesh data —
# each call is femmesh.addNode(x, y, z, node_id), with z = 0.0 on every line
# here (planar mesh). The coordinate literals are verbatim tool output
# (12-significant-digit float reprs); do not hand-edit or reformat them.
#
# Structure (verified against the visible lines): the mesh repeats one
# 162-node layout per vertical strip. For every node n in one strip, node
# n + 162 in the next strip sits at the same y with x shifted by -20
# (e.g. node 567 at x=90.35... matches node 729 at x=70.35...).
#
# Nodes 567-643: tail of the strip at x ~ [82, 98] (its start precedes this
# section).
femmesh.addNode(90.3518835421, 2.66685782902, 0.0, 567)
femmesh.addNode(89.332508777, 2.64241172035, 0.0, 568)
femmesh.addNode(90.81269815, 3.59190695939, 0.0, 569)
femmesh.addNode(96.5918843321, 2.48483866701, 0.0, 570)
femmesh.addNode(95.7057437631, 2.61760956069, 0.0, 571)
femmesh.addNode(86.2441805152, 2.61728907528, 0.0, 572)
femmesh.addNode(85.2033039804, 2.62537450588, 0.0, 573)
femmesh.addNode(97.5322636986, 2.110446883, 0.0, 574)
femmesh.addNode(84.0904305239, 2.68442565496, 0.0, 575)
femmesh.addNode(83.0459082931, 2.6454019063, 0.0, 576)
femmesh.addNode(84.5330812655, 3.54307737653, 0.0, 577)
femmesh.addNode(84.4108512954, 7.61606840071, 0.0, 578)
femmesh.addNode(85.3793534598, 7.61646680308, 0.0, 579)
femmesh.addNode(84.243907413, 6.83092395433, 0.0, 580)
femmesh.addNode(86.3456150476, 7.61222548472, 0.0, 581)
femmesh.addNode(87.3209827116, 7.61458396984, 0.0, 582)
femmesh.addNode(86.1335812175, 6.8141888711, 0.0, 583)
femmesh.addNode(88.2783441159, 7.62775531302, 0.0, 584)
femmesh.addNode(89.2438835671, 7.64306435528, 0.0, 585)
femmesh.addNode(88.0572042095, 6.82311889592, 0.0, 586)
femmesh.addNode(90.1691299747, 7.68977161365, 0.0, 587)
femmesh.addNode(91.1117244107, 7.72910706787, 0.0, 588)
femmesh.addNode(89.9398120213, 6.88299749746, 0.0, 589)
femmesh.addNode(91.9326015077, 7.84936306012, 0.0, 590)
femmesh.addNode(92.9048995687, 7.82735277194, 0.0, 591)
femmesh.addNode(91.6859355259, 7.04996074807, 0.0, 592)
femmesh.addNode(88.3106889663, 2.62018124129, 0.0, 593)
femmesh.addNode(87.284381982, 2.6146127696, 0.0, 594)
femmesh.addNode(86.6861561888, 3.48134992178, 0.0, 595)
femmesh.addNode(88.7481774662, 3.50090409516, 0.0, 596)
femmesh.addNode(82.4797137876, 3.37844908454, 0.0, 597)
femmesh.addNode(96.7199731668, 7.70406943871, 0.0, 598)
femmesh.addNode(97.0959099497, 6.7978154704, 0.0, 599)
femmesh.addNode(96.1927484094, 6.16356983953, 0.0, 600)
femmesh.addNode(95.6782428248, 7.72188845234, 0.0, 601)
femmesh.addNode(82.4691494304, 4.19225233241, 0.0, 602)
femmesh.addNode(97.349713809, 3.01403865134, 0.0, 603)
femmesh.addNode(83.170103652, 4.49361018945, 0.0, 604)
femmesh.addNode(97.2790770775, 4.02012852541, 0.0, 605)
femmesh.addNode(94.5996866448, 2.94082804557, 0.0, 606)
femmesh.addNode(93.6009904232, 2.97188151058, 0.0, 607)
femmesh.addNode(93.0263096145, 3.84777337483, 0.0, 608)
femmesh.addNode(93.8204373097, 7.50133986199, 0.0, 609)
femmesh.addNode(94.7509207164, 7.46930555902, 0.0, 610)
femmesh.addNode(96.3011494699, 4.34610670367, 0.0, 611)
femmesh.addNode(95.2652102131, 6.77077381374, 0.0, 612)
femmesh.addNode(95.3032773241, 3.71164892022, 0.0, 613)
femmesh.addNode(95.1950923515, 4.66932518854, 0.0, 614)
femmesh.addNode(91.1558003365, 4.45587748324, 0.0, 615)
femmesh.addNode(90.1130994633, 4.38710509806, 0.0, 616)
femmesh.addNode(93.4223503638, 6.84420383037, 0.0, 617)
femmesh.addNode(83.9575434805, 6.02586336572, 0.0, 618)
femmesh.addNode(84.8809556972, 6.01336960085, 0.0, 619)
femmesh.addNode(83.7572956298, 5.29670043277, 0.0, 620)
femmesh.addNode(85.8410167712, 5.99489520713, 0.0, 621)
femmesh.addNode(86.807278359, 5.99065388877, 0.0, 622)
femmesh.addNode(85.5546528388, 5.18983461852, 0.0, 623)
femmesh.addNode(89.7353836317, 6.04368293051, 0.0, 624)
femmesh.addNode(90.6606300393, 6.09039018887, 0.0, 625)
femmesh.addNode(91.6535560627, 6.21602945085, 0.0, 626)
femmesh.addNode(92.4744331597, 6.3362854431, 0.0, 627)
femmesh.addNode(93.3899709007, 6.01027253315, 0.0, 628)
femmesh.addNode(91.4491276731, 5.3767148839, 0.0, 629)
femmesh.addNode(95.265426301, 5.91098694621, 0.0, 630)
femmesh.addNode(87.1205357842, 4.32501692045, 0.0, 631)
femmesh.addNode(86.0803343174, 4.32769322613, 0.0, 632)
femmesh.addNode(93.3196369511, 4.76861077548, 0.0, 633)
femmesh.addNode(94.2675541552, 5.27652916275, 0.0, 634)
femmesh.addNode(84.0073997869, 4.40521876893, 0.0, 635)
femmesh.addNode(92.1487263599, 4.58151674523, 0.0, 636)
femmesh.addNode(85.1202732434, 4.34616761985, 0.0, 637)
femmesh.addNode(87.8017502142, 5.99721080113, 0.0, 638)
femmesh.addNode(88.7591116185, 6.01038214431, 0.0, 639)
femmesh.addNode(89.4799296364, 5.21777483571, 0.0, 640)
femmesh.addNode(87.509185768, 5.17791713715, 0.0, 641)
femmesh.addNode(89.13682745, 4.35380431186, 0.0, 642)
femmesh.addNode(88.1150076394, 4.3315738328, 0.0, 643)
# Nodes 644-805: full strip at x ~ [60, 80) — the previous strip's layout
# shifted by -20 in x, node ids offset by +162.
femmesh.addNode(61.0, 1.0, 0.0, 644)
femmesh.addNode(60.7696098831, 2.57738482174, 0.0, 645)
femmesh.addNode(60.7696098831, 3.57738482174, 0.0, 646)
femmesh.addNode(60.7590455259, 4.3911880696, 0.0, 647)
femmesh.addNode(60.7590455259, 5.3911880696, 0.0, 648)
femmesh.addNode(61.5286554089, 3.96857289134, 0.0, 649)
femmesh.addNode(60.7831037327, 6.28623044955, 0.0, 650)
femmesh.addNode(60.7831037327, 7.28623044955, 0.0, 651)
femmesh.addNode(61.5421492586, 5.67741851915, 0.0, 652)
femmesh.addNode(60.8799788043, 8.15011465756, 0.0, 653)
femmesh.addNode(60.8799788043, 9.15011465756, 0.0, 654)
femmesh.addNode(61.8799788043, 9.15011465756, 0.0, 655)
femmesh.addNode(61.663082537, 7.43634510711, 0.0, 656)
femmesh.addNode(63.3358043886, 0.844337643495, 0.0, 657)
femmesh.addNode(62.3358043886, 0.844337643495, 0.0, 658)
femmesh.addNode(67.4212031542, 0.875275961552, 0.0, 659)
femmesh.addNode(66.4212031542, 0.875275961552, 0.0, 660)
femmesh.addNode(71.4668849037, 0.905290541907, 0.0, 661)
femmesh.addNode(70.4668849037, 0.905290541907, 0.0, 662)
femmesh.addNode(75.5010765419, 0.923394343021, 0.0, 663)
femmesh.addNode(74.5010765419, 0.923394343021, 0.0, 664)
femmesh.addNode(79.0, 1.0, 0.0, 665)
femmesh.addNode(62.8271914806, 9.20685330599, 0.0, 666)
femmesh.addNode(63.8271914806, 9.20685330599, 0.0, 667)
femmesh.addNode(62.7071702849, 8.35696796355, 0.0, 668)
femmesh.addNode(64.7956936449, 9.20725170835, 0.0, 669)
femmesh.addNode(65.7956936449, 9.20725170835, 0.0, 670)
femmesh.addNode(64.6228851255, 8.41410501433, 0.0, 671)
femmesh.addNode(66.771061309, 9.20961019347, 0.0, 672)
femmesh.addNode(67.771061309, 9.20961019347, 0.0, 673)
femmesh.addNode(66.5667549539, 8.41686190181, 0.0, 674)
femmesh.addNode(68.7366007602, 9.22491923573, 0.0, 675)
femmesh.addNode(69.7366007602, 9.22491923573, 0.0, 676)
femmesh.addNode(68.5076620692, 8.4345294292, 0.0, 677)
femmesh.addNode(70.6791951963, 9.26425468996, 0.0, 678)
femmesh.addNode(71.6791951963, 9.26425468996, 0.0, 679)
femmesh.addNode(70.4157959565, 8.48917392569, 0.0, 680)
femmesh.addNode(72.6514932573, 9.24224440178, 0.0, 681)
femmesh.addNode(73.6514932573, 9.24224440178, 0.0, 682)
femmesh.addNode(72.3306884536, 8.50649909174, 0.0, 683)
femmesh.addNode(74.581976664, 9.21021009881, 0.0, 684)
femmesh.addNode(75.581976664, 9.21021009881, 0.0, 685)
femmesh.addNode(74.2334699214, 8.45245450059, 0.0, 686)
femmesh.addNode(76.623707006, 9.19239108518, 0.0, 687)
femmesh.addNode(77.623707006, 9.19239108518, 0.0, 688)
femmesh.addNode(76.2056836701, 8.40260118399, 0.0, 689)
femmesh.addNode(79.0, 9.0, 0.0, 690)
femmesh.addNode(79.1450465877, 3.31982343367, 0.0, 691)
femmesh.addNode(79.1450465877, 2.31982343367, 0.0, 692)
femmesh.addNode(78.999643789, 7.28613711686, 0.0, 693)
femmesh.addNode(78.999643789, 6.28613711686, 0.0, 694)
femmesh.addNode(78.1450465877, 1.31982343367, 0.0, 695)
femmesh.addNode(65.3803266194, 0.883361392153, 0.0, 696)
femmesh.addNode(64.3803266194, 0.883361392153, 0.0, 697)
femmesh.addNode(69.4475101385, 0.880844433239, 0.0, 698)
femmesh.addNode(68.4475101385, 0.880844433239, 0.0, 699)
femmesh.addNode(73.5023803204, 0.95444780804, 0.0, 700)
femmesh.addNode(72.5023803204, 0.95444780804, 0.0, 701)
femmesh.addNode(65.8015297736, 1.7586373537, 0.0, 702)
femmesh.addNode(77.3872171108, 0.790623449336, 0.0, 703)
femmesh.addNode(76.3872171108, 0.790623449336, 0.0, 704)
femmesh.addNode(63.716131008, 1.72769903565, 0.0, 705)
femmesh.addNode(78.623707006, 8.19239108518, 0.0, 706)
femmesh.addNode(79.0744098563, 5.32591330773, 0.0, 707)
femmesh.addNode(79.0744098563, 4.32591330773, 0.0, 708)
femmesh.addNode(77.623350795, 7.47852820204, 0.0, 709)
femmesh.addNode(61.3358043886, 1.8443376435, 0.0, 710)
femmesh.addNode(71.9692652241, 1.85973834995, 0.0, 711)
femmesh.addNode(75.8882936527, 1.71401779236, 0.0, 712)
femmesh.addNode(69.9143950422, 1.78613497515, 0.0, 713)
femmesh.addNode(74.0034568623, 1.87784215106, 0.0, 714)
femmesh.addNode(78.219456444, 3.6457367414, 0.0, 715)
femmesh.addNode(67.8687132927, 1.75612039479, 0.0, 716)
femmesh.addNode(78.0740536453, 5.6120504246, 0.0, 717)
femmesh.addNode(62.1054142717, 2.42172246523, 0.0, 718)
femmesh.addNode(62.2190452733, 5.08373399625, 0.0, 719)
femmesh.addNode(62.2431034801, 5.9787763762, 0.0, 720)
femmesh.addNode(62.4433513308, 6.70793930915, 0.0, 721)
femmesh.addNode(62.5402264025, 7.57182351717, 0.0, 722)
femmesh.addNode(63.4874390787, 7.62856216559, 0.0, 723)
femmesh.addNode(63.1202473456, 6.11425478625, 0.0, 724)
femmesh.addNode(77.0961260376, 5.93802860286, 0.0, 725)
femmesh.addNode(77.1708921049, 4.97780479373, 0.0, 726)
femmesh.addNode(72.430079832, 2.78478748033, 0.0, 727)
femmesh.addNode(71.3945844153, 2.73563021419, 0.0, 728)
femmesh.addNode(70.3518835421, 2.66685782902, 0.0, 729)
femmesh.addNode(69.332508777, 2.64241172035, 0.0, 730)
femmesh.addNode(70.81269815, 3.59190695939, 0.0, 731)
femmesh.addNode(76.5918843321, 2.48483866701, 0.0, 732)
femmesh.addNode(75.7057437631, 2.61760956069, 0.0, 733)
femmesh.addNode(66.2441805152, 2.61728907528, 0.0, 734)
femmesh.addNode(65.2033039804, 2.62537450588, 0.0, 735)
femmesh.addNode(77.5322636986, 2.110446883, 0.0, 736)
femmesh.addNode(64.0904305239, 2.68442565496, 0.0, 737)
femmesh.addNode(63.0459082931, 2.6454019063, 0.0, 738)
femmesh.addNode(64.5330812655, 3.54307737653, 0.0, 739)
femmesh.addNode(64.4108512954, 7.61606840071, 0.0, 740)
femmesh.addNode(65.3793534598, 7.61646680308, 0.0, 741)
femmesh.addNode(64.243907413, 6.83092395433, 0.0, 742)
femmesh.addNode(66.3456150476, 7.61222548472, 0.0, 743)
femmesh.addNode(67.3209827116, 7.61458396984, 0.0, 744)
femmesh.addNode(66.1335812175, 6.8141888711, 0.0, 745)
femmesh.addNode(68.2783441159, 7.62775531302, 0.0, 746)
femmesh.addNode(69.2438835671, 7.64306435528, 0.0, 747)
femmesh.addNode(68.0572042095, 6.82311889592, 0.0, 748)
femmesh.addNode(70.1691299747, 7.68977161365, 0.0, 749)
femmesh.addNode(71.1117244107, 7.72910706787, 0.0, 750)
femmesh.addNode(69.9398120213, 6.88299749746, 0.0, 751)
femmesh.addNode(71.9326015077, 7.84936306012, 0.0, 752)
femmesh.addNode(72.9048995687, 7.82735277194, 0.0, 753)
femmesh.addNode(71.6859355259, 7.04996074807, 0.0, 754)
femmesh.addNode(68.3106889663, 2.62018124129, 0.0, 755)
femmesh.addNode(67.284381982, 2.6146127696, 0.0, 756)
femmesh.addNode(66.6861561888, 3.48134992178, 0.0, 757)
femmesh.addNode(68.7481774662, 3.50090409516, 0.0, 758)
femmesh.addNode(62.4797137876, 3.37844908454, 0.0, 759)
femmesh.addNode(76.7199731668, 7.70406943871, 0.0, 760)
femmesh.addNode(77.0959099497, 6.7978154704, 0.0, 761)
femmesh.addNode(76.1927484094, 6.16356983953, 0.0, 762)
femmesh.addNode(75.6782428248, 7.72188845234, 0.0, 763)
femmesh.addNode(62.4691494304, 4.19225233241, 0.0, 764)
femmesh.addNode(77.349713809, 3.01403865134, 0.0, 765)
femmesh.addNode(63.170103652, 4.49361018945, 0.0, 766)
femmesh.addNode(77.2790770775, 4.02012852541, 0.0, 767)
femmesh.addNode(74.5996866448, 2.94082804557, 0.0, 768)
femmesh.addNode(73.6009904232, 2.97188151058, 0.0, 769)
femmesh.addNode(73.0263096145, 3.84777337483, 0.0, 770)
femmesh.addNode(73.8204373097, 7.50133986199, 0.0, 771)
femmesh.addNode(74.7509207164, 7.46930555902, 0.0, 772)
femmesh.addNode(76.3011494699, 4.34610670367, 0.0, 773)
femmesh.addNode(75.2652102131, 6.77077381374, 0.0, 774)
femmesh.addNode(75.3032773241, 3.71164892022, 0.0, 775)
femmesh.addNode(75.1950923515, 4.66932518854, 0.0, 776)
femmesh.addNode(71.1558003365, 4.45587748324, 0.0, 777)
femmesh.addNode(70.1130994633, 4.38710509806, 0.0, 778)
femmesh.addNode(73.4223503638, 6.84420383037, 0.0, 779)
femmesh.addNode(63.9575434805, 6.02586336572, 0.0, 780)
femmesh.addNode(64.8809556972, 6.01336960085, 0.0, 781)
femmesh.addNode(63.7572956298, 5.29670043277, 0.0, 782)
femmesh.addNode(65.8410167712, 5.99489520713, 0.0, 783)
femmesh.addNode(66.807278359, 5.99065388877, 0.0, 784)
femmesh.addNode(65.5546528388, 5.18983461852, 0.0, 785)
femmesh.addNode(69.7353836317, 6.04368293051, 0.0, 786)
femmesh.addNode(70.6606300393, 6.09039018887, 0.0, 787)
femmesh.addNode(71.6535560627, 6.21602945085, 0.0, 788)
femmesh.addNode(72.4744331597, 6.3362854431, 0.0, 789)
femmesh.addNode(73.3899709007, 6.01027253315, 0.0, 790)
femmesh.addNode(71.4491276731, 5.3767148839, 0.0, 791)
femmesh.addNode(75.265426301, 5.91098694621, 0.0, 792)
femmesh.addNode(67.1205357842, 4.32501692045, 0.0, 793)
femmesh.addNode(66.0803343174, 4.32769322613, 0.0, 794)
femmesh.addNode(73.3196369511, 4.76861077548, 0.0, 795)
femmesh.addNode(74.2675541552, 5.27652916275, 0.0, 796)
femmesh.addNode(64.0073997869, 4.40521876893, 0.0, 797)
femmesh.addNode(72.1487263599, 4.58151674523, 0.0, 798)
femmesh.addNode(65.1202732434, 4.34616761985, 0.0, 799)
femmesh.addNode(67.8017502142, 5.99721080113, 0.0, 800)
femmesh.addNode(68.7591116185, 6.01038214431, 0.0, 801)
femmesh.addNode(69.4799296364, 5.21777483571, 0.0, 802)
femmesh.addNode(67.509185768, 5.17791713715, 0.0, 803)
femmesh.addNode(69.13682745, 4.35380431186, 0.0, 804)
femmesh.addNode(68.1150076394, 4.3315738328, 0.0, 805)
# Nodes 806-967: full strip at x ~ [40, 60) — nodes 644-805 shifted by -20
# in x, node ids offset by +162.
femmesh.addNode(41.0, 1.0, 0.0, 806)
femmesh.addNode(40.7696098831, 2.57738482174, 0.0, 807)
femmesh.addNode(40.7696098831, 3.57738482174, 0.0, 808)
femmesh.addNode(40.7590455259, 4.3911880696, 0.0, 809)
femmesh.addNode(40.7590455259, 5.3911880696, 0.0, 810)
femmesh.addNode(41.5286554089, 3.96857289134, 0.0, 811)
femmesh.addNode(40.7831037327, 6.28623044955, 0.0, 812)
femmesh.addNode(40.7831037327, 7.28623044955, 0.0, 813)
femmesh.addNode(41.5421492586, 5.67741851915, 0.0, 814)
femmesh.addNode(40.8799788043, 8.15011465756, 0.0, 815)
femmesh.addNode(40.8799788043, 9.15011465756, 0.0, 816)
femmesh.addNode(41.8799788043, 9.15011465756, 0.0, 817)
femmesh.addNode(41.663082537, 7.43634510711, 0.0, 818)
femmesh.addNode(43.3358043886, 0.844337643495, 0.0, 819)
femmesh.addNode(42.3358043886, 0.844337643495, 0.0, 820)
femmesh.addNode(47.4212031542, 0.875275961552, 0.0, 821)
femmesh.addNode(46.4212031542, 0.875275961552, 0.0, 822)
femmesh.addNode(51.4668849037, 0.905290541907, 0.0, 823)
femmesh.addNode(50.4668849037, 0.905290541907, 0.0, 824)
femmesh.addNode(55.5010765419, 0.923394343021, 0.0, 825)
femmesh.addNode(54.5010765419, 0.923394343021, 0.0, 826)
femmesh.addNode(59.0, 1.0, 0.0, 827)
femmesh.addNode(42.8271914806, 9.20685330599, 0.0, 828)
femmesh.addNode(43.8271914806, 9.20685330599, 0.0, 829)
femmesh.addNode(42.7071702849, 8.35696796355, 0.0, 830)
femmesh.addNode(44.7956936449, 9.20725170835, 0.0, 831)
femmesh.addNode(45.7956936449, 9.20725170835, 0.0, 832)
femmesh.addNode(44.6228851255, 8.41410501433, 0.0, 833)
femmesh.addNode(46.771061309, 9.20961019347, 0.0, 834)
femmesh.addNode(47.771061309, 9.20961019347, 0.0, 835)
femmesh.addNode(46.5667549539, 8.41686190181, 0.0, 836)
femmesh.addNode(48.7366007602, 9.22491923573, 0.0, 837)
femmesh.addNode(49.7366007602, 9.22491923573, 0.0, 838)
femmesh.addNode(48.5076620692, 8.4345294292, 0.0, 839)
femmesh.addNode(50.6791951963, 9.26425468996, 0.0, 840)
femmesh.addNode(51.6791951963, 9.26425468996, 0.0, 841)
femmesh.addNode(50.4157959565, 8.48917392569, 0.0, 842)
femmesh.addNode(52.6514932573, 9.24224440178, 0.0, 843)
femmesh.addNode(53.6514932573, 9.24224440178, 0.0, 844)
femmesh.addNode(52.3306884536, 8.50649909174, 0.0, 845)
femmesh.addNode(54.581976664, 9.21021009881, 0.0, 846)
femmesh.addNode(55.581976664, 9.21021009881, 0.0, 847)
femmesh.addNode(54.2334699214, 8.45245450059, 0.0, 848)
femmesh.addNode(56.623707006, 9.19239108518, 0.0, 849)
femmesh.addNode(57.623707006, 9.19239108518, 0.0, 850)
femmesh.addNode(56.2056836701, 8.40260118399, 0.0, 851)
femmesh.addNode(59.0, 9.0, 0.0, 852)
femmesh.addNode(59.1450465877, 3.31982343367, 0.0, 853)
femmesh.addNode(59.1450465877, 2.31982343367, 0.0, 854)
femmesh.addNode(58.999643789, 7.28613711686, 0.0, 855)
femmesh.addNode(58.999643789, 6.28613711686, 0.0, 856)
femmesh.addNode(58.1450465877, 1.31982343367, 0.0, 857)
femmesh.addNode(45.3803266194, 0.883361392153, 0.0, 858)
femmesh.addNode(44.3803266194, 0.883361392153, 0.0, 859)
femmesh.addNode(49.4475101385, 0.880844433239, 0.0, 860)
femmesh.addNode(48.4475101385, 0.880844433239, 0.0, 861)
femmesh.addNode(53.5023803204, 0.95444780804, 0.0, 862)
femmesh.addNode(52.5023803204, 0.95444780804, 0.0, 863)
femmesh.addNode(45.8015297736, 1.7586373537, 0.0, 864)
femmesh.addNode(57.3872171108, 0.790623449336, 0.0, 865)
femmesh.addNode(56.3872171108, 0.790623449336, 0.0, 866)
femmesh.addNode(43.716131008, 1.72769903565, 0.0, 867)
femmesh.addNode(58.623707006, 8.19239108518, 0.0, 868)
femmesh.addNode(59.0744098563, 5.32591330773, 0.0, 869)
femmesh.addNode(59.0744098563, 4.32591330773, 0.0, 870)
femmesh.addNode(57.623350795, 7.47852820204, 0.0, 871)
femmesh.addNode(41.3358043886, 1.8443376435, 0.0, 872)
femmesh.addNode(51.9692652241, 1.85973834995, 0.0, 873)
femmesh.addNode(55.8882936527, 1.71401779236, 0.0, 874)
femmesh.addNode(49.9143950422, 1.78613497515, 0.0, 875)
femmesh.addNode(54.0034568623, 1.87784215106, 0.0, 876)
femmesh.addNode(58.219456444, 3.6457367414, 0.0, 877)
femmesh.addNode(47.8687132927, 1.75612039479, 0.0, 878)
femmesh.addNode(58.0740536453, 5.6120504246, 0.0, 879)
femmesh.addNode(42.1054142717, 2.42172246523, 0.0, 880)
femmesh.addNode(42.2190452733, 5.08373399625, 0.0, 881)
femmesh.addNode(42.2431034801, 5.9787763762, 0.0, 882)
femmesh.addNode(42.4433513308, 6.70793930915, 0.0, 883)
femmesh.addNode(42.5402264025, 7.57182351717, 0.0, 884)
femmesh.addNode(43.4874390787, 7.62856216559, 0.0, 885)
femmesh.addNode(43.1202473456, 6.11425478625, 0.0, 886)
femmesh.addNode(57.0961260376, 5.93802860286, 0.0, 887)
femmesh.addNode(57.1708921049, 4.97780479373, 0.0, 888)
femmesh.addNode(52.430079832, 2.78478748033, 0.0, 889)
femmesh.addNode(51.3945844153, 2.73563021419, 0.0, 890)
femmesh.addNode(50.3518835421, 2.66685782902, 0.0, 891)
femmesh.addNode(49.332508777, 2.64241172035, 0.0, 892)
femmesh.addNode(50.81269815, 3.59190695939, 0.0, 893)
femmesh.addNode(56.5918843321, 2.48483866701, 0.0, 894)
femmesh.addNode(55.7057437631, 2.61760956069, 0.0, 895)
femmesh.addNode(46.2441805152, 2.61728907528, 0.0, 896)
femmesh.addNode(45.2033039804, 2.62537450588, 0.0, 897)
femmesh.addNode(57.5322636986, 2.110446883, 0.0, 898)
femmesh.addNode(44.0904305239, 2.68442565496, 0.0, 899)
femmesh.addNode(43.0459082931, 2.6454019063, 0.0, 900)
femmesh.addNode(44.5330812655, 3.54307737653, 0.0, 901)
femmesh.addNode(44.4108512954, 7.61606840071, 0.0, 902)
femmesh.addNode(45.3793534598, 7.61646680308, 0.0, 903)
femmesh.addNode(44.243907413, 6.83092395433, 0.0, 904)
femmesh.addNode(46.3456150476, 7.61222548472, 0.0, 905)
femmesh.addNode(47.3209827116, 7.61458396984, 0.0, 906)
femmesh.addNode(46.1335812175, 6.8141888711, 0.0, 907)
femmesh.addNode(48.2783441159, 7.62775531302, 0.0, 908)
femmesh.addNode(49.2438835671, 7.64306435528, 0.0, 909)
femmesh.addNode(48.0572042095, 6.82311889592, 0.0, 910)
femmesh.addNode(50.1691299747, 7.68977161365, 0.0, 911)
femmesh.addNode(51.1117244107, 7.72910706787, 0.0, 912)
femmesh.addNode(49.9398120213, 6.88299749746, 0.0, 913)
femmesh.addNode(51.9326015077, 7.84936306012, 0.0, 914)
femmesh.addNode(52.9048995687, 7.82735277194, 0.0, 915)
femmesh.addNode(51.6859355259, 7.04996074807, 0.0, 916)
femmesh.addNode(48.3106889663, 2.62018124129, 0.0, 917)
femmesh.addNode(47.284381982, 2.6146127696, 0.0, 918)
femmesh.addNode(46.6861561888, 3.48134992178, 0.0, 919)
femmesh.addNode(48.7481774662, 3.50090409516, 0.0, 920)
femmesh.addNode(42.4797137876, 3.37844908454, 0.0, 921)
femmesh.addNode(56.7199731668, 7.70406943871, 0.0, 922)
femmesh.addNode(57.0959099497, 6.7978154704, 0.0, 923)
femmesh.addNode(56.1927484094, 6.16356983953, 0.0, 924)
femmesh.addNode(55.6782428248, 7.72188845234, 0.0, 925)
femmesh.addNode(42.4691494304, 4.19225233241, 0.0, 926)
femmesh.addNode(57.349713809, 3.01403865134, 0.0, 927)
femmesh.addNode(43.170103652, 4.49361018945, 0.0, 928)
femmesh.addNode(57.2790770775, 4.02012852541, 0.0, 929)
femmesh.addNode(54.5996866448, 2.94082804557, 0.0, 930)
femmesh.addNode(53.6009904232, 2.97188151058, 0.0, 931)
femmesh.addNode(53.0263096145, 3.84777337483, 0.0, 932)
femmesh.addNode(53.8204373097, 7.50133986199, 0.0, 933)
femmesh.addNode(54.7509207164, 7.46930555902, 0.0, 934)
femmesh.addNode(56.3011494699, 4.34610670367, 0.0, 935)
femmesh.addNode(55.2652102131, 6.77077381374, 0.0, 936)
femmesh.addNode(55.3032773241, 3.71164892022, 0.0, 937)
femmesh.addNode(55.1950923515, 4.66932518854, 0.0, 938)
femmesh.addNode(51.1558003365, 4.45587748324, 0.0, 939)
femmesh.addNode(50.1130994633, 4.38710509806, 0.0, 940)
femmesh.addNode(53.4223503638, 6.84420383037, 0.0, 941)
femmesh.addNode(43.9575434805, 6.02586336572, 0.0, 942)
femmesh.addNode(44.8809556972, 6.01336960085, 0.0, 943)
femmesh.addNode(43.7572956298, 5.29670043277, 0.0, 944)
femmesh.addNode(45.8410167712, 5.99489520713, 0.0, 945)
femmesh.addNode(46.807278359, 5.99065388877, 0.0, 946)
femmesh.addNode(45.5546528388, 5.18983461852, 0.0, 947)
femmesh.addNode(49.7353836317, 6.04368293051, 0.0, 948)
femmesh.addNode(50.6606300393, 6.09039018887, 0.0, 949)
femmesh.addNode(51.6535560627, 6.21602945085, 0.0, 950)
femmesh.addNode(52.4744331597, 6.3362854431, 0.0, 951)
femmesh.addNode(53.3899709007, 6.01027253315, 0.0, 952)
femmesh.addNode(51.4491276731, 5.3767148839, 0.0, 953)
femmesh.addNode(55.265426301, 5.91098694621, 0.0, 954)
femmesh.addNode(47.1205357842, 4.32501692045, 0.0, 955)
femmesh.addNode(46.0803343174, 4.32769322613, 0.0, 956)
femmesh.addNode(53.3196369511, 4.76861077548, 0.0, 957)
femmesh.addNode(54.2675541552, 5.27652916275, 0.0, 958)
femmesh.addNode(44.0073997869, 4.40521876893, 0.0, 959)
femmesh.addNode(52.1487263599, 4.58151674523, 0.0, 960)
femmesh.addNode(45.1202732434, 4.34616761985, 0.0, 961)
femmesh.addNode(47.8017502142, 5.99721080113, 0.0, 962)
femmesh.addNode(48.7591116185, 6.01038214431, 0.0, 963)
femmesh.addNode(49.4799296364, 5.21777483571, 0.0, 964)
femmesh.addNode(47.509185768, 5.17791713715, 0.0, 965)
femmesh.addNode(49.13682745, 4.35380431186, 0.0, 966)
femmesh.addNode(48.1150076394, 4.3315738328, 0.0, 967)
# Nodes 968-1129: full strip at x ~ [20, 40) — nodes 806-967 shifted by -20
# in x, node ids offset by +162.
femmesh.addNode(21.0, 1.0, 0.0, 968)
femmesh.addNode(20.7696098831, 2.57738482174, 0.0, 969)
femmesh.addNode(20.7696098831, 3.57738482174, 0.0, 970)
femmesh.addNode(20.7590455259, 4.3911880696, 0.0, 971)
femmesh.addNode(20.7590455259, 5.3911880696, 0.0, 972)
femmesh.addNode(21.5286554089, 3.96857289134, 0.0, 973)
femmesh.addNode(20.7831037327, 6.28623044955, 0.0, 974)
femmesh.addNode(20.7831037327, 7.28623044955, 0.0, 975)
femmesh.addNode(21.5421492586, 5.67741851915, 0.0, 976)
femmesh.addNode(20.8799788043, 8.15011465756, 0.0, 977)
femmesh.addNode(20.8799788043, 9.15011465756, 0.0, 978)
femmesh.addNode(21.8799788043, 9.15011465756, 0.0, 979)
femmesh.addNode(21.663082537, 7.43634510711, 0.0, 980)
femmesh.addNode(23.3358043886, 0.844337643495, 0.0, 981)
femmesh.addNode(22.3358043886, 0.844337643495, 0.0, 982)
femmesh.addNode(27.4212031542, 0.875275961552, 0.0, 983)
femmesh.addNode(26.4212031542, 0.875275961552, 0.0, 984)
femmesh.addNode(31.4668849037, 0.905290541907, 0.0, 985)
femmesh.addNode(30.4668849037, 0.905290541907, 0.0, 986)
femmesh.addNode(35.5010765419, 0.923394343021, 0.0, 987)
femmesh.addNode(34.5010765419, 0.923394343021, 0.0, 988)
femmesh.addNode(39.0, 1.0, 0.0, 989)
femmesh.addNode(22.8271914806, 9.20685330599, 0.0, 990)
femmesh.addNode(23.8271914806, 9.20685330599, 0.0, 991)
femmesh.addNode(22.7071702849, 8.35696796355, 0.0, 992)
femmesh.addNode(24.7956936449, 9.20725170835, 0.0, 993)
femmesh.addNode(25.7956936449, 9.20725170835, 0.0, 994)
femmesh.addNode(24.6228851255, 8.41410501433, 0.0, 995)
femmesh.addNode(26.771061309, 9.20961019347, 0.0, 996)
femmesh.addNode(27.771061309, 9.20961019347, 0.0, 997)
femmesh.addNode(26.5667549539, 8.41686190181, 0.0, 998)
femmesh.addNode(28.7366007602, 9.22491923573, 0.0, 999)
femmesh.addNode(29.7366007602, 9.22491923573, 0.0, 1000)
femmesh.addNode(28.5076620692, 8.4345294292, 0.0, 1001)
femmesh.addNode(30.6791951963, 9.26425468996, 0.0, 1002)
femmesh.addNode(31.6791951963, 9.26425468996, 0.0, 1003)
femmesh.addNode(30.4157959565, 8.48917392569, 0.0, 1004)
femmesh.addNode(32.6514932573, 9.24224440178, 0.0, 1005)
femmesh.addNode(33.6514932573, 9.24224440178, 0.0, 1006)
femmesh.addNode(32.3306884536, 8.50649909174, 0.0, 1007)
femmesh.addNode(34.581976664, 9.21021009881, 0.0, 1008)
femmesh.addNode(35.581976664, 9.21021009881, 0.0, 1009)
femmesh.addNode(34.2334699214, 8.45245450059, 0.0, 1010)
femmesh.addNode(36.623707006, 9.19239108518, 0.0, 1011)
femmesh.addNode(37.623707006, 9.19239108518, 0.0, 1012)
femmesh.addNode(36.2056836701, 8.40260118399, 0.0, 1013)
femmesh.addNode(39.0, 9.0, 0.0, 1014)
femmesh.addNode(39.1450465877, 3.31982343367, 0.0, 1015)
femmesh.addNode(39.1450465877, 2.31982343367, 0.0, 1016)
femmesh.addNode(38.999643789, 7.28613711686, 0.0, 1017)
femmesh.addNode(38.999643789, 6.28613711686, 0.0, 1018)
femmesh.addNode(38.1450465877, 1.31982343367, 0.0, 1019)
femmesh.addNode(25.3803266194, 0.883361392153, 0.0, 1020)
femmesh.addNode(24.3803266194, 0.883361392153, 0.0, 1021)
femmesh.addNode(29.4475101385, 0.880844433239, 0.0, 1022)
femmesh.addNode(28.4475101385, 0.880844433239, 0.0, 1023)
femmesh.addNode(33.5023803204, 0.95444780804, 0.0, 1024)
femmesh.addNode(32.5023803204, 0.95444780804, 0.0, 1025)
femmesh.addNode(25.8015297736, 1.7586373537, 0.0, 1026)
femmesh.addNode(37.3872171108, 0.790623449336, 0.0, 1027)
femmesh.addNode(36.3872171108, 0.790623449336, 0.0, 1028)
femmesh.addNode(23.716131008, 1.72769903565, 0.0, 1029)
femmesh.addNode(38.623707006, 8.19239108518, 0.0, 1030)
femmesh.addNode(39.0744098563, 5.32591330773, 0.0, 1031)
femmesh.addNode(39.0744098563, 4.32591330773, 0.0, 1032)
femmesh.addNode(37.623350795, 7.47852820204, 0.0, 1033)
femmesh.addNode(21.3358043886, 1.8443376435, 0.0, 1034)
femmesh.addNode(31.9692652241, 1.85973834995, 0.0, 1035)
femmesh.addNode(35.8882936527, 1.71401779236, 0.0, 1036)
femmesh.addNode(29.9143950422, 1.78613497515, 0.0, 1037)
femmesh.addNode(34.0034568623, 1.87784215106, 0.0, 1038)
femmesh.addNode(38.219456444, 3.6457367414, 0.0, 1039)
femmesh.addNode(27.8687132927, 1.75612039479, 0.0, 1040)
femmesh.addNode(38.0740536453, 5.6120504246, 0.0, 1041)
femmesh.addNode(22.1054142717, 2.42172246523, 0.0, 1042)
femmesh.addNode(22.2190452733, 5.08373399625, 0.0, 1043)
femmesh.addNode(22.2431034801, 5.9787763762, 0.0, 1044)
femmesh.addNode(22.4433513308, 6.70793930915, 0.0, 1045)
femmesh.addNode(22.5402264025, 7.57182351717, 0.0, 1046)
femmesh.addNode(23.4874390787, 7.62856216559, 0.0, 1047)
femmesh.addNode(23.1202473456, 6.11425478625, 0.0, 1048)
femmesh.addNode(37.0961260376, 5.93802860286, 0.0, 1049)
femmesh.addNode(37.1708921049, 4.97780479373, 0.0, 1050)
femmesh.addNode(32.430079832, 2.78478748033, 0.0, 1051)
femmesh.addNode(31.3945844153, 2.73563021419, 0.0, 1052)
femmesh.addNode(30.3518835421, 2.66685782902, 0.0, 1053)
femmesh.addNode(29.332508777, 2.64241172035, 0.0, 1054)
femmesh.addNode(30.81269815, 3.59190695939, 0.0, 1055)
femmesh.addNode(36.5918843321, 2.48483866701, 0.0, 1056)
femmesh.addNode(35.7057437631, 2.61760956069, 0.0, 1057)
femmesh.addNode(26.2441805152, 2.61728907528, 0.0, 1058)
femmesh.addNode(25.2033039804, 2.62537450588, 0.0, 1059)
femmesh.addNode(37.5322636986, 2.110446883, 0.0, 1060)
femmesh.addNode(24.0904305239, 2.68442565496, 0.0, 1061)
femmesh.addNode(23.0459082931, 2.6454019063, 0.0, 1062)
femmesh.addNode(24.5330812655, 3.54307737653, 0.0, 1063)
femmesh.addNode(24.4108512954, 7.61606840071, 0.0, 1064)
femmesh.addNode(25.3793534598, 7.61646680308, 0.0, 1065)
femmesh.addNode(24.243907413, 6.83092395433, 0.0, 1066)
femmesh.addNode(26.3456150476, 7.61222548472, 0.0, 1067)
femmesh.addNode(27.3209827116, 7.61458396984, 0.0, 1068)
femmesh.addNode(26.1335812175, 6.8141888711, 0.0, 1069)
femmesh.addNode(28.2783441159, 7.62775531302, 0.0, 1070)
femmesh.addNode(29.2438835671, 7.64306435528, 0.0, 1071)
femmesh.addNode(28.0572042095, 6.82311889592, 0.0, 1072)
femmesh.addNode(30.1691299747, 7.68977161365, 0.0, 1073)
femmesh.addNode(31.1117244107, 7.72910706787, 0.0, 1074)
femmesh.addNode(29.9398120213, 6.88299749746, 0.0, 1075)
femmesh.addNode(31.9326015077, 7.84936306012, 0.0, 1076)
femmesh.addNode(32.9048995687, 7.82735277194, 0.0, 1077)
femmesh.addNode(31.6859355259, 7.04996074807, 0.0, 1078)
femmesh.addNode(28.3106889663, 2.62018124129, 0.0, 1079)
femmesh.addNode(27.284381982, 2.6146127696, 0.0, 1080)
femmesh.addNode(26.6861561888, 3.48134992178, 0.0, 1081)
femmesh.addNode(28.7481774662, 3.50090409516, 0.0, 1082)
femmesh.addNode(22.4797137876, 3.37844908454, 0.0, 1083)
femmesh.addNode(36.7199731668, 7.70406943871, 0.0, 1084)
femmesh.addNode(37.0959099497, 6.7978154704, 0.0, 1085)
femmesh.addNode(36.1927484094, 6.16356983953, 0.0, 1086)
femmesh.addNode(35.6782428248, 7.72188845234, 0.0, 1087)
femmesh.addNode(22.4691494304, 4.19225233241, 0.0, 1088)
femmesh.addNode(37.349713809, 3.01403865134, 0.0, 1089)
femmesh.addNode(23.170103652, 4.49361018945, 0.0, 1090)
femmesh.addNode(37.2790770775, 4.02012852541, 0.0, 1091)
femmesh.addNode(34.5996866448, 2.94082804557, 0.0, 1092)
femmesh.addNode(33.6009904232, 2.97188151058, 0.0, 1093)
femmesh.addNode(33.0263096145, 3.84777337483, 0.0, 1094)
femmesh.addNode(33.8204373097, 7.50133986199, 0.0, 1095)
femmesh.addNode(34.7509207164, 7.46930555902, 0.0, 1096)
femmesh.addNode(36.3011494699, 4.34610670367, 0.0, 1097)
femmesh.addNode(35.2652102131, 6.77077381374, 0.0, 1098)
femmesh.addNode(35.3032773241, 3.71164892022, 0.0, 1099)
femmesh.addNode(35.1950923515, 4.66932518854, 0.0, 1100)
femmesh.addNode(31.1558003365, 4.45587748324, 0.0, 1101)
femmesh.addNode(30.1130994633, 4.38710509806, 0.0, 1102)
femmesh.addNode(33.4223503638, 6.84420383037, 0.0, 1103)
femmesh.addNode(23.9575434805, 6.02586336572, 0.0, 1104)
femmesh.addNode(24.8809556972, 6.01336960085, 0.0, 1105)
femmesh.addNode(23.7572956298, 5.29670043277, 0.0, 1106)
femmesh.addNode(25.8410167712, 5.99489520713, 0.0, 1107)
femmesh.addNode(26.807278359, 5.99065388877, 0.0, 1108)
femmesh.addNode(25.5546528388, 5.18983461852, 0.0, 1109)
femmesh.addNode(29.7353836317, 6.04368293051, 0.0, 1110)
femmesh.addNode(30.6606300393, 6.09039018887, 0.0, 1111)
femmesh.addNode(31.6535560627, 6.21602945085, 0.0, 1112)
femmesh.addNode(32.4744331597, 6.3362854431, 0.0, 1113)
femmesh.addNode(33.3899709007, 6.01027253315, 0.0, 1114)
femmesh.addNode(31.4491276731, 5.3767148839, 0.0, 1115)
femmesh.addNode(35.265426301, 5.91098694621, 0.0, 1116)
femmesh.addNode(27.1205357842, 4.32501692045, 0.0, 1117)
femmesh.addNode(26.0803343174, 4.32769322613, 0.0, 1118)
femmesh.addNode(33.3196369511, 4.76861077548, 0.0, 1119)
femmesh.addNode(34.2675541552, 5.27652916275, 0.0, 1120)
femmesh.addNode(24.0073997869, 4.40521876893, 0.0, 1121)
femmesh.addNode(32.1487263599, 4.58151674523, 0.0, 1122)
femmesh.addNode(25.1202732434, 4.34616761985, 0.0, 1123)
femmesh.addNode(27.8017502142, 5.99721080113, 0.0, 1124)
femmesh.addNode(28.7591116185, 6.01038214431, 0.0, 1125)
femmesh.addNode(29.4799296364, 5.21777483571, 0.0, 1126)
femmesh.addNode(27.509185768, 5.17791713715, 0.0, 1127)
femmesh.addNode(29.13682745, 4.35380431186, 0.0, 1128)
femmesh.addNode(28.1150076394, 4.3315738328, 0.0, 1129)
# Nodes 1130-...: strip at x ~ [0, 20) — same layout shifted by another -20
# in x, node ids +162 (note: literals here carry extra fractional digits,
# e.g. 0.769609883053 vs 20.7696098831 — both are 12-significant-digit
# reprs of the translated coordinates, so the strips cannot be regenerated
# from each other by float addition without changing the stored values).
# This strip continues past the end of this section.
femmesh.addNode(1.0, 1.0, 0.0, 1130)
femmesh.addNode(0.769609883053, 2.57738482174, 0.0, 1131)
femmesh.addNode(0.769609883053, 3.57738482174, 0.0, 1132)
femmesh.addNode(0.75904552589, 4.3911880696, 0.0, 1133)
femmesh.addNode(0.75904552589, 5.3911880696, 0.0, 1134)
femmesh.addNode(1.52865540894, 3.96857289134, 0.0, 1135)
femmesh.addNode(0.783103732661, 6.28623044955, 0.0, 1136)
femmesh.addNode(0.783103732661, 7.28623044955, 0.0, 1137)
femmesh.addNode(1.54214925855, 5.67741851915, 0.0, 1138)
femmesh.addNode(0.879978804346, 8.15011465756, 0.0, 1139)
femmesh.addNode(0.879978804346, 9.15011465756, 0.0, 1140)
femmesh.addNode(1.87997880435, 9.15011465756, 0.0, 1141)
femmesh.addNode(1.66308253701, 7.43634510711, 0.0, 1142)
femmesh.addNode(3.3358043886, 0.844337643495, 0.0, 1143)
femmesh.addNode(2.3358043886, 0.844337643495, 0.0, 1144)
femmesh.addNode(7.42120315421, 0.875275961552, 0.0, 1145)
femmesh.addNode(6.42120315421, 0.875275961552, 0.0, 1146)
femmesh.addNode(11.4668849037, 0.905290541907, 0.0, 1147)
femmesh.addNode(10.4668849037, 0.905290541907, 0.0, 1148)
femmesh.addNode(15.5010765419, 0.923394343021, 0.0, 1149)
femmesh.addNode(14.5010765419, 0.923394343021, 0.0, 1150)
femmesh.addNode(19.0, 1.0, 0.0, 1151)
femmesh.addNode(2.82719148058, 9.20685330599, 0.0, 1152)
femmesh.addNode(3.82719148058, 9.20685330599, 0.0, 1153)
femmesh.addNode(2.70717028492, 8.35696796355, 0.0, 1154)
femmesh.addNode(4.79569364494, 9.20725170835, 0.0, 1155)
femmesh.addNode(5.79569364494, 9.20725170835, 0.0, 1156)
femmesh.addNode(4.62288512552, 8.41410501433, 0.0, 1157)
femmesh.addNode(6.77106130898, 9.20961019347, 0.0, 1158)
femmesh.addNode(7.77106130898, 9.20961019347, 0.0, 1159)
femmesh.addNode(6.56675495392, 8.41686190181, 0.0, 1160)
femmesh.addNode(8.73660076024, 9.22491923573, 0.0, 1161)
femmesh.addNode(9.73660076024, 9.22491923573, 0.0, 1162)
femmesh.addNode(8.50766206922, 8.4345294292, 0.0, 1163)
femmesh.addNode(10.6791951963, 9.26425468996, 0.0, 1164)
femmesh.addNode(11.6791951963, 9.26425468996, 0.0, 1165)
femmesh.addNode(10.4157959565, 8.48917392569, 0.0, 1166)
femmesh.addNode(12.6514932573, 9.24224440178, 0.0, 1167)
femmesh.addNode(13.6514932573, 9.24224440178, 0.0, 1168)
femmesh.addNode(12.3306884536, 8.50649909174, 0.0, 1169)
femmesh.addNode(14.581976664, 9.21021009881, 0.0, 1170)
femmesh.addNode(15.581976664, 9.21021009881, 0.0, 1171)
femmesh.addNode(14.2334699214, 8.45245450059, 0.0, 1172)
femmesh.addNode(16.623707006, 9.19239108518, 0.0, 1173)
femmesh.addNode(17.623707006, 9.19239108518, 0.0, 1174)
femmesh.addNode(16.2056836701, 8.40260118399, 0.0, 1175)
femmesh.addNode(19.0, 9.0, 0.0, 1176)
femmesh.addNode(19.1450465877, 3.31982343367, 0.0, 1177)
femmesh.addNode(19.1450465877, 2.31982343367, 0.0, 1178)
femmesh.addNode(18.999643789, 7.28613711686, 0.0, 1179)
femmesh.addNode(18.999643789, 6.28613711686, 0.0, 1180)
femmesh.addNode(18.1450465877, 1.31982343367, 0.0, 1181)
femmesh.addNode(5.38032661942, 0.883361392153, 0.0, 1182)
femmesh.addNode(4.38032661942, 0.883361392153, 0.0, 1183)
femmesh.addNode(9.44751013852, 0.880844433239, 0.0, 1184)
femmesh.addNode(8.44751013852, 0.880844433239, 0.0, 1185)
femmesh.addNode(13.5023803204, 0.95444780804, 0.0, 1186)
femmesh.addNode(12.5023803204, 0.95444780804, 0.0, 1187)
femmesh.addNode(5.80152977363, 1.7586373537, 0.0, 1188)
femmesh.addNode(17.3872171108, 0.790623449336, 0.0, 1189)
femmesh.addNode(16.3872171108, 0.790623449336, 0.0, 1190)
femmesh.addNode(3.71613100802, 1.72769903565, 0.0, 1191)
femmesh.addNode(18.623707006, 8.19239108518, 0.0, 1192)
femmesh.addNode(19.0744098563, 5.32591330773, 0.0, 1193)
femmesh.addNode(19.0744098563, 4.32591330773, 0.0, 1194)
femmesh.addNode(17.623350795, 7.47852820204, 0.0, 1195)
femmesh.addNode(1.3358043886, 1.8443376435, 0.0, 1196)
femmesh.addNode(11.9692652241, 1.85973834995, 0.0, 1197)
femmesh.addNode(15.8882936527, 1.71401779236, 0.0, 1198)
femmesh.addNode(9.9143950422, 1.78613497515, 0.0, 1199)
femmesh.addNode(14.0034568623, 1.87784215106, 0.0, 1200)
femmesh.addNode(18.219456444, 3.6457367414, 0.0, 1201)
femmesh.addNode(7.86871329272, 1.75612039479, 0.0, 1202)
femmesh.addNode(18.0740536453, 5.6120504246, 0.0, 1203)
femmesh.addNode(2.10541427165, 2.42172246523, 0.0, 1204)
femmesh.addNode(2.21904527333, 5.08373399625, 0.0, 1205)
femmesh.addNode(2.2431034801, 5.9787763762, 0.0, 1206)
femmesh.addNode(2.44335133079, 6.70793930915, 0.0, 1207)
femmesh.addNode(2.54022640248, 7.57182351717, 0.0, 1208)
femmesh.addNode(3.48743907871, 7.62856216559, 0.0, 1209)
femmesh.addNode(3.12024734557, 6.11425478625, 0.0, 1210)
femmesh.addNode(17.0961260376, 5.93802860286, 0.0, 1211)
femmesh.addNode(17.1708921049, 4.97780479373, 0.0, 1212)
femmesh.addNode(12.430079832, 2.78478748033, 0.0, 1213)
femmesh.addNode(11.3945844153, 2.73563021419, 0.0, 1214)
femmesh.addNode(10.3518835421, 2.66685782902, 0.0, 1215)
femmesh.addNode(9.33250877695, 2.64241172035, 0.0, 1216)
femmesh.addNode(10.81269815, 3.59190695939, 0.0, 1217)
femmesh.addNode(16.5918843321, 2.48483866701, 0.0, 1218)
femmesh.addNode(15.7057437631, 2.61760956069, 0.0, 1219)
femmesh.addNode(6.2441805152, 2.61728907528, 0.0, 1220)
femmesh.addNode(5.20330398041, 2.62537450588, 0.0, 1221)
femmesh.addNode(17.5322636986, 2.110446883, 0.0, 1222)
femmesh.addNode(4.09043052393, 2.68442565496, 0.0, 1223)
femmesh.addNode(3.04590829311, 2.6454019063, 0.0, 1224)
femmesh.addNode(4.5330812655, 3.54307737653, 0.0, 1225)
femmesh.addNode(4.41085129543, 7.61606840071, 0.0, 1226)
femmesh.addNode(5.37935345979, 7.61646680308, 0.0, 1227)
femmesh.addNode(4.24390741298, 6.83092395433, 0.0, 1228)
femmesh.addNode(6.34561504756, 7.61222548472, 0.0, 1229)
femmesh.addNode(7.3209827116, 7.61458396984, 0.0, 1230)
femmesh.addNode(6.13358121747, 6.8141888711, 0.0, 1231)
femmesh.addNode(8.27834411586, 7.62775531302, 0.0, 1232)
femmesh.addNode(9.24388356712, 7.64306435528, 0.0, 1233)
femmesh.addNode(8.0572042095, 6.82311889592, 0.0, 1234)
femmesh.addNode(10.1691299747, 7.68977161365, 0.0, 1235)
femmesh.addNode(11.1117244107, 7.72910706787, 0.0, 1236)
femmesh.addNode(9.93981202132, 6.88299749746, 0.0, 1237)
femmesh.addNode(11.9326015077, 7.84936306012, 0.0, 1238)
femmesh.addNode(12.9048995687, 7.82735277194, 0.0, 1239)
femmesh.addNode(11.6859355259, 7.04996074807, 0.0, 1240)
femmesh.addNode(8.31068896631, 2.62018124129, 0.0, 1241)
femmesh.addNode(7.28438198201, 2.6146127696, 0.0, 1242)
femmesh.addNode(6.68615618879, 3.48134992178, 0.0, 1243)
femmesh.addNode(8.74817746623, 3.50090409516, 0.0, 1244)
femmesh.addNode(2.47971378757, 3.37844908454, 0.0, 1245)
femmesh.addNode(16.7199731668, 7.70406943871, 0.0, 1246)
femmesh.addNode(17.0959099497, 6.7978154704, 0.0, 1247)
femmesh.addNode(16.1927484094, 6.16356983953, 0.0, 1248)
femmesh.addNode(15.6782428248, 7.72188845234, 0.0, 1249)
femmesh.addNode(2.46914943041, 4.19225233241, 0.0, 1250)
femmesh.addNode(17.349713809, 3.01403865134, 0.0, 1251)
femmesh.addNode(3.17010365196, 4.49361018945, 0.0, 1252)
femmesh.addNode(17.2790770775, 4.02012852541, 0.0, 1253)
femmesh.addNode(14.5996866448, 2.94082804557, 0.0, 1254)
femmesh.addNode(13.6009904232, 2.97188151058, 0.0, 1255)
femmesh.addNode(13.0263096145, 3.84777337483, 0.0, 1256)
femmesh.addNode(13.8204373097, 7.50133986199, 0.0, 1257)
femmesh.addNode(14.7509207164, 7.46930555902, 0.0, 1258)
femmesh.addNode(16.3011494699, 4.34610670367, 0.0, 1259)
femmesh.addNode(15.2652102131, 6.77077381374, 0.0, 1260)
femmesh.addNode(15.3032773241, 3.71164892022, 0.0, 1261)
femmesh.addNode(15.1950923515, 4.66932518854, 0.0, 1262)
femmesh.addNode(11.1558003365, 4.45587748324, 0.0, 1263)
femmesh.addNode(10.1130994633, 4.38710509806, 0.0, 1264)
femmesh.addNode(13.4223503638, 6.84420383037, 0.0, 1265)
femmesh.addNode(3.95754348051, 6.02586336572, 0.0, 1266)
femmesh.addNode(4.88095569723, 6.01336960085, 0.0, 1267)
femmesh.addNode(3.75729562982, 5.29670043277, 0.0, 1268)
femmesh.addNode(5.84101677123, 5.99489520713, 0.0, 1269)
femmesh.addNode(6.807278359, 5.99065388877, 0.0, 1270)
femmesh.addNode(5.55465283876, 5.18983461852, 0.0, 1271)
femmesh.addNode(9.73538363172, 6.04368293051, 0.0, 1272)
femmesh.addNode(10.6606300393, 6.09039018887, 0.0, 1273)
femmesh.addNode(11.6535560627, 6.21602945085, 0.0, 1274)
femmesh.addNode(12.4744331597, 6.3362854431, 0.0, 1275)
femmesh.addNode(13.3899709007, 6.01027253315, 0.0, 1276)
femmesh.addNode(11.4491276731, 5.3767148839, 0.0, 1277)
femmesh.addNode(15.265426301, 5.91098694621, 0.0, 1278)
femmesh.addNode(7.12053578418, 4.32501692045, 0.0, 1279)
femmesh.addNode(6.08033431737, 4.32769322613, 0.0, 1280)
femmesh.addNode(13.3196369511, 4.76861077548, 0.0, 1281)
femmesh.addNode(14.2675541552, 5.27652916275, 0.0, 1282)
femmesh.addNode(4.0073997869, 4.40521876893, 0.0, 1283)
femmesh.addNode(12.1487263599, 4.58151674523, 0.0, 1284)
femmesh.addNode(5.12027324337, 4.34616761985, 0.0, 1285)
femmesh.addNode(7.8017502142, 5.99721080113, 0.0, 1286)
femmesh.addNode(8.75911161846, 6.01038214431, 0.0, 1287)
femmesh.addNode(9.47992963642, 5.21777483571, 0.0, 1288)
femmesh.addNode(7.50918576796, 5.17791713715, 0.0, 1289)
femmesh.addNode(9.13682745001, 4.35380431186, 0.0, 1290)
femmesh.addNode(8.11500763938, 4.3315738328, 0.0, 1291)
return True
def create_elements(femmesh):
# elements
femmesh.addFace([13, 4, 17, 352, 357, 482], 131)
femmesh.addFace([14, 13, 127, 353, 483, 484], 132)
femmesh.addFace([15, 14, 128, 354, 485, 486], 133)
femmesh.addFace([14, 127, 128, 484, 487, 485], 134)
femmesh.addFace([16, 15, 129, 355, 488, 489], 135)
femmesh.addFace([15, 128, 129, 486, 490, 488], 136)
femmesh.addFace([3, 16, 130, 356, 491, 492], 137)
femmesh.addFace([26, 3, 130, 367, 492, 493], 138)
femmesh.addFace([16, 129, 130, 489, 494, 491], 139)
femmesh.addFace([17, 18, 131, 358, 495, 496], 140)
femmesh.addFace([19, 20, 132, 360, 497, 498], 141)
femmesh.addFace([21, 22, 133, 362, 499, 500], 142)
femmesh.addFace([23, 24, 134, 364, 501, 502], 143)
femmesh.addFace([25, 2, 35, 366, 377, 503], 144)
femmesh.addFace([27, 26, 135, 368, 504, 505], 145)
femmesh.addFace([26, 130, 135, 493, 506, 504], 146)
femmesh.addFace([28, 27, 136, 369, 507, 508], 147)
femmesh.addFace([27, 135, 136, 505, 509, 507], 148)
femmesh.addFace([29, 28, 137, 370, 510, 511], 149)
femmesh.addFace([28, 136, 137, 508, 512, 510], 150)
femmesh.addFace([30, 29, 138, 371, 513, 514], 151)
femmesh.addFace([29, 137, 138, 511, 515, 513], 152)
femmesh.addFace([31, 30, 139, 372, 516, 517], 153)
femmesh.addFace([30, 138, 139, 514, 518, 516], 154)
femmesh.addFace([32, 31, 140, 373, 519, 520], 155)
femmesh.addFace([31, 139, 140, 517, 521, 519], 156)
femmesh.addFace([33, 32, 141, 374, 522, 523], 157)
femmesh.addFace([32, 140, 141, 520, 524, 522], 158)
femmesh.addFace([34, 33, 142, 375, 525, 526], 159)
femmesh.addFace([33, 141, 142, 523, 527, 525], 160)
femmesh.addFace([1, 34, 38, 376, 528, 381], 161)
femmesh.addFace([35, 36, 143, 378, 529, 530], 162)
femmesh.addFace([37, 38, 144, 380, 531, 532], 163)
femmesh.addFace([25, 35, 143, 503, 530, 533], 164)
femmesh.addFace([18, 19, 145, 359, 534, 535], 165)
femmesh.addFace([20, 21, 146, 361, 536, 537], 166)
femmesh.addFace([22, 23, 147, 363, 538, 539], 167)
femmesh.addFace([145, 19, 132, 534, 498, 540], 168)
femmesh.addFace([24, 25, 148, 365, 541, 542], 169)
femmesh.addFace([131, 18, 145, 495, 535, 543], 170)
femmesh.addFace([38, 34, 142, 528, 526, 544], 171)
femmesh.addFace([36, 37, 149, 379, 545, 546], 172)
femmesh.addFace([38, 142, 144, 544, 547, 531], 173)
femmesh.addFace([13, 17, 131, 482, 496, 548], 174)
femmesh.addFace([133, 22, 147, 499, 539, 549], 175)
femmesh.addFace([134, 24, 148, 501, 542, 550], 176)
femmesh.addFace([146, 21, 133, 536, 500, 551], 177)
femmesh.addFace([147, 23, 134, 538, 502, 552], 178)
femmesh.addFace([36, 149, 143, 546, 553, 529], 179)
femmesh.addFace([20, 146, 132, 537, 554, 497], 180)
femmesh.addFace([149, 37, 144, 545, 532, 555], 181)
femmesh.addFace([127, 13, 131, 483, 548, 556], 182)
femmesh.addFace([129, 128, 150, 490, 557, 558], 183)
femmesh.addFace([130, 129, 151, 494, 559, 560], 184)
femmesh.addFace([135, 130, 151, 506, 560, 561], 185)
femmesh.addFace([129, 150, 151, 558, 562, 559], 186)
femmesh.addFace([149, 144, 152, 555, 563, 564], 187)
femmesh.addFace([133, 147, 153, 549, 565, 566], 188)
femmesh.addFace([146, 133, 154, 551, 567, 568], 189)
femmesh.addFace([133, 153, 154, 566, 569, 567], 190)
femmesh.addFace([134, 148, 155, 550, 570, 571], 191)
femmesh.addFace([145, 132, 156, 540, 572, 573], 192)
femmesh.addFace([148, 25, 143, 541, 533, 574], 193)
femmesh.addFace([131, 145, 157, 543, 575, 576], 194)
femmesh.addFace([145, 156, 157, 573, 577, 575], 195)
femmesh.addFace([136, 135, 158, 509, 578, 579], 196)
femmesh.addFace([135, 151, 158, 561, 580, 578], 197)
femmesh.addFace([137, 136, 159, 512, 581, 582], 198)
femmesh.addFace([136, 158, 159, 579, 583, 581], 199)
femmesh.addFace([138, 137, 160, 515, 584, 585], 200)
femmesh.addFace([137, 159, 160, 582, 586, 584], 201)
femmesh.addFace([139, 138, 161, 518, 587, 588], 202)
femmesh.addFace([138, 160, 161, 585, 589, 587], 203)
femmesh.addFace([140, 139, 162, 521, 590, 591], 204)
femmesh.addFace([139, 161, 162, 588, 592, 590], 205)
femmesh.addFace([132, 146, 163, 554, 593, 594], 206)
femmesh.addFace([156, 132, 163, 572, 594, 595], 207)
femmesh.addFace([146, 154, 163, 568, 596, 593], 208)
femmesh.addFace([127, 131, 157, 556, 576, 597], 209)
femmesh.addFace([144, 142, 164, 547, 598, 599], 210)
femmesh.addFace([152, 144, 164, 563, 599, 600], 211)
femmesh.addFace([142, 141, 164, 527, 601, 598], 212)
femmesh.addFace([128, 127, 157, 487, 597, 602], 213)
femmesh.addFace([148, 143, 155, 574, 603, 570], 214)
femmesh.addFace([150, 128, 157, 557, 602, 604], 215)
femmesh.addFace([155, 143, 149, 603, 553, 605], 216)
femmesh.addFace([147, 134, 165, 552, 606, 607], 217)
femmesh.addFace([147, 165, 153, 607, 608, 565], 218)
femmesh.addFace([141, 140, 166, 524, 609, 610], 219)
femmesh.addFace([155, 149, 152, 605, 564, 611], 220)
femmesh.addFace([164, 141, 166, 601, 610, 612], 221)
femmesh.addFace([165, 134, 155, 606, 571, 613], 222)
femmesh.addFace([155, 152, 165, 611, 614, 613], 223)
femmesh.addFace([154, 153, 167, 569, 615, 616], 224)
femmesh.addFace([140, 162, 166, 591, 617, 609], 225)
femmesh.addFace([158, 151, 168, 580, 618, 619], 226)
femmesh.addFace([151, 150, 168, 562, 620, 618], 227)
femmesh.addFace([159, 158, 169, 583, 621, 622], 228)
femmesh.addFace([158, 168, 169, 619, 623, 621], 229)
femmesh.addFace([161, 160, 167, 589, 624, 625], 230)
femmesh.addFace([162, 161, 170, 592, 626, 627], 231)
femmesh.addFace([166, 162, 170, 617, 627, 628], 232)
femmesh.addFace([161, 167, 170, 625, 629, 626], 233)
femmesh.addFace([164, 166, 152, 612, 630, 600], 234)
femmesh.addFace([156, 163, 169, 595, 631, 632], 235)
femmesh.addFace([165, 166, 170, 634, 628, 633], 236)
femmesh.addFace([166, 165, 152, 634, 614, 630], 237)
femmesh.addFace([150, 157, 168, 604, 635, 620], 238)
femmesh.addFace([153, 165, 170, 608, 633, 636], 239)
femmesh.addFace([153, 170, 167, 636, 629, 615], 240)
femmesh.addFace([168, 157, 156, 635, 577, 637], 241)
femmesh.addFace([160, 159, 171, 586, 638, 639], 242)
femmesh.addFace([167, 160, 171, 624, 639, 640], 243)
femmesh.addFace([159, 169, 171, 622, 641, 638], 244)
femmesh.addFace([163, 154, 171, 596, 642, 643], 245)
femmesh.addFace([168, 156, 169, 637, 632, 623], 246)
femmesh.addFace([171, 154, 167, 642, 616, 640], 247)
femmesh.addFace([171, 169, 163, 641, 631, 643], 248)
femmesh.addFace([39, 6, 43, 382, 387, 644], 249)
femmesh.addFace([40, 39, 172, 383, 645, 646], 250)
femmesh.addFace([41, 40, 173, 384, 647, 648], 251)
femmesh.addFace([40, 172, 173, 646, 649, 647], 252)
femmesh.addFace([42, 41, 174, 385, 650, 651], 253)
femmesh.addFace([41, 173, 174, 648, 652, 650], 254)
femmesh.addFace([5, 42, 175, 386, 653, 654], 255)
femmesh.addFace([52, 5, 175, 397, 654, 655], 256)
femmesh.addFace([42, 174, 175, 651, 656, 653], 257)
femmesh.addFace([43, 44, 176, 388, 657, 658], 258)
femmesh.addFace([45, 46, 177, 390, 659, 660], 259)
femmesh.addFace([47, 48, 178, 392, 661, 662], 260)
femmesh.addFace([49, 50, 179, 394, 663, 664], 261)
femmesh.addFace([51, 4, 13, 396, 352, 665], 262)
femmesh.addFace([53, 52, 180, 398, 666, 667], 263)
femmesh.addFace([52, 175, 180, 655, 668, 666], 264)
femmesh.addFace([54, 53, 181, 399, 669, 670], 265)
femmesh.addFace([53, 180, 181, 667, 671, 669], 266)
femmesh.addFace([55, 54, 182, 400, 672, 673], 267)
femmesh.addFace([54, 181, 182, 670, 674, 672], 268)
femmesh.addFace([56, 55, 183, 401, 675, 676], 269)
femmesh.addFace([55, 182, 183, 673, 677, 675], 270)
femmesh.addFace([57, 56, 184, 402, 678, 679], 271)
femmesh.addFace([56, 183, 184, 676, 680, 678], 272)
femmesh.addFace([58, 57, 185, 403, 681, 682], 273)
femmesh.addFace([57, 184, 185, 679, 683, 681], 274)
femmesh.addFace([59, 58, 186, 404, 684, 685], 275)
femmesh.addFace([58, 185, 186, 682, 686, 684], 276)
femmesh.addFace([60, 59, 187, 405, 687, 688], 277)
femmesh.addFace([59, 186, 187, 685, 689, 687], 278)
femmesh.addFace([3, 60, 16, 406, 690, 356], 279)
femmesh.addFace([13, 14, 188, 353, 691, 692], 280)
femmesh.addFace([15, 16, 189, 355, 693, 694], 281)
femmesh.addFace([51, 13, 188, 665, 692, 695], 282)
femmesh.addFace([44, 45, 190, 389, 696, 697], 283)
femmesh.addFace([46, 47, 191, 391, 698, 699], 284)
femmesh.addFace([48, 49, 192, 393, 700, 701], 285)
femmesh.addFace([190, 45, 177, 696, 660, 702], 286)
femmesh.addFace([50, 51, 193, 395, 703, 704], 287)
femmesh.addFace([176, 44, 190, 657, 697, 705], 288)
femmesh.addFace([16, 60, 187, 690, 688, 706], 289)
femmesh.addFace([14, 15, 194, 354, 707, 708], 290)
femmesh.addFace([16, 187, 189, 706, 709, 693], 291)
femmesh.addFace([39, 43, 176, 644, 658, 710], 292)
femmesh.addFace([178, 48, 192, 661, 701, 711], 293)
femmesh.addFace([179, 50, 193, 663, 704, 712], 294)
femmesh.addFace([191, 47, 178, 698, 662, 713], 295)
femmesh.addFace([192, 49, 179, 700, 664, 714], 296)
femmesh.addFace([14, 194, 188, 708, 715, 691], 297)
femmesh.addFace([46, 191, 177, 699, 716, 659], 298)
femmesh.addFace([194, 15, 189, 707, 694, 717], 299)
femmesh.addFace([172, 39, 176, 645, 710, 718], 300)
femmesh.addFace([174, 173, 195, 652, 719, 720], 301)
femmesh.addFace([175, 174, 196, 656, 721, 722], 302)
femmesh.addFace([180, 175, 196, 668, 722, 723], 303)
femmesh.addFace([174, 195, 196, 720, 724, 721], 304)
femmesh.addFace([194, 189, 197, 717, 725, 726], 305)
femmesh.addFace([178, 192, 198, 711, 727, 728], 306)
femmesh.addFace([191, 178, 199, 713, 729, 730], 307)
femmesh.addFace([178, 198, 199, 728, 731, 729], 308)
femmesh.addFace([179, 193, 200, 712, 732, 733], 309)
femmesh.addFace([190, 177, 201, 702, 734, 735], 310)
femmesh.addFace([193, 51, 188, 703, 695, 736], 311)
femmesh.addFace([176, 190, 202, 705, 737, 738], 312)
femmesh.addFace([190, 201, 202, 735, 739, 737], 313)
femmesh.addFace([181, 180, 203, 671, 740, 741], 314)
femmesh.addFace([180, 196, 203, 723, 742, 740], 315)
femmesh.addFace([182, 181, 204, 674, 743, 744], 316)
femmesh.addFace([181, 203, 204, 741, 745, 743], 317)
femmesh.addFace([183, 182, 205, 677, 746, 747], 318)
femmesh.addFace([182, 204, 205, 744, 748, 746], 319)
femmesh.addFace([184, 183, 206, 680, 749, 750], 320)
femmesh.addFace([183, 205, 206, 747, 751, 749], 321)
femmesh.addFace([185, 184, 207, 683, 752, 753], 322)
femmesh.addFace([184, 206, 207, 750, 754, 752], 323)
femmesh.addFace([177, 191, 208, 716, 755, 756], 324)
femmesh.addFace([201, 177, 208, 734, 756, 757], 325)
femmesh.addFace([191, 199, 208, 730, 758, 755], 326)
femmesh.addFace([172, 176, 202, 718, 738, 759], 327)
femmesh.addFace([189, 187, 209, 709, 760, 761], 328)
femmesh.addFace([197, 189, 209, 725, 761, 762], 329)
femmesh.addFace([187, 186, 209, 689, 763, 760], 330)
femmesh.addFace([173, 172, 202, 649, 759, 764], 331)
femmesh.addFace([193, 188, 200, 736, 765, 732], 332)
femmesh.addFace([195, 173, 202, 719, 764, 766], 333)
femmesh.addFace([200, 188, 194, 765, 715, 767], 334)
femmesh.addFace([192, 179, 210, 714, 768, 769], 335)
femmesh.addFace([192, 210, 198, 769, 770, 727], 336)
femmesh.addFace([186, 185, 211, 686, 771, 772], 337)
femmesh.addFace([200, 194, 197, 767, 726, 773], 338)
femmesh.addFace([209, 186, 211, 763, 772, 774], 339)
femmesh.addFace([210, 179, 200, 768, 733, 775], 340)
femmesh.addFace([200, 197, 210, 773, 776, 775], 341)
femmesh.addFace([199, 198, 212, 731, 777, 778], 342)
femmesh.addFace([185, 207, 211, 753, 779, 771], 343)
femmesh.addFace([203, 196, 213, 742, 780, 781], 344)
femmesh.addFace([196, 195, 213, 724, 782, 780], 345)
femmesh.addFace([204, 203, 214, 745, 783, 784], 346)
femmesh.addFace([203, 213, 214, 781, 785, 783], 347)
femmesh.addFace([206, 205, 212, 751, 786, 787], 348)
femmesh.addFace([207, 206, 215, 754, 788, 789], 349)
femmesh.addFace([211, 207, 215, 779, 789, 790], 350)
femmesh.addFace([206, 212, 215, 787, 791, 788], 351)
femmesh.addFace([209, 211, 197, 774, 792, 762], 352)
femmesh.addFace([201, 208, 214, 757, 793, 794], 353)
femmesh.addFace([210, 211, 215, 796, 790, 795], 354)
femmesh.addFace([211, 210, 197, 796, 776, 792], 355)
femmesh.addFace([195, 202, 213, 766, 797, 782], 356)
femmesh.addFace([198, 210, 215, 770, 795, 798], 357)
femmesh.addFace([198, 215, 212, 798, 791, 777], 358)
femmesh.addFace([213, 202, 201, 797, 739, 799], 359)
femmesh.addFace([205, 204, 216, 748, 800, 801], 360)
femmesh.addFace([212, 205, 216, 786, 801, 802], 361)
femmesh.addFace([204, 214, 216, 784, 803, 800], 362)
femmesh.addFace([208, 199, 216, 758, 804, 805], 363)
femmesh.addFace([213, 201, 214, 799, 794, 785], 364)
femmesh.addFace([216, 199, 212, 804, 778, 802], 365)
femmesh.addFace([216, 214, 208, 803, 793, 805], 366)
femmesh.addFace([61, 8, 65, 407, 412, 806], 367)
femmesh.addFace([62, 61, 217, 408, 807, 808], 368)
femmesh.addFace([63, 62, 218, 409, 809, 810], 369)
femmesh.addFace([62, 217, 218, 808, 811, 809], 370)
femmesh.addFace([64, 63, 219, 410, 812, 813], 371)
femmesh.addFace([63, 218, 219, 810, 814, 812], 372)
femmesh.addFace([7, 64, 220, 411, 815, 816], 373)
femmesh.addFace([74, 7, 220, 422, 816, 817], 374)
femmesh.addFace([64, 219, 220, 813, 818, 815], 375)
femmesh.addFace([65, 66, 221, 413, 819, 820], 376)
femmesh.addFace([67, 68, 222, 415, 821, 822], 377)
femmesh.addFace([69, 70, 223, 417, 823, 824], 378)
femmesh.addFace([71, 72, 224, 419, 825, 826], 379)
femmesh.addFace([73, 6, 39, 421, 382, 827], 380)
femmesh.addFace([75, 74, 225, 423, 828, 829], 381)
femmesh.addFace([74, 220, 225, 817, 830, 828], 382)
femmesh.addFace([76, 75, 226, 424, 831, 832], 383)
femmesh.addFace([75, 225, 226, 829, 833, 831], 384)
femmesh.addFace([77, 76, 227, 425, 834, 835], 385)
femmesh.addFace([76, 226, 227, 832, 836, 834], 386)
femmesh.addFace([78, 77, 228, 426, 837, 838], 387)
femmesh.addFace([77, 227, 228, 835, 839, 837], 388)
femmesh.addFace([79, 78, 229, 427, 840, 841], 389)
femmesh.addFace([78, 228, 229, 838, 842, 840], 390)
femmesh.addFace([80, 79, 230, 428, 843, 844], 391)
femmesh.addFace([79, 229, 230, 841, 845, 843], 392)
femmesh.addFace([81, 80, 231, 429, 846, 847], 393)
femmesh.addFace([80, 230, 231, 844, 848, 846], 394)
femmesh.addFace([82, 81, 232, 430, 849, 850], 395)
femmesh.addFace([81, 231, 232, 847, 851, 849], 396)
femmesh.addFace([5, 82, 42, 431, 852, 386], 397)
femmesh.addFace([39, 40, 233, 383, 853, 854], 398)
femmesh.addFace([41, 42, 234, 385, 855, 856], 399)
femmesh.addFace([73, 39, 233, 827, 854, 857], 400)
femmesh.addFace([66, 67, 235, 414, 858, 859], 401)
femmesh.addFace([68, 69, 236, 416, 860, 861], 402)
femmesh.addFace([70, 71, 237, 418, 862, 863], 403)
femmesh.addFace([235, 67, 222, 858, 822, 864], 404)
femmesh.addFace([72, 73, 238, 420, 865, 866], 405)
femmesh.addFace([221, 66, 235, 819, 859, 867], 406)
femmesh.addFace([42, 82, 232, 852, 850, 868], 407)
femmesh.addFace([40, 41, 239, 384, 869, 870], 408)
femmesh.addFace([42, 232, 234, 868, 871, 855], 409)
femmesh.addFace([61, 65, 221, 806, 820, 872], 410)
femmesh.addFace([223, 70, 237, 823, 863, 873], 411)
femmesh.addFace([224, 72, 238, 825, 866, 874], 412)
femmesh.addFace([236, 69, 223, 860, 824, 875], 413)
femmesh.addFace([237, 71, 224, 862, 826, 876], 414)
femmesh.addFace([40, 239, 233, 870, 877, 853], 415)
femmesh.addFace([68, 236, 222, 861, 878, 821], 416)
femmesh.addFace([239, 41, 234, 869, 856, 879], 417)
femmesh.addFace([217, 61, 221, 807, 872, 880], 418)
femmesh.addFace([219, 218, 240, 814, 881, 882], 419)
femmesh.addFace([220, 219, 241, 818, 883, 884], 420)
femmesh.addFace([225, 220, 241, 830, 884, 885], 421)
femmesh.addFace([219, 240, 241, 882, 886, 883], 422)
femmesh.addFace([239, 234, 242, 879, 887, 888], 423)
femmesh.addFace([223, 237, 243, 873, 889, 890], 424)
femmesh.addFace([236, 223, 244, 875, 891, 892], 425)
femmesh.addFace([223, 243, 244, 890, 893, 891], 426)
femmesh.addFace([224, 238, 245, 874, 894, 895], 427)
femmesh.addFace([235, 222, 246, 864, 896, 897], 428)
femmesh.addFace([238, 73, 233, 865, 857, 898], 429)
femmesh.addFace([221, 235, 247, 867, 899, 900], 430)
femmesh.addFace([235, 246, 247, 897, 901, 899], 431)
femmesh.addFace([226, 225, 248, 833, 902, 903], 432)
femmesh.addFace([225, 241, 248, 885, 904, 902], 433)
femmesh.addFace([227, 226, 249, 836, 905, 906], 434)
femmesh.addFace([226, 248, 249, 903, 907, 905], 435)
femmesh.addFace([228, 227, 250, 839, 908, 909], 436)
femmesh.addFace([227, 249, 250, 906, 910, 908], 437)
femmesh.addFace([229, 228, 251, 842, 911, 912], 438)
femmesh.addFace([228, 250, 251, 909, 913, 911], 439)
femmesh.addFace([230, 229, 252, 845, 914, 915], 440)
femmesh.addFace([229, 251, 252, 912, 916, 914], 441)
femmesh.addFace([222, 236, 253, 878, 917, 918], 442)
femmesh.addFace([246, 222, 253, 896, 918, 919], 443)
femmesh.addFace([236, 244, 253, 892, 920, 917], 444)
femmesh.addFace([217, 221, 247, 880, 900, 921], 445)
femmesh.addFace([234, 232, 254, 871, 922, 923], 446)
femmesh.addFace([242, 234, 254, 887, 923, 924], 447)
femmesh.addFace([232, 231, 254, 851, 925, 922], 448)
femmesh.addFace([218, 217, 247, 811, 921, 926], 449)
femmesh.addFace([238, 233, 245, 898, 927, 894], 450)
femmesh.addFace([240, 218, 247, 881, 926, 928], 451)
femmesh.addFace([245, 233, 239, 927, 877, 929], 452)
femmesh.addFace([237, 224, 255, 876, 930, 931], 453)
femmesh.addFace([237, 255, 243, 931, 932, 889], 454)
femmesh.addFace([231, 230, 256, 848, 933, 934], 455)
femmesh.addFace([245, 239, 242, 929, 888, 935], 456)
femmesh.addFace([254, 231, 256, 925, 934, 936], 457)
femmesh.addFace([255, 224, 245, 930, 895, 937], 458)
femmesh.addFace([245, 242, 255, 935, 938, 937], 459)
femmesh.addFace([244, 243, 257, 893, 939, 940], 460)
femmesh.addFace([230, 252, 256, 915, 941, 933], 461)
femmesh.addFace([248, 241, 258, 904, 942, 943], 462)
femmesh.addFace([241, 240, 258, 886, 944, 942], 463)
femmesh.addFace([249, 248, 259, 907, 945, 946], 464)
femmesh.addFace([248, 258, 259, 943, 947, 945], 465)
femmesh.addFace([251, 250, 257, 913, 948, 949], 466)
femmesh.addFace([252, 251, 260, 916, 950, 951], 467)
femmesh.addFace([256, 252, 260, 941, 951, 952], 468)
femmesh.addFace([251, 257, 260, 949, 953, 950], 469)
femmesh.addFace([254, 256, 242, 936, 954, 924], 470)
femmesh.addFace([246, 253, 259, 919, 955, 956], 471)
femmesh.addFace([255, 256, 260, 958, 952, 957], 472)
femmesh.addFace([256, 255, 242, 958, 938, 954], 473)
femmesh.addFace([240, 247, 258, 928, 959, 944], 474)
femmesh.addFace([243, 255, 260, 932, 957, 960], 475)
femmesh.addFace([243, 260, 257, 960, 953, 939], 476)
femmesh.addFace([258, 247, 246, 959, 901, 961], 477)
femmesh.addFace([250, 249, 261, 910, 962, 963], 478)
femmesh.addFace([257, 250, 261, 948, 963, 964], 479)
femmesh.addFace([249, 259, 261, 946, 965, 962], 480)
femmesh.addFace([253, 244, 261, 920, 966, 967], 481)
femmesh.addFace([258, 246, 259, 961, 956, 947], 482)
femmesh.addFace([261, 244, 257, 966, 940, 964], 483)
femmesh.addFace([261, 259, 253, 965, 955, 967], 484)
femmesh.addFace([83, 10, 87, 432, 437, 968], 485)
femmesh.addFace([84, 83, 262, 433, 969, 970], 486)
femmesh.addFace([85, 84, 263, 434, 971, 972], 487)
femmesh.addFace([84, 262, 263, 970, 973, 971], 488)
femmesh.addFace([86, 85, 264, 435, 974, 975], 489)
femmesh.addFace([85, 263, 264, 972, 976, 974], 490)
femmesh.addFace([9, 86, 265, 436, 977, 978], 491)
femmesh.addFace([96, 9, 265, 447, 978, 979], 492)
femmesh.addFace([86, 264, 265, 975, 980, 977], 493)
femmesh.addFace([87, 88, 266, 438, 981, 982], 494)
femmesh.addFace([89, 90, 267, 440, 983, 984], 495)
femmesh.addFace([91, 92, 268, 442, 985, 986], 496)
femmesh.addFace([93, 94, 269, 444, 987, 988], 497)
femmesh.addFace([95, 8, 61, 446, 407, 989], 498)
femmesh.addFace([97, 96, 270, 448, 990, 991], 499)
femmesh.addFace([96, 265, 270, 979, 992, 990], 500)
femmesh.addFace([98, 97, 271, 449, 993, 994], 501)
femmesh.addFace([97, 270, 271, 991, 995, 993], 502)
femmesh.addFace([99, 98, 272, 450, 996, 997], 503)
femmesh.addFace([98, 271, 272, 994, 998, 996], 504)
femmesh.addFace([100, 99, 273, 451, 999, 1000], 505)
femmesh.addFace([99, 272, 273, 997, 1001, 999], 506)
femmesh.addFace([101, 100, 274, 452, 1002, 1003], 507)
femmesh.addFace([100, 273, 274, 1000, 1004, 1002], 508)
femmesh.addFace([102, 101, 275, 453, 1005, 1006], 509)
femmesh.addFace([101, 274, 275, 1003, 1007, 1005], 510)
femmesh.addFace([103, 102, 276, 454, 1008, 1009], 511)
femmesh.addFace([102, 275, 276, 1006, 1010, 1008], 512)
femmesh.addFace([104, 103, 277, 455, 1011, 1012], 513)
femmesh.addFace([103, 276, 277, 1009, 1013, 1011], 514)
femmesh.addFace([7, 104, 64, 456, 1014, 411], 515)
femmesh.addFace([61, 62, 278, 408, 1015, 1016], 516)
femmesh.addFace([63, 64, 279, 410, 1017, 1018], 517)
femmesh.addFace([95, 61, 278, 989, 1016, 1019], 518)
femmesh.addFace([88, 89, 280, 439, 1020, 1021], 519)
femmesh.addFace([90, 91, 281, 441, 1022, 1023], 520)
femmesh.addFace([92, 93, 282, 443, 1024, 1025], 521)
femmesh.addFace([280, 89, 267, 1020, 984, 1026], 522)
femmesh.addFace([94, 95, 283, 445, 1027, 1028], 523)
femmesh.addFace([266, 88, 280, 981, 1021, 1029], 524)
femmesh.addFace([64, 104, 277, 1014, 1012, 1030], 525)
femmesh.addFace([62, 63, 284, 409, 1031, 1032], 526)
femmesh.addFace([64, 277, 279, 1030, 1033, 1017], 527)
femmesh.addFace([83, 87, 266, 968, 982, 1034], 528)
femmesh.addFace([268, 92, 282, 985, 1025, 1035], 529)
femmesh.addFace([269, 94, 283, 987, 1028, 1036], 530)
femmesh.addFace([281, 91, 268, 1022, 986, 1037], 531)
femmesh.addFace([282, 93, 269, 1024, 988, 1038], 532)
femmesh.addFace([62, 284, 278, 1032, 1039, 1015], 533)
femmesh.addFace([90, 281, 267, 1023, 1040, 983], 534)
femmesh.addFace([284, 63, 279, 1031, 1018, 1041], 535)
femmesh.addFace([262, 83, 266, 969, 1034, 1042], 536)
femmesh.addFace([264, 263, 285, 976, 1043, 1044], 537)
femmesh.addFace([265, 264, 286, 980, 1045, 1046], 538)
femmesh.addFace([270, 265, 286, 992, 1046, 1047], 539)
femmesh.addFace([264, 285, 286, 1044, 1048, 1045], 540)
femmesh.addFace([284, 279, 287, 1041, 1049, 1050], 541)
femmesh.addFace([268, 282, 288, 1035, 1051, 1052], 542)
femmesh.addFace([281, 268, 289, 1037, 1053, 1054], 543)
femmesh.addFace([268, 288, 289, 1052, 1055, 1053], 544)
femmesh.addFace([269, 283, 290, 1036, 1056, 1057], 545)
femmesh.addFace([280, 267, 291, 1026, 1058, 1059], 546)
femmesh.addFace([283, 95, 278, 1027, 1019, 1060], 547)
femmesh.addFace([266, 280, 292, 1029, 1061, 1062], 548)
femmesh.addFace([280, 291, 292, 1059, 1063, 1061], 549)
femmesh.addFace([271, 270, 293, 995, 1064, 1065], 550)
femmesh.addFace([270, 286, 293, 1047, 1066, 1064], 551)
femmesh.addFace([272, 271, 294, 998, 1067, 1068], 552)
femmesh.addFace([271, 293, 294, 1065, 1069, 1067], 553)
femmesh.addFace([273, 272, 295, 1001, 1070, 1071], 554)
femmesh.addFace([272, 294, 295, 1068, 1072, 1070], 555)
femmesh.addFace([274, 273, 296, 1004, 1073, 1074], 556)
femmesh.addFace([273, 295, 296, 1071, 1075, 1073], 557)
femmesh.addFace([275, 274, 297, 1007, 1076, 1077], 558)
femmesh.addFace([274, 296, 297, 1074, 1078, 1076], 559)
femmesh.addFace([267, 281, 298, 1040, 1079, 1080], 560)
femmesh.addFace([291, 267, 298, 1058, 1080, 1081], 561)
femmesh.addFace([281, 289, 298, 1054, 1082, 1079], 562)
femmesh.addFace([262, 266, 292, 1042, 1062, 1083], 563)
femmesh.addFace([279, 277, 299, 1033, 1084, 1085], 564)
femmesh.addFace([287, 279, 299, 1049, 1085, 1086], 565)
femmesh.addFace([277, 276, 299, 1013, 1087, 1084], 566)
femmesh.addFace([263, 262, 292, 973, 1083, 1088], 567)
femmesh.addFace([283, 278, 290, 1060, 1089, 1056], 568)
femmesh.addFace([285, 263, 292, 1043, 1088, 1090], 569)
femmesh.addFace([290, 278, 284, 1089, 1039, 1091], 570)
femmesh.addFace([282, 269, 300, 1038, 1092, 1093], 571)
femmesh.addFace([282, 300, 288, 1093, 1094, 1051], 572)
femmesh.addFace([276, 275, 301, 1010, 1095, 1096], 573)
femmesh.addFace([290, 284, 287, 1091, 1050, 1097], 574)
femmesh.addFace([299, 276, 301, 1087, 1096, 1098], 575)
femmesh.addFace([300, 269, 290, 1092, 1057, 1099], 576)
femmesh.addFace([290, 287, 300, 1097, 1100, 1099], 577)
femmesh.addFace([289, 288, 302, 1055, 1101, 1102], 578)
femmesh.addFace([275, 297, 301, 1077, 1103, 1095], 579)
femmesh.addFace([293, 286, 303, 1066, 1104, 1105], 580)
femmesh.addFace([286, 285, 303, 1048, 1106, 1104], 581)
femmesh.addFace([294, 293, 304, 1069, 1107, 1108], 582)
femmesh.addFace([293, 303, 304, 1105, 1109, 1107], 583)
femmesh.addFace([296, 295, 302, 1075, 1110, 1111], 584)
femmesh.addFace([297, 296, 305, 1078, 1112, 1113], 585)
femmesh.addFace([301, 297, 305, 1103, 1113, 1114], 586)
femmesh.addFace([296, 302, 305, 1111, 1115, 1112], 587)
femmesh.addFace([299, 301, 287, 1098, 1116, 1086], 588)
femmesh.addFace([291, 298, 304, 1081, 1117, 1118], 589)
femmesh.addFace([300, 301, 305, 1120, 1114, 1119], 590)
femmesh.addFace([301, 300, 287, 1120, 1100, 1116], 591)
femmesh.addFace([285, 292, 303, 1090, 1121, 1106], 592)
femmesh.addFace([288, 300, 305, 1094, 1119, 1122], 593)
femmesh.addFace([288, 305, 302, 1122, 1115, 1101], 594)
femmesh.addFace([303, 292, 291, 1121, 1063, 1123], 595)
femmesh.addFace([295, 294, 306, 1072, 1124, 1125], 596)
femmesh.addFace([302, 295, 306, 1110, 1125, 1126], 597)
femmesh.addFace([294, 304, 306, 1108, 1127, 1124], 598)
femmesh.addFace([298, 289, 306, 1082, 1128, 1129], 599)
femmesh.addFace([303, 291, 304, 1123, 1118, 1109], 600)
femmesh.addFace([306, 289, 302, 1128, 1102, 1126], 601)
femmesh.addFace([306, 304, 298, 1127, 1117, 1129], 602)
femmesh.addFace([105, 12, 109, 457, 462, 1130], 603)
femmesh.addFace([106, 105, 307, 458, 1131, 1132], 604)
femmesh.addFace([107, 106, 308, 459, 1133, 1134], 605)
femmesh.addFace([106, 307, 308, 1132, 1135, 1133], 606)
femmesh.addFace([108, 107, 309, 460, 1136, 1137], 607)
femmesh.addFace([107, 308, 309, 1134, 1138, 1136], 608)
femmesh.addFace([11, 108, 310, 461, 1139, 1140], 609)
femmesh.addFace([118, 11, 310, 472, 1140, 1141], 610)
femmesh.addFace([108, 309, 310, 1137, 1142, 1139], 611)
femmesh.addFace([109, 110, 311, 463, 1143, 1144], 612)
femmesh.addFace([111, 112, 312, 465, 1145, 1146], 613)
femmesh.addFace([113, 114, 313, 467, 1147, 1148], 614)
femmesh.addFace([115, 116, 314, 469, 1149, 1150], 615)
femmesh.addFace([117, 10, 83, 471, 432, 1151], 616)
femmesh.addFace([119, 118, 315, 473, 1152, 1153], 617)
femmesh.addFace([118, 310, 315, 1141, 1154, 1152], 618)
femmesh.addFace([120, 119, 316, 474, 1155, 1156], 619)
femmesh.addFace([119, 315, 316, 1153, 1157, 1155], 620)
femmesh.addFace([121, 120, 317, 475, 1158, 1159], 621)
femmesh.addFace([120, 316, 317, 1156, 1160, 1158], 622)
femmesh.addFace([122, 121, 318, 476, 1161, 1162], 623)
femmesh.addFace([121, 317, 318, 1159, 1163, 1161], 624)
femmesh.addFace([123, 122, 319, 477, 1164, 1165], 625)
femmesh.addFace([122, 318, 319, 1162, 1166, 1164], 626)
femmesh.addFace([124, 123, 320, 478, 1167, 1168], 627)
femmesh.addFace([123, 319, 320, 1165, 1169, 1167], 628)
femmesh.addFace([125, 124, 321, 479, 1170, 1171], 629)
femmesh.addFace([124, 320, 321, 1168, 1172, 1170], 630)
femmesh.addFace([126, 125, 322, 480, 1173, 1174], 631)
femmesh.addFace([125, 321, 322, 1171, 1175, 1173], 632)
femmesh.addFace([9, 126, 86, 481, 1176, 436], 633)
femmesh.addFace([83, 84, 323, 433, 1177, 1178], 634)
femmesh.addFace([85, 86, 324, 435, 1179, 1180], 635)
femmesh.addFace([117, 83, 323, 1151, 1178, 1181], 636)
femmesh.addFace([110, 111, 325, 464, 1182, 1183], 637)
femmesh.addFace([112, 113, 326, 466, 1184, 1185], 638)
femmesh.addFace([114, 115, 327, 468, 1186, 1187], 639)
femmesh.addFace([325, 111, 312, 1182, 1146, 1188], 640)
femmesh.addFace([116, 117, 328, 470, 1189, 1190], 641)
femmesh.addFace([311, 110, 325, 1143, 1183, 1191], 642)
femmesh.addFace([86, 126, 322, 1176, 1174, 1192], 643)
femmesh.addFace([84, 85, 329, 434, 1193, 1194], 644)
femmesh.addFace([86, 322, 324, 1192, 1195, 1179], 645)
femmesh.addFace([105, 109, 311, 1130, 1144, 1196], 646)
femmesh.addFace([313, 114, 327, 1147, 1187, 1197], 647)
femmesh.addFace([314, 116, 328, 1149, 1190, 1198], 648)
femmesh.addFace([326, 113, 313, 1184, 1148, 1199], 649)
femmesh.addFace([327, 115, 314, 1186, 1150, 1200], 650)
femmesh.addFace([84, 329, 323, 1194, 1201, 1177], 651)
femmesh.addFace([112, 326, 312, 1185, 1202, 1145], 652)
femmesh.addFace([329, 85, 324, 1193, 1180, 1203], 653)
femmesh.addFace([307, 105, 311, 1131, 1196, 1204], 654)
femmesh.addFace([309, 308, 330, 1138, 1205, 1206], 655)
femmesh.addFace([310, 309, 331, 1142, 1207, 1208], 656)
femmesh.addFace([315, 310, 331, 1154, 1208, 1209], 657)
femmesh.addFace([309, 330, 331, 1206, 1210, 1207], 658)
femmesh.addFace([329, 324, 332, 1203, 1211, 1212], 659)
femmesh.addFace([313, 327, 333, 1197, 1213, 1214], 660)
femmesh.addFace([326, 313, 334, 1199, 1215, 1216], 661)
femmesh.addFace([313, 333, 334, 1214, 1217, 1215], 662)
femmesh.addFace([314, 328, 335, 1198, 1218, 1219], 663)
femmesh.addFace([325, 312, 336, 1188, 1220, 1221], 664)
femmesh.addFace([328, 117, 323, 1189, 1181, 1222], 665)
femmesh.addFace([311, 325, 337, 1191, 1223, 1224], 666)
femmesh.addFace([325, 336, 337, 1221, 1225, 1223], 667)
femmesh.addFace([316, 315, 338, 1157, 1226, 1227], 668)
femmesh.addFace([315, 331, 338, 1209, 1228, 1226], 669)
femmesh.addFace([317, 316, 339, 1160, 1229, 1230], 670)
femmesh.addFace([316, 338, 339, 1227, 1231, 1229], 671)
femmesh.addFace([318, 317, 340, 1163, 1232, 1233], 672)
femmesh.addFace([317, 339, 340, 1230, 1234, 1232], 673)
femmesh.addFace([319, 318, 341, 1166, 1235, 1236], 674)
femmesh.addFace([318, 340, 341, 1233, 1237, 1235], 675)
femmesh.addFace([320, 319, 342, 1169, 1238, 1239], 676)
femmesh.addFace([319, 341, 342, 1236, 1240, 1238], 677)
femmesh.addFace([312, 326, 343, 1202, 1241, 1242], 678)
femmesh.addFace([336, 312, 343, 1220, 1242, 1243], 679)
femmesh.addFace([326, 334, 343, 1216, 1244, 1241], 680)
femmesh.addFace([307, 311, 337, 1204, 1224, 1245], 681)
femmesh.addFace([324, 322, 344, 1195, 1246, 1247], 682)
femmesh.addFace([332, 324, 344, 1211, 1247, 1248], 683)
femmesh.addFace([322, 321, 344, 1175, 1249, 1246], 684)
femmesh.addFace([308, 307, 337, 1135, 1245, 1250], 685)
femmesh.addFace([328, 323, 335, 1222, 1251, 1218], 686)
femmesh.addFace([330, 308, 337, 1205, 1250, 1252], 687)
femmesh.addFace([335, 323, 329, 1251, 1201, 1253], 688)
femmesh.addFace([327, 314, 345, 1200, 1254, 1255], 689)
femmesh.addFace([327, 345, 333, 1255, 1256, 1213], 690)
femmesh.addFace([321, 320, 346, 1172, 1257, 1258], 691)
femmesh.addFace([335, 329, 332, 1253, 1212, 1259], 692)
femmesh.addFace([344, 321, 346, 1249, 1258, 1260], 693)
femmesh.addFace([345, 314, 335, 1254, 1219, 1261], 694)
femmesh.addFace([335, 332, 345, 1259, 1262, 1261], 695)
femmesh.addFace([334, 333, 347, 1217, 1263, 1264], 696)
femmesh.addFace([320, 342, 346, 1239, 1265, 1257], 697)
femmesh.addFace([338, 331, 348, 1228, 1266, 1267], 698)
femmesh.addFace([331, 330, 348, 1210, 1268, 1266], 699)
femmesh.addFace([339, 338, 349, 1231, 1269, 1270], 700)
femmesh.addFace([338, 348, 349, 1267, 1271, 1269], 701)
femmesh.addFace([341, 340, 347, 1237, 1272, 1273], 702)
femmesh.addFace([342, 341, 350, 1240, 1274, 1275], 703)
femmesh.addFace([346, 342, 350, 1265, 1275, 1276], 704)
femmesh.addFace([341, 347, 350, 1273, 1277, 1274], 705)
femmesh.addFace([344, 346, 332, 1260, 1278, 1248], 706)
femmesh.addFace([336, 343, 349, 1243, 1279, 1280], 707)
femmesh.addFace([345, 346, 350, 1282, 1276, 1281], 708)
femmesh.addFace([346, 345, 332, 1282, 1262, 1278], 709)
femmesh.addFace([330, 337, 348, 1252, 1283, 1268], 710)
femmesh.addFace([333, 345, 350, 1256, 1281, 1284], 711)
femmesh.addFace([333, 350, 347, 1284, 1277, 1263], 712)
femmesh.addFace([348, 337, 336, 1283, 1225, 1285], 713)
femmesh.addFace([340, 339, 351, 1234, 1286, 1287], 714)
femmesh.addFace([347, 340, 351, 1272, 1287, 1288], 715)
femmesh.addFace([339, 349, 351, 1270, 1289, 1286], 716)
femmesh.addFace([343, 334, 351, 1244, 1290, 1291], 717)
femmesh.addFace([348, 336, 349, 1285, 1280, 1271], 718)
femmesh.addFace([351, 334, 347, 1290, 1264, 1288], 719)
femmesh.addFace([351, 349, 343, 1289, 1279, 1291], 720)
return True
|
AddonManager | AddonManager | #!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
# ***************************************************************************
# * *
# * Copyright (c) 2022-2023 FreeCAD Project Association *
# * Copyright (c) 2015 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This file is part of FreeCAD. *
# * *
# * FreeCAD is free software: you can redistribute it and/or modify it *
# * under the terms of the GNU Lesser General Public License as *
# * published by the Free Software Foundation, either version 2.1 of the *
# * License, or (at your option) any later version. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with FreeCAD. If not, see *
# * <https://www.gnu.org/licenses/>. *
# * *
# ***************************************************************************
import functools
import json
import os
import tempfile
import threading
from datetime import date
from typing import Dict
import AddonManager_rc # This is required by Qt, it's not unused
import addonmanager_utilities as utils
import FreeCAD
import FreeCADGui
import NetworkManager
from Addon import Addon
from addonmanager_cache import local_cache_needs_update
from addonmanager_connection_checker import ConnectionCheckerGUI
from addonmanager_devmode import DeveloperMode
from addonmanager_devmode_metadata_checker import MetadataValidators
from addonmanager_firstrun import FirstRunDialog
from addonmanager_installer_gui import AddonInstallerGUI, MacroInstallerGUI
from addonmanager_uninstaller_gui import AddonUninstallerGUI
from addonmanager_update_all_gui import UpdateAllGUI
from addonmanager_workers_installation import UpdateMetadataCacheWorker
from addonmanager_workers_startup import (
CacheMacroCodeWorker,
CheckWorkbenchesForUpdatesWorker,
CreateAddonListWorker,
LoadMacrosFromCacheWorker,
LoadPackagesFromCacheWorker,
)
from AddonManagerOptions import AddonManagerOptions
from manage_python_dependencies import PythonPackageManager
from package_details import PackageDetails
from package_list import PackageList, PackageListItemModel
from PySide import QtCore, QtGui, QtWidgets
# Shorthand for FreeCAD's Qt translation function
translate = FreeCAD.Qt.translate
def QT_TRANSLATE_NOOP(_, txt):
    """Mark *txt* for Qt's translation tooling; a no-op at runtime."""
    return txt
__title__ = "FreeCAD Addon Manager Module"
__author__ = "Yorik van Havre", "Jonathan Wiedemann", "Kurt Kremitzki", "Chris Hennes"
__url__ = "http://www.freecad.org"
"""
FreeCAD Addon Manager Module
Fetches various types of addons from a variety of sources. Built-in sources are:
* https://github.com/FreeCAD/FreeCAD-addons
* https://github.com/FreeCAD/FreeCAD-macros
* https://wiki.freecad.org/
Additional git sources may be configure via user preferences.
You need a working internet connection, and optionally git -- if git is not available, ZIP archives
are downloaded instead.
"""
# \defgroup ADDONMANAGER AddonManager
# \ingroup ADDONMANAGER
# \brief The Addon Manager allows users to install workbenches and macros made by other users
# @{
INSTANCE = None
class CommandAddonManager:
    """The main Addon Manager class and FreeCAD command"""

    # Attribute names of every worker thread this class may create; iterated
    # by cleanup_workers() and reject() to interrupt and join whatever is
    # still running.
    workers = [
        "create_addon_list_worker",
        "check_worker",
        "show_worker",
        "showmacro_worker",
        "macro_worker",
        "update_metadata_cache_worker",
        "load_macro_metadata_worker",
        "update_all_worker",
        "check_for_python_package_updates_worker",
    ]

    # Class-level lock; used in on_package_updated() to serialize icon
    # regeneration and model reloads triggered from worker threads.
    lock = threading.Lock()

    # Set True (via status_updated) when an addon reaches PENDING_RESTART;
    # reject() offers to restart FreeCAD when this is set.
    restart_required = False
    def __init__(self):
        """Register the preferences page and the connection checker.

        No UI is created here; the dialog is built in launch(), which runs
        once the connection checker reports that the network is available.
        """
        QT_TRANSLATE_NOOP("QObject", "Addon Manager")  # no-op; exposes the title to translation tooling
        FreeCADGui.addPreferencePage(
            AddonManagerOptions,
            "Addon Manager",
        )
        # Worker/dialog handles; populated lazily by launch() and the
        # various startup phases.
        self.check_worker = None
        self.check_for_python_package_updates_worker = None
        self.update_all_worker = None
        self.developer_mode = None
        self.installer_gui = None
        self.update_cache = False
        self.dialog = None
        self.startup_sequence = []
        # Set up the connection checker
        self.connection_checker = ConnectionCheckerGUI()
        self.connection_checker.connection_available.connect(self.launch)
        # Give other parts of the AM access to the current instance
        global INSTANCE
        INSTANCE = self
def GetResources(self) -> Dict[str, str]:
"""FreeCAD-required function: get the core resource information for this Mod."""
return {
"Pixmap": "AddonManager",
"MenuText": QT_TRANSLATE_NOOP("Std_AddonMgr", "&Addon manager"),
"ToolTip": QT_TRANSLATE_NOOP(
"Std_AddonMgr",
"Manage external workbenches, macros, and preference packs",
),
"Group": "Tools",
}
def Activated(self) -> None:
"""FreeCAD-required function: called when the command is activated."""
NetworkManager.InitializeNetworkManager()
firstRunDialog = FirstRunDialog()
if not firstRunDialog.exec():
return
self.connection_checker.start()
    def launch(self) -> None:
        """Shows the Addon Manager UI.

        Builds the dialog from AddonManager.ui, restores its stored geometry,
        wires up all buttons and package-list signals, kicks off the threaded
        startup sequence, and finally runs the dialog modally.
        """
        # create the dialog
        self.dialog = FreeCADGui.PySideUic.loadUi(
            os.path.join(os.path.dirname(__file__), "AddonManager.ui")
        )
        self.dialog.setObjectName("AddonManager_Main_Window")
        # self.dialog.setWindowFlag(QtCore.Qt.WindowStaysOnTopHint, True)
        # cleanup the leftovers from previous runs
        self.macro_repo_dir = FreeCAD.getUserMacroDir(True)
        self.packages_with_updates = set()
        self.startup_sequence = []
        self.cleanup_workers()
        self.update_cache = local_cache_needs_update()
        # restore window geometry from stored state
        pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
        w = pref.GetInt("WindowWidth", 800)
        h = pref.GetInt("WindowHeight", 600)
        self.dialog.resize(w, h)
        # If we are checking for updates automatically, hide the Check for updates button:
        autocheck = pref.GetBool("AutoCheck", False)
        if autocheck:
            self.dialog.buttonCheckForUpdates.hide()
        else:
            self.dialog.buttonUpdateAll.hide()
        # Set up the listing of packages using the model-view-controller architecture
        self.packageList = PackageList(self.dialog)
        self.item_model = PackageListItemModel()
        self.packageList.setModel(self.item_model)
        self.dialog.contentPlaceholder.hide()
        self.dialog.layout().replaceWidget(
            self.dialog.contentPlaceholder, self.packageList
        )
        self.packageList.show()
        # Package details start out hidden
        self.packageDetails = PackageDetails(self.dialog)
        self.packageDetails.hide()
        index = self.dialog.layout().indexOf(self.packageList)
        self.dialog.layout().insertWidget(index, self.packageDetails)
        # set nice icons to everything, by theme with fallback to FreeCAD icons
        self.dialog.setWindowIcon(QtGui.QIcon(":/icons/AddonManager.svg"))
        self.dialog.buttonUpdateAll.setIcon(QtGui.QIcon(":/icons/button_valid.svg"))
        self.dialog.buttonCheckForUpdates.setIcon(
            QtGui.QIcon(":/icons/view-refresh.svg")
        )
        self.dialog.buttonClose.setIcon(
            QtGui.QIcon.fromTheme("close", QtGui.QIcon(":/icons/process-stop.svg"))
        )
        self.dialog.buttonPauseUpdate.setIcon(
            QtGui.QIcon.fromTheme(
                "pause", QtGui.QIcon(":/icons/media-playback-stop.svg")
            )
        )
        pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
        dev_mode_active = pref.GetBool("developerMode", False)
        # enable/disable stuff
        self.dialog.buttonUpdateAll.setEnabled(False)
        self.hide_progress_widgets()
        self.dialog.buttonUpdateCache.setEnabled(False)
        self.dialog.buttonUpdateCache.setText(
            translate("AddonsInstaller", "Starting up...")
        )
        if dev_mode_active:
            self.dialog.buttonDevTools.show()
        else:
            self.dialog.buttonDevTools.hide()
        # connect slots
        # NOTE(review): stop_update, executemacro, remove and
        # on_package_status_changed are expected to be defined later in this
        # class (outside this view) — confirm against the full file.
        self.dialog.rejected.connect(self.reject)
        self.dialog.buttonUpdateAll.clicked.connect(self.update_all)
        self.dialog.buttonClose.clicked.connect(self.dialog.reject)
        self.dialog.buttonUpdateCache.clicked.connect(self.on_buttonUpdateCache_clicked)
        self.dialog.buttonPauseUpdate.clicked.connect(self.stop_update)
        self.dialog.buttonCheckForUpdates.clicked.connect(
            lambda: self.force_check_updates(standalone=True)
        )
        self.dialog.buttonUpdateDependencies.clicked.connect(
            self.show_python_updates_dialog
        )
        self.dialog.buttonDevTools.clicked.connect(self.show_developer_tools)
        self.packageList.itemSelected.connect(self.table_row_activated)
        self.packageList.setEnabled(False)
        self.packageDetails.execute.connect(self.executemacro)
        self.packageDetails.install.connect(self.launch_installer_gui)
        self.packageDetails.uninstall.connect(self.remove)
        self.packageDetails.update.connect(self.update)
        self.packageDetails.back.connect(self.on_buttonBack_clicked)
        self.packageDetails.update_status.connect(self.status_updated)
        # center the dialog over the FreeCAD window
        mw = FreeCADGui.getMainWindow()
        self.dialog.move(
            mw.frameGeometry().topLeft()
            + mw.rect().center()
            - self.dialog.rect().center()
        )
        # set info for the progress bar:
        self.dialog.progressBar.setMaximum(1000)
        # begin populating the table in a set of sub-threads
        self.startup()
        # set the label text to start with
        self.show_information(translate("AddonsInstaller", "Loading addon information"))
        # rock 'n roll!!!
        self.dialog.exec()
def cleanup_workers(self) -> None:
"""Ensure that no workers are running by explicitly asking them to stop and waiting for
them until they do"""
for worker in self.workers:
if hasattr(self, worker):
thread = getattr(self, worker)
if thread:
if not thread.isFinished():
thread.blockSignals(True)
thread.requestInterruption()
for worker in self.workers:
if hasattr(self, worker):
thread = getattr(self, worker)
if thread:
if not thread.isFinished():
finished = thread.wait(500)
if not finished:
FreeCAD.Console.PrintWarning(
translate(
"AddonsInstaller",
"Worker process {} is taking a long time to stop...",
).format(worker)
+ "\n"
)
    def reject(self) -> None:
        """Called when the window has been closed.

        Persists window geometry, interrupts and joins all worker threads,
        writes the package/macro caches (only if no worker had to be killed,
        since the cache state would then be unknown), and offers to restart
        FreeCAD if a change requires it.
        """
        # save window geometry for next use
        pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
        pref.SetInt("WindowWidth", self.dialog.width())
        pref.SetInt("WindowHeight", self.dialog.height())
        # ensure all threads are finished before closing
        oktoclose = True
        worker_killed = False
        self.startup_sequence = []
        for worker in self.workers:
            if hasattr(self, worker):
                thread = getattr(self, worker)
                if thread:
                    if not thread.isFinished():
                        thread.blockSignals(True)
                        thread.requestInterruption()
                        worker_killed = True
                        oktoclose = False
        # Poll until every interrupted thread has exited, processing Qt
        # events between passes so the GUI stays responsive
        while not oktoclose:
            oktoclose = True
            for worker in self.workers:
                if hasattr(self, worker):
                    thread = getattr(self, worker)
                    if thread:
                        thread.wait(25)
                        if not thread.isFinished():
                            oktoclose = False
            QtCore.QCoreApplication.processEvents(QtCore.QEventLoop.AllEvents)
        # Write the cache data if it's safe to do so:
        if not worker_killed:
            for repo in self.item_model.repos:
                if repo.repo_type == Addon.Kind.MACRO:
                    self.cache_macro(repo)
                else:
                    self.cache_package(repo)
            self.write_package_cache()
            self.write_macro_cache()
        else:
            self.write_cache_stopfile()
            FreeCAD.Console.PrintLog(
                "Not writing the cache because a process was forcibly terminated and the state is "
                "unknown.\n"
            )
        if self.restart_required:
            # display restart dialog
            m = QtWidgets.QMessageBox()
            m.setWindowTitle(translate("AddonsInstaller", "Addon manager"))
            m.setWindowIcon(QtGui.QIcon(":/icons/AddonManager.svg"))
            m.setText(
                translate(
                    "AddonsInstaller",
                    "You must restart FreeCAD for changes to take effect.",
                )
            )
            m.setIcon(m.Warning)
            m.setStandardButtons(m.Ok | m.Cancel)
            m.setDefaultButton(m.Cancel)
            okBtn = m.button(QtWidgets.QMessageBox.StandardButton.Ok)
            cancelBtn = m.button(QtWidgets.QMessageBox.StandardButton.Cancel)
            okBtn.setText(translate("AddonsInstaller", "Restart now"))
            cancelBtn.setText(translate("AddonsInstaller", "Restart later"))
            ret = m.exec_()
            if ret == m.Ok:
                # restart FreeCAD after a delay to give time to this dialog to close
                QtCore.QTimer.singleShot(1000, utils.restart_freecad)
    def startup(self) -> None:
        """Downloads the available packages listings and populates the table

        This proceeds in four stages: first, the main GitHub repository is queried for a list of
        possible addons. Each addon is specified as a git submodule with name and branch
        information. The actual specific commit ID of the submodule (as listed on Github) is
        ignored. Any extra repositories specified by the user are appended to this list.

        Second, the list of macros is downloaded from the FreeCAD/FreeCAD-macros repository and
        the wiki.

        Third, each of these items is queried for a package.xml metadata file. If that file exists
        it is downloaded, cached, and any icons that it references are also downloaded and cached.

        Finally, for workbenches that are not contained within a package (e.g. they provide no
        metadata), an additional git query is made to see if an update is available. Macros are
        checked for file changes.

        Each of these stages is launched in a separate thread to ensure that the UI remains
        responsive, and the operation can be cancelled.

        Each stage is also subject to caching, so may return immediately, if no cache update has
        been requested."""
        # Each function in this list is expected to launch a thread and connect its completion
        # signal to self.do_next_startup_phase, or to shortcut to calling
        # self.do_next_startup_phase if it is not launching a worker
        self.startup_sequence = [
            self.populate_packages_table,
            self.activate_table_widgets,
            self.populate_macros,
            self.update_metadata_cache,
            self.check_updates,
            self.check_python_updates,
        ]
        pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
        if pref.GetBool("DownloadMacros", False):
            self.startup_sequence.append(self.load_macro_metadata)
        # If a specific addon was requested (stored in preferences), select it
        # right after the table is populated and activated, then clear the
        # request so it only fires once
        selection = pref.GetString("SelectedAddon", "")
        if selection:
            self.startup_sequence.insert(
                2, functools.partial(self.select_addon, selection)
            )
            pref.SetString("SelectedAddon", "")
        # Progress regions let update_progress_bar scale per-phase progress
        # into one overall bar
        self.current_progress_region = 0
        self.number_of_progress_regions = len(self.startup_sequence)
        self.do_next_startup_phase()
def do_next_startup_phase(self) -> None:
"""Pop the top item in self.startup_sequence off the list and run it"""
if len(self.startup_sequence) > 0:
phase_runner = self.startup_sequence.pop(0)
self.current_progress_region += 1
phase_runner()
else:
self.hide_progress_widgets()
self.update_cache = False
self.dialog.buttonUpdateCache.setEnabled(True)
self.dialog.buttonUpdateCache.setText(
translate("AddonsInstaller", "Refresh local cache")
)
pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
pref.SetString("LastCacheUpdate", date.today().isoformat())
self.packageList.item_filter.invalidateFilter()
def populate_packages_table(self) -> None:
self.item_model.clear()
use_cache = not self.update_cache
if use_cache:
if os.path.isfile(utils.get_cache_file_name("package_cache.json")):
with open(
utils.get_cache_file_name("package_cache.json"), encoding="utf-8"
) as f:
data = f.read()
try:
from_json = json.loads(data)
if len(from_json) == 0:
use_cache = False
except json.JSONDecodeError:
use_cache = False
else:
use_cache = False
if not use_cache:
self.update_cache = (
True # Make sure to trigger the other cache updates, if the json
)
# file was missing
self.create_addon_list_worker = CreateAddonListWorker()
self.create_addon_list_worker.status_message.connect(self.show_information)
self.create_addon_list_worker.addon_repo.connect(self.add_addon_repo)
self.update_progress_bar(10, 100)
self.create_addon_list_worker.finished.connect(
self.do_next_startup_phase
) # Link to step 2
self.create_addon_list_worker.start()
else:
self.create_addon_list_worker = LoadPackagesFromCacheWorker(
utils.get_cache_file_name("package_cache.json")
)
self.create_addon_list_worker.addon_repo.connect(self.add_addon_repo)
self.update_progress_bar(10, 100)
self.create_addon_list_worker.finished.connect(
self.do_next_startup_phase
) # Link to step 2
self.create_addon_list_worker.start()
def cache_package(self, repo: Addon):
if not hasattr(self, "package_cache"):
self.package_cache = {}
self.package_cache[repo.name] = repo.to_cache()
def write_package_cache(self):
if hasattr(self, "package_cache"):
package_cache_path = utils.get_cache_file_name("package_cache.json")
with open(package_cache_path, "w", encoding="utf-8") as f:
f.write(json.dumps(self.package_cache, indent=" "))
    def activate_table_widgets(self) -> None:
        """Startup phase: enable the package list, give the filter box
        keyboard focus, and advance immediately (no worker thread needed)."""
        self.packageList.setEnabled(True)
        self.packageList.ui.lineEditFilter.setFocus()
        self.do_next_startup_phase()
def populate_macros(self) -> None:
macro_cache_file = utils.get_cache_file_name("macro_cache.json")
cache_is_bad = True
if os.path.isfile(macro_cache_file):
size = os.path.getsize(macro_cache_file)
if size > 1000: # Make sure there is actually data in there
cache_is_bad = False
if cache_is_bad:
if not self.update_cache:
self.update_cache = (
True # Make sure to trigger the other cache updates, if the
)
# json file was missing
self.create_addon_list_worker = CreateAddonListWorker()
self.create_addon_list_worker.status_message.connect(
self.show_information
)
self.create_addon_list_worker.addon_repo.connect(self.add_addon_repo)
self.update_progress_bar(10, 100)
self.create_addon_list_worker.finished.connect(
self.do_next_startup_phase
) # Link to step 2
self.create_addon_list_worker.start()
else:
# It's already been done in the previous step (TODO: Refactor to eliminate this
# step)
self.do_next_startup_phase()
else:
self.macro_worker = LoadMacrosFromCacheWorker(
utils.get_cache_file_name("macro_cache.json")
)
self.macro_worker.add_macro_signal.connect(self.add_addon_repo)
self.macro_worker.finished.connect(self.do_next_startup_phase)
self.macro_worker.start()
def cache_macro(self, repo: Addon):
if not hasattr(self, "macro_cache"):
self.macro_cache = []
if repo.macro is not None:
self.macro_cache.append(repo.macro.to_cache())
else:
FreeCAD.Console.PrintError(
f"Addon Manager: Internal error, cache_macro called on non-macro {repo.name}\n"
)
def write_macro_cache(self):
if not hasattr(self, "macro_cache"):
return
macro_cache_path = utils.get_cache_file_name("macro_cache.json")
with open(macro_cache_path, "w", encoding="utf-8") as f:
f.write(json.dumps(self.macro_cache, indent=" "))
self.macro_cache = []
def update_metadata_cache(self) -> None:
if self.update_cache:
self.update_metadata_cache_worker = UpdateMetadataCacheWorker(
self.item_model.repos
)
self.update_metadata_cache_worker.status_message.connect(
self.show_information
)
self.update_metadata_cache_worker.finished.connect(
self.do_next_startup_phase
) # Link to step 4
self.update_metadata_cache_worker.progress_made.connect(
self.update_progress_bar
)
self.update_metadata_cache_worker.package_updated.connect(
self.on_package_updated
)
self.update_metadata_cache_worker.start()
else:
self.do_next_startup_phase()
    def on_buttonUpdateCache_clicked(self) -> None:
        """Wipe the on-disk Addon Manager cache and re-run the startup
        sequence so everything is re-fetched from the network."""
        self.update_cache = True
        cache_path = FreeCAD.getUserCachePath()
        am_path = os.path.join(cache_path, "AddonManager")
        utils.rmdir(am_path)
        self.dialog.buttonUpdateCache.setEnabled(False)
        self.dialog.buttonUpdateCache.setText(
            translate("AddonsInstaller", "Updating cache...")
        )
        self.startup()
        # Recaching implies checking for updates, regardless of the user's autocheck option:
        # startup() queued check_updates; swap it for the forced variant
        self.startup_sequence.remove(self.check_updates)
        self.startup_sequence.append(self.force_check_updates)
    def on_package_updated(self, repo: Addon) -> None:
        """Called when the named package has either new metadata or a new icon (or both)"""
        # Invoked from worker threads; the class-level lock serializes icon
        # regeneration and model reloads
        with self.lock:
            repo.icon = self.get_icon(repo, update=True)
            self.item_model.reload_item(repo)
def load_macro_metadata(self) -> None:
if self.update_cache:
self.load_macro_metadata_worker = CacheMacroCodeWorker(
self.item_model.repos
)
self.load_macro_metadata_worker.status_message.connect(
self.show_information
)
self.load_macro_metadata_worker.update_macro.connect(
self.on_package_updated
)
self.load_macro_metadata_worker.progress_made.connect(
self.update_progress_bar
)
self.load_macro_metadata_worker.finished.connect(self.do_next_startup_phase)
self.load_macro_metadata_worker.start()
else:
self.do_next_startup_phase()
def select_addon(self, name: str) -> None:
found = False
for addon in self.item_model.repos:
if addon.name == name:
self.table_row_activated(addon)
found = True
break
if not found:
FreeCAD.Console.PrintWarning(
translate(
"AddonsInstaller", "Could not find addon '{}' to select\n"
).format(name)
)
self.do_next_startup_phase()
def check_updates(self) -> None:
"checks every installed addon for available updates"
pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
autocheck = pref.GetBool("AutoCheck", False)
if not autocheck:
FreeCAD.Console.PrintLog(
"Addon Manager: Skipping update check because AutoCheck user preference is False\n"
)
self.do_next_startup_phase()
return
if not self.packages_with_updates:
self.force_check_updates(standalone=False)
else:
self.do_next_startup_phase()
def force_check_updates(self, standalone=False) -> None:
if hasattr(self, "check_worker"):
thread = self.check_worker
if thread:
if not thread.isFinished():
self.do_next_startup_phase()
return
self.dialog.buttonUpdateAll.setText(
translate("AddonsInstaller", "Checking for updates...")
)
self.packages_with_updates.clear()
self.dialog.buttonUpdateAll.show()
self.dialog.buttonCheckForUpdates.setDisabled(True)
self.check_worker = CheckWorkbenchesForUpdatesWorker(self.item_model.repos)
self.check_worker.finished.connect(self.do_next_startup_phase)
self.check_worker.finished.connect(self.update_check_complete)
self.check_worker.progress_made.connect(self.update_progress_bar)
if standalone:
self.current_progress_region = 1
self.number_of_progress_regions = 1
self.check_worker.update_status.connect(self.status_updated)
self.check_worker.start()
self.enable_updates(len(self.packages_with_updates))
def status_updated(self, repo: Addon) -> None:
self.item_model.reload_item(repo)
if repo.status() == Addon.Status.UPDATE_AVAILABLE:
self.packages_with_updates.add(repo)
self.enable_updates(len(self.packages_with_updates))
elif repo.status() == Addon.Status.PENDING_RESTART:
self.restart_required = True
def enable_updates(self, number_of_updates: int) -> None:
"""enables the update button"""
if number_of_updates:
s = translate(
"AddonsInstaller", "Apply {} update(s)", "", number_of_updates
)
self.dialog.buttonUpdateAll.setText(s.format(number_of_updates))
self.dialog.buttonUpdateAll.setEnabled(True)
elif hasattr(self, "check_worker") and self.check_worker.isRunning():
self.dialog.buttonUpdateAll.setText(
translate("AddonsInstaller", "Checking for updates...")
)
else:
self.dialog.buttonUpdateAll.setText(
translate("AddonsInstaller", "No updates available")
)
self.dialog.buttonUpdateAll.setEnabled(False)
    def update_check_complete(self) -> None:
        """Runs when the update-check worker finishes: refresh the "Update
        all" button and re-enable manual update checking."""
        self.enable_updates(len(self.packages_with_updates))
        self.dialog.buttonCheckForUpdates.setEnabled(True)
    def check_python_updates(self) -> None:
        """Startup phase: migrate Python packages installed by the 0.20-era
        Addon Manager to the 0.21 layout, then advance immediately."""
        PythonPackageManager.migrate_old_am_installations()  # Migrate 0.20 to 0.21
        self.do_next_startup_phase()
def show_python_updates_dialog(self) -> None:
if not hasattr(self, "manage_python_packages_dialog"):
self.manage_python_packages_dialog = PythonPackageManager(
self.item_model.repos
)
self.manage_python_packages_dialog.show()
def show_developer_tools(self) -> None:
"""Display the developer tools dialog"""
if not self.developer_mode:
self.developer_mode = DeveloperMode()
self.developer_mode.show()
checker = MetadataValidators()
checker.validate_all(self.item_model.repos)
def add_addon_repo(self, addon_repo: Addon) -> None:
"""adds a workbench to the list"""
if addon_repo.icon is None or addon_repo.icon.isNull():
addon_repo.icon = self.get_icon(addon_repo)
for repo in self.item_model.repos:
if repo.name == addon_repo.name:
# self.item_model.reload_item(repo) # If we want to have later additions superseded
# earlier
return
self.item_model.append_item(addon_repo)
def get_icon(self, repo: Addon, update: bool = False) -> QtGui.QIcon:
"""Returns an icon for an Addon. Uses a cached icon if possible, unless update is True,
in which case the icon is regenerated."""
if not update and repo.icon and not repo.icon.isNull() and repo.icon.isValid():
return repo.icon
path = ":/icons/" + repo.name.replace(" ", "_")
if repo.repo_type == Addon.Kind.WORKBENCH:
path += "_workbench_icon.svg"
default_icon = QtGui.QIcon(":/icons/document-package.svg")
elif repo.repo_type == Addon.Kind.MACRO:
if repo.macro and repo.macro.icon:
if os.path.isabs(repo.macro.icon):
path = repo.macro.icon
default_icon = QtGui.QIcon(":/icons/document-python.svg")
else:
path = os.path.join(
os.path.dirname(repo.macro.src_filename), repo.macro.icon
)
default_icon = QtGui.QIcon(":/icons/document-python.svg")
elif repo.macro and repo.macro.xpm:
cache_path = FreeCAD.getUserCachePath()
am_path = os.path.join(cache_path, "AddonManager", "MacroIcons")
os.makedirs(am_path, exist_ok=True)
path = os.path.join(am_path, repo.name + "_icon.xpm")
if not os.path.exists(path):
with open(path, "w") as f:
f.write(repo.macro.xpm)
default_icon = QtGui.QIcon(repo.macro.xpm)
else:
path += "_macro_icon.svg"
default_icon = QtGui.QIcon(":/icons/document-python.svg")
elif repo.repo_type == Addon.Kind.PACKAGE:
# The cache might not have been downloaded yet, check to see if it's there...
if os.path.isfile(repo.get_cached_icon_filename()):
path = repo.get_cached_icon_filename()
elif repo.contains_workbench():
path += "_workbench_icon.svg"
default_icon = QtGui.QIcon(":/icons/document-package.svg")
elif repo.contains_macro():
path += "_macro_icon.svg"
default_icon = QtGui.QIcon(":/icons/document-python.svg")
else:
default_icon = QtGui.QIcon(":/icons/document-package.svg")
if QtCore.QFile.exists(path):
addonicon = QtGui.QIcon(path)
else:
addonicon = default_icon
repo.icon = addonicon
return addonicon
    def table_row_activated(self, selected_repo: Addon) -> None:
        """a row was activated, show the relevant data"""
        # Swap the list view out for the details view of the selected addon
        self.packageList.hide()
        self.packageDetails.show()
        self.packageDetails.show_repo(selected_repo)
    def show_information(self, message: str) -> None:
        """shows generic text in the information pane"""
        self.dialog.labelStatusInfo.setText(message)
        # Force an immediate redraw so the message is visible during
        # long-running operations
        self.dialog.labelStatusInfo.repaint()
    def show_workbench(self, repo: Addon) -> None:
        """Show the details view for *repo* (same behavior as
        table_row_activated)."""
        self.packageList.hide()
        self.packageDetails.show()
        self.packageDetails.show_repo(repo)
    def on_buttonBack_clicked(self) -> None:
        """Return from the details view to the package list."""
        self.packageDetails.hide()
        self.packageList.show()
def append_to_repos_list(self, repo: Addon) -> None:
    """this function allows threads to update the main list of workbenches"""
    self.item_model.append_item(repo)
def update(self, repo: Addon) -> None:
    """Update the given addon by running the installer GUI over it."""
    self.launch_installer_gui(repo)
def mark_repo_update_available(self, repo: Addon, available: bool) -> None:
    """Record whether an update exists for *repo* and refresh its display."""
    new_status = (
        Addon.Status.UPDATE_AVAILABLE if available else Addon.Status.NO_UPDATE_AVAILABLE
    )
    repo.set_status(new_status)
    self.item_model.reload_item(repo)
    self.packageDetails.show_repo(repo)
def launch_installer_gui(self, addon: Addon) -> None:
    """Start the appropriate installer GUI for *addon* (non-blocking).

    Refuses to start if another installer is still running.
    """
    if self.installer_gui is not None:
        # Only one installer may be active at a time
        FreeCAD.Console.PrintError(
            translate(
                "AddonsInstaller",
                "Cannot launch a new installer until the previous one has finished.",
            )
        )
        return
    if addon.macro is None:
        self.installer_gui = AddonInstallerGUI(addon, self.item_model.repos)
    else:
        self.installer_gui = MacroInstallerGUI(addon)
    self.installer_gui.success.connect(self.on_package_status_changed)
    self.installer_gui.finished.connect(self.cleanup_installer)
    self.installer_gui.run()  # Does not block
def cleanup_installer(self) -> None:
    """Schedule teardown of the installer reference.

    NOTE(review): the 500 ms delay presumably lets queued Qt signals from the
    finishing installer drain before the reference is dropped — confirm.
    """
    QtCore.QTimer.singleShot(500, self.no_really_clean_up_the_installer)
def no_really_clean_up_the_installer(self) -> None:
    """Drop the installer reference, allowing a new installer to be launched."""
    self.installer_gui = None
def update_all(self) -> None:
    """Asynchronously apply all available updates: individual failures are noted, but do not
    stop other updates"""
    if self.installer_gui is not None:
        # Only one installer may be active at a time
        FreeCAD.Console.PrintError(
            translate(
                "AddonsInstaller",
                "Cannot launch a new installer until the previous one has finished.",
            )
        )
        return
    self.installer_gui = UpdateAllGUI(self.item_model.repos)
    self.installer_gui.addon_updated.connect(self.on_package_status_changed)
    self.installer_gui.finished.connect(self.cleanup_installer)
    self.installer_gui.run()  # Does not block
def hide_progress_widgets(self) -> None:
    """hides the progress bar and related widgets"""
    self.dialog.labelStatusInfo.hide()
    self.dialog.progressBar.hide()
    self.dialog.buttonPauseUpdate.hide()
    # Return keyboard focus to the filter box once background work ends
    self.packageList.ui.lineEditFilter.setFocus()
def show_progress_widgets(self) -> None:
    """Show the progress bar and its companion widgets if currently hidden."""
    if self.dialog.progressBar.isHidden():
        self.dialog.progressBar.show()
        self.dialog.buttonPauseUpdate.show()
        self.dialog.labelStatusInfo.show()
def update_progress_bar(self, current_value: int, max_value: int) -> None:
    """Update the progress bar, showing it if it's hidden."""
    # Guard against a zero/negative maximum and clamp the value into range
    max_value = max(1, max_value)
    current_value = min(max(0, current_value), max_value)
    self.show_progress_widgets()
    region_size = 100.0 / self.number_of_progress_regions
    finished_portion = (self.current_progress_region - 1) * region_size
    active_portion = (float(current_value) / float(max_value)) * region_size
    overall = finished_portion + active_portion
    # Bar is scaled to 1000 segments so it moves sort of smoothly
    self.dialog.progressBar.setValue(overall * 10)
    self.dialog.progressBar.repaint()
def stop_update(self) -> None:
    """Abort the cache update: stop workers, mark the cache dirty, and
    re-enable the refresh button."""
    self.cleanup_workers()
    self.hide_progress_widgets()
    # Record that the cache was interrupted and is in an unknown state
    self.write_cache_stopfile()
    self.dialog.buttonUpdateCache.setEnabled(True)
    self.dialog.buttonUpdateCache.setText(
        translate("AddonsInstaller", "Refresh local cache")
    )
def write_cache_stopfile(self) -> None:
    """Drop a marker file recording that a cache update was interrupted."""
    explanation = (
        "This file indicates that a cache operation was interrupted, and "
        "the cache is in an unknown state. It will be deleted next time "
        "AddonManager recaches."
    )
    stopfile = utils.get_cache_file_name("CACHE_UPDATE_INTERRUPTED")
    with open(stopfile, "w", encoding="utf8") as f:
        f.write(explanation)
def on_package_status_changed(self, repo: Addon) -> None:
    """React to an addon's status change: refresh views and the update count."""
    if repo.status() == Addon.Status.PENDING_RESTART:
        # An install/removal completed; FreeCAD must restart to pick it up
        self.restart_required = True
    self.item_model.reload_item(repo)
    self.packageDetails.show_repo(repo)
    if repo in self.packages_with_updates:
        self.packages_with_updates.remove(repo)
        self.enable_updates(len(self.packages_with_updates))
def executemacro(self, repo: Addon) -> None:
    """Execute the selected macro.

    Installed macros are run from their installed location; otherwise the
    macro is staged into a temporary directory just for this run.
    """
    macro = repo.macro
    if not macro or not macro.code:
        return
    if macro.is_installed():
        macro_path = os.path.join(self.macro_repo_dir, macro.filename)
        FreeCADGui.open(str(macro_path))
        self.dialog.hide()
        FreeCADGui.SendMsgToActiveView("Run")
    else:
        # Renamed from `dir` to avoid shadowing the builtin
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_install_succeeded = macro.install(temp_dir)
            if not temp_install_succeeded:
                message = translate(
                    "AddonsInstaller",
                    "Execution of macro failed. See console for failure details.",
                )
                # Bug fix: the message was previously constructed but never
                # shown anywhere; report it on the console before bailing out
                FreeCAD.Console.PrintError(message + "\n")
                return
            macro_path = os.path.join(temp_dir, macro.filename)
            FreeCADGui.open(str(macro_path))
            self.dialog.hide()
            FreeCADGui.SendMsgToActiveView("Run")
def remove(self, addon: Addon) -> None:
    """Remove this addon."""
    if self.installer_gui is not None:
        # Only one installer/uninstaller may be active at a time
        FreeCAD.Console.PrintError(
            translate(
                "AddonsInstaller",
                "Cannot launch a new installer until the previous one has finished.",
            )
        )
        return
    self.installer_gui = AddonUninstallerGUI(addon)
    self.installer_gui.finished.connect(self.cleanup_installer)
    # Refresh the addon's displayed status once uninstallation completes
    self.installer_gui.finished.connect(
        functools.partial(self.on_package_status_changed, addon)
    )
    self.installer_gui.run()  # Does not block
# @}
|
sbe | app | """Static configuration for the application.
TODO: add more (runtime) flexibility in plugin discovery, selection
and activation.
"""
from __future__ import annotations
import logging
import jinja2
from abilian.app import Application as BaseApplication
from abilian.core.celery import FlaskLoader as CeleryBaseLoader
from abilian.services import converter
from .apps.documents.repository import repository
from .extension import sbe
# Used for side effects, do not remove
__all__ = ["create_app", "Application"]
logger = logging.getLogger(__name__)
# loader to be used by celery workers
class CeleryLoader(CeleryBaseLoader):
    """Celery loader for workers; points celery at the SBE app factory."""
    # Dotted path celery uses to (re)create the Flask app inside workers
    flask_app_factory = "abilian.sbe.app.create_app"
class Application(BaseApplication):
    """SBE application: the base Abilian application plus the SBE plugin set."""

    # Fix: "abilian.sbe.apps.preferences" was listed twice in the original
    # tuple; the accidental duplicate registration has been removed.
    APP_PLUGINS = BaseApplication.APP_PLUGINS + (
        "abilian.sbe.apps.main",
        "abilian.sbe.apps.notifications",
        "abilian.sbe.apps.preferences",
        "abilian.sbe.apps.wiki",
        "abilian.sbe.apps.wall",
        "abilian.sbe.apps.documents",
        "abilian.sbe.apps.forum",
        # "abilian.sbe.apps.calendar",
        "abilian.sbe.apps.communities",
        "abilian.sbe.apps.social",
    )

    def setup(self, config: type | None):
        """Run the base setup, then register SBE's jinja template loader."""
        super().setup(config)
        loader = jinja2.PackageLoader("abilian.sbe", "templates")
        self.register_jinja_loaders(loader)

    def init_extensions(self):
        """Initialize base extensions plus the SBE-specific ones."""
        BaseApplication.init_extensions(self)
        sbe.init_app(self)
        repository.init_app(self)
        converter.init_app(self)
def create_app(config: type | None = None, **kw) -> Application:
    """Build an :class:`Application`, run its setup, and return it."""
    application = Application(**kw)
    application.setup(config)
    return application
|
cache | projected | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
from collections import namedtuple
from eos.modifiedAttributeDict import getResistanceAttrID
from graphs.data.base import FitDataCache
# Projection parameters for a module-based effect source.
ModProjData = namedtuple(
    "ModProjData", ("boost", "optimal", "falloff", "stackingGroup", "resAttrID")
)
# Projection parameters for a mobile source (drone/fighter); additionally
# carries the source's own speed and radius.
MobileProjData = namedtuple(
    "MobileProjData",
    ("boost", "optimal", "falloff", "stackingGroup", "resAttrID", "speed", "radius"),
)
class ProjectedDataCache(FitDataCache):
    """Per-source cache of projected webifier / target-painter parameters.

    Each getter lazily builds and memoizes (in ``self._data``, keyed by the
    source item's ID) a ``(webEffects, tpEffects)`` pair so graph code does
    not re-scan the fit on every lookup.
    """

    def getProjModData(self, src):
        """Return cached (webMods, tpMods) projection data for src's modules."""
        # EAFP: a KeyError means this source's module data is not cached yet
        try:
            projectedData = self._data[src.item.ID]["modules"]
        except KeyError:
            # Format of items for both: (boost strength, optimal, falloff, stacking group, resistance attr ID)
            webMods = []
            tpMods = []
            # Chained assignment stores the tuple in the cache and keeps a
            # local reference for the return below
            projectedData = self._data.setdefault(src.item.ID, {})["modules"] = (
                webMods,
                tpMods,
            )
            for mod in src.item.activeModulesIter():
                # Regular (single-target) stasis webifier effects
                for webEffectName in (
                    "remoteWebifierFalloff",
                    "structureModuleEffectStasisWebifier",
                ):
                    if webEffectName in mod.item.effects:
                        webMods.append(
                            ModProjData(
                                mod.getModifiedItemAttr("speedFactor"),
                                mod.maxRange or 0,
                                mod.falloff or 0,
                                "default",
                                getResistanceAttrID(
                                    modifyingItem=mod,
                                    effect=mod.item.effects[webEffectName],
                                ),
                            )
                        )
                # Area-of-effect doomsday web: optimal is extended by AOE range
                if "doomsdayAOEWeb" in mod.item.effects:
                    webMods.append(
                        ModProjData(
                            mod.getModifiedItemAttr("speedFactor"),
                            max(
                                0,
                                (mod.maxRange or 0)
                                + mod.getModifiedItemAttr("doomsdayAOERange"),
                            ),
                            mod.falloff or 0,
                            "default",
                            getResistanceAttrID(
                                modifyingItem=mod,
                                effect=mod.item.effects["doomsdayAOEWeb"],
                            ),
                        )
                    )
                # Regular (single-target) target painter effects
                for tpEffectName in (
                    "remoteTargetPaintFalloff",
                    "structureModuleEffectTargetPainter",
                ):
                    if tpEffectName in mod.item.effects:
                        tpMods.append(
                            ModProjData(
                                mod.getModifiedItemAttr("signatureRadiusBonus"),
                                mod.maxRange or 0,
                                mod.falloff or 0,
                                "default",
                                getResistanceAttrID(
                                    modifyingItem=mod,
                                    effect=mod.item.effects[tpEffectName],
                                ),
                            )
                        )
                # Area-of-effect doomsday painter
                if "doomsdayAOEPaint" in mod.item.effects:
                    tpMods.append(
                        ModProjData(
                            mod.getModifiedItemAttr("signatureRadiusBonus"),
                            max(
                                0,
                                (mod.maxRange or 0)
                                + mod.getModifiedItemAttr("doomsdayAOERange"),
                            ),
                            mod.falloff or 0,
                            "default",
                            getResistanceAttrID(
                                modifyingItem=mod,
                                effect=mod.item.effects["doomsdayAOEPaint"],
                            ),
                        )
                    )
        return projectedData

    def getProjDroneData(self, src):
        """Return cached (webDrones, tpDrones) projection data for src's drones."""
        try:
            projectedData = self._data[src.item.ID]["drones"]
        except KeyError:
            # Format of items for both: (boost strength, optimal, falloff, stacking group, resistance attr ID, drone speed, drone radius)
            webDrones = []
            tpDrones = []
            projectedData = self._data.setdefault(src.item.ID, {})["drones"] = (
                webDrones,
                tpDrones,
            )
            for drone in src.item.activeDronesIter():
                if "remoteWebifierEntity" in drone.item.effects:
                    # Tuple replication: one entry per active drone in the stack
                    webDrones.extend(
                        drone.amountActive
                        * (
                            MobileProjData(
                                drone.getModifiedItemAttr("speedFactor"),
                                drone.maxRange or 0,
                                drone.falloff or 0,
                                "default",
                                getResistanceAttrID(
                                    modifyingItem=drone,
                                    effect=drone.item.effects["remoteWebifierEntity"],
                                ),
                                drone.getModifiedItemAttr("maxVelocity"),
                                drone.getModifiedItemAttr("radius"),
                            ),
                        )
                    )
                if "remoteTargetPaintEntity" in drone.item.effects:
                    tpDrones.extend(
                        drone.amountActive
                        * (
                            MobileProjData(
                                drone.getModifiedItemAttr("signatureRadiusBonus"),
                                drone.maxRange or 0,
                                drone.falloff or 0,
                                "default",
                                getResistanceAttrID(
                                    modifyingItem=drone,
                                    effect=drone.item.effects[
                                        "remoteTargetPaintEntity"
                                    ],
                                ),
                                drone.getModifiedItemAttr("maxVelocity"),
                                drone.getModifiedItemAttr("radius"),
                            ),
                        )
                    )
        return projectedData

    def getProjFighterData(self, src):
        """Return cached (webFighters, tpFighters) data for src's fighters."""
        try:
            projectedData = self._data[src.item.ID]["fighters"]
        except KeyError:
            # Format of items for both: (boost strength, optimal, falloff, stacking group, resistance attr ID, fighter speed, fighter radius)
            webFighters = []
            tpFighters = []
            projectedData = self._data.setdefault(src.item.ID, {})["fighters"] = (
                webFighters,
                tpFighters,
            )
            for fighter, ability in src.item.activeFighterAbilityIter():
                if ability.effect.name == "fighterAbilityStasisWebifier":
                    webFighters.append(
                        MobileProjData(
                            # Penalty scales with the number of fighters in the squad
                            fighter.getModifiedItemAttr(
                                "fighterAbilityStasisWebifierSpeedPenalty"
                            )
                            * fighter.amount,
                            fighter.getModifiedItemAttr(
                                "fighterAbilityStasisWebifierOptimalRange"
                            ),
                            fighter.getModifiedItemAttr(
                                "fighterAbilityStasisWebifierFalloffRange"
                            ),
                            "default",
                            getResistanceAttrID(
                                modifyingItem=fighter,
                                effect=fighter.item.effects[
                                    "fighterAbilityStasisWebifier"
                                ],
                            ),
                            fighter.getModifiedItemAttr("maxVelocity"),
                            fighter.getModifiedItemAttr("radius"),
                        )
                    )
        return projectedData
|
projectedDrone | changeAmount | import math
import eos.db
import gui.mainFrame
import wx
from gui import globalEvents as GE
from gui.fitCommands.calc.drone.projectedChangeAmount import (
CalcChangeProjectedDroneAmountCommand,
)
from gui.fitCommands.calc.drone.projectedRemove import CalcRemoveProjectedDroneCommand
from gui.fitCommands.helpers import InternalCommandHistory
from service.fit import Fit
class GuiChangeProjectedDroneAmountCommand(wx.Command):
    """Undoable GUI command that changes the stack size of a projected drone.

    A non-positive amount removes the projected drone entirely.
    """

    def __init__(self, fitID, itemID, amount):
        wx.Command.__init__(self, True, "Change Projected Drone Amount")
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        self.itemID = itemID
        self.amount = amount

    def Do(self):
        if self.amount > 0:
            cmd = CalcChangeProjectedDroneAmountCommand(
                fitID=self.fitID, itemID=self.itemID, amount=self.amount
            )
        else:
            # Zero/negative amount: drop the whole stack (math.inf removes all)
            cmd = CalcRemoveProjectedDroneCommand(
                fitID=self.fitID, itemID=self.itemID, amount=math.inf
            )
        success = self.internalHistory.submit(cmd)
        self._flush_and_notify()
        return success

    def Undo(self):
        success = self.internalHistory.undoAll()
        self._flush_and_notify()
        return success

    def _flush_and_notify(self):
        # Shared epilogue for Do/Undo (was duplicated verbatim in both):
        # persist changes, recalculate the fit, and tell the UI to refresh.
        eos.db.flush()
        sFit = Fit.getInstance()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(
            gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,))
        )
|
builtinViews | emptyView | # noinspection PyPackageRequirements
import gui.globalEvents as GE
import gui.mainFrame
import wx
from gui.chrome_tabs import EVT_NOTEBOOK_PAGE_CHANGED
class BlankPage(wx.Panel):
    """Placeholder notebook page shown when no fit tab has content."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size=(0, 0))
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        self.parent = parent
        self.parent.Bind(EVT_NOTEBOOK_PAGE_CHANGED, self.pageChanged)
        self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
        # Announce "no fit" so dependent panels can clear themselves
        wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=()))

    def Destroy(self):
        # todo: This unbind caused fits to not recalc when switching to their tabs; find out why
        # self.parent.Unbind(EVT_NOTEBOOK_PAGE_CHANGED)
        wx.Panel.Destroy(self)

    def pageChanged(self, event):
        # When this (blank) page becomes active, broadcast a FitChanged with
        # fitID None so listeners deselect the current fit
        if self.parent.IsActive(self):
            fitID = None
            # sFit = Fit.getInstance()
            # sFit.switchFit(fitID)
            wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=(fitID,)))
        event.Skip()
|
transfer | urls | # This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from components.transfer import views
from django.conf import settings
from django.conf.urls import url
# URL namespace used when reversing, e.g. reverse("transfer:component", ...)
app_name = "transfer"
urlpatterns = [
    url(r"^$", views.grid, name="transfer_index"),
    # Transfer metadata set functions
    url(r"^create_metadata_set_uuid/$", views.create_metadata_set_uuid),
    url(
        r"^rename_metadata_set/(?P<set_uuid>"
        + settings.UUID_REGEX
        + r")/(?P<placeholder_id>[\w\-]+)/$",
        views.rename_metadata_set,
    ),
    url(
        r"^cleanup_metadata_set/(?P<set_uuid>" + settings.UUID_REGEX + ")/$",
        views.cleanup_metadata_set,
    ),
    url(r"^locations/$", views.transfer_source_locations),
    url(
        r"^component/(?P<uuid>" + settings.UUID_REGEX + ")/$",
        views.component,
        name="component",
    ),
    # Status endpoints: all transfers, or a single transfer by UUID
    url(r"^status/$", views.status),
    url(r"^status/(?P<uuid>" + settings.UUID_REGEX + ")/$", views.status),
    # Transfer metadata list/add/edit views (add and edit share one view)
    url(
        r"^(?P<uuid>" + settings.UUID_REGEX + ")/metadata/$",
        views.transfer_metadata_list,
        name="transfer_metadata_list",
    ),
    url(
        r"^(?P<uuid>" + settings.UUID_REGEX + ")/metadata/add/$",
        views.transfer_metadata_edit,
        name="transfer_metadata_add",
    ),
    url(
        r"^(?P<uuid>" + settings.UUID_REGEX + r")/metadata/(?P<id>\d+)/$",
        views.transfer_metadata_edit,
        name="transfer_metadata_edit",
    ),
]
|
parser | context | """Produce a rendering of the account balances just before and after a
particular entry is applied.
"""
__copyright__ = "Copyright (C) 2014-2017 Martin Blais"
__license__ = "GNU GPLv2"
import functools
import io
from os import path
from beancount.core import compare, convert, data, getters, interpolate, inventory
from beancount.parser import parser, printer
def render_file_context(entries, options_map, filename, lineno):
    """Render the context before and after a particular transaction is applied.

    Args:
      entries: A list of directives.
      options_map: A dict of options, as produced by the parser.
      filename: A string, the name of the file from which the transaction was parsed.
      lineno: An integer, the line number in the file the transaction was parsed from.
    Returns:
      A multiline string of text, which consists of the context before the
      transaction is applied, the transaction itself, and the context after it
      is applied. You can just print that, it is in form that is intended to be
      consumed by the user.
    """
    closest_entry = data.find_closest(entries, filename, lineno)
    if closest_entry is None:
        raise SystemExit("No entry could be found before {}:{}".format(filename, lineno))

    closest_parsed_entry = None
    if path.exists(filename):
        # Re-run only the parser stage (no booking nor interpolation, which
        # would remove the postings) to recover the corresponding unbooked
        # transaction and thereby its full list of accounts.
        parsed_entries, _, __ = parser.parse_file(filename)
        # Note: We cannot bisect as we cannot rely on sorting behavior from the parser.
        target_lineno = closest_entry.meta["lineno"]
        matches = [
            candidate
            for candidate in parsed_entries
            if candidate.meta["lineno"] == target_lineno
        ]
        if len(matches) != 1:
            # This is an internal error, this should never occur.
            raise RuntimeError(
                "Parsed entry corresponding to real entry not found in original filename."
            )
        closest_parsed_entry = matches[0]
    return render_entry_context(entries, options_map, closest_entry, closest_parsed_entry)
def render_entry_context(entries, options_map, entry, parsed_entry=None):
    """Render the context before and after a particular transaction is applied.
    Args:
      entries: A list of directives.
      options_map: A dict of options, as produced by the parser.
      entry: The entry instance which should be rendered. (Note that this object is
        expected to be in the set of entries, not just structurally equal.)
      parsed_entry: An optional incomplete, parsed but not booked nor interpolated
        entry. If this is provided, this is used for inspecting the list of prior
        accounts and it is also rendered.
    Returns:
      A multiline string of text, which consists of the context before the
      transaction is applied, the transaction itself, and the context after it
      is applied. You can just print that, it is in form that is intended to be
      consumed by the user.
    """
    oss = io.StringIO()
    # Shorthand: every pr(...) call appends a line to the output buffer
    pr = functools.partial(print, file=oss)
    header = "** {} --------------------------------"
    meta = entry.meta
    pr(header.format("Transaction Id"))
    pr()
    pr("Hash:{}".format(compare.hash_entry(entry)))
    pr("Location: {}:{}".format(meta["filename"], meta["lineno"]))
    pr()
    pr()
    # Get the list of accounts sorted by the order in which they appear in the
    # closest entry.
    order = {}
    if parsed_entry is None:
        parsed_entry = entry
    if isinstance(parsed_entry, data.Transaction):
        order = {
            posting.account: index for index, posting in enumerate(parsed_entry.postings)
        }
    # Accounts not present in the postings sort last (key 10000)
    accounts = sorted(
        getters.get_entry_accounts(parsed_entry),
        key=lambda account: order.get(account, 10000),
    )
    # Accumulate the balances of these accounts up to the entry.
    balance_before, balance_after = interpolate.compute_entry_context(
        entries, entry, additional_accounts=accounts
    )
    # Create a format line for printing the contents of account balances.
    max_account_width = max(map(len, accounts)) if accounts else 1
    position_line = "{{:1}} {{:{width}}} {{:>49}}".format(width=max_account_width)
    # Print the context before.
    pr(header.format("Balances before transaction"))
    pr()
    # Remember each (account, position) present before, so changed positions
    # can be starred in the "after" section below
    before_hashes = set()
    average_costs = {}
    for account in accounts:
        balance = balance_before[account]
        pc_balances = balance.split()
        for currency, pc_balance in pc_balances.items():
            if len(pc_balance) > 1:
                average_costs[account] = pc_balance.average()
        positions = balance.get_positions()
        for position in positions:
            before_hashes.add((account, hash(position)))
            pr(position_line.format("", account, str(position)))
        if not positions:
            pr(position_line.format("", account, ""))
        pr()
    pr()
    # Print average cost per account, if relevant.
    if average_costs:
        pr(header.format("Average Costs"))
        pr()
        for account, average_cost in sorted(average_costs.items()):
            for position in average_cost:
                pr(position_line.format("", account, str(position)))
            pr()
        pr()
    # Print the entry itself.
    dcontext = options_map["dcontext"]
    pr(header.format("Unbooked Transaction"))
    pr()
    if parsed_entry:
        printer.print_entry(parsed_entry, dcontext, render_weights=True, file=oss)
    pr()
    pr(header.format("Transaction"))
    pr()
    printer.print_entry(entry, dcontext, render_weights=True, file=oss)
    pr()
    if isinstance(entry, data.Transaction):
        pr(header.format("Residual and Tolerances"))
        pr()
        # Print residuals.
        residual = interpolate.compute_residual(entry.postings)
        if not residual.is_empty():
            # Note: We render the residual at maximum precision, for debugging.
            pr("Residual: {}".format(residual))
        # Dump the tolerances used.
        tolerances = interpolate.infer_tolerances(entry.postings, options_map)
        if tolerances:
            pr(
                "Tolerances: {}".format(
                    ", ".join(
                        "{}={}".format(key, value)
                        for key, value in sorted(tolerances.items())
                    )
                )
            )
        # Compute the total cost basis.
        cost_basis = inventory.Inventory(
            pos for pos in entry.postings if pos.cost is not None
        ).reduce(convert.get_cost)
        if not cost_basis.is_empty():
            pr("Basis: {}".format(cost_basis))
        pr()
        pr()
    # Print the context after.
    pr(header.format("Balances after transaction"))
    pr()
    for account in accounts:
        positions = balance_after[account].get_positions()
        for position in positions:
            # Star positions that were not present before the transaction
            changed = (account, hash(position)) not in before_hashes
            print(
                position_line.format("*" if changed else "", account, str(position)),
                file=oss,
            )
        if not positions:
            pr(position_line.format("", account, ""))
        pr()
    return oss.getvalue()
|
calculix | writer | # ***************************************************************************
# * Copyright (c) 2015 Przemo Firszt <przemo@firszt.eu> *
# * Copyright (c) 2015 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM solver CalculiX writer"
__author__ = "Przemo Firszt, Bernd Hahnebach"
__url__ = "https://www.freecad.org"
## \addtogroup FEM
# @{
import time
from os.path import join
import FreeCAD
from femtools import constants
from FreeCAD import Units
from .. import writerbase
from . import write_constraint_centrif as con_centrif
from . import write_constraint_contact as con_contact
from . import write_constraint_displacement as con_displacement
from . import write_constraint_fixed as con_fixed
from . import write_constraint_fluidsection as con_fluidsection
from . import write_constraint_force as con_force
from . import write_constraint_heatflux as con_heatflux
from . import write_constraint_initialtemperature as con_itemp
from . import write_constraint_planerotation as con_planerotation
from . import write_constraint_pressure as con_pressure
from . import write_constraint_sectionprint as con_sectionprint
from . import write_constraint_selfweight as con_selfweight
from . import write_constraint_temperature as con_temperature
from . import write_constraint_tie as con_tie
from . import write_constraint_transform as con_transform
from . import (
write_femelement_geometry,
write_femelement_material,
write_femelement_matgeosets,
write_footer,
write_mesh,
write_step_equation,
write_step_output,
)
# Interesting forum topic: https://forum.freecad.org/viewtopic.php?&t=48451
# TODO somehow set units at beginning and every time a value is retrieved use this identifier
# this would lead to support of unit system, force might be retrieved in base writer!
# the following text will be at the end of the main calculix input file
units_information = """***********************************************************
** About units:
** See ccx manual, ccx does not know about any unit.
** Golden rule: The user must make sure that the numbers they provide have consistent units.
** The user is the FreeCAD calculix writer module ;-)
**
** The unit system which is used at Guido Dhondt's company: mm, N, s, K
** Since Length and Mass are connected by Force, if Length is mm the Mass is in t to get N
** The following units are used to write to inp file:
**
** Length: mm (this includes the mesh geometry)
** Mass: t
** TimeSpan: s
** Temperature: K
**
** This leads to:
** Force: N
** Pressure: N/mm^2 == MPa (Young's Modulus has unit Pressure)
** Density: t/mm^3
** Gravity: mm/s^2
** Thermal conductivity: t*mm/K/s^3 == as W/m/K == kW/mm/K
** Specific Heat: mm^2/s^2/K = J/kg/K == kJ/t/K
"""
# TODO
# {0:.13G} or {:.13G} should be used on all places writing floating points to ccx
# All floating points fields read from ccx are F20.0 FORTRAN input fields.
# see in dload.f in ccx's source
# https://forum.freecad.org/viewtopic.php?f=18&p=516518#p516433
# https://forum.freecad.org/viewtopic.php?f=18&t=22759&#p176578
# example "{:.13G}".format(math.sqrt(2.)*-1e100) and count chars
# a property type is best checked in FreeCAD objects definition
# see femobjects package for Python objects or in objects App
class FemInputWriterCcx(writerbase.FemInputWriter):
    """Writer that assembles a CalculiX .inp input file from a FEM analysis."""

    def __init__(
        self,
        analysis_obj,
        solver_obj,
        mesh_obj,
        member,
        dir_name=None,
        mat_geo_sets=None,
    ):
        writerbase.FemInputWriter.__init__(
            self, analysis_obj, solver_obj, mesh_obj, member, dir_name, mat_geo_sets
        )
        self.mesh_name = self.mesh_object.Name
        # Main input file path: <dir>/<meshname>.inp
        self.file_name = join(self.dir_name, self.mesh_name + ".inp")
        self.femmesh_file = (
            ""  # the file the femmesh is in, no matter if one or split input file
        )
        self.gravity = int(
            Units.Quantity(constants.gravity()).getValueAs("mm/s^2")
        )  # 9820 mm/s2
        self.units_information = units_information

    # ********************************************************************************************
    # write calculix input
    def write_solver_input(self):
        """Write the CalculiX input file(s).

        Returns the input file name on success, an empty string on failure.
        The write order below matters: mesh, element sets, materials/geometry,
        step-independent constraints, step equation, step-dependent
        constraints, output, footer.
        """
        time_start = time.process_time()
        FreeCAD.Console.PrintMessage("\n")  # because of time print in separate line
        FreeCAD.Console.PrintMessage("CalculiX solver input writing...\n")
        FreeCAD.Console.PrintMessage("Input file:{}\n".format(self.file_name))
        if self.solver_obj.SplitInputWriter is True:
            FreeCAD.Console.PrintMessage("Split input file.\n")
            self.split_inpfile = True
        else:
            FreeCAD.Console.PrintMessage("One monster input file.\n")
            self.split_inpfile = False
        # mesh
        inpfile = write_mesh.write_mesh(self)
        # element sets for materials and element geometry
        write_femelement_matgeosets.write_femelement_matgeosets(inpfile, self)
        # some fluidsection objs need special treatment, mat_geo_sets are needed for this
        inpfile = con_fluidsection.handle_fluidsection_liquid_inlet_outlet(
            inpfile, self
        )
        # element sets constraints
        self.write_constraints_meshsets(inpfile, self.member.cons_centrif, con_centrif)
        # node sets
        self.write_constraints_meshsets(inpfile, self.member.cons_fixed, con_fixed)
        self.write_constraints_meshsets(
            inpfile, self.member.cons_displacement, con_displacement
        )
        self.write_constraints_meshsets(
            inpfile, self.member.cons_planerotation, con_planerotation
        )
        self.write_constraints_meshsets(
            inpfile, self.member.cons_transform, con_transform
        )
        self.write_constraints_meshsets(
            inpfile, self.member.cons_temperature, con_temperature
        )
        # surface sets
        self.write_constraints_meshsets(inpfile, self.member.cons_contact, con_contact)
        self.write_constraints_meshsets(inpfile, self.member.cons_tie, con_tie)
        self.write_constraints_meshsets(
            inpfile, self.member.cons_sectionprint, con_sectionprint
        )
        # materials and fem element types
        write_femelement_material.write_femelement_material(inpfile, self)
        self.write_constraints_propdata(
            inpfile, self.member.cons_initialtemperature, con_itemp
        )
        write_femelement_geometry.write_femelement_geometry(inpfile, self)
        # constraints independent from steps
        self.write_constraints_propdata(
            inpfile, self.member.cons_planerotation, con_planerotation
        )
        self.write_constraints_propdata(inpfile, self.member.cons_contact, con_contact)
        self.write_constraints_propdata(inpfile, self.member.cons_tie, con_tie)
        self.write_constraints_propdata(
            inpfile, self.member.cons_transform, con_transform
        )
        # step equation
        write_step_equation.write_step_equation(inpfile, self)
        # constraints dependent from steps
        self.write_constraints_propdata(inpfile, self.member.cons_fixed, con_fixed)
        self.write_constraints_propdata(
            inpfile, self.member.cons_displacement, con_displacement
        )
        self.write_constraints_propdata(
            inpfile, self.member.cons_sectionprint, con_sectionprint
        )
        self.write_constraints_propdata(
            inpfile, self.member.cons_selfweight, con_selfweight
        )
        self.write_constraints_propdata(inpfile, self.member.cons_centrif, con_centrif)
        self.write_constraints_meshsets(inpfile, self.member.cons_force, con_force)
        self.write_constraints_meshsets(
            inpfile, self.member.cons_pressure, con_pressure
        )
        self.write_constraints_propdata(
            inpfile, self.member.cons_temperature, con_temperature
        )
        self.write_constraints_meshsets(
            inpfile, self.member.cons_heatflux, con_heatflux
        )
        con_fluidsection.write_constraints_fluidsection(inpfile, self)
        # output and step end
        write_step_output.write_step_output(inpfile, self)
        write_step_equation.write_step_end(inpfile, self)
        # footer
        write_footer.write_footer(inpfile, self)
        # close file
        inpfile.close()
        writetime = round((time.process_time() - time_start), 3)
        FreeCAD.Console.PrintMessage(
            "Writing time CalculiX input file: {} seconds.\n".format(writetime)
        )
        # return
        if self.femelement_count_test is True:
            return self.file_name
        else:
            FreeCAD.Console.PrintError(
                "Problems on writing input file, check report prints.\n\n"
            )
            return ""
## @}
|
i18n | molint | #!/usr/bin/env python
"""
This script reads a MO file and checks that translated messages do not use named
string formatting fields that do not appear in the original messages.
This would cause keyword errors when the format method is called on a translated
string.
"""
import string
import sys
import mofile
# Reuse a single Formatter's parse method for splitting format strings.
_parse = string.Formatter().parse


def fields(text):
    """Returns the format field names in text as a set().

    If the text contains erroneous formatting delimiters, ValueError is raised.
    """
    named = set()
    for _literal, field_name, _spec, _conversion in _parse(text):
        # Keep only named fields ({name}); positional ({0}) and auto-numbered
        # ({}) fields do not start with an alphabetic character.
        if field_name and field_name[0].isalpha():
            named.add(field_name)
    return named
def molint(filename):
    """Checks filename for superfluous fields in the translated messages.
    Returns True if there are no errors, otherwise prints messages to stderr
    and returns False.
    """
    correct = True
    # MO files are binary; mofile.parse_mo_decode yields decoded entries.
    with open(filename, "rb") as f:
        buf = f.read()
    for context, messages, translations in mofile.parse_mo_decode(buf):
        # collect fields in messages
        s = set()
        for m in messages:
            try:
                s |= fields(m)
            except ValueError:
                # Malformed original message: nothing usable to compare against.
                pass
        if not s:
            # Original uses no named fields, so no translation can be wrong.
            continue
        # collect superfluous fields in translations
        errors = []
        for t in translations:
            try:
                # Fields present in the translation but not the original
                # would raise KeyError at format() time.
                superfluous = fields(t) - s
            except ValueError:
                errors.append((t, "Erroneous format string"))
            else:
                if superfluous:
                    errors.append(
                        (
                            t,
                            "Field{} {} not in message".format(
                                "s" if len(superfluous) > 1 else "",
                                ", ".join(f"{{{name}}}" for name in superfluous),
                            ),
                        )
                    )
        # write out errors if any
        if errors:
            correct = False
            sys.stderr.write(
                "\n{}: Translation contains errors!\n" " Message{}:\n".format(
                    filename, "" if len(messages) == 1 else "s"
                )
            )
            for m in messages:
                sys.stderr.write(f" {m}\n")
            sys.stderr.write(
                " Offending translation{}:\n".format("" if len(errors) == 1 else "s")
            )
            for t, errmsg in errors:
                sys.stderr.write(f" {errmsg}:\n {t}\n")
    return correct
if __name__ == "__main__":
    # CLI entry point: lint every MO file given on the command line.
    filenames = sys.argv[1:]
    if not filenames:
        sys.stderr.write(
            "usage: python molint.py <mofiles> ...\n"
            "\n"
            "checks the given MO files if the translations contain erroneous\n"
            "embedded variable names.\n"
        )
        sys.exit(2)
    # Keep the files that failed linting for the summary line.
    errorfiles = [name for name in filenames if not molint(name)]
    if errorfiles:
        sys.stderr.write(
            "\nFiles containing errors: {}\n".format(", ".join(errorfiles))
        )
        sys.exit(1)
    sys.exit(0)
|
legecy-translators | jsparser | """
The process of translating JS will go like that: # TOP = 'imports and scope set'
1. Remove all the comments
2. Replace number, string and regexp literals with markers
4. Remove global Functions and move their translation to the TOP. Also add register code there.
5. Replace inline functions with lvals
6. Remove List and Object literals and replace them with lvals
7. Find and remove var declarations, generate python register code that would go on TOP.
Here we should be left with global code only where 1 line of js code = 1 line of python code.
Routine translating this code should be called glob_translate:
1. Search for outer structures and translate them using glob and inside using exps_translate
exps_translate routine:
1. Remove outer {}
2. Split lines at ;
3. Convert line by line using exp_translate
4. In case of error in 3 try to insert ; according to ECMA rules and repeat 3.
exp_translate routine:
It takes a single line of JS code and returns a SINGLE line of Python code.
Note var is not present here because it was removed in previous stages.
If case of parsing errors it must return a pos of error.
1. Convert all assignment operations to put operations, this may be hard :(
2. Convert all gets and calls to get and callprop.
3. Convert unary operators like typeof, new, !, delete.
Delete can be handled by replacing last get method with delete.
4. Convert remaining operators that are not handled by python eg: === and ,
lval format PyJsLvalNR
marker PyJs(TYPE_NAME)(NR)
TODO
1. Number literal replacement
2. Array literal replacement
3. Object literal replacement
5. Function replacement
4. Literal replacement translators
"""
from utils import *
# Mapping of JS binary operators to the Python special-method names used
# when translating operator expressions (e.g. 'a * b' -> a.__mul__(b)).
OP_METHODS = {'*': '__mul__',
              '/': '__div__',
              '%': '__mod__',
              '+': '__add__',
              '-': '__sub__',
              '<<': '__lshift__',
              '>>': '__rshift__',
              '&': '__and__',
              '^': '__xor__',
              '|': '__or__'}
def dbg(source):
    """Developer helper: dump *source* to a debug file for inspection.

    NOTE(review): the path is hard-coded to one developer's machine and all
    failures are deliberately swallowed so this never breaks translation on
    other systems.  Consider making the path configurable.
    """
    try:
        with open('C:\Users\Piotrek\Desktop\dbg.py','w') as f:
            f.write(source)
    except:
        pass
def indent(lines, ind=4):
    """Prefix every line of *lines* with *ind* spaces.

    Trailing spaces of the (already re-indented) body are stripped before
    the leading pad is prepended, matching the original formatting rules.
    """
    pad = ind * ' '
    body = lines.replace('\n', '\n' + pad).rstrip(' ')
    return pad + body
def inject_before_lval(source, lval, code):
    """Insert *code* (re-indented to match) on the line above the line
    containing the unique marker *lval* in *source*.

    Exactly one occurrence of *lval* is expected; zero or multiple
    occurrences dump *source* via dbg() and raise RuntimeError.
    """
    if source.count(lval)>1:
        dbg(source)
        print
        print lval
        raise RuntimeError('To many lvals (%s)' % lval)
    elif not source.count(lval):
        dbg(source)
        print
        print lval
        assert lval not in source
        raise RuntimeError('No lval found "%s"' % lval)
    end = source.index(lval)
    # Position of the newline that starts the lval's line.
    inj = source.rfind('\n', 0, end)
    # Count the leading spaces of that line so the injected code lines up.
    ind = inj
    while source[ind+1]==' ':
        ind+=1
    ind -= inj
    return source[:inj+1]+ indent(code, ind) + source[inj+1:]
def bracket_split(source, brackets=('()','{}','[]'), strip=False):
    """DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)

    Generator yielding *source* cut into alternating non-bracket chunks and
    complete top-level bracketed groups (nesting of the same pair is
    tracked).  With strip=True the surrounding bracket chars are removed
    from bracketed chunks.
    """
    openers = [pair[0] for pair in brackets]
    depth = 0
    chunk_from = 0
    for pos, ch in enumerate(source):
        if not depth:
            if ch in openers:
                # Entering a bracketed group; remember which pair to match.
                depth = 1
                open_pos = pos
                opener, closer = brackets[openers.index(ch)]
        elif ch == opener:
            depth += 1
        elif ch == closer:
            depth -= 1
            if not depth:
                # Top-level group closed: emit preceding text, then the group.
                if source[chunk_from:open_pos]:
                    yield source[chunk_from:open_pos]
                chunk_from = pos + 1
                yield source[open_pos + strip:pos + 1 - strip]
    if source[chunk_from:]:
        yield source[chunk_from:]
def pass_bracket(source, start, bracket='()'):
    """Returns content of brackets with brackets and first pos after brackets
    if source[start] is followed by some optional white space and brackets.
    Otherwise (None, None)."""
    # Fix: use the builtin next() (available since Python 2.6 and the only
    # form in Python 3) instead of the generator's removed .next() method.
    chunks = bracket_split(source[start:], [bracket], False)
    try:
        cand = next(chunks)
    except StopIteration:
        return None, None
    if not cand.strip():  # white space before the bracket group
        try:
            res = next(chunks)
            return res, start + len(cand) + len(res)
        except StopIteration:
            return None, None
    elif cand[-1] == bracket[1]:
        # The bracketed group starts directly at `start`.
        return cand, start + len(cand)
    else:
        return None, None
def startswith_keyword(start, keyword):
    """True when *start* (left-stripped) begins with *keyword* as a whole
    word, i.e. not merely as a prefix of a longer identifier."""
    stripped = start.lstrip()
    if not stripped.startswith(keyword):
        return False
    if len(stripped) > len(keyword) and stripped[len(keyword)] in IDENTIFIER_PART:
        # keyword continues into an identifier, e.g. 'varx' vs 'var'.
        return False
    return True
def endswith_keyword(ending, keyword):
    """True when *ending* (right-stripped) ends with *keyword* as a whole
    word, i.e. not merely as a suffix of a longer identifier."""
    trimmed = ending.rstrip()
    if not trimmed.endswith(keyword):
        return False
    boundary = len(trimmed) - len(keyword) - 1
    if boundary >= 0 and trimmed[boundary] in IDENTIFIER_PART:
        # keyword is preceded by an identifier char, e.g. 'xvar' vs 'var'.
        return False
    return True
def pass_white(source, start):
    """Return the index of the first non-whitespace character at or after
    *start* (== len(source) when only whitespace remains)."""
    pos = start
    while pos < len(source) and source[pos] in SPACE:
        pos += 1
    return pos
def except_token(source, start, token, throw=True):
    """Token can be only a single char. Returns position after token if found.
    Otherwise raises SyntaxError if throw, else returns None.
    Leading whitespace is skipped."""
    pos = pass_white(source, start)
    if pos < len(source) and source[pos] == token:
        return pos + 1
    if throw:
        raise SyntaxError('Missing token. Expected %s'%token)
    return None
def except_keyword(source, start, keyword):
    """ Returns position after keyword if found else None
    Note: skips white space and requires a word boundary after the keyword."""
    pos = pass_white(source, start)
    end = pos + len(keyword)
    if end > len(source):
        return None
    if source[pos:end] != keyword:
        return None
    if end < len(source) and source[end] in IDENTIFIER_PART:
        # keyword runs into a longer identifier - not a match.
        return None
    return end
def parse_identifier(source, start, throw=True):
    """Skip whitespace from *start* and parse one identifier.

    Returns (identifier, position_after) on success.  On an invalid
    identifier: raises SyntaxError when *throw*, otherwise returns None.
    """
    begin = pass_white(source, start)
    if begin >= len(source):
        if throw:
            raise SyntaxError('Missing identifier!')
        return None
    if source[begin] not in IDENTIFIER_START:
        if throw:
            raise SyntaxError('Invalid identifier start: "%s"'%source[begin])
        return None
    stop = begin + 1
    while stop < len(source) and source[stop] in IDENTIFIER_PART:
        stop += 1
    name = source[begin:stop]
    if not is_valid_lval(name):
        if throw:
            raise SyntaxError('Invalid identifier name: "%s"'%name)
        return None
    return name, stop
def argsplit(args, sep=','):
    """used to split JS args (it is not that simple as it seems because
    sep can be inside brackets).
    pass args *without* brackets!
    Used also to parse array and object elements, and more"""
    consumed = 0      # chars of `args` accounted for by previous chunks
    piece_start = 0   # start index of the current piece in `args`
    pieces = []
    for chunk in bracket_split(args, brackets=['()', '[]', '{}']):
        if chunk[0] not in {'(', '[', '{'}:
            # Only separators outside any bracket group count.
            for offset, ch in enumerate(chunk):
                if ch == sep:
                    pieces.append(args[piece_start:consumed + offset])
                    piece_start = consumed + offset + 1
        consumed += len(chunk)
    pieces.append(args[piece_start:])
    return pieces
def split_add_ops(text):
    """Specialized function splitting text at add/sub operators.
    Operands are *not* translated. Example result ['op1', '+', 'op2', '-', 'op3']"""
    # Hide increment/decrement so '++'/'--' are not mistaken for operators;
    # text does not normally contain '##' or '@@'.
    text = text.replace('++', '##').replace('--', '@@')
    operand_seen = False  # True once something other than +,-,*,/,% or space appeared
    seg_start = 0
    pos = 0
    while pos < len(text):
        ch = text[pos]
        if ch in '+-':
            if operand_seen:
                # Binary +/-: emit the operand collected so far, then the op.
                yield text[seg_start:pos].replace('##', '++').replace('@@', '--')
                yield ch
                seg_start = pos + 1
                operand_seen = False
        elif ch in '/*%':
            # After a mul/div/mod operator a following +/- is unary.
            operand_seen = False
        elif ch != ' ':
            operand_seen = True
        pos += 1
    yield text[seg_start:pos].replace('##', '++').replace('@@', '--')
def split_at_any(text, lis, translate=False, not_before=[], not_after=[], validitate=None):
    """Split *text* at any separator from *lis*, longest separators first.

    Yields fragment, separator, fragment, separator, ..., final fragment.
    A candidate separator is rejected when the text before it ends with an
    entry of *not_before*, the text after it starts with an entry of
    *not_after*, or the *validitate* callback returns falsy.  When
    *translate* is given it is applied to yielded fragments only.

    NOTE(review): mutable default arguments are shared across calls; safe
    only as long as callers never mutate them.  `lis` IS mutated (sorted
    in place).
    """
    lis.sort(key=lambda x: len(x), reverse=True)
    last = 0
    n = 0
    text_len = len(text)
    while n<text_len:
        if any(text[:n].endswith(e) for e in not_before): #Cant end with end before
            n+=1
            continue
        for e in lis:
            s = len(e)
            if s+n>text_len:
                continue
            if validitate and not validitate(e, text[:n], text[n+s:]):
                continue
            if any(text[n+s:].startswith(e) for e in not_after): #Cant end with end before
                n+=1
                break
            if e==text[n:n+s]:
                # Separator accepted: emit preceding fragment, then it.
                yield text[last:n] if not translate else translate(text[last:n])
                yield e
                n+=s
                last = n
                break
        else:
            n+=1
    yield text[last:n] if not translate else translate(text[last:n])
def split_at_single(text, sep, not_before=[], not_after=[]):
    """Works like text.split(sep) (lazily) but a split is suppressed when
    the fragment before it ends with an entry of *not_before* or the text
    after it starts with an entry of *not_after*."""
    total, sep_len = len(text), len(sep)
    frag_start = 0
    pos = 0
    while pos < total:
        if pos + sep_len <= total and text[pos:pos + sep_len] == sep:
            before = text[frag_start:pos]
            after = text[pos + sep_len:]
            if not any(before.endswith(b) for b in not_before) and \
                    not any(after.startswith(a) for a in not_after):
                yield before
                frag_start = pos + sep_len
            # Skip over the separator even when the split was suppressed,
            # mirroring the original scanning behaviour.
            pos += sep_len - 1
        pos += 1
    yield text[frag_start:]
gui-qt | utils | from plover import _
from PyQt5.QtCore import QSettings
from PyQt5.QtGui import QGuiApplication, QKeySequence
from PyQt5.QtWidgets import QAction, QMainWindow, QToolBar, QToolButton, QWidget
def ActionCopyViewSelectionToClipboard(view):
    """Create a QAction (with the standard Copy shortcut) that copies
    *view*'s current selection to the system clipboard as MIME data."""
    def on_triggered():
        selection = view.selectedIndexes()
        mime_data = view.model().mimeData(selection)
        QGuiApplication.clipboard().setMimeData(mime_data)
    action = QAction(_("Copy selection to clipboard"))
    action.setShortcut(QKeySequence(QKeySequence.Copy))
    action.triggered.connect(on_triggered)
    return action
def ToolButton(action):
    """Wrap *action* in a QToolButton that uses it as its default action."""
    btn = QToolButton()
    btn.setDefaultAction(action)
    return btn
def ToolBar(*action_list):
    """Build a QToolBar from *action_list*; None entries become separators."""
    bar = QToolBar()
    for entry in action_list:
        if entry is None:
            bar.addSeparator()
        else:
            bar.addWidget(ToolButton(entry))
    return bar
class WindowState(QWidget):
    """Mixin that persists a window's geometry (and, for QMainWindow,
    its toolbar/dock state) in QSettings under the group named by ROLE.

    Subclasses must set ROLE to a unique group name and may override
    _save_state/_restore_state for extra per-window values.
    """

    # Settings group name; must be set by subclasses.
    ROLE = None
    def _save_state(self, settings):
        # Hook for subclasses; called inside the ROLE settings group.
        pass
    def save_state(self):
        """Persist geometry (and main-window state) to QSettings."""
        assert self.ROLE
        settings = QSettings()
        settings.beginGroup(self.ROLE)
        settings.setValue("geometry", self.saveGeometry())
        if isinstance(self, QMainWindow):
            # Main windows additionally persist toolbar/dock layout.
            settings.setValue("state", self.saveState())
        self._save_state(settings)
        settings.endGroup()
    def _restore_state(self, settings):
        # Hook for subclasses; called inside the ROLE settings group.
        pass
    def restore_state(self):
        """Restore previously saved geometry/state, if any was stored."""
        assert self.ROLE
        settings = QSettings()
        settings.beginGroup(self.ROLE)
        geometry = settings.value("geometry")
        if geometry is not None:
            self.restoreGeometry(geometry)
        if isinstance(self, QMainWindow):
            state = settings.value("state")
            if state is not None:
                self.restoreState(state)
        self._restore_state(settings)
        settings.endGroup()
def find_menu_actions(menu):
    """Recursively collect a menu's named actions into a dict.

    Unnamed actions that open a submenu are recursed into and then recorded
    under the submenu's object name; unnamed actions without a submenu
    (e.g. separators) are skipped.  Names must be unique across the tree.
    """
    found = {}
    for action in menu.actions():
        name = action.objectName()
        if not name:
            submenu = action.menu()
            if submenu is None:
                # Unnamed plain action (separator etc.): nothing to record.
                continue
            found.update(find_menu_actions(submenu))
            name = submenu.objectName()
            assert name
        assert name not in found
        found[name] = action
    return found
|
settings | sphinx | """
Django configuration for sphinx build.
Most settings are left empty or to their default values
as they are not actually used.
"""
import os
from datetime import timedelta
# Project root used to build template/media/locale paths.
# NOTE(review): dirname() of a join ending in ".." merely strips that last
# component; kept as-is to preserve the paths the build currently resolves.
BASE_DIR = os.path.dirname(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
DEBUG = False
ALLOWED_HOSTS = []
CACHES = {"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
ORCID_BASE_DOMAIN = "sandbox.orcid.org"  # for the sandbox API
SOCIALACCOUNT_PROVIDERS = {
    "orcid": {
        "BASE_DOMAIN": ORCID_BASE_DOMAIN,
        # Member API or Public API? Default: False (for the public API)
        "MEMBER_API": False,  # for the member API
    }
}
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
MEDIA_ROOT = os.path.join(BASE_DIR, "dissemin_media")
# Placeholders: the sphinx build never opens these connections.
# (Removed duplicate DEBUG/BASE_DIR assignments that repeated the values above.)
DATABASES = {}
EMAIL_HOST = None
EMAIL_HOST_PASSWORD = None
EMAIL_HOST_USER = None
EMAIL_USE_TLS = None
REDIS_DB = None
REDIS_HOST = None
REDIS_PASSWORD = None
REDIS_PORT = None
ROMEO_API_KEY = None
# Dummy key: only used so Django can import settings during the docs build.
SECRET_KEY = "54eabc7548440450681f7b48daf688aca3800bda"
DOI_PROXY_DOMAIN = "doi-cache.dissem.in"
DOI_PROXY_SUPPORTS_BATCH = True
CROSSREF_MAILTO = "dev@dissem.in"
CROSSREF_USER_AGENT = "Dissemin/0.1 (https://dissem.in/; mailto:dev@dissem.in)"
DEPOSIT_MAX_FILE_SIZE = 1024 * 1024 * 200  # 200 MB (comment fixed; value unchanged)
URL_DEPOSIT_DOWNLOAD_TIMEOUT = 10
PROFILE_REFRESH_ON_LOGIN = timedelta(days=1)
INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "rest_framework",
    "crispy_forms",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.orcid",
    "statistics",
    "publishers",
    "papers",
    "upload",
    "deposit",
    "deposit.zenodo",
    "deposit.hal",
    "deposit.osf",
    "autocomplete",
    "notification",
    "bootstrap_pagination",
    "django_js_reverse",
    "solo",
    "haystack",
    "widget_tweaks",
    "capture_tag",
    "memoize",
    "django_countries",
    "leaflet",
    "djgeojson",
    "django_select2",
)
CRISPY_TEMPLATE_PACK = "bootstrap4"
SITE_ID = 1
MIDDLEWARE = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASE_DIR, "templates")],
        "OPTIONS": {
            "loaders": (
                (
                    "django.template.loaders.cached.Loader",
                    (
                        "django.template.loaders.filesystem.Loader",
                        "django.template.loaders.app_directories.Loader",
                    ),
                ),
            ),
            "context_processors": (
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.template.context_processors.request",
                "dissemin.tcp.orcid_base_domain",
            ),
            "debug": True,
        },
    }
]
ROOT_URLCONF = "dissemin.urls"
WSGI_APPLICATION = "dissemin.wsgi.application"
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
# Celery/redis: never contacted by the docs build.
BROKER_URL = "redis://"
CELERY_RESULT_BACKEND = BROKER_URL
redis_client = None
CELERY_ACCEPT_CONTENT = ["pickle", "json", "msgpack", "yaml"]
CELERY_IMPORTS = ["backend.tasks"]
CELERYBEAT_SCHEDULE = {}
BROKER_TRANSPORT_OPTIONS = {"visibility_timeout": 43200}
LANGUAGE_CODE = "en-us"
POSSIBLE_LANGUAGE_CODES = ["en", "fr", "zh-hans", "mk"]
LANGUAGES = [
    ("en", "English"),
]
TIME_ZONE = "Europe/Paris"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),)
LOGIN_URL = "/accounts/login/"
LOGIN_REDIRECT_URL = "/"
REST_FRAMEWORK = {
    "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
    "PAGE_SIZE": 10,
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",  # silences useless warning
    "DEFAULT_RENDERER_CLASSES": (
        "rest_framework.renderers.JSONRenderer",
        "rest_framework.renderers.BrowsableAPIRenderer",
    ),
}
HAYSTACK_CONNECTIONS = {
    "default": {
        "ENGINE": "search.SearchEngine",
        "URL": "http://localhost:9200/",
        "INDEX_NAME": "dissemin",
    },
}
# No-op deposit notification hook; nothing is deposited during a docs build.
DEPOSIT_NOTIFICATION_CALLBACK = lambda payload: None
|
fcgear | svggear | # (c) 2014 David Douard <david.douard@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (LGPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# for detail see the LICENCE text file.
#
# FCGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with FCGear; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
import itertools
from involute import CreateExternalGear, CreateInternalGear, rotate
class SVGWireBuilder(object):
    """Accumulates SVG path commands, rotating every point by self.theta.

    The involute gear generator drives this builder through
    move/line/arc/curve/close calls; the resulting SVG path commands are
    collected in self.svg (one command string per call).
    """

    def __init__(self):
        # Rotation (radians, presumably — TODO confirm against involute.rotate)
        # applied to every incoming point before it is emitted.
        self.theta = 0.0
        # Last emitted point (after rotation); None until the first command.
        self.pos = None
        # List of SVG path command strings.
        self.svg = []
    def move(self, p):
        """Emit an absolute moveto ('M') at rotated point *p*."""
        p = rotate(p, self.theta)
        self.svg.append("M %s,%s" % (p[0], p[1]))
        self.pos = p
    def line(self, p):
        """Emit an absolute lineto ('L') to rotated point *p*."""
        p = rotate(p, self.theta)
        self.svg.append("L %s,%s" % (p[0], p[1]))
        self.pos = p
    def arc(self, p, r, sweep):
        """Emit a circular arc ('A') of radius *r* to rotated point *p*.

        *sweep* selects the SVG sweep flag (0/1); large-arc flag is always 0.
        """
        p = rotate(p, self.theta)
        self.svg.append("A %s,%s 0,0,%s %s,%s" % (r, r, str(sweep), p[0], p[1]))
        self.pos = p
    def curve(self, *points):
        """Add a Bezier curve from self.pos to points[-1]
        every other points are the control points of the Bezier curve (which
        will thus be of degree len(points) )
        """
        # Only cubic Beziers ('C') are supported here.
        assert len(points) == 3
        points = [rotate(p, self.theta) for p in points]
        self.svg.append("C %s,%s %s,%s %s,%s" % tuple(itertools.chain(*points)))
        self.pos = points[-1]
    def close(self):
        """Close the current subpath ('Z')."""
        self.svg.append("Z")
if __name__ == "__main__":
    # Debug CLI: generate one gear outline and print its SVG path commands.
    from optparse import OptionParser
    p = OptionParser(
        usage="usage: %prog [options] [[MODULE] NUMER_OF_TEETH]",
        description=(
            "Generates the outline of a metric, involute gear. "
            "Prints out an SVG path. "
            "This is mainly a debugging tool to quickly inspect the gear visually. "
            "For this, online tools like https://yqnn.github.io/svg-path-editor/ are handy. "
            "Most of the time it's enough to just use the first 20 lines or so, e.g.:\n\t"
            "%prog -z50 | head -n20 | pbcopy"
        ),
    )
    p.add_option(
        "-z",
        "--numer-of-teeth",
        help="The number of teeth for the gear.",
        metavar="NUMER_OF_TEETH",
        type="int",
    )
    p.add_option(
        "-m",
        "--module",
        help="The metric module, in svg user unit, i.e. unit-less. [default: %default]",
        metavar="MODULE",
        type="float",
        default=1,
    )
    p.add_option(
        "-p",
        "--pressure-angle",
        help="The pressure angle, in degree. [default: %default]",
        metavar="PRESSURE_ANGLE",
        type="float",
        default=20,
    )
    p.add_option(
        "-i",
        "--internal",
        help=(
            "Generates an internal gear, i.e. the addednum points towards the center "
            "and the root fillet is at the outside. [default: %default]"
        ),
        action="store_true",
        default=False,
    )
    p.add_option(
        "-a",
        "--addendum",
        help=(
            "The tooth height above the pitch line, normalized by the MODULE. "
            "[default: %default]"
        ),
        metavar="ADDENDUM_COEFFICIENT",
        type="float",
        default=1,
    )
    p.add_option(
        "-d",
        "--dedendum",
        help=(
            "The tooth height from the root to the pitch line, normalized by the MODULE. "
            "[default: %default]"
        ),
        metavar="DEDENDUM_COEFFICIENT",
        type="float",
        default=1.25,
    )
    p.add_option(
        "-f",
        "--fillet-radius",
        help=(
            "The radius of the root fillet, normalized by the MODULE. "
            "[default: %default]"
        ),
        metavar="FILLET_RADIUS_COEFFICIENT",
        type="float",
        default=0.38,
    )
    opts, args = p.parse_args()
    errors = []
    # Positional arguments mirror the -m/-z options: [MODULE] NUMER_OF_TEETH.
    if len(args) == 1:
        opts.numer_of_teeth = int(args[0])
    if len(args) == 2:
        opts.module = float(args[0])
        opts.numer_of_teeth = int(args[1])
    if len(args) > 2:
        errors.append("Too many arguments.")
    if opts.numer_of_teeth is None:
        errors.append("No number of teeth given.")
    if len(errors) > 0:
        errors.append("Try --help for more info.")
        # p.error() prints the message and exits with status 2.
        p.error("\n".join(errors))
    w = SVGWireBuilder()
    generator_func = CreateInternalGear if opts.internal else CreateExternalGear
    generator_func(
        w,
        opts.module,
        opts.numer_of_teeth,
        opts.pressure_angle,
        addCoeff=opts.addendum,
        dedCoeff=opts.dedendum,
        filletCoeff=opts.fillet_radius,
    )
    # One SVG path command per line.
    for l in w.svg:
        print(l)
|
engines | spotify | # SPDX-License-Identifier: AGPL-3.0-or-later
"""
Spotify (Music)
"""
import base64
from json import loads
from urllib.parse import urlencode
import requests
# about
about = {
    "website": "https://www.spotify.com",
    "wikidata_id": "Q689141",
    "official_api_documentation": "https://developer.spotify.com/web-api/search-item/",
    "use_official_api": True,
    "require_api_key": False,
    "results": "JSON",
}
# engine dependent config
categories = ["music"]
paging = True
# OAuth client credentials; must be supplied via the engine settings.
api_client_id = None
api_client_secret = None
timeout = 10.0
# search-url (offset is pageno-based, 20 results per page — see request())
url = "https://api.spotify.com/"
search_url = url + "v1/search?{query}&type=track&offset={offset}"
embedded_url = '<iframe data-src="https://embed.spotify.com/?uri=spotify:track:{audioid}"\
width="300" height="80" frameborder="0" allowtransparency="true"></iframe>'
# do search-request
def request(query, params):
    """Build the Spotify search request and attach an OAuth bearer token.

    Points params['url'] at the track-search endpoint for the requested
    page, then fetches a client-credentials token from the accounts
    service and stores it in params['headers'].
    """
    offset = (params["pageno"] - 1) * 20
    params["url"] = search_url.format(query=urlencode({"q": query}), offset=offset)
    credentials = "{}:{}".format(api_client_id, api_client_secret).encode()
    basic_auth = "Basic " + base64.b64encode(credentials).decode()
    token_response = requests.post(
        "https://accounts.spotify.com/api/token",
        timeout=timeout,
        data={"grant_type": "client_credentials"},
        headers={"Authorization": basic_auth},
    )
    token = loads(token_response.text).get("access_token")
    params["headers"] = {"Authorization": "Bearer {}".format(token)}
    return params
# get response from search-request
def response(resp):
results = []
search_res = loads(resp.text)
# parse results
for result in search_res.get("tracks", {}).get("items", {}):
if result["type"] == "track":
title = result["name"]
url = result["external_urls"]["spotify"]
content = "{} - {} - {}".format(
result["artists"][0]["name"], result["album"]["name"], result["name"]
)
embedded = embedded_url.format(audioid=result["id"])
# append result
results.append(
{"url": url, "title": title, "embedded": embedded, "content": content}
)
# return results
return results
|
downloaders | PremiumizeMe | # -*- coding: utf-8 -*-
import json
import re
from ..base.multi_downloader import MultiDownloader
class PremiumizeMe(MultiDownloader):
    __name__ = "PremiumizeMe"
    __type__ = "downloader"
    __version__ = "0.34"
    __status__ = "testing"

    __pattern__ = r"https?://(?:www\.)?premiumize\.me/file\?id=(?P<ID>\w+)"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", False),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
        ("revert_failed", "bool", "Revert to standard download if fails", True),
    ]

    __description__ = """Premiumize.me multi-downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("Florian Franzen", "FlorianFranzen@gmail.com"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    DIRECT_LINK = False

    # See https://www.premiumize.me/api
    API_URL = "https://www.premiumize.me/api/"

    def api_respond(self, method, **kwargs):
        """Call API *method* with *kwargs* and return the decoded JSON reply."""
        raw = self.load(self.API_URL + method, get=kwargs)
        return json.loads(raw)

    def handle_premium(self, pyfile):
        """Resolve the premium download link for *pyfile* and start it.

        URLs matching __pattern__ are premiumize.me items resolved via
        item/details; any other hoster URL goes through transfer/directdl.
        """
        apikey = self.account.info["login"]["password"]
        match = re.search(self.__pattern__, pyfile.url)
        if match is None:
            reply = self.api_respond(
                "transfer/directdl", src=pyfile.url, apikey=apikey
            )
            if reply["status"] == "success":
                first_item = reply["content"][0]
                self.pyfile.name = first_item["path"]
                self.pyfile.size = first_item["size"]
                self.download(first_item["link"])
            else:
                self.fail(reply["message"])
        else:
            reply = self.api_respond(
                "item/details", id=match.group("ID"), apikey=apikey
            )
            if reply.get("status") != "error":
                self.pyfile.name = reply["name"]
                self.pyfile.size = reply["size"]
                self.download(reply["link"])
            else:
                self.fail(reply["message"])
|
migrations | 0028_alter_sleep_options_remove_sleep_napping_sleep_nap | # Generated by Django 4.2 on 2023-05-07 00:28
from django.db import migrations, models
from django.utils import timezone
def set_sleep_nap_values(apps, schema_editor):
    """Backfill Sleep.nap: True when the (localized) start time falls inside
    the configured nap window."""
    # The model must be imported to ensure its overridden `save` method is run.
    from core.models import Sleep
    for instance in Sleep.objects.all():
        local_start = timezone.localtime(instance.start).time()
        instance.nap = (
            Sleep.settings.nap_start_min
            <= local_start
            <= Sleep.settings.nap_start_max
        )
        instance.save()
class Migration(migrations.Migration):
    """Replace Sleep.napping with a computed Sleep.nap boolean.

    The new field is added nullable first so existing rows can be
    backfilled by set_sleep_nap_values, then tightened to non-null
    (blank allowed) once every row has a value.
    """

    dependencies = [
        ("core", "0027_alter_timer_options_remove_timer_duration_and_more"),
        ("dbsettings", "0001_initial"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="sleep",
            options={
                "default_permissions": ("view", "add", "change", "delete"),
                "ordering": ["-start"],
                "permissions": [("can_edit_sleep_settings", "Can edit Sleep settings")],
                "verbose_name": "Sleep",
                "verbose_name_plural": "Sleep",
            },
        ),
        migrations.RemoveField(
            model_name="sleep",
            name="napping",
        ),
        # Nullable on creation so the data migration below can fill it in.
        migrations.AddField(
            model_name="sleep",
            name="nap",
            field=models.BooleanField(null=True, verbose_name="Nap"),
        ),
        migrations.RunPython(
            set_sleep_nap_values, reverse_code=migrations.RunPython.noop
        ),
        # All rows populated: drop null=True, keep blank=True for forms.
        migrations.AlterField(
            model_name="sleep",
            name="nap",
            field=models.BooleanField(blank=True, verbose_name="Nap"),
        ),
    ]
|
femtaskpanels | task_constraint_currentdensity | # ***************************************************************************
# * Copyright (c) 2023 Uwe Stöhr <uwestoehr@lyx.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM constraint current density task panel for the document object"
__author__ = "Uwe Stöhr"
__url__ = "https://www.freecad.org"
## @package task_constraint_currentdensity
# \ingroup FEM
# \brief task panel for constraint current density object
import FreeCAD
import FreeCADGui
from femguiutils import selection_widgets
from femtools import femutils, membertools
class _TaskPanel(object):
    """Task panel for the FEM constraint current density document object.

    Combines the CurrentDensity.ui parameter widget with a geometry
    selection widget and writes edited values back to the document object
    when the dialog is accepted.
    """

    def __init__(self, obj):
        # The constraint document object being edited.
        self._obj = obj
        self._paramWidget = FreeCADGui.PySideUic.loadUi(
            FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/CurrentDensity.ui"
        )
        self._initParamWidget()
        # geometry selection widget
        # start with Solid in list!
        self._selectionWidget = selection_widgets.GeometryElementsSelection(
            obj.References, ["Solid", "Face"], True, False
        )
        # form made from param and selection widget
        self.form = [self._paramWidget, self._selectionWidget]
        # Locate the analysis mesh and its source part to toggle visibility.
        analysis = obj.getParentGroup()
        self._mesh = None
        self._part = None
        if analysis is not None:
            self._mesh = membertools.get_single_member(analysis, "Fem::FemMeshObject")
        if self._mesh is not None:
            self._part = femutils.get_part_to_mesh(self._mesh)
        self._partVisible = None
        self._meshVisible = None
    def open(self):
        """Show the part and hide the mesh while editing, remembering the
        previous visibility so it can be restored on close."""
        if self._mesh is not None and self._part is not None:
            self._meshVisible = self._mesh.ViewObject.isVisible()
            self._partVisible = self._part.ViewObject.isVisible()
            self._mesh.ViewObject.hide()
            self._part.ViewObject.show()
    def reject(self):
        """Discard edits: restore visibility and leave edit mode."""
        self._restoreVisibility()
        FreeCADGui.ActiveDocument.resetEdit()
        return True
    def accept(self):
        """Apply widget values to the object, recompute, leave edit mode."""
        if self._obj.References != self._selectionWidget.references:
            self._obj.References = self._selectionWidget.references
        self._applyWidgetChanges()
        self._obj.Document.recompute()
        FreeCADGui.ActiveDocument.resetEdit()
        self._restoreVisibility()
        return True
    def _restoreVisibility(self):
        """Restore the mesh/part visibility recorded in open()."""
        if self._mesh is not None and self._part is not None:
            if self._meshVisible:
                self._mesh.ViewObject.show()
            else:
                self._mesh.ViewObject.hide()
            if self._partVisible:
                self._part.ViewObject.show()
            else:
                self._part.ViewObject.hide()
    def _initParamWidget(self):
        """Populate the UI from the object's current density properties and
        bind each quantity box to its property via expression bindings."""
        self._paramWidget.realXQSB.setProperty("value", self._obj.CurrentDensity_re_1)
        FreeCADGui.ExpressionBinding(self._paramWidget.realXQSB).bind(
            self._obj, "CurrentDensity_re_1"
        )
        self._paramWidget.realYQSB.setProperty("value", self._obj.CurrentDensity_re_2)
        FreeCADGui.ExpressionBinding(self._paramWidget.realYQSB).bind(
            self._obj, "CurrentDensity_re_2"
        )
        self._paramWidget.realZQSB.setProperty("value", self._obj.CurrentDensity_re_3)
        FreeCADGui.ExpressionBinding(self._paramWidget.realZQSB).bind(
            self._obj, "CurrentDensity_re_3"
        )
        self._paramWidget.imagXQSB.setProperty("value", self._obj.CurrentDensity_im_1)
        FreeCADGui.ExpressionBinding(self._paramWidget.imagXQSB).bind(
            self._obj, "CurrentDensity_im_1"
        )
        self._paramWidget.imagYQSB.setProperty("value", self._obj.CurrentDensity_im_2)
        FreeCADGui.ExpressionBinding(self._paramWidget.imagYQSB).bind(
            self._obj, "CurrentDensity_im_2"
        )
        self._paramWidget.imagZQSB.setProperty("value", self._obj.CurrentDensity_im_3)
        FreeCADGui.ExpressionBinding(self._paramWidget.imagZQSB).bind(
            self._obj, "CurrentDensity_im_3"
        )
        # "unspec" checkboxes mirror the per-component Disabled flags.
        self._paramWidget.reXunspecBox.setChecked(
            self._obj.CurrentDensity_re_1_Disabled
        )
        self._paramWidget.reYunspecBox.setChecked(
            self._obj.CurrentDensity_re_2_Disabled
        )
        self._paramWidget.reZunspecBox.setChecked(
            self._obj.CurrentDensity_re_3_Disabled
        )
        self._paramWidget.imXunspecBox.setChecked(
            self._obj.CurrentDensity_im_1_Disabled
        )
        self._paramWidget.imYunspecBox.setChecked(
            self._obj.CurrentDensity_im_2_Disabled
        )
        self._paramWidget.imZunspecBox.setChecked(
            self._obj.CurrentDensity_im_3_Disabled
        )
    def _applyCurrentDensityChanges(self, enabledBox, currentDensityQSB):
        """Read one (checkbox, quantity box) pair.

        Returns (disabled_flag, value); on unparsable input a message is
        printed and the value falls back to "0.0 A/m^2".
        """
        enabled = enabledBox.isChecked()
        currentdensity = None
        try:
            currentdensity = currentDensityQSB.property("value")
        except ValueError:
            FreeCAD.Console.PrintMessage(
                "Wrong input. Not recognised input: '{}' "
                "Current density has not been set.\n".format(currentDensityQSB.text())
            )
            currentdensity = "0.0 A/m^2"
        return enabled, currentdensity
    def _applyWidgetChanges(self):
        # apply the current densities and their enabled state
        (
            self._obj.CurrentDensity_re_1_Disabled,
            self._obj.CurrentDensity_re_1,
        ) = self._applyCurrentDensityChanges(
            self._paramWidget.reXunspecBox, self._paramWidget.realXQSB
        )
        (
            self._obj.CurrentDensity_re_2_Disabled,
            self._obj.CurrentDensity_re_2,
        ) = self._applyCurrentDensityChanges(
            self._paramWidget.reYunspecBox, self._paramWidget.realYQSB
        )
        (
            self._obj.CurrentDensity_re_3_Disabled,
            self._obj.CurrentDensity_re_3,
        ) = self._applyCurrentDensityChanges(
            self._paramWidget.reZunspecBox, self._paramWidget.realZQSB
        )
        (
            self._obj.CurrentDensity_im_1_Disabled,
            self._obj.CurrentDensity_im_1,
        ) = self._applyCurrentDensityChanges(
            self._paramWidget.imXunspecBox, self._paramWidget.imagXQSB
        )
        (
            self._obj.CurrentDensity_im_2_Disabled,
            self._obj.CurrentDensity_im_2,
        ) = self._applyCurrentDensityChanges(
            self._paramWidget.imYunspecBox, self._paramWidget.imagYQSB
        )
        (
            self._obj.CurrentDensity_im_3_Disabled,
            self._obj.CurrentDensity_im_3,
        ) = self._applyCurrentDensityChanges(
            self._paramWidget.imZunspecBox, self._paramWidget.imagZQSB
        )
|
community | cache | from __future__ import annotations
from asyncio import Future
from typing import TYPE_CHECKING
from ipv8.requestcache import RandomNumberCache
from tribler.core.components.bandwidth_accounting.db.transaction import (
BandwidthTransactionData,
)
if TYPE_CHECKING:
from tribler.core.components.bandwidth_accounting.community.bandwidth_accounting_community import (
BandwidthAccountingCommunity,
)
class BandwidthTransactionSignCache(RandomNumberCache):
    """
    This cache keeps track of pending bandwidth transaction signature requests.
    """

    def __init__(
        self,
        community: BandwidthAccountingCommunity,
        transaction: BandwidthTransactionData,
    ) -> None:
        """
        Initialize the cache.

        :param community: The bandwidth community associated with this cache.
        :param transaction: The transaction that will be signed by the counterparty.
        """
        super().__init__(community.request_cache, "bandwidth-tx-sign")
        self.transaction = transaction
        # Future resolved when the counterparty's signature arrives; registering
        # it ties its lifetime to this cache entry.
        self.future = Future()
        self.register_future(self.future)

    @property
    def timeout_delay(self) -> float:
        """
        :return The timeout of this sign cache, defaults to one hour.
        """
        return 3600.0

    def on_timeout(self) -> None:
        """
        Invoked when the cache times out.
        """
        # NOTE(review): this only logs; the registered future is presumably
        # cancelled by the request-cache machinery — confirm against
        # RandomNumberCache/register_future semantics.
        self._logger.info(
            "Sign request for transaction %s timed out!", self.transaction
        )
|
gui-qt | add_translation_widget | from collections import namedtuple
from html import escape as html_escape
from os.path import split as os_path_split
from plover import _
from plover.engine import StartingStrokeState
from plover.formatting import RetroFormatter
from plover.gui_qt.add_translation_widget_ui import Ui_AddTranslationWidget
from plover.gui_qt.steno_validator import StenoValidator
from plover.misc import shorten_path
from plover.resource import resource_filename
from plover.steno import normalize_steno, sort_steno_strokes
from plover.translation import escape_translation, unescape_translation
from PyQt5.QtCore import QEvent, pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget
class AddTranslationWidget(QWidget, Ui_AddTranslationWidget):

    # i18n: Widget: “AddTranslationWidget”, tooltip.
    __doc__ = _("Add a new translation to the dictionary.")

    # Bundle of translator-related engine settings swapped in/out when one of
    # the input fields gains or loses focus.
    EngineState = namedtuple(
        "EngineState", "dictionary_filter translator starting_stroke"
    )

    # Emitted whenever the validity of the strokes input changes.
    mappingValid = pyqtSignal(bool)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setupUi(self)
        engine = QApplication.instance().engine
        self._engine = engine
        self._dictionaries = []
        self._reverse_order = False
        self._selected_dictionary = None
        self._mapping_is_valid = False
        engine.signal_connect("config_changed", self.on_config_changed)
        self.on_config_changed(engine.config)
        engine.signal_connect("dictionaries_loaded", self.on_dictionaries_loaded)
        self.on_dictionaries_loaded(self._engine.dictionaries)
        # HTML templates used when rendering strokes/translations in the info
        # labels.
        self._special_fmt = '<span style="' + "font-family:monospace;" + '">%s</span>'
        self._special_fmt_bold = (
            '<span style="'
            + "font-family:monospace;"
            + "font-weight:bold;"
            + '">%s</span>'
        )
        self.strokes.setValidator(StenoValidator())
        self.strokes.installEventFilter(self)
        self.translation.installEventFilter(self)
        with engine:
            # Pre-populate the strokes or translations with last stroke/word.
            last_translations = engine.translator_state.translations
            translation = None
            for t in reversed(last_translations):
                # Find the last undoable stroke.
                if t.has_undo():
                    translation = t
                    break
            # Is it a raw stroke?
            if translation is not None and not translation.english:
                # Yes.
                self.strokes.setText(translation.formatting[0].text)
                self.on_strokes_edited()
                self.strokes.selectAll()
            else:
                # No, grab the last-formatted word.
                retro_formatter = RetroFormatter(last_translations)
                last_words = retro_formatter.last_words(strip=True)
                if last_words:
                    self.translation.setText(last_words[0])
                    self.on_translation_edited()
            # Capture the engine state so it can be restored when the widget
            # is dismissed, and prepare the two capture states used while the
            # strokes/translation fields have focus.
            self._original_state = self.EngineState(
                None, engine.translator_state, engine.starting_stroke_state
            )
            engine.clear_translator_state()
            self._strokes_state = self.EngineState(
                self._dictionary_filter,
                engine.translator_state,
                StartingStrokeState(True, False, "/"),
            )
            engine.clear_translator_state()
            self._translations_state = self.EngineState(
                None, engine.translator_state, StartingStrokeState(True, False, " ")
            )
        self._engine_state = self._original_state
        self._focus = None

    @property
    def mapping_is_valid(self):
        """True if the strokes input currently passes the steno validator."""
        return self._mapping_is_valid

    def select_dictionary(self, dictionary_path):
        """Select the target dictionary by path and refresh the combo box."""
        self._selected_dictionary = dictionary_path
        self._update_items()

    def eventFilter(self, watched, event):
        """Swap engine state as the strokes/translation fields change focus.

        The event is never consumed (always returns False).
        """
        if event.type() == QEvent.FocusIn:
            if watched == self.strokes:
                self._focus_strokes()
            elif watched == self.translation:
                self._focus_translation()
        elif event.type() == QEvent.FocusOut:
            if watched in (self.strokes, self.translation):
                self._unfocus()
        return False

    def _set_engine_state(self, state):
        """Install *state* (translator, starting stroke, filter) on the engine."""
        with self._engine as engine:
            prev_state = self._engine_state
            if prev_state is not None and prev_state.dictionary_filter is not None:
                engine.remove_dictionary_filter(prev_state.dictionary_filter)
            engine.translator_state = state.translator
            engine.starting_stroke_state = state.starting_stroke
            if state.dictionary_filter is not None:
                engine.add_dictionary_filter(state.dictionary_filter)
            self._engine_state = state

    @staticmethod
    def _dictionary_filter(key, value):
        # Allow undo...
        if value == "=undo":
            return False
        # ...and translations with special entries. Do this by looking for
        # braces but take into account escaped braces and slashes.
        escaped = value.replace("\\\\", "").replace("\\{", "")
        special = "{#" in escaped or "{PLOVER:" in escaped
        return not special

    def _unfocus(self):
        """Restore the original engine state whichever field was focused."""
        self._unfocus_strokes()
        self._unfocus_translation()

    def _focus_strokes(self):
        if self._focus == "strokes":
            return
        self._unfocus_translation()
        self._set_engine_state(self._strokes_state)
        self._focus = "strokes"

    def _unfocus_strokes(self):
        if self._focus != "strokes":
            return
        self._set_engine_state(self._original_state)
        self._focus = None

    def _focus_translation(self):
        if self._focus == "translation":
            return
        self._unfocus_strokes()
        self._set_engine_state(self._translations_state)
        self._focus = "translation"

    def _unfocus_translation(self):
        if self._focus != "translation":
            return
        self._set_engine_state(self._original_state)
        self._focus = None

    def _strokes(self):
        """Return the normalized steno strokes from the strokes field."""
        strokes = self.strokes.text().strip()
        has_prefix = strokes.startswith("/")
        # Collapse whitespace/slash runs into a single canonical separator.
        strokes = "/".join(strokes.replace("/", " ").split())
        if has_prefix:
            strokes = "/" + strokes
        strokes = normalize_steno(strokes)
        return strokes

    def _translation(self):
        """Return the unescaped translation text."""
        translation = self.translation.text().strip()
        return unescape_translation(translation)

    def _update_items(self, dictionaries=None, reverse_order=None):
        """Rebuild the dictionary combo box, preserving the user selection."""
        if dictionaries is not None:
            self._dictionaries = dictionaries
        if reverse_order is not None:
            self._reverse_order = reverse_order
        iterable = self._dictionaries
        if self._reverse_order:
            iterable = reversed(iterable)
        self.dictionary.clear()
        for d in iterable:
            item = shorten_path(d.path)
            if not d.enabled:
                # i18n: Widget: “AddTranslationWidget”.
                item = _("{dictionary} (disabled)").format(dictionary=item)
            self.dictionary.addItem(item)
        selected_index = 0
        if self._selected_dictionary is None:
            # No user selection, select first enabled dictionary.
            for n, d in enumerate(self._dictionaries):
                if d.enabled:
                    selected_index = n
                    break
        else:
            # Keep user selection.
            for n, d in enumerate(self._dictionaries):
                if d.path == self._selected_dictionary:
                    selected_index = n
                    break
        if self._reverse_order:
            # Combo box order is reversed relative to the dictionary list.
            selected_index = self.dictionary.count() - selected_index - 1
        self.dictionary.setCurrentIndex(selected_index)

    def on_dictionaries_loaded(self, dictionaries):
        # We only care about loaded writable dictionaries.
        dictionaries = [d for d in dictionaries.dicts if not d.readonly]
        if dictionaries != self._dictionaries:
            self._update_items(dictionaries=dictionaries)

    def on_config_changed(self, config_update):
        if "classic_dictionaries_display_order" in config_update:
            self._update_items(
                reverse_order=config_update["classic_dictionaries_display_order"]
            )

    def on_dictionary_selected(self, index):
        # Map the combo-box index back to the dictionary list order.
        if self._reverse_order:
            index = len(self._dictionaries) - index - 1
        self._selected_dictionary = self._dictionaries[index].path

    def _format_label(self, fmt, strokes, translation=None, filename=None):
        """Render *fmt* with HTML-escaped strokes/translation/filename."""
        if strokes:
            strokes = ", ".join(
                self._special_fmt % html_escape("/".join(s))
                for s in sort_steno_strokes(strokes)
            )
        if translation:
            translation = self._special_fmt_bold % html_escape(
                escape_translation(translation)
            )
        if filename:
            filename = html_escape(filename)
        return fmt.format(strokes=strokes, translation=translation, filename=filename)

    def on_strokes_edited(self):
        """Validate the strokes field and refresh the lookup info label."""
        mapping_is_valid = self.strokes.hasAcceptableInput()
        if mapping_is_valid != self._mapping_is_valid:
            self._mapping_is_valid = mapping_is_valid
            self.mappingValid.emit(mapping_is_valid)
        if not mapping_is_valid:
            return
        strokes = self._strokes()
        if strokes:
            translations = self._engine.raw_lookup_from_all(strokes)
            if translations:
                # i18n: Widget: “AddTranslationWidget”.
                info = self._format_label(_("{strokes} maps to "), (strokes,))
                entries = [
                    self._format_label(
                        ("• " if i else "") + "<bf>{translation}<bf/>\t((unknown))",
                        None,
                        translation,
                        os_path_split(resource_filename(dictionary.path))[1],
                    )
                    for i, (translation, dictionary) in enumerate(translations)
                ]
                if len(entries) > 1:
                    # i18n: Widget: “AddTranslationWidget”.
                    entries.insert(1, "<br />" + _("Overwritten entries:"))
                info += "<br />".join(entries)
            else:
                info = self._format_label(
                    # i18n: Widget: “AddTranslationWidget”.
                    _("{strokes} is not mapped in any dictionary"),
                    (strokes,),
                )
        else:
            info = ""
        self.strokes_info.setText(info)

    def on_translation_edited(self):
        """Refresh the reverse-lookup info label for the translation field."""
        translation = self._translation()
        if translation:
            strokes = self._engine.reverse_lookup(translation)
            if strokes:
                # i18n: Widget: “AddTranslationWidget”.
                fmt = _("{translation} is mapped to: {strokes}")
            else:
                # i18n: Widget: “AddTranslationWidget”.
                fmt = _("{translation} is not in the dictionary")
            info = self._format_label(fmt, strokes, translation)
        else:
            info = ""
        self.translation_info.setText(info)

    def save_entry(self):
        """Save the strokes→translation mapping to the selected dictionary.

        Returns ``(dictionary, strokes, old_translation, translation)`` when
        an entry was added; implicitly returns None when either field is
        empty.
        """
        self._unfocus()
        strokes = self._strokes()
        translation = self._translation()
        if strokes and translation:
            index = self.dictionary.currentIndex()
            if self._reverse_order:
                # Negative indexing maps the reversed combo order back onto
                # the dictionary list.
                index = -index - 1
            dictionary = self._dictionaries[index]
            old_translation = self._engine.dictionaries[dictionary.path].get(strokes)
            self._engine.add_translation(
                strokes, translation, dictionary_path=dictionary.path
            )
            return dictionary, strokes, old_translation, translation

    def reject(self):
        """Discard pending input focus and restore the original engine state."""
        self._unfocus()
        self._set_engine_state(self._original_state)
|
declaration | option | # -*- coding: utf-8 -*-
"""This module defines Option class and its helpers.
"""
from __future__ import annotations
import enum
from typing import Any, Generic, Optional, TypeVar, cast
from ckan.types import Validator, ValidatorFactory
from typing_extensions import Self
T = TypeVar("T")
class SectionMixin:
    """Mixin that allows adding objects to different sections of INI-file."""

    # Default INI section new options/annotations belong to.
    _section = "app:main"

    def set_section(self, section: str) -> Self:
        """Change the section of this annotation and return self (chainable)."""
        self._section = section
        return self
class Flag(enum.Flag):
    """Modifiers for :py:class:`~ckan.config.declaration.option.Option`

    ignored: this option is ignored by CKAN(not used or unconditionally
    overridden)

    experimental: this option is not stabilized and can change in
    future. Mainly exist for extension developers, as only stable features are
    included into public CKAN release.

    internal: this option is used internally by CKAN or Flask. Such options are
    not documented and are not supposed to be modified by users. Think about
    them as private class attributes.

    required: this option cannot be missing/empty. Add such flag to the option
    only if CKAN application won't even start without them and there is no
    sensible default.

    editable: this option is runtime editable. Technically, every option can be
    modified. This flag means that there is an expectation that option will be
    modified. For example, this option is exposed via configuration form in the
    Admin UI.

    commented: this option is commented by default in the config file. Use it
    for optional settings that may break the application when default value is
    used. Example of such option is a cookie domain. When it's missing, the
    current domain is used, so this value is optional. But if you try to
    provide default value, `example.com` or any other domain, CKAN
    authentication will not work as long as application runs on different
    domain. While it's similar to `placeholder` attribute of the
    :py:class:`~ckan.config.declaration.option.Option`, their goals are
    different.

    Option.placeholder:

    - shows an example of expected value
    - is ignored when config option is **missing** from the config file
    - shown as a default value in the config file generated from template. For
      example, `Option<key=a, placeholder=b, commented=False>` is added to the
      config file as `a = b`. After this, `config.get('a')` returns `b`,
      because it's explicitly written in the config file.

    Flag.commented:

    - Marks option as commented by default
    - Does not change behavior of `Option.default` and `Option.placeholder`.
    - switches option to the commented state in the config file generated from
      template. For example, `Option<key=a, placeholder=b, commented=True>` is
      added to the config file as `# a = b`. After this,
      `config.get('a')` returns `None`, because there is no option `a` in
      the config file(it's commented, which makes this option non-existing)

    If the option is missing from the config file, both `placeholder` and
    `commented` are virtually ignored, having absolutely no impact on the value
    of the config option.

    reserved_*(01-10): these flags are added for extension developers. CKAN
    doesn't treat them specially, neither includes them in groups, like
    `not_safe`/`not_iterable`. These flags are completely ignored by CKAN. If
    your extension enhances the behavior of config options using some sort of
    boolean flags - use reserved markers. Always rely on a config option that
    controls, which reserved marker to use, in order to avoid conflicts with
    other extensions. Example:

    >>> # BAD
    >>> marker = Flag.reserved_02
    >>>
    >>> # GOOD
    >>> ### config file
    >>> # my_extension.feature_flag = reserved_02
    >>> key = config.get('my_extension.feature_flag')
    >>> marker = Flag[key]

    This allows the end user to manually solve conflicts, when multiple
    extensions are trying to use the same reserved flag.
    """

    ignored = enum.auto()
    experimental = enum.auto()
    internal = enum.auto()
    required = enum.auto()
    editable = enum.auto()
    commented = enum.auto()
    reserved_01 = enum.auto()
    reserved_02 = enum.auto()
    reserved_03 = enum.auto()
    reserved_04 = enum.auto()
    reserved_05 = enum.auto()
    reserved_06 = enum.auto()
    reserved_07 = enum.auto()
    reserved_08 = enum.auto()
    reserved_09 = enum.auto()
    reserved_10 = enum.auto()

    @classmethod
    def none(cls):
        """Return the base flag with no modifiers enabled."""
        return cls(0)

    @classmethod
    def non_iterable(cls):
        """Return the union of flags that should not be iterated over.

        If an option has any of these flags, it isn't listed by the majority of
        serializers. For example, such option is not added to the documentation
        and to the config template.
        """
        return cls.ignored | cls.experimental | cls.internal

    @classmethod
    def not_safe(cls):
        """Return the union of flags marking an unsafe option.

        It's never safe to use an unsafe option. For example, unsafe option
        won't have any default value, so one should never try
        `config[unsafe_option]`. Basically, unsafe options must be treated as
        non-existing and never be used in code.
        """
        return cls.ignored | cls.internal
class Annotation(SectionMixin, str):
    """Details that are not attached to any option.

    Mainly serves documentation purposes. Can be used for creating section
    separators or blocks of text with the recommendations, that are not
    connected to any particular option and rather describe the whole section.
    """

    pass
class Option(SectionMixin, Generic[T]):
    """All the known details about config option.

    Option-objects are created from the config declaration and describe the
    individual config options. They contain all the known details about config
    option, such as default values, validators and visibility flags.

    Avoid direct creation of Option-objects. Use corresponding
    :py:class:`~ckan.config.declaration.Declaration` methods instead:

    - declare
    - declare_bool
    - declare_int
    - declare_list
    - declare_dynamic
    """

    __slots__ = (
        "flags",
        "default",
        "description",
        "validators",
        "placeholder",
        "example",
        "legacy_key",
    )

    # Visibility/behavior modifiers (see Flag).
    flags: Flag
    # Value used when the option is missing from the config file.
    default: Optional[T]
    description: Optional[str]
    placeholder: Optional[str]
    example: Optional[Any]
    # Space-separated string with the names of validators.
    validators: str
    legacy_key: Optional[str]

    def __init__(self, default: Optional[T] = None):
        self.flags = Flag.none()
        self.description = None
        self.placeholder = None
        self.example = None
        self.validators = ""
        self.default = default
        self.legacy_key = None

    def str_value(self) -> str:
        """Convert option's default value into the string.

        If the option has `as_list` validator and default value is represented
        by the Python's `list` object, result is a space-separated list of all
        the members of the value. In other cases this method just does naive
        string conversion.

        If validators are doing complex transformations, for example string ID
        turns into :py:class:`~ckan.model.User` object, this method won't
        convert the user object back into ID. Instead it will just do something
        like `str(user)` and give you `<User ...>`. So it's up to the person
        who declares config option to add a combination of default value and
        validators that won't throw an error after such conversion.

        If more sophisticated logic cannot be avoided, consider creating a
        subclass of :py:class:`~ckan.config.declaration.option.Option` with
        custom `str_value` implementation and declaring the option using
        `declare_option` method of
        :py:class:`~ckan.config.declaration.Declaration`.
        """
        # NOTE(review): naive substring check — any validator name containing
        # "as_list" triggers the list join.
        as_list = "as_list" in self.get_validators()
        if isinstance(self.default, list) and as_list:
            return " ".join(map(str, self.default))
        return str(self.default) if self.has_default() else ""

    def set_flag(self, flag: Flag) -> Self:
        """Enable specified flag."""
        self.flags |= flag
        return self

    def has_flag(self, flag: Flag) -> bool:
        """Check if option has specified flag enabled."""
        return bool(self.flags & flag)

    def has_default(self) -> bool:
        """Check if option has configured default."""
        # None is the marker for "no default"; a default of None itself is
        # therefore indistinguishable from an unset default.
        return self.default is not None

    def set_default(self, default: T) -> Self:
        """Change the default value of option.

        The default value is used whenever the option is missing from the
        config file.
        """
        self.default = default
        return self

    def set_example(self, example: str) -> Self:
        """Provide an example (documentation) of the valid value for option."""
        self.example = example
        return self

    def set_description(self, description: str) -> Self:
        """Change the description of option."""
        self.description = description
        return self

    def set_placeholder(self, placeholder: str) -> Self:
        """Add a placeholder for option.

        Placeholder is used during generation of the config template. It's
        similar to the default value, because it will be shown in the generated
        configuration file as a value for option. But, unlike the default
        value, if the option is missing from the config file, no default value
        is used.

        Placeholder can be used for different kind of secrets and URLs, when
        you want to show the user how the value should look like.
        """
        self.placeholder = placeholder
        return self

    def set_validators(self, validators: str) -> Self:
        """Replace validators of the option.

        Use a space-separated string with the names of validators that must be
        applied to the value.
        """
        self.validators = validators
        return self

    def append_validators(self, validators: str, before: bool = False) -> Self:
        """Add extra validators before or after the existing validators.

        Use it together with `Declaration.declare_*` shortcuts in order to
        define more specific common options::

            >>> # Declare a mandatory boolean option
            >>> declaration.declare_bool(...).append_validators(
                "not_missing", before=True)

        By default, validators are added after the existing validators. In
        order to add a new validator before the other validators, pass
        `before=True` argument.
        """
        left = self.validators
        right = validators
        if before:
            left, right = right, left
        # Avoid a stray space when either side is empty.
        glue = " " if left and right else ""
        self.validators = left + glue + right
        return self

    def get_validators(self) -> str:
        """Return the string with current validators."""
        return self.validators

    def experimental(self) -> Self:
        """Enable experimental-flag for value."""
        self.set_flag(Flag.experimental)
        return self

    def required(self) -> Self:
        """Enable required-flag for value."""
        self.set_flag(Flag.required)
        return self

    def normalize(self, value: Any) -> Any:
        """Return the value processed by option's validators."""
        from ckan.lib.navl.dictization_functions import validate

        data, _ = validate({"value": value}, {"value": self._parse_validators()})
        return data.get("value")

    def _parse_validators(self) -> list[Validator]:
        """Turn the string with validators into the list of functions."""
        return _validators_from_string(self.get_validators())
# taken from ckanext-scheming
# (https://github.com/ckan/ckanext-scheming/blob/master/ckanext/scheming/validation.py#L332).
# This syntax is familiar for everyone and we can switch to the original
# when scheming become a part of core.
def _validators_from_string(s: str) -> list[Validator]:
    """
    convert a schema validators string to a list of validators

    e.g. "if_empty_same_as(name) unicode_safe" becomes:
    [if_empty_same_as("name"), unicode_safe]
    """
    import ast

    from ckan.logic import get_validator

    out = []
    parts = s.split()
    for p in parts:
        if "(" in p and p[-1] == ")":
            # Validator factory with arguments, e.g. "default(5)".
            name, args = p.split("(", 1)
            args = args[:-1]  # trim trailing ')'
            try:
                parsed_args = ast.literal_eval(args)
                if not isinstance(parsed_args, tuple) or not parsed_args:
                    # it's a single argument. `not parsed_args` means that this
                    # single argument is an empty tuple,
                    # for example: "default(())"
                    parsed_args = (parsed_args,)
            except (ValueError, TypeError, SyntaxError, MemoryError):
                # Not a Python literal: fall back to raw comma-separated
                # strings (quotes are NOT stripped in this branch).
                parsed_args = args.split(",")
            factory = cast(ValidatorFactory, get_validator(name))
            v = factory(*parsed_args)
        else:
            # Plain validator name with no arguments.
            v = get_validator(p)
        out.append(v)
    return out
|
community | dispatcher | import random
from collections import defaultdict
from ipv8.messaging.anonymization.tunnel import (
CIRCUIT_ID_PORT,
CIRCUIT_STATE_READY,
CIRCUIT_TYPE_DATA,
CIRCUIT_TYPE_RP_DOWNLOADER,
CIRCUIT_TYPE_RP_SEEDER,
)
from ipv8.taskmanager import TaskManager, task
from tribler.core.components.socks_servers.socks5.conversion import (
UdpPacket,
socks5_serializer,
)
class TunnelDispatcher(TaskManager):
    """
    This class is responsible for dispatching SOCKS5 traffic to the right circuits and vice versa.
    This dispatcher acts as a "secondary" proxy between the SOCKS5 UDP session and the tunnel community.
    """

    def __init__(self, tunnels):
        super().__init__()
        self.tunnels = tunnels
        self.socks_servers = []
        # Map to keep track of the circuits associated with each destination.
        self.con_to_cir = defaultdict(dict)
        # Map to keep track of the circuit id to UDP connection.
        self.cid_to_con = {}
        # Periodically drop connections whose UDP association went away.
        self.register_task("check_connections", self.check_connections, interval=30)

    def set_socks_servers(self, socks_servers):
        # NOTE(review): list index appears to encode hop count (index + 1) —
        # see select_circuit/on_socks5_tcp_data; confirm against callers.
        self.socks_servers = socks_servers

    def on_incoming_from_tunnel(self, community, circuit, origin, data):
        """
        We received some data from the tunnel community. Dispatch it to the right UDP SOCKS5 socket.

        Returns True when the datagram was forwarded to a SOCKS5 client,
        False otherwise.
        """
        if circuit.ctype in [CIRCUIT_TYPE_RP_DOWNLOADER, CIRCUIT_TYPE_RP_SEEDER]:
            # Rendezvous circuits: present a synthetic origin address derived
            # from the circuit id instead of the real peer address.
            origin = (community.circuit_id_to_ip(circuit.circuit_id), CIRCUIT_ID_PORT)
        try:
            connection = self.cid_to_con[circuit.circuit_id]
        except KeyError:
            # No connection registered for this circuit yet: fall back to the
            # first session with a matching hop count that has a usable UDP
            # association.
            session_hops = (
                circuit.goal_hops
                if circuit.ctype != CIRCUIT_TYPE_RP_DOWNLOADER
                else circuit.goal_hops - 1
            )
            if (
                session_hops > len(self.socks_servers)
                or not self.socks_servers[session_hops - 1].sessions
            ):
                self._logger.error("No connection found for %d hops", session_hops)
                return False
            connection = next(
                (
                    s
                    for s in self.socks_servers[session_hops - 1].sessions
                    if s.udp_connection and s.udp_connection.remote_udp_address
                ),
                None,
            )
        if connection is None or connection.udp_connection is None:
            self._logger.error(
                "Connection has closed or has not gotten an UDP associate"
            )
            # NOTE(review): connection may be None here; connection_dead(None)
            # is a no-op pop but still logs an error.
            self.connection_dead(connection)
            return False
        packet = socks5_serializer.pack_serializable(UdpPacket(0, 0, origin, data))
        connection.udp_connection.send_datagram(packet)
        return True

    def on_socks5_udp_data(self, udp_connection, request):
        """
        We received some data from the SOCKS5 server (from the SOCKS5 client). This method
        selects a circuit to send this data over to the final destination.
        """
        connection = udp_connection.socksconnection
        try:
            circuit = self.con_to_cir[connection][request.destination]
        except KeyError:
            circuit = self.select_circuit(connection, request)
            if circuit is None:
                return False
        if circuit.state != CIRCUIT_STATE_READY:
            self._logger.debug(
                "Circuit not ready, dropping %d bytes to %s",
                len(request.data),
                request.destination,
            )
            return False
        self._logger.debug(
            "Sending data over circuit %d destined for %r:%r",
            circuit.circuit_id,
            *request.destination,
        )
        self.tunnels.send_data(
            circuit.peer,
            circuit.circuit_id,
            request.destination,
            ("0.0.0.0", 0),
            request.data,
        )
        return True

    @task
    async def on_socks5_tcp_data(self, tcp_connection, destination, request):
        """Tunnel an HTTP request received over SOCKS5 TCP and relay the reply."""
        self._logger.debug("Got request for %s: %s", destination, request)
        # Hop count is derived from which SOCKS server received the request.
        hops = self.socks_servers.index(tcp_connection.socksserver) + 1
        try:
            response = await self.tunnels.perform_http_request(
                destination, request, hops
            )
            self._logger.debug("Got response from %s: %s", destination, response)
        except RuntimeError as e:
            self._logger.info("Failed to get HTTP response using tunnels: %s", e)
            return
        if response:
            tcp_connection.transport.write(response)
        # The connection is closed after a single request/response exchange.
        tcp_connection.transport.close()

    def select_circuit(self, connection, request):
        """Pick (or start building) a circuit for *request*'s destination.

        Returns a ready circuit, or None when none is available yet (a new
        circuit may have been requested; the datagram is retried once it is
        ready).
        """
        if request.destination[1] == CIRCUIT_ID_PORT:
            # Destination is a synthetic rendezvous address: map it straight
            # back to its circuit.
            circuit = self.tunnels.circuits.get(
                self.tunnels.ip_to_circuit_id(request.destination[0])
            )
            if (
                circuit
                and circuit.state == CIRCUIT_STATE_READY
                and circuit.ctype
                in [CIRCUIT_TYPE_RP_DOWNLOADER, CIRCUIT_TYPE_RP_SEEDER]
            ):
                return circuit
        hops = self.socks_servers.index(connection.socksserver) + 1
        # Ready data circuits with the right hop count that are either
        # unclaimed or already claimed by this connection.
        options = [
            c
            for c in self.tunnels.circuits.values()
            if c.goal_hops == hops
            and c.state == CIRCUIT_STATE_READY
            and c.ctype == CIRCUIT_TYPE_DATA
            and self.cid_to_con.get(c.circuit_id, connection) == connection
        ]
        if not options:
            # We allow each connection to claim at least 1 circuit. If no such circuit exists we'll create one.
            if connection in self.cid_to_con.values():
                self._logger.debug(
                    "No circuit for sending data to %s", request.destination
                )
                return None
            circuit = self.tunnels.create_circuit(goal_hops=hops)
            if circuit is None:
                self._logger.debug(
                    "Failed to create circuit for data to %s", request.destination
                )
                return None
            self._logger.debug(
                "Creating circuit for data to %s. Retrying later..", request.destination
            )
            self.cid_to_con[circuit.circuit_id] = connection
            # Re-dispatch the original datagram once the circuit is ready.
            circuit.ready.add_done_callback(
                lambda f,
                c=connection.udp_connection,
                r=request: self.on_socks5_udp_data(c, r) if f.result() else None
            )
            return None
        circuit = random.choice(options)
        self.cid_to_con[circuit.circuit_id] = connection
        self.con_to_cir[connection][request.destination] = circuit
        self._logger.debug(
            "Select circuit %d for %s", circuit.circuit_id, request.destination
        )
        return circuit

    def circuit_dead(self, broken_circuit):
        """
        When a circuit dies, we update the destinations dictionary and remove all peers that are affected.
        """
        con = self.cid_to_con.pop(broken_circuit.circuit_id, None)
        destinations = set()
        destination_to_circuit = self.con_to_cir.get(con, {})
        for destination, circuit in list(destination_to_circuit.items()):
            if circuit == broken_circuit:
                destination_to_circuit.pop(destination, None)
                destinations.add(destination)
        self._logger.debug("Deleted %d peers from destination list", len(destinations))
        return destinations

    def connection_dead(self, connection):
        """Forget a SOCKS5 connection and release every circuit it claimed."""
        self.con_to_cir.pop(connection, None)
        for cid, con in list(self.cid_to_con.items()):
            if con == connection:
                self.cid_to_con.pop(cid, None)
        self._logger.error("Detected closed connection")

    def check_connections(self):
        """Periodic task: drop connections that lost their UDP association."""
        for connection in list(self.cid_to_con.values()):
            if not connection.udp_connection:
                self.connection_dead(connection)
|
extractor | blerp | # coding: utf-8
from __future__ import unicode_literals
import json
from ..utils import strip_or_none, traverse_obj
from .common import InfoExtractor
class BlerpIE(InfoExtractor):
    """Extractor for blerp.com soundbite pages (metadata via GraphQL API)."""

    IE_NAME = "blerp"
    _VALID_URL = r"https?://(?:www\.)?blerp\.com/soundbites/(?P<id>[0-9a-zA-Z]+)"
    _TESTS = [
        {
            "url": "https://blerp.com/soundbites/6320fe8745636cb4dd677a5a",
            "info_dict": {
                "id": "6320fe8745636cb4dd677a5a",
                "title": "Samsung Galaxy S8 Over the Horizon Ringtone 2016",
                "uploader": "luminousaj",
                "uploader_id": "5fb81e51aa66ae000c395478",
                "ext": "mp3",
                "tags": [
                    "samsung",
                    "galaxy",
                    "s8",
                    "over the horizon",
                    "2016",
                    "ringtone",
                ],
            },
        },
        {
            "url": "https://blerp.com/soundbites/5bc94ef4796001000498429f",
            "info_dict": {
                "id": "5bc94ef4796001000498429f",
                "title": "Yee",
                "uploader": "179617322678353920",
                "uploader_id": "5ba99cf71386730004552c42",
                "ext": "mp3",
                "tags": ["YEE", "YEET", "wo ha haah catchy tune yee", "yee"],
            },
        },
    ]

    # GraphQL operation/query mirroring what the web player sends.
    _GRAPHQL_OPERATIONNAME = "webBitePageGetBite"
    _GRAPHQL_QUERY = """query webBitePageGetBite($_id: MongoID!) {
web {
biteById(_id: $_id) {
...bitePageFrag
__typename
}
__typename
}
}
fragment bitePageFrag on Bite {
_id
title
userKeywords
keywords
color
visibility
isPremium
owned
price
extraReview
isAudioExists
image {
filename
original {
url
__typename
}
__typename
}
userReactions {
_id
reactions
createdAt
__typename
}
topReactions
totalSaveCount
saved
blerpLibraryType
license
licenseMetaData
playCount
totalShareCount
totalFavoriteCount
totalAddedToBoardCount
userCategory
userAudioQuality
audioCreationState
transcription
userTranscription
description
createdAt
updatedAt
author
listingType
ownerObject {
_id
username
profileImage {
filename
original {
url
__typename
}
__typename
}
__typename
}
transcription
favorited
visibility
isCurated
sourceUrl
audienceRating
strictAudienceRating
ownerId
reportObject {
reportedContentStatus
__typename
}
giphy {
mp4
gif
__typename
}
audio {
filename
original {
url
__typename
}
mp3 {
url
__typename
}
__typename
}
__typename
}
"""

    def _real_extract(self, url):
        audio_id = self._match_id(url)
        # POST the GraphQL query the same way the web client does.
        data = {
            "operationName": self._GRAPHQL_OPERATIONNAME,
            "query": self._GRAPHQL_QUERY,
            "variables": {"_id": audio_id},
        }
        headers = {"Content-Type": "application/json"}
        json_result = self._download_json(
            "https://api.blerp.com/graphql",
            audio_id,
            data=json.dumps(data).encode("utf-8"),
            headers=headers,
        )
        bite_json = json_result["data"]["web"]["biteById"]
        info_dict = {
            "id": bite_json["_id"],
            "url": bite_json["audio"]["mp3"]["url"],
            "title": bite_json["title"],
            "uploader": traverse_obj(
                bite_json, ("ownerObject", "username"), expected_type=strip_or_none
            ),
            "uploader_id": traverse_obj(
                bite_json, ("ownerObject", "_id"), expected_type=strip_or_none
            ),
            "ext": "mp3",
            # Strip whitespace from keywords and drop empty ones; an empty
            # list collapses to None.
            "tags": list(
                filter(
                    None,
                    map(
                        strip_or_none,
                        (
                            traverse_obj(bite_json, "userKeywords", expected_type=list)
                            or []
                        ),
                    ),
                )
                or None
            ),
        }
        return info_dict
|
views | social | """First cut at a notification system."""
from __future__ import annotations
from abilian.core.extensions import csrf, db
from abilian.core.models.subjects import User
from abilian.i18n import render_template_i18n
from abilian.sbe.apps.communities.security import require_admin
from abilian.sbe.apps.notifications import TOKEN_SERIALIZER_NAME
from abilian.services.auth.views import get_token_status
from flask import current_app as app
from flask import request
from flask_login import current_user
from werkzeug.exceptions import MethodNotAllowed
from ..tasks.social import make_message, send_daily_social_digest_to
from . import notifications
__all__ = ()
route = notifications.route
# The route decorator must be outermost: decorators apply bottom-up, so with
# @require_admin below @route the admin-wrapped function is what gets
# registered. The previous order (@require_admin above @route) registered the
# *unprotected* function with Flask, bypassing the admin check entirely.
@route("/debug/social/")
@require_admin
def debug_social():
    """Send a digest to current user, or user with given email.

    Also displays the email in the browser as a result.
    """
    email = request.args.get("email")
    if email:
        user = User.query.filter(User.email == email).one()
    else:
        user = current_user
    msg = make_message(user)
    status = send_daily_social_digest_to(user)
    if status:
        return msg.html
    else:
        return "No message sent."
@route("/unsubscribe_sbe/<token>/", methods=["GET", "POST"])
@csrf.exempt
def unsubscribe_sbe(token: str) -> str:
    """Handle the daily-digest unsubscribe link.

    GET shows a confirmation page; POST disables the daily notification
    preference for the user encoded in *token*. An expired or invalid token
    renders an error page instead.
    """
    expired, invalid, user = get_token_status(token, TOKEN_SERIALIZER_NAME)
    if expired or invalid:
        return render_template_i18n("notifications/invalid-token.html")

    if request.method == "POST":
        preferences = app.services["preferences"]
        preferences.set_preferences(user, **{"sbe:notifications:daily": False})
        db.session.commit()
        return render_template_i18n("notifications/unsubscribed.html", token=token)

    if request.method == "GET":
        return render_template_i18n(
            "notifications/confirm-unsubscribe.html", token=token
        )

    # Defensive: the route only accepts GET/POST, so this is unreachable.
    raise MethodNotAllowed()
|
builtinViews | implantEditor | import re
import gui.builtinMarketBrowser.pfSearchBox as SBox
import gui.display as d
# noinspection PyPackageRequirements
import wx
from gui.bitmap_loader import BitmapLoader
from gui.marketBrowser import SearchBox
from service.market import Market
# noinspection PyPackageRequirements
from wx.lib.buttons import GenBitmapButton
def stripHtml(text):
    """Convert an HTML snippet to plain text.

    Replaces <br> tags (any spacing / self-closing form) with newlines, then
    strips every remaining tag. Regexes use raw strings: the originals relied
    on "\\s" surviving plain-string escaping, which is an invalid escape
    sequence and a SyntaxWarning on modern Python.
    """
    text = re.sub(r"<\s*br\s*/?\s*>", "\n", text)
    text = re.sub(r"</?[^/]+?(/\s*)?>", "", text)
    return text
class BaseImplantEditorView(wx.Panel):
    """Abstract two-pane implant editor.

    Left pane: market tree (or search results) of available implants.
    Right pane: implants plugged into the current context.
    Subclasses define the "context" by implementing bindContext,
    getImplantsFromContext, addImplantToContext and removeImplantFromContext.
    """

    def addMarketViewImage(self, iconFile):
        """Add an icon to the tree image list; return its index, or -1."""
        if iconFile is None:
            return -1
        bitmap = BitmapLoader.getBitmap(iconFile, "icons")
        if bitmap is None:
            return -1
        else:
            return self.availableImplantsImageList.Add(bitmap)

    def __init__(self, parent):
        wx.Panel.__init__(
            self,
            parent,
            id=wx.ID_ANY,
            pos=wx.DefaultPosition,
            size=wx.DefaultSize,
            style=wx.TAB_TRAVERSAL,
        )
        self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))

        pmainSizer = wx.BoxSizer(wx.HORIZONTAL)
        availableSizer = wx.BoxSizer(wx.VERTICAL)

        # Search box and flat search-result list (hidden until a search runs).
        self.searchBox = SearchBox(self)
        self.itemView = ItemView(self)
        self.itemView.Hide()
        availableSizer.Add(self.searchBox, 0, wx.EXPAND)
        availableSizer.Add(self.itemView, 1, wx.EXPAND)

        # Market tree of available implants; the root itself is hidden.
        self.availableImplantsTree = wx.TreeCtrl(
            self, wx.ID_ANY, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT
        )
        root = self.availableRoot = self.availableImplantsTree.AddRoot("Available")
        self.availableImplantsImageList = wx.ImageList(16, 16)
        self.availableImplantsTree.SetImageList(self.availableImplantsImageList)
        availableSizer.Add(self.availableImplantsTree, 1, wx.EXPAND)
        pmainSizer.Add(availableSizer, 1, wx.ALL | wx.EXPAND, 5)

        # Middle column: add/remove buttons.
        buttonSizer = wx.BoxSizer(wx.VERTICAL)
        buttonSizer.AddStretchSpacer()
        self.btnAdd = GenBitmapButton(
            self,
            wx.ID_ADD,
            BitmapLoader.getBitmap("fit_add_small", "gui"),
            style=wx.BORDER_NONE,
        )
        buttonSizer.Add(self.btnAdd, 0)
        self.btnRemove = GenBitmapButton(
            self,
            wx.ID_REMOVE,
            BitmapLoader.getBitmap("fit_delete_small", "gui"),
            style=wx.BORDER_NONE,
        )
        buttonSizer.Add(self.btnRemove, 0)
        buttonSizer.AddStretchSpacer()
        pmainSizer.Add(buttonSizer, 0, wx.EXPAND, 0)

        # Right pane: implants currently plugged into the context.
        characterImplantSizer = wx.BoxSizer(wx.VERTICAL)
        self.pluggedImplantsTree = AvailableImplantsView(self)
        characterImplantSizer.Add(self.pluggedImplantsTree, 1, wx.ALL | wx.EXPAND, 5)
        pmainSizer.Add(characterImplantSizer, 1, wx.EXPAND, 5)
        self.SetSizer(pmainSizer)

        # Hover state used to throttle tooltip churn (see mouse handlers).
        self.hoveredLeftTreeTypeID = None
        self.hoveredRightListRow = None

        # Populate the market tree
        sMkt = Market.getInstance()
        for mktGrp in sMkt.getImplantTree():
            iconId = self.addMarketViewImage(sMkt.getIconByMarketGroup(mktGrp))
            childId = self.availableImplantsTree.AppendItem(
                root, mktGrp.name, iconId, data=mktGrp.ID
            )
            # Groups with children get a placeholder node so they can expand;
            # the placeholder is replaced lazily by expandLookup().
            if sMkt.marketGroupHasTypesCheck(mktGrp) is False:
                self.availableImplantsTree.AppendItem(childId, "dummy")
        self.availableImplantsTree.SortChildren(self.availableRoot)

        # Bind the event to replace dummies by real data
        self.availableImplantsTree.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.expandLookup)
        self.availableImplantsTree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.itemSelected)
        self.availableImplantsTree.Bind(wx.EVT_MOTION, self.OnLeftTreeMouseMove)
        self.availableImplantsTree.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeftTreeMouseLeave)
        self.itemView.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.itemSelected)
        self.pluggedImplantsTree.Bind(wx.EVT_MOTION, self.OnRightListMouseMove)

        # Bind add & remove buttons
        self.btnAdd.Bind(wx.EVT_BUTTON, self.itemSelected)
        self.btnRemove.Bind(wx.EVT_BUTTON, self.removeItem)

        # We update with an empty list first to set the initial size for Layout(), then update later with actual
        # implants for character. This helps with sizing issues.
        self.pluggedImplantsTree.update([])
        self.bindContext()
        self.Layout()
        self.update()

    def bindContext(self):
        # Binds self.contextChanged to whatever changes the context
        raise NotImplementedError()

    def getImplantsFromContext(self):
        """Gets list of implants from current context"""
        raise NotImplementedError()

    def addImplantToContext(self, item):
        """Adds implant to the current context"""
        raise NotImplementedError()

    def removeImplantFromContext(self, implant):
        """Removes implant from the current context"""
        raise NotImplementedError()

    def update(self):
        """Updates implant list based off the current context"""
        self.implants = self.getImplantsFromContext()[:]
        # Sort by implant slot ("implantness" attribute) for stable display.
        self.implants.sort(key=lambda i: int(i.getModifiedItemAttr("implantness")))
        self.pluggedImplantsTree.update(self.implants)

    def contextChanged(self, event):
        """Refresh the plugged-implants list when the bound context changes."""
        self.update()
        event.Skip()

    def expandLookup(self, event):
        """Lazily replace placeholder tree nodes with market groups or items."""
        tree = self.availableImplantsTree
        sMkt = Market.getInstance()
        parent = event.Item
        child, _ = tree.GetFirstChild(parent)
        text = tree.GetItemText(child)
        if text == "dummy" or text == "itemdummy":
            tree.Delete(child)

            # if the dummy item is a market group, replace with actual market groups
            if text == "dummy":
                # Add 'real stoof!' instead
                currentMktGrp = sMkt.getMarketGroup(
                    tree.GetItemData(parent), eager="children"
                )
                for childMktGrp in sMkt.getMarketGroupChildren(currentMktGrp):
                    iconId = self.addMarketViewImage(sMkt.getIconByMarketGroup(childMktGrp))
                    childId = tree.AppendItem(
                        parent, childMktGrp.name, iconId, data=childMktGrp.ID
                    )
                    if sMkt.marketGroupHasTypesCheck(childMktGrp) is False:
                        tree.AppendItem(childId, "dummy")
                    else:
                        tree.AppendItem(childId, "itemdummy")

            # replace dummy with actual items
            if text == "itemdummy":
                currentMktGrp = sMkt.getMarketGroup(tree.GetItemData(parent))
                items = sMkt.getItemsByMarketGroup(currentMktGrp)
                for item in items:
                    iconId = self.addMarketViewImage(item.iconID)
                    tree.AppendItem(parent, item.name, iconId, data=item)
        tree.SortChildren(parent)

    def itemSelected(self, event):
        """Add the selected implant(s) from the tree or the search list."""
        if event.EventObject is self.btnAdd:
            # janky fix that sets EventObject so that we don't have similar code elsewhere.
            if self.itemView.IsShown():
                event.EventObject = self.itemView
            else:
                event.EventObject = self.availableImplantsTree

        if event.EventObject is self.itemView:
            # Search list supports multi-selection; add every selected row.
            curr = event.EventObject.GetFirstSelected()
            while curr != -1:
                item = self.itemView.items[curr]
                self.addImplantToContext(item)
                curr = event.EventObject.GetNextSelected(curr)
        else:
            root = self.availableImplantsTree.GetSelection()
            if not root.IsOk():
                return
            nchilds = self.availableImplantsTree.GetChildrenCount(root)
            # Only leaf nodes carry actual implant items.
            if nchilds == 0:
                item = self.availableImplantsTree.GetItemData(root)
                self.addImplantToContext(item)
            else:
                event.Skip()
                return
        self.update()

    def removeItem(self, event):
        """Remove the implant selected in the plugged-implants list."""
        pos = self.pluggedImplantsTree.GetFirstSelected()
        if pos != -1:
            self.removeImplantFromContext(self.implants[pos])
            self.update()

    # Due to https://github.com/wxWidgets/Phoenix/issues/1372 we cannot set tooltips on
    # tree itself; work this around with following two methods, by setting tooltip to
    # parent window
    def OnLeftTreeMouseMove(self, event):
        event.Skip()
        treeItemId, _ = self.availableImplantsTree.HitTest(event.Position)
        if not treeItemId:
            if self.hoveredLeftTreeTypeID is not None:
                self.hoveredLeftTreeTypeID = None
                self.SetToolTip(None)
            return
        item = self.availableImplantsTree.GetItemData(treeItemId)
        # Market-group nodes store IDs, leaf nodes store item objects;
        # only items expose an isImplant attribute.
        isImplant = getattr(item, "isImplant", False)
        if not isImplant:
            if self.hoveredLeftTreeTypeID is not None:
                self.hoveredLeftTreeTypeID = None
                self.SetToolTip(None)
            return
        if self.hoveredLeftTreeTypeID == item.ID:
            return
        if self.ToolTip is not None:
            self.SetToolTip(None)
        else:
            self.hoveredLeftTreeTypeID = item.ID
            toolTip = wx.ToolTip(stripHtml(item.description))
            toolTip.SetMaxWidth(self.GetSize().Width)
            self.SetToolTip(toolTip)

    def OnLeftTreeMouseLeave(self, event):
        event.Skip()
        self.SetToolTip(None)

    def OnRightListMouseMove(self, event):
        event.Skip()
        row, _, col = self.pluggedImplantsTree.HitTestSubItem(event.Position)
        if row != self.hoveredRightListRow:
            if self.pluggedImplantsTree.ToolTip is not None:
                self.pluggedImplantsTree.SetToolTip(None)
            else:
                self.hoveredRightListRow = row
                try:
                    implant = self.implants[row]
                except IndexError:
                    self.pluggedImplantsTree.SetToolTip(None)
                else:
                    toolTip = wx.ToolTip(stripHtml(implant.item.description))
                    toolTip.SetMaxWidth(self.pluggedImplantsTree.GetSize().Width)
                    self.pluggedImplantsTree.SetToolTip(toolTip)
class AvailableImplantsView(d.Display):
    """List of the implants currently plugged into the editor's context."""

    DEFAULT_COLS = ["attr:implantness", "Base Name"]

    def __init__(self, parent):
        super().__init__(parent, style=wx.LC_SINGLE_SEL)
        # Double-clicking a row removes that implant via the parent editor.
        self.Bind(wx.EVT_LEFT_DCLICK, parent.removeItem)
class ItemView(d.Display):
    """Flat search-result list of implants, shown in place of the market tree."""

    DEFAULT_COLS = ["Base Icon", "Base Name"]

    def __init__(self, parent):
        d.Display.__init__(self, parent)
        self.parent = parent
        self.searchBox = parent.searchBox
        # Row index currently hovered; used to throttle tooltip updates.
        self.hoveredRow = None
        self.items = []

        # Bind search actions
        self.searchBox.Bind(SBox.EVT_TEXT_ENTER, self.scheduleSearch)
        self.searchBox.Bind(SBox.EVT_SEARCH_BTN, self.scheduleSearch)
        self.searchBox.Bind(SBox.EVT_CANCEL_BTN, self.clearSearch)
        self.searchBox.Bind(SBox.EVT_TEXT, self.scheduleSearch)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)

    def clearSearch(self, event=None):
        """Hide the search list and show the market tree again."""
        if self.IsShown():
            self.parent.availableImplantsTree.Show()
            self.Hide()
            self.parent.Layout()
        # Only clear the search box when invoked from the cancel-button event.
        if event:
            self.searchBox.Clear()
        self.items = []
        self.update(self.items)

    def scheduleSearch(self, event=None):
        """Kick off an asynchronous implant search from the search box text."""
        sMkt = Market.getInstance()
        search = self.searchBox.GetLineText(0)
        # Make sure we do not count wildcards as search symbol
        realsearch = search.replace("*", "").replace("?", "")
        # Show nothing if query is too short
        if len(realsearch) < 3:
            self.clearSearch()
            return
        sMkt.searchItems(search, self.populateSearch, "implants")

    def populateSearch(self, itemIDs):
        """Search callback: display found implants (boosters filtered out)."""
        if not self.IsShown():
            self.parent.availableImplantsTree.Hide()
            self.Show()
            self.parent.Layout()
        items = Market.getItems(itemIDs)
        items = [i for i in items if i.group.name != "Booster"]
        self.items = sorted(list(items), key=lambda i: i.name)
        self.update(self.items)

    def OnMouseMove(self, event):
        """Show the hovered item's description as the list tooltip."""
        event.Skip()
        row, _, col = self.HitTestSubItem(event.Position)
        if row != self.hoveredRow:
            if self.ToolTip is not None:
                self.SetToolTip(None)
            else:
                self.hoveredRow = row
                try:
                    item = self.items[row]
                except IndexError:
                    self.SetToolTip(None)
                else:
                    toolTip = wx.ToolTip(stripHtml(item.description))
                    toolTip.SetMaxWidth(self.GetSize().Width)
                    self.SetToolTip(toolTip)
|
group | group | from django.db import models
class Group(models.Model):
    """A group entity, unique per (team, group_key, group_type_index)."""

    class Meta:
        constraints = [
            models.UniqueConstraint(
                fields=["team_id", "group_key", "group_type_index"],
                name="unique team_id/group_key/group_type_index combo",
            )
        ]

    team: models.ForeignKey = models.ForeignKey("Team", on_delete=models.CASCADE)
    group_key: models.CharField = models.CharField(
        max_length=400, null=False, blank=False
    )
    group_type_index: models.IntegerField = models.IntegerField(null=False, blank=False)
    group_properties: models.JSONField = models.JSONField(default=dict)
    created_at: models.DateTimeField = models.DateTimeField(auto_now_add=True)

    # used to prevent race conditions with set and set_once
    properties_last_updated_at: models.JSONField = models.JSONField(default=dict)

    # used for evaluating if we need to override the value or not (value: set or set_once)
    properties_last_operation: models.JSONField = models.JSONField(default=dict)

    # current version of the group, used to sync with ClickHouse and collapse rows correctly
    version: models.BigIntegerField = models.BigIntegerField(null=False)
|
frescobaldi-app | guistyle | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Sets the user interface style.
"""
import app
from PyQt5.QtCore import QSettings
from PyQt5.QtWidgets import QStyleFactory
_system_default = None # becomes app.qApp.style().objectName()
def keys():
    """Return the available Qt GUI style names, lowercased."""
    return [style_name.lower() for style_name in QStyleFactory.keys()]
def setStyle():
    """Apply the user-configured GUI style, falling back to the system default."""
    global _system_default
    configured = QSettings().value("guistyle", "", str).lower()
    if configured not in keys():
        # Unknown or empty setting: revert to the style recorded at startup.
        configured = _system_default
    current = app.qApp.style().objectName()
    if configured != current:
        app.qApp.setStyle(QStyleFactory.create(configured))
@app.oninit
def initialize():
    """Initializes the GUI style setup. Called on app startup."""
    global _system_default
    # Remember the platform default so setStyle() can fall back to it later.
    _system_default = app.qApp.style().objectName()
    app.settingsChanged.connect(setStyle)
    setStyle()
|
downloaders | RapidgatorNet | # -*- coding: utf-8 -*-
import json
import re
from datetime import timedelta
import pycurl
from pyload.core.network.http.exceptions import BadHeader
from pyload.core.utils import seconds
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..anticaptchas.SolveMedia import SolveMedia
from ..base.simple_downloader import SimpleDownloader
class RapidgatorNet(SimpleDownloader):
    __name__ = "RapidgatorNet"
    __type__ = "downloader"
    __version__ = "0.59"
    __status__ = "testing"

    __pattern__ = (
        r"https?://(?:www\.)?(?:rapidgator\.(?:net|asia|)|rg\.to)/file/(?P<ID>\w+)"
    )
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]

    __description__ = """Rapidgator.net downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zoidberg", "zoidberg@mujmail.cz"),
        ("chrox", None),
        ("stickell", "l.stickell@yahoo.it"),
        ("Walter Purcaro", "vuolter@gmail.com"),
        ("GammaCode", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    COOKIES = [("rapidgator.net", "lang", "en")]

    # Patterns used by SimpleDownloader to scrape the file page.
    NAME_PATTERN = r"<title>Download file (?P<N>.*)</title>"
    SIZE_PATTERN = r"File size:\s*<strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong>"
    OFFLINE_PATTERN = r">(404 File not found|Error 404)"

    # Extracts the JS variables driving the free-download AJAX flow.
    JSVARS_PATTERN = r"\s+var\s*(startTimerUrl|getDownloadUrl|captchaUrl|fid|secs)\s*=\s*\'?(.*?)\'?;"

    PREMIUM_ONLY_PATTERN = (
        r"You can download files up to|This file can be downloaded by premium only<"
    )
    DOWNLOAD_LIMIT_ERROR_PATTERN = (
        r"You have reached your (daily|hourly) downloads limit"
    )
    IP_BLOCKED_ERROR_PATTERN = (
        r"You can`t download more than 1 file at a time in free mode\." ""
    )
    WAIT_PATTERN = r"(?:Delay between downloads must be not less than|Try again in).+"

    LINK_FREE_PATTERN = r"return '(https?://\w+.rapidgator.net/.*)';"

    RECAPTCHA_PATTERN = r'"http://api\.recaptcha\.net/challenge\?k=(.*?)"'
    SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.script\?k=(.*?)"'

    URL_REPLACEMENTS = [(__pattern__ + ".*", r"https://rapidgator.net/file/\g<ID>")]

    API_URL = "https://rapidgator.net/api/"

    def api_request(self, method, **kwargs):
        """Call the Rapidgator JSON API and dispatch on its status code.

        Returns the 'response' payload on success; 404 marks the file
        offline, 423 restarts the download in free mode, anything else
        re-logs-in and retries.
        """
        try:
            html = self.load(self.API_URL + method, get=kwargs)
            json_data = json.loads(html)
            status = json_data["response_status"]
            message = json_data["response_details"]
        except BadHeader as exc:
            # API errors may arrive as HTTP error codes instead of JSON.
            status = exc.code
            message = exc.content

        if status == 200:
            return json_data["response"]
        elif status == 404:
            self.offline()
        elif status == 423:
            self.restart(message, premium=False)
        else:
            self.account.relogin()
            self.retry(wait=60)

    def setup(self):
        # Premium accounts can resume, run parallel downloads and use
        # unlimited chunks; free mode is limited to a single chunk.
        self.resume_download = self.multi_dl = self.premium
        self.chunk_limit = -1 if self.premium else 1

    def handle_premium(self, pyfile):
        """Fetch file info and a direct download link through the API."""
        json_data = self.api_request(
            "file/info", sid=self.account.info["data"]["sid"], url=pyfile.url
        )
        self.info["md5"] = json_data["hash"]
        pyfile.name = json_data["filename"]
        pyfile.size = json_data["size"]

        json_data = self.api_request(
            "file/download", sid=self.account.info["data"]["sid"], url=pyfile.url
        )
        self.link = json_data["url"]

    def check_errors(self):
        """Extend the base error checks with Rapidgator-specific limits."""
        super().check_errors()

        m = re.search(self.DOWNLOAD_LIMIT_ERROR_PATTERN, self.data)
        if m is not None:
            self.log_warning(m.group(0))
            # Daily limit: wait until midnight; hourly limit: wait one hour.
            if m.group(1) == "daily":
                wait_time = seconds.to_midnight()
            else:
                wait_time = timedelta(hours=1).total_seconds()
            self.retry(wait=wait_time, msg=m.group(0))

        m = re.search(self.IP_BLOCKED_ERROR_PATTERN, self.data)
        if m is not None:
            msg = self._(
                "You can't download more than one file within a certain time period in free mode"
            )
            self.log_warning(msg)
            self.retry(wait=timedelta(hours=24).total_seconds(), msg=msg)

    def handle_free(self, pyfile):
        """Free-mode flow: start timer, poll AJAX link, solve captcha if shown."""
        jsvars = dict(re.findall(self.JSVARS_PATTERN, self.data))
        self.log_debug(jsvars)

        url = "https://rapidgator.net{}?fid={}".format(
            jsvars.get("startTimerUrl", "/download/AjaxStartTimer"), jsvars["fid"]
        )
        jsvars.update(self.get_json_response(url))

        # Server-imposed countdown before the download link is released.
        self.wait(jsvars.get("secs", 180), False)

        url = "https://rapidgator.net{}?sid={}".format(
            jsvars.get("getDownloadUrl", "/download/AjaxGetDownloadLink"), jsvars["sid"]
        )
        jsvars.update(self.get_json_response(url))

        url = "https://rapidgator.net{}".format(
            jsvars.get("captchaUrl", "/download/captcha")
        )
        self.data = self.load(url, ref=pyfile.url)

        m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is not None:
            # self.link = m.group(1)
            self.download(m.group(1), ref=url)
        else:
            captcha = self.handle_captcha()
            if not captcha:
                self.error(self._("Captcha pattern not found"))

            if isinstance(captcha, ReCaptcha):
                response = captcha.challenge()
                post_params = {"g-recaptcha-response": response}
            elif isinstance(captcha, SolveMedia):
                response, challenge = captcha.challenge()
                post_params = {
                    "adcopy_challenge": challenge,
                    "adcopy_response": response,
                }
            post_params["DownloadCaptchaForm[verifyCode]"] = response

            self.data = self.load(url, post=post_params, ref=url)
            if "The verification code is incorrect" in self.data:
                self.retry_captcha()
            else:
                m = re.search(self.LINK_FREE_PATTERN, self.data)
                if m is not None:
                    # self.link = m.group(1)
                    self.download(m.group(1), ref=url)

    def handle_captcha(self):
        """Return the captcha handler matching the page, or None if no key found."""
        for klass in (ReCaptcha, SolveMedia):
            captcha = klass(self.pyfile)
            if captcha.detect_key():
                self.captcha = captcha
                return captcha

    def get_json_response(self, url):
        """Load a URL as an AJAX request and return the decoded JSON dict."""
        self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
        res = self.load(url, ref=self.pyfile.url)
        self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With:"])
        # A non-JSON body means the AJAX endpoint rejected us; retry cleanly.
        if not res.startswith("{"):
            self.retry()
        self.log_debug(url, res)
        return json.loads(res)
|
scripts | doctor | """Debugging tool for those finding bugs in Beancount.
This tool is able to dump lexer/parser state, and will provide other services in
the name of debugging.
"""
__copyright__ = "Copyright (C) 2014-2016 Martin Blais"
__license__ = "GNU GPLv2"
import collections
import logging
import os
import re
import sys
from typing import List, Tuple
import click
from beancount import loader
from beancount.core import (
account,
account_types,
compare,
convert,
data,
getters,
inventory,
prices,
realization,
)
from beancount.core.display_context import Align
from beancount.parser import lexer, options, parser, printer
from beancount.parser.context import render_file_context
from beancount.parser.version import VERSION
from beancount.scripts.directories import validate_directories
# Common click type for ledger file arguments: must exist, resolved to abspath.
ledger_path = click.Path(resolve_path=True, exists=True)
class FileLocation(click.ParamType):
    """Click parameter type for a "filename:lineno" (or bare "lineno") spec."""

    name = "location"

    def convert(self, value, param, ctx):
        matched = re.match(r"(?:(.+):)?(\d+)$", value)
        if matched is None:
            # fail() raises, aborting conversion.
            self.fail("{!r} is not a valid location".format(value), param, ctx)
        filename, lineno = matched.groups()
        filename = os.path.abspath(filename) if filename else filename
        return filename, int(lineno)
class FileRegion(click.ParamType):
    """Click parameter type for "filename:start:end" (or "start:end") specs."""

    name = "region"

    def convert(self, value, param, ctx):
        matched = re.match(r"(?:(.+):)?(\d+):(\d+)$", value)
        if matched is None:
            # fail() raises, aborting conversion.
            self.fail("{!r} is not a valid region".format(value), param, ctx)
        filename, start_lineno, end_lineno = matched.groups()
        filename = os.path.abspath(filename) if filename else filename
        return filename, int(start_lineno), int(end_lineno)
class Group(click.Group):
    """Click group supporting command aliases and '-'/'_' interchangeability."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps alias -> canonical command name.
        self.aliases = {}

    def command(self, *args, alias=None, **kwargs):
        """Like click.Group.command, with an optional alias for the command."""
        wrap = click.Group.command(self, *args, **kwargs)

        def decorator(f):
            cmd = wrap(f)
            if alias:
                self.aliases[alias] = cmd.name
            return cmd

        return decorator

    def get_command(self, ctx, cmd_name):
        # aliases
        name = self.aliases.get(cmd_name, cmd_name)
        # allow to use '_' or '-' in command names.
        name = name.replace("_", "-")
        return click.Group.get_command(self, ctx, name)
# Entry point: a click group with alias and '-'/'_' normalization support.
# (No docstring on purpose: click would turn it into CLI help text.)
@click.command(cls=Group)
@click.version_option(message=VERSION)
def doctor():
    pass
@doctor.command(alias="dump-lexer")
@click.argument("filename", type=ledger_path)
def lex(filename):
    """Dump the lexer output for a Beancount syntax file."""
    write = sys.stdout.write
    for token, lineno, text, obj in lexer.lex_iter(filename):
        # A None token is rendered as the literal placeholder "(None)".
        label = "(None)" if token is None else token
        write("{:12} {:6d} {}\n".format(label, lineno, repr(text)))
@doctor.command()
@click.argument("filename", type=ledger_path)
def parse(filename):
    """Parse a ledger in debug mode.

    Run the parser on ledger FILENAME with debug mode active.
    """
    # Results are intentionally discarded; the point is the debug output
    # emitted by the parser itself.
    entries, errors, _ = parser.parse_file(filename, debug=True)
@doctor.command()
@click.argument("filename", type=ledger_path)
def roundtrip(filename):
    """Round-trip test on arbitrary ledger.

    Read transactions from ledger FILENAME, print them out, re-read
    them again and compare them. Both sets of parsed entries should be
    equal. Both printed files are output to disk, so you can also run
    diff on them yourself afterwards.
    """
    round1_filename = round2_filename = None
    try:
        logging.basicConfig(level=logging.INFO, format="%(levelname)-8s: %(message)s")
        logging.info("Read the entries")
        entries, errors, options_map = loader.load_file(filename)
        printer.print_errors(errors, file=sys.stderr)

        logging.info("Print them out to a file")
        basename, extension = os.path.splitext(filename)
        round1_filename = "".join([basename, ".roundtrip1", extension])
        with open(round1_filename, "w") as outfile:
            printer.print_entries(entries, file=outfile)

        logging.info("Read the entries from that file")
        # Note that we don't want to run any of the auto-generation here, but
        # parsing now returns incomplete objects and we assume idempotence on a
        # file that was output from the printer after having been processed, so
        # it shouldn't add anything new. That is, a processed file printed and
        # resolve when parsed again should contain the same entries, i.e.
        # nothing new should be generated.
        entries_roundtrip, errors, options_map = loader.load_file(round1_filename)

        # Print out the list of errors from parsing the results.
        if errors:
            print(",----------------------------------------------------------------------")
            printer.print_errors(errors, file=sys.stdout)
            print("`----------------------------------------------------------------------")

        logging.info("Print what you read to yet another file")
        round2_filename = "".join([basename, ".roundtrip2", extension])
        with open(round2_filename, "w") as outfile:
            printer.print_entries(entries_roundtrip, file=outfile)

        logging.info("Compare the original entries with the re-read ones")
        same, missing1, missing2 = compare.compare_entries(entries, entries_roundtrip)
        if same:
            logging.info("Entries are the same. Congratulations.")
        else:
            logging.error("Entries differ!")
            print()
            print("\n\nMissing from original:")
            # BUG FIX: iterate only the entries the comparison reported missing
            # (previously this looped over *all* entries, dumping the whole
            # ledger and never using missing1).
            for entry in missing1:
                print(entry)
                print(compare.hash_entry(entry))
                print(printer.format_entry(entry))
            print()
            print("\n\nMissing from round-trip:")
            for entry in missing2:
                print(entry)
                print(compare.hash_entry(entry))
                print(printer.format_entry(entry))
            print()
    finally:
        for rfilename in (round1_filename, round2_filename):
            # Guard against None: a failure before the temp file was created
            # would otherwise crash os.path.exists(None) with a TypeError.
            if rfilename and os.path.exists(rfilename):
                os.remove(rfilename)
@doctor.command()
@click.argument("filename", type=ledger_path)
@click.argument(
    "dirs", type=click.Path(resolve_path=True, exists=True, file_okay=False), nargs=-1
)
def directories(filename, dirs):
    """Validate a directory hierarchy against the ledger's account names.

    Read a ledger's list of account names and check that all the capitalized
    subdirectory names under the given roots match the account names.

    Args:
      filename: A string, the Beancount input filename.
      args: The rest of the arguments provided on the command-line, which in this
        case will be interpreted as the names of root directories to validate against
        the accounts in the given ledger.
    """
    # Only the entries are needed; errors and the options map are discarded.
    entries = loader.load_file(filename)[0]
    validate_directories(entries, dirs)
@doctor.command()
def list_options():
    """List available options."""
    # Delegates to the options module, which renders the options help text.
    print(options.list_options())
@doctor.command()
@click.argument("filename", type=ledger_path)
def print_options(filename):
    """List options parsed from a ledger."""
    # Only the options map is needed; entries and errors are discarded.
    options_map = loader.load_file(filename)[2]
    for key, value in sorted(options_map.items()):
        print(f"{key}: {value}")
@doctor.command()
@click.argument("filename", type=ledger_path)
@click.argument("location", type=FileLocation())
def context(filename, location):
    """Describe transaction context.

    The transaction is looked up in ledger FILENAME at LOCATION. The
    LOCATION argument is either a line number or a filename:lineno
    tuple to indicate a location in a ledger included from the main
    input file.
    """
    search_filename, lineno = location
    # A bare line number refers to the top-level input file.
    if search_filename is None:
        search_filename = filename

    # Load the input files.
    entries, errors, options_map = loader.load_file(filename)

    str_context = render_file_context(entries, options_map, search_filename, lineno)
    sys.stdout.write(str_context)
# Lightweight error-like record so arbitrary entries can be rendered with
# printer.print_errors() (Emacs-compatible source locations).
RenderError = collections.namedtuple("RenderError", "source message entry")
@doctor.command()
@click.argument("filename", type=ledger_path)
@click.argument("location_spec", metavar="[LINK|TAG|LOCATION|REGION]")
def linked(filename, location_spec):
    """List related transactions.

    List all transaction in ledger FILENAME linked to LINK or tagged
    with TAG, or linked to the one at LOCATION, or linked to any
    transaction in REGION.

    The LINK and TAG arguments must include the leading ^ or #
    characters. The LOCATION argument is either a line number or a
    filename:lineno tuple to indicate a location in a ledger file
    included from the main input file. The REGION argument is either a
    start:end line numbers tuple or a filename:start:end triplet to
    indicate a region in a ledger file included from the main input
    file.
    """
    # Load the input file.
    entries, errors, options_map = loader.load_file(filename)

    # Link name.
    if re.match(r"\^(.*)$", location_spec):
        search_filename = options_map["filename"]
        links = {location_spec[1:]}
        linked_entries = find_linked_entries(entries, links, False)

    # Tag name.
    elif re.match(r"#(.*)$", location_spec):
        search_filename = options_map["filename"]
        tag = location_spec[1:]
        linked_entries = find_tagged_entries(entries, tag)

    else:
        # Parse the argument as a line number or a
        # "<filename>:<lineno>:<lineno>" spec to pull context from, with
        # optional filename and optional last line number.
        #
        # If a filename is not provided, the ledger's top-level filename is used
        # (this is the common case). An explicit filename is used to get context
        # in included files.
        #
        # If a single line number is provided the closest transaction is
        # selected. If an interval of line numbers is provided, the list of all
        # transactions whose first line is inside the interval are selected.
        match = re.match(r"(\d+)(?::(\d+))?$", location_spec)
        if match:
            included_filename = None
            first_line, last_line = match.groups()
        else:
            match = re.match(r"(.+?):(\d+)(?::(\d+))?$", location_spec)
            if match:
                included_filename, first_line, last_line = match.groups()
            else:
                raise SystemExit("Invalid line number or link format for location.")
        search_filename = (
            os.path.abspath(included_filename)
            if included_filename
            else options_map["filename"]
        )
        lineno = int(first_line)
        if last_line is None:
            # Find the closest entry.
            closest_entry = data.find_closest(entries, search_filename, lineno)
            selected_entries = [closest_entry]

            # Find its links.
            if closest_entry is None:
                raise SystemExit(
                    "No entry could be found before {}:{}".format(search_filename, lineno)
                )
            # Non-transaction directives carry no links.
            links = (
                closest_entry.links
                if isinstance(closest_entry, data.Transaction)
                else data.EMPTY_SET
            )
        else:
            # Find all the entries in the interval, following all links.
            last_lineno = int(last_line)
            links = set()
            selected_entries = []
            for entry in data.filter_txns(entries):
                if (
                    entry.meta["filename"] == search_filename
                    and lineno <= entry.meta["lineno"] <= last_lineno
                ):
                    links.update(entry.links)
                    selected_entries.append(entry)

        # Get the linked entries, or just the closest one, if no links.
        linked_entries = (
            find_linked_entries(entries, links, True) if links else selected_entries
        )

    render_mini_balances(linked_entries, options_map, None)
def resolve_region_to_entries(
    entries: data.Entries, filename: str, region: Tuple[str, int, int]
) -> data.Entries:
    """Resolve a filename and region to a list of entries.

    Args:
      entries: The full list of directives loaded from the ledger.
      filename: The top-level ledger filename, used when the region does not
        name a file explicitly.
      region: A (filename, first_lineno, last_lineno) triple; the filename
        component may be None.
    Returns:
      The transactions whose first line lies within the region.
    """
    # NOTE(review): the annotations previously read List[data.Entries]
    # (a list of lists); data.Entries is itself the list-of-directives type.
    search_filename, first_lineno, last_lineno = region
    if search_filename is None:
        search_filename = filename

    # Find all the entries in the region. (To be clear, this isn't like the
    # 'linked' command, none of the links are followed.)
    region_entries = [
        entry
        for entry in data.filter_txns(entries)
        if (
            entry.meta["filename"] == search_filename
            and first_lineno <= entry.meta["lineno"] <= last_lineno
        )
    ]
    return region_entries
@doctor.command()
@click.argument("filename", type=ledger_path)
@click.argument("region", type=FileRegion())
@click.option(
    "--conversion",
    type=click.Choice(["value", "cost"]),
    help="Convert balances output to market value or cost.",
)
def region(filename, region, conversion):
    """Print out a list of transactions within REGION and compute balances.

    The REGION argument is either a start:end line numbers tuple or a
    filename:start:end triplet to indicate a region in a ledger file
    included from the main input file.
    """
    entries, errors, options_map = loader.load_file(filename)
    region_entries = resolve_region_to_entries(entries, filename, region)
    # The price map is only needed when converting balances to market value.
    price_map = prices.build_price_map(entries) if conversion == "value" else None
    render_mini_balances(region_entries, options_map, conversion, price_map)
def render_mini_balances(entries, options_map, conversion=None, price_map=None):
    """Render a treeified list of the balances for the given transactions.

    Args:
      entries: A list of selected transactions to render.
      options_map: The parsed options.
      conversion: Conversion method string, None, 'value' or 'cost'.
      price_map: A price map from the original entries. If this isn't provided,
        the inventories are rendered directly. If it is, their contents are
        converted to market value.
    """
    # Render linked entries (in date order) as errors (for Emacs).
    errors = [RenderError(entry.meta, "", entry) for entry in entries]
    printer.print_errors(errors)

    # Print out balances.
    real_root = realization.realize(entries)
    dformat = options_map["dcontext"].build(alignment=Align.DOT, reserved=2)

    # TODO(blais): I always want to be able to convert at cost. We need
    # arguments capability.
    #
    # TODO(blais): Ideally this conversion inserts a new transactions to
    # 'Unrealized' to account for the difference between cost and market value.
    # Insert one and update the realization. Add an update() method to the
    # realization, given a transaction.
    acctypes = options.get_account_types(options_map)
    if conversion == "value":
        assert price_map is not None

        # Warning: Mutate the inventories in-place, converting them to market
        # value.
        balance_diff = inventory.Inventory()
        for real_account in realization.iter_children(real_root):
            balance_cost = real_account.balance.reduce(convert.get_cost)
            balance_value = real_account.balance.reduce(convert.get_value, price_map)
            real_account.balance = balance_value
            balance_diff.add_inventory(balance_cost)
            balance_diff.add_inventory(-balance_value)
        # Book the cost-vs-value difference against an Unrealized account so
        # the tree still balances after the in-place conversion.
        if not balance_diff.is_empty():
            account_unrealized = account.join(
                acctypes.income, options_map["account_unrealized_gains"]
            )
            unrealized = realization.get_or_create(real_root, account_unrealized)
            unrealized.balance.add_inventory(balance_diff)
    elif conversion == "cost":
        for real_account in realization.iter_children(real_root):
            real_account.balance = real_account.balance.reduce(convert.get_cost)

    realization.dump_balances(real_root, dformat, file=sys.stdout)

    # Print out net income change.
    net_income = inventory.Inventory()
    for real_node in realization.iter_children(real_root):
        if account_types.is_income_statement_account(real_node.account, acctypes):
            net_income.add_inventory(real_node.balance)

    print()
    print("Net Income: {}".format(-net_income))
def find_linked_entries(entries, links, follow_links: bool):
    """Return the transactions whose links intersect the given link set.

    Note that there is an option here: you can either just look at the links
    on the closest entry, or you can include the links of the linked
    transactions as well (transitively, until a fixed point is reached).
    Which one you want depends on how you use your links.
    """
    def matching(link_set):
        # Transactions whose own link set intersects 'link_set'.
        return [
            entry
            for entry in entries
            if (isinstance(entry, data.Transaction)
                and entry.links
                and entry.links & link_set)
        ]

    if not follow_links:
        return matching(links)

    # Grow the link set transitively until no new transactions are found.
    link_set = set(links)
    found = []
    while True:
        previous_count = len(found)
        found = matching(link_set)
        if len(found) == previous_count:
            break
        for entry in found:
            if entry.links:
                link_set.update(entry.links)
    return found
def find_tagged_entries(entries, tag):
    """Return the transactions in 'entries' carrying the given tag."""
    tagged = []
    for entry in entries:
        if isinstance(entry, data.Transaction) and entry.tags and tag in entry.tags:
            tagged.append(entry)
    return tagged
@doctor.command()
@click.argument("filename", type=ledger_path)
def missing_open(filename):
    """Print Open directives missing in FILENAME.
    This can be useful during demos in order to quickly generate all the
    required Open directives without having to type them manually.
    """
    entries, errors, options_map = loader.load_file(filename)
    # Get accounts usage and open directives.
    first_use_map, _ = getters.get_accounts_use_map(entries)
    open_close_map = getters.get_account_open_close(entries)
    # Emit an Open directive, dated at first use, for every account that is
    # used but never opened.  The loop variable is named 'account_' to avoid
    # shadowing the 'account' module used elsewhere in this file.
    new_entries = []
    for account_, first_use_date in first_use_map.items():
        if account_ not in open_close_map:
            new_entries.append(
                data.Open(
                    data.new_metadata(filename, 0), first_use_date, account_, None, None
                )
            )
    dcontext = options_map["dcontext"]
    printer.print_entries(data.sorted(new_entries), dcontext)
@doctor.command()
@click.argument("filename", type=ledger_path)
def display_context(filename):
    """Print the precision inferred from the parsed numbers in the input file."""
    # Load the ledger; only the options map is needed here.
    _entries, _errors, options_map = loader.load_file(filename)
    sys.stdout.write(str(options_map["dcontext"]))
# Expose the click command group under the conventional 'main' entry-point name.
main = doctor
if __name__ == "__main__":
    main()
|
migrations | 0117_alter_user_preferred_language | # Generated by Django 3.2.5 on 2021-11-15 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines the full 'choices' list for
    # User.preferred_language (the field remains an optional CharField).
    dependencies = [
        ("bookwyrm", "0116_auto_20211114_1734"),
    ]
    operations = [
        migrations.AlterField(
            model_name="user",
            name="preferred_language",
            field=models.CharField(
                blank=True,
                choices=[
                    ("en-us", "English"),
                    ("de-de", "Deutsch (German)"),
                    ("es-es", "Español (Spanish)"),
                    ("fr-fr", "Français (French)"),
                    ("lt-lt", "lietuvių (Lithuanian)"),
                    ("pt-br", "Português - Brasil (Brazilian Portuguese)"),
                    ("zh-hans", "简体中文 (Simplified Chinese)"),
                    ("zh-hant", "繁體中文 (Traditional Chinese)"),
                ],
                max_length=255,
                null=True,
            ),
        ),
    ]
|
extractor | viewlift | from __future__ import unicode_literals
import json
import re
from ..compat import compat_HTTPError
from ..utils import ExtractorError, int_or_none, parse_age_limit
from .common import InfoExtractor
class ViewLiftBaseIE(InfoExtractor):
    """Shared plumbing for ViewLift-powered sites: API base URL, the domain
    pattern, site-name aliases, and per-site authorization-token handling."""

    _API_BASE = "https://prod-api.viewlift.com/"
    _DOMAINS_REGEX = r"(?:(?:main\.)?snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm|failarmy|ftfnext|lnppass\.legapallacanestro|moviespree|app\.myoutdoortv|neoufitness|pflmma|theidentitytb)\.com|(?:hoichoi|app\.horseandcountry|kronon|marquee|supercrosslive)\.tv"
    _SITE_MAP = {
        "ftfnext": "lax",
        "funnyforfree": "snagfilms",
        "hoichoi": "hoichoitv",
        "kiddovid": "snagfilms",
        "laxsportsnetwork": "lax",
        "legapallacanestro": "lnp",
        "marquee": "marquee-tv",
        "monumentalsportsnetwork": "monumental-network",
        "moviespree": "bingeflix",
        "pflmma": "pfl",
        "snagxtreme": "snagfilms",
        "theidentitytb": "tampabay",
        "vayafilm": "snagfilms",
    }
    # Authorization tokens, cached per site for the lifetime of the process.
    _TOKENS = {}

    def _call_api(self, site, path, video_id, query):
        """Perform an authenticated API request, obtaining (and caching)
        the site's authorization token first when necessary."""
        auth_token = self._TOKENS.get(site)
        if not auth_token:
            token_query = {"site": site}
            email, password = self._get_login_info(netrc_machine=site)
            if email:
                # Credentialed login.
                payload = json.dumps({"email": email, "password": password}).encode()
                token_resp = self._download_json(
                    self._API_BASE + "identity/signin",
                    video_id,
                    "Logging in",
                    query=token_query,
                    data=payload,
                )
            else:
                # Anonymous access.
                token_resp = self._download_json(
                    self._API_BASE + "identity/anonymous-token",
                    video_id,
                    "Downloading authorization token",
                    query=token_query,
                )
            auth_token = token_resp["authorizationToken"]
            self._TOKENS[site] = auth_token
        return self._download_json(
            self._API_BASE + path,
            video_id,
            headers={"Authorization": auth_token},
            query=query,
        )
class ViewLiftEmbedIE(ViewLiftBaseIE):
    """Extractor for ViewLift embed-player URLs (filmId-based)."""

    IE_NAME = "viewlift:embed"
    _VALID_URL = (
        r"https?://(?:(?:www|embed)\.)?(?P<domain>%s)/embed/player\?.*\bfilmId=(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})"
        % ViewLiftBaseIE._DOMAINS_REGEX
    )
    _TESTS = [
        {
            "url": "http://embed.snagfilms.com/embed/player?filmId=74849a00-85a9-11e1-9660-123139220831&w=500",
            "md5": "2924e9215c6eff7a55ed35b72276bd93",
            "info_dict": {
                "id": "74849a00-85a9-11e1-9660-123139220831",
                "ext": "mp4",
                "title": "#whilewewatch",
                "description": "md5:b542bef32a6f657dadd0df06e26fb0c8",
                "timestamp": 1334350096,
                "upload_date": "20120413",
            },
        },
        {
            # invalid labels, 360p is better that 480p
            "url": "http://www.snagfilms.com/embed/player?filmId=17ca0950-a74a-11e0-a92a-0026bb61d036",
            "md5": "882fca19b9eb27ef865efeeaed376a48",
            "info_dict": {
                "id": "17ca0950-a74a-11e0-a92a-0026bb61d036",
                "ext": "mp4",
                "title": "Life in Limbo",
            },
            "skip": "The video does not exist",
        },
        {
            "url": "http://www.snagfilms.com/embed/player?filmId=0000014c-de2f-d5d6-abcf-ffef58af0017",
            "only_matching": True,
        },
    ]

    @staticmethod
    def _extract_url(webpage):
        """Return the URL of an embedded ViewLift player iframe, if present."""
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:embed\.)?(?:%s)/embed/player.+?)\1'
            % ViewLiftBaseIE._DOMAINS_REGEX,
            webpage,
        )
        if mobj:
            return mobj.group("url")

    def _real_extract(self, url):
        domain, film_id = re.match(self._VALID_URL, url).groups()
        # Map the second-level domain to the internal site identifier.
        site = domain.split(".")[-2]
        if site in self._SITE_MAP:
            site = self._SITE_MAP[site]
        try:
            content_data = self._call_api(
                site, "entitlement/video/status", film_id, {"id": film_id}
            )["video"]
        except ExtractorError as e:
            # A 403 carries a JSON body explaining why playback is refused.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                error_message = self._parse_json(e.cause.read().decode(), film_id).get(
                    "errorMessage"
                )
                if (
                    error_message
                    == "User does not have a valid subscription or has not purchased this content."
                ):
                    self.raise_login_required()
                raise ExtractorError(error_message, expected=True)
            raise
        gist = content_data["gist"]
        title = gist["title"]
        video_assets = content_data["streamingInfo"]["videoAssets"]
        formats = []
        mpeg_video_assets = video_assets.get("mpeg") or []
        for video_asset in mpeg_video_assets:
            video_asset_url = video_asset.get("url")
            # Bug fix: skip assets that lack a URL.  The previous check tested
            # the asset dict itself (always truthy inside this loop), which let
            # entries with "url": None into the formats list.
            if not video_asset_url:
                continue
            bitrate = int_or_none(video_asset.get("bitrate"))
            height = int_or_none(
                self._search_regex(
                    r"^_?(\d+)[pP]$",
                    video_asset.get("renditionValue"),
                    "height",
                    default=None,
                )
            )
            formats.append(
                {
                    "url": video_asset_url,
                    "format_id": "http%s" % ("-%d" % bitrate if bitrate else ""),
                    "tbr": bitrate,
                    "height": height,
                    "vcodec": video_asset.get("codec"),
                }
            )
        hls_url = video_assets.get("hls")
        if hls_url:
            formats.extend(
                self._extract_m3u8_formats(
                    hls_url, film_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False
                )
            )
        self._sort_formats(formats, ("height", "tbr", "format_id"))
        info = {
            "id": film_id,
            "title": title,
            "description": gist.get("description"),
            "thumbnail": gist.get("videoImageUrl"),
            "duration": int_or_none(gist.get("runtime")),
            "age_limit": parse_age_limit(content_data.get("parentalRating")),
            # publishDate is in milliseconds; scale down to seconds.
            "timestamp": int_or_none(gist.get("publishDate"), 1000),
            "formats": formats,
        }
        for k in ("categories", "tags"):
            info[k] = [v["title"] for v in content_data.get(k, []) if v.get("title")]
        return info
class ViewLiftIE(ViewLiftBaseIE):
    """Extractor for regular ViewLift site pages: resolves the page path to a
    film id, then hands playback off to ViewLiftEmbedIE (url_transparent)."""
    IE_NAME = "viewlift"
    _VALID_URL = (
        r"https?://(?:www\.)?(?P<domain>%s)(?P<path>(?:/(?:films/title|show|(?:news/)?videos?|watch))?/(?P<id>[^?#]+))"
        % ViewLiftBaseIE._DOMAINS_REGEX
    )
    _TESTS = [
        {
            "url": "http://www.snagfilms.com/films/title/lost_for_life",
            "md5": "19844f897b35af219773fd63bdec2942",
            "info_dict": {
                "id": "0000014c-de2f-d5d6-abcf-ffef58af0017",
                "display_id": "lost_for_life",
                "ext": "mp4",
                "title": "Lost for Life",
                "description": "md5:ea10b5a50405ae1f7b5269a6ec594102",
                "thumbnail": r"re:^https?://.*\.jpg",
                "duration": 4489,
                "categories": "mincount:3",
                "age_limit": 14,
                "upload_date": "20150421",
                "timestamp": 1429656820,
            },
        },
        {
            "url": "http://www.snagfilms.com/show/the_world_cut_project/india",
            "md5": "e6292e5b837642bbda82d7f8bf3fbdfd",
            "info_dict": {
                "id": "00000145-d75c-d96e-a9c7-ff5c67b20000",
                "display_id": "the_world_cut_project/india",
                "ext": "mp4",
                "title": "India",
                "description": "md5:5c168c5a8f4719c146aad2e0dfac6f5f",
                "thumbnail": r"re:^https?://.*\.jpg",
                "duration": 979,
                "timestamp": 1399478279,
                "upload_date": "20140507",
            },
        },
        {
            "url": "http://main.snagfilms.com/augie_alone/s_2_ep_12_love",
            "info_dict": {
                "id": "00000148-7b53-de26-a9fb-fbf306f70020",
                "display_id": "augie_alone/s_2_ep_12_love",
                "ext": "mp4",
                "title": "S. 2 Ep. 12 - Love",
                "description": "Augie finds love.",
                "thumbnail": r"re:^https?://.*\.jpg",
                "duration": 107,
                "upload_date": "20141012",
                "timestamp": 1413129540,
                "age_limit": 17,
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "http://main.snagfilms.com/films/title/the_freebie",
            "only_matching": True,
        },
        {
            # Film is not playable in your area.
            "url": "http://www.snagfilms.com/films/title/inside_mecca",
            "only_matching": True,
        },
        {
            # Film is not available.
            "url": "http://www.snagfilms.com/show/augie_alone/flirting",
            "only_matching": True,
        },
        {
            "url": "http://www.winnersview.com/videos/the-good-son",
            "only_matching": True,
        },
        {
            # Was once Kaltura embed
            "url": "https://www.monumentalsportsnetwork.com/videos/john-carlson-postgame-2-25-15",
            "only_matching": True,
        },
        {
            "url": "https://www.marquee.tv/watch/sadlerswells-sacredmonsters",
            "only_matching": True,
        },
    ]
    @classmethod
    def suitable(cls, url):
        # Embed-player URLs are handled by ViewLiftEmbedIE, never by this IE.
        return (
            False
            if ViewLiftEmbedIE.suitable(url)
            else super(ViewLiftIE, cls).suitable(url)
        )
    def _real_extract(self, url):
        domain, path, display_id = re.match(self._VALID_URL, url).groups()
        # Map the second-level domain to the internal site identifier.
        site = domain.split(".")[-2]
        if site in self._SITE_MAP:
            site = self._SITE_MAP[site]
        modules = self._call_api(
            site,
            "content/pages",
            display_id,
            {
                "includeContent": "true",
                "moduleOffset": 1,
                "path": path,
                "site": site,
            },
        )["modules"]
        # Locate the video-detail module and pull the film id out of it;
        # raises StopIteration if no such module is present.
        film_id = next(
            m["contentData"][0]["gist"]["id"]
            for m in modules
            if m.get("moduleType") == "VideoDetailModule"
        )
        return {
            "_type": "url_transparent",
            "url": "http://%s/embed/player?filmId=%s" % (domain, film_id),
            "id": film_id,
            "display_id": display_id,
            "ie_key": "ViewLiftEmbed",
        }
|
borg | version | import re
def parse_version(version):
    """Parse a setuptools_scm version string into a comparison-friendly tuple.

    Only final releases and alpha ('a'), beta ('b') and release candidate
    ('rc') prereleases are understood; anything after that part of the string
    is ignored.  The result is a tuple of integers.  A final version ends with
    -1; a prerelease ends with a smaller negative marker (-4 alpha, -3 beta,
    -2 rc) followed by the prerelease number, so plain tuple comparison orders
    prereleases correctly before their final release.

    This version format is part of the remote protocol, don't change it in
    breaking ways.

    :raises ValueError: if the string does not start with x.y.z
    """
    pattern = r"""
    (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)   # version, e.g. 1.2.33
    (?P<prerelease>(?P<ptype>a|b|rc)(?P<pnum>\d+))?  # optional prerelease, e.g. a1 or b2 or rc33
    """
    match = re.match(pattern, version, re.VERBOSE)
    if match is None:
        raise ValueError("Invalid version string %s" % version)
    parts = [
        int(match.group("major")),
        int(match.group("minor")),
        int(match.group("patch")),
    ]
    if match.lastgroup == "prerelease":
        marker = {"a": -4, "b": -3, "rc": -2}[match.group("ptype")]
        parts += [marker, int(match.group("pnum"))]
    else:
        parts.append(-1)
    return tuple(parts)
def format_version(version):
    """A reverse for parse_version (obviously without the dropped information).

    Walks the tuple: non-negative parts become dotted components; -1 marks a
    final release (stop); any smaller marker appends the prerelease suffix
    (e.g. 'rc3') to the last component and stops.
    """
    parts = []
    index = 0
    while True:
        value = version[index]
        if value >= 0:
            parts.append(str(value))
            index += 1
        elif value == -1:
            # Final release: nothing more to emit.
            break
        else:
            # Prerelease marker followed by the prerelease number.
            suffix = {-2: "rc", -3: "b", -4: "a"}[value]
            parts[-1] = parts[-1] + suffix + str(version[index + 1])
            break
    return ".".join(parts)
|
gui | builderhacks | # -*- coding: utf-8 -*-
# This file is part of MyPaint.
# Copyright (C) 2013-2018 by the MyPaint Development Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Hacks for loading stuff from GtkBuilder files."""
## Imports
from __future__ import division, print_function
import lib.xml
from lib.gibindings import Gtk
## Public functions
def add_objects_from_template_string(builder, buffer_, object_ids, params):
    """Templatizes, parses, merges, and returns objects from a Builder UI-def

    Wraps `Gtk.Builder.add_objects_from_string()`: both `buffer_` and every
    element of `object_ids` are first expanded with `str.format()` using
    `params`.  Each expansion must actually change the string, otherwise a
    ValueError is raised — the templating exists precisely to produce a
    distinct XML buffer and distinct object IDs per call.

    :param builder: a Gtk.Buider
    :param buffer_: the string to templatize then parse
    :param object_ids: list of object names to build (after templatizing)
    :param params: dict of template params
    :returns: a list of constructed objects

    Template values are escaped with `lib.xml.escape()` before substitution,
    so `params` is limited to fairly simple dicts.
    """
    formatted_ids = []
    for object_id in object_ids:
        expanded = object_id.format(**params)
        if object_id == expanded:
            raise ValueError("object_id %s unchanged after .format()ing" % object_id)
        formatted_ids.append(expanded)
    escaped = {name: lib.xml.escape(value) for name, value in params.items()}
    expanded_buffer = buffer_.format(**escaped)
    if expanded_buffer == buffer_:
        raise ValueError("buffer_ unchanged after .format()ing")
    objects = []
    if builder.add_objects_from_string(expanded_buffer, formatted_ids):
        for object_id in formatted_ids:
            obj = builder.get_object(object_id)
            assert obj is not None
            objects.append(obj)
    return objects
## Module testing
# GtkBuilder XML fragment used by _test().  The "{id}" and "{label}"
# placeholders are filled in via str.format() before parsing; the
# "never_instantiated" label exists to verify that only requested
# object IDs get built (see the comment in _test()).
_TEST_TEMPLATE = """
<interface>
<object class="GtkLabel" id="never_instantiated">
<property name="label">This should never be instantiated</property>
</object>
<object class="GtkButton" id="button_{id}">
<property name="label">{label}</property>
<signal name="clicked" handler="button_{id}_clicked"/>
</object>
</interface>
"""
def _test():
    """Interactive module test function"""
    import os
    import sys

    button_box = Gtk.VBox()
    builder = Gtk.Builder()

    # Handlers can find out about their template values by parsing their
    # name (using the GtkBuildable interface). Alternatively, you can set
    # up private attributes in the instantiation loop.
    def _on_button_clicked(widget):
        name = Gtk.Buildable.get_name(widget)
        if isinstance(name, bytes):
            name = name.decode("utf-8")
        print("Clicked: id=%r" % (name,))
        print(" i=%r" % (widget._i,))

    # Unicode is supported in IDs and template values.
    # The XML template may be plain ASCII since escape() is used when
    # filling it.
    object_ids = ["button_{id}"]
    words = ["à", "chacun", "son", "goût"]
    for word in words:
        params = {"id": word, "label": word.upper()}
        widgets = add_objects_from_template_string(
            builder, _TEST_TEMPLATE, object_ids, params
        )
        for widget in widgets:
            widget.connect("clicked", _on_button_clicked)
            button_box.pack_start(widget, True, True, 0)
            widget._i = word
    # The label should never be instantiated by this code. In fact, only
    # the four buttons should.
    for obj in builder.get_objects():
        assert isinstance(obj, Gtk.Button)
    # Remainder of the demo code
    window = Gtk.Window()
    window.add(button_box)
    window.set_title(os.path.basename(sys.argv[0]))
    window.connect("destroy", lambda *a: Gtk.main_quit())
    window.set_size_request(250, 200)
    window.show_all()
    Gtk.main()
# Run the interactive demo when executed as a script.
if __name__ == "__main__":
    _test()
|
examples | pyqt_time_f | #!/usr/bin/env python
#
# Copyright 2011,2012,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import sys
from gnuradio import blocks, gr
try:
import sip
from gnuradio import qtgui
from PyQt5 import Qt, QtWidgets
except ImportError:
sys.stderr.write("Error: Program requires PyQt5 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class dialog_box(QtWidgets.QWidget):
    """Top-level demo window: lays out the sink display widget next to the
    control panel in a single left-to-right row."""
    def __init__(self, display, control):
        QtWidgets.QWidget.__init__(self, None)
        self.setWindowTitle("PyQt Test GUI")
        # Stretch factor 1 lets the display absorb any extra space.
        self.boxlayout = QtWidgets.QBoxLayout(QtWidgets.QBoxLayout.LeftToRight, self)
        self.boxlayout.addWidget(display, 1)
        self.boxlayout.addWidget(control)
        self.resize(800, 500)
class control_box(QtWidgets.QWidget):
    """Form panel with line edits that control the frequency and amplitude
    of the two attached signal sources (set via attach_signal1/2)."""
    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.setWindowTitle("Control Panel")
        self.setToolTip("Control the signals")
        QtWidgets.QToolTip.setFont(Qt.QFont("OldEnglish", 10))
        self.layout = QtWidgets.QFormLayout(self)
        # Control the first signal
        self.freq1Edit = QtWidgets.QLineEdit(self)
        self.freq1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
        self.freq1Edit.editingFinished.connect(self.freq1EditText)
        self.amp1Edit = QtWidgets.QLineEdit(self)
        self.amp1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
        self.amp1Edit.editingFinished.connect(self.amp1EditText)
        # Control the second signal
        self.freq2Edit = QtWidgets.QLineEdit(self)
        self.freq2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
        self.freq2Edit.editingFinished.connect(self.freq2EditText)
        self.amp2Edit = QtWidgets.QLineEdit(self)
        self.amp2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
        self.amp2Edit.editingFinished.connect(self.amp2EditText)
        self.quit = QtWidgets.QPushButton("Close", self)
        self.quit.setMinimumWidth(100)
        self.layout.addWidget(self.quit)
        self.quit.clicked.connect(QtWidgets.qApp.quit)
    def attach_signal1(self, signal):
        """Bind source 1 and seed the edits with its current settings."""
        self.signal1 = signal
        self.freq1Edit.setText(("{0}").format(self.signal1.frequency()))
        self.amp1Edit.setText(("{0}").format(self.signal1.amplitude()))
    def attach_signal2(self, signal):
        """Bind source 2 and seed the edits with its current settings."""
        self.signal2 = signal
        self.freq2Edit.setText(("{0}").format(self.signal2.frequency()))
        self.amp2Edit.setText(("{0}").format(self.signal2.amplitude()))
    def freq1EditText(self):
        """Apply the edited frequency to source 1 (ignore non-numeric input)."""
        try:
            newfreq = float(self.freq1Edit.text())
            self.signal1.set_frequency(newfreq)
        except ValueError:
            print("Bad frequency value entered")
    def amp1EditText(self):
        """Apply the edited amplitude to source 1 (ignore non-numeric input)."""
        try:
            newamp = float(self.amp1Edit.text())
            self.signal1.set_amplitude(newamp)
        except ValueError:
            print("Bad amplitude value entered")
    def freq2EditText(self):
        """Apply the edited frequency to source 2 (ignore non-numeric input)."""
        try:
            newfreq = float(self.freq2Edit.text())
            self.signal2.set_frequency(newfreq)
        except ValueError:
            print("Bad frequency value entered")
    def amp2EditText(self):
        """Apply the edited amplitude to source 2 (ignore non-numeric input)."""
        try:
            newamp = float(self.amp2Edit.text())
            self.signal2.set_amplitude(newamp)
        except ValueError:
            print("Bad amplitude value entered")
class my_top_block(gr.top_block):
    """Flowgraph: two sine sources summed, throttled, mixed with Gaussian
    noise, and displayed in a three-trace qtgui time sink embedded in a
    PyQt window together with the control panel."""
    def __init__(self):
        gr.top_block.__init__(self)
        Rs = 8000      # sample rate
        f1 = 100       # source 1 frequency, Hz
        f2 = 200       # source 2 frequency, Hz
        npts = 2048    # points per time-sink frame
        self.qapp = QtWidgets.QApplication(sys.argv)
        src1 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
        src2 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
        src = blocks.add_ff()
        # Throttle paces the flowgraph since no hardware sets the rate.
        thr = blocks.throttle(gr.sizeof_float, 100 * npts)
        noise = analog.noise_source_f(analog.GR_GAUSSIAN, 0.001)
        add = blocks.add_ff()
        self.snk1 = qtgui.time_sink_f(npts, Rs, "Complex Time Example", 3, None)
        # Trace 0: noisy sum; traces 1 and 2: the raw sources.
        self.connect(src1, (src, 0))
        self.connect(src2, (src, 1))
        self.connect(src, thr, (add, 0))
        self.connect(noise, (add, 1))
        self.connect(add, self.snk1)
        self.connect(src1, (self.snk1, 1))
        self.connect(src2, (self.snk1, 2))
        self.ctrl_win = control_box()
        self.ctrl_win.attach_signal1(src1)
        self.ctrl_win.attach_signal2(src2)
        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.qwidget()
        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt5.QtWidgets.QWidget
        pyWin = sip.wrapinstance(pyQt, QtWidgets.QWidget)
        # Example of using signal/slot to set the title of a curve
        # FIXME: update for Qt5
        # pyWin.setLineLabel.connect(pyWin.setLineLabel)
        # pyWin.emit(QtCore.SIGNAL("setLineLabel(int, QString)"), 0, "Re{sum}")
        self.snk1.set_line_label(0, "Re{sum}")
        self.snk1.set_line_label(1, "src1")
        self.snk1.set_line_label(2, "src2")
        # Can also set the color of a curve
        # self.snk1.set_color(5, "blue")
        # pyWin.show()
        self.main_box = dialog_box(pyWin, self.ctrl_win)
        self.main_box.show()
# Script entry point: build the flowgraph, start streaming, and hand control
# to the Qt event loop until the window is closed.
if __name__ == "__main__":
    tb = my_top_block()
    tb.start()
    tb.qapp.exec_()
    tb.stop()
|
extractor | xboxclips | # coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_parse_qs, compat_urllib_parse_urlparse
from ..utils import int_or_none, month_by_abbreviation, parse_filesize
from .common import InfoExtractor
class XboxClipsIE(InfoExtractor):
    """Extractor for Xbox game clips hosted on xboxclips.com / gameclips.io."""
    _VALID_URL = r"https?://(?:www\.)?(?:xboxclips\.com|gameclips\.io)/(?:video\.php\?.*vid=|[^/]+/)(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})"
    _TESTS = [
        {
            "url": "http://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325",
            "md5": "fbe1ec805e920aeb8eced3c3e657df5d",
            "info_dict": {
                "id": "074a69a9-5faf-46aa-b93b-9909c1720325",
                "ext": "mp4",
                "title": "iAbdulElah playing Titanfall",
                "filesize_approx": 26800000,
                "upload_date": "20140807",
                "duration": 56,
            },
        },
        {
            "url": "https://gameclips.io/iAbdulElah/074a69a9-5faf-46aa-b93b-9909c1720325",
            "only_matching": True,
        },
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Legacy xboxclips.com URLs carry gamertag/vid as query parameters;
        # normalize them to the gameclips.io path layout before downloading.
        if "/video.php" in url:
            qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
            url = "https://gameclips.io/%s/%s" % (qs["gamertag"][0], qs["vid"][0])
        webpage = self._download_webpage(url, video_id)
        # The clip itself is exposed as a plain HTML5 <video> element.
        info = self._parse_html5_media_entries(url, webpage, video_id)[0]
        title = self._html_search_meta(["og:title", "twitter:title"], webpage)
        upload_date = None
        # Page shows e.g. ">Recorded: 07-Aug-2014"; convert to YYYYMMDD.
        mobj = re.search(
            r">Recorded: (\d{2})-(Jan|Feb|Mar|Apr|May|Ju[nl]|Aug|Sep|Oct|Nov|Dec)-(\d{4})",
            webpage,
        )
        if mobj:
            upload_date = "%s%.2d%s" % (
                mobj.group(3),
                month_by_abbreviation(mobj.group(2)),
                mobj.group(1),
            )
        # Size/duration/views are scraped best-effort (fatal=False).
        filesize = parse_filesize(
            self._html_search_regex(
                r">Size: ([^<]+)<", webpage, "file size", fatal=False
            )
        )
        duration = int_or_none(
            self._html_search_regex(
                r">Duration: (\d+) Seconds<", webpage, "duration", fatal=False
            )
        )
        view_count = int_or_none(
            self._html_search_regex(
                r">Views: (\d+)<", webpage, "view count", fatal=False
            )
        )
        info.update(
            {
                "id": video_id,
                "title": title,
                "upload_date": upload_date,
                "filesize_approx": filesize,
                "duration": duration,
                "view_count": view_count,
            }
        )
        return info
|
extractor | teamcoco | # coding: utf-8
from __future__ import unicode_literals
import json
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
mimetype2ext,
parse_duration,
parse_iso8601,
qualities,
)
from .turner import TurnerBaseIE
class TeamcocoIE(TurnerBaseIE):
    """Extractor for teamcoco.com videos.  Resolves the page slug through the
    site's GraphQL API, then pulls formats either from Turner's NGTV service
    (when a turnerMediaId is present) or from the site's own sources."""
    _VALID_URL = r"https?://(?:\w+\.)?teamcoco\.com/(?P<id>([^/]+/)*[^/?#]+)"
    _TESTS = [
        {
            "url": "http://teamcoco.com/video/mary-kay-remote",
            "md5": "55d532f81992f5c92046ad02fec34d7d",
            "info_dict": {
                "id": "80187",
                "ext": "mp4",
                "title": "Conan Becomes A Mary Kay Beauty Consultant",
                "description": "Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.",
                "duration": 495.0,
                "upload_date": "20140402",
                "timestamp": 1396407600,
            },
        },
        {
            "url": "http://teamcoco.com/video/louis-ck-interview-george-w-bush",
            "md5": "cde9ba0fa3506f5f017ce11ead928f9a",
            "info_dict": {
                "id": "19705",
                "ext": "mp4",
                "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
                "title": "Louis C.K. Interview Pt. 1 11/3/11",
                "duration": 288,
                "upload_date": "20111104",
                "timestamp": 1320405840,
            },
        },
        {
            "url": "http://teamcoco.com/video/timothy-olyphant-drinking-whiskey",
            "info_dict": {
                "id": "88748",
                "ext": "mp4",
                "title": "Timothy Olyphant Raises A Toast To “Justified”",
                "description": "md5:15501f23f020e793aeca761205e42c24",
                "upload_date": "20150415",
                "timestamp": 1429088400,
            },
            "params": {
                "skip_download": True, # m3u8 downloads
            },
        },
        {
            "url": "http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9",
            "info_dict": {
                "id": "89341",
                "ext": "mp4",
                "title": "Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett",
                "description": "Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett",
            },
            "params": {
                "skip_download": True, # m3u8 downloads
            },
            "skip": "This video is no longer available.",
        },
        {
            "url": "http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18",
            "only_matching": True,
        },
        {
            "url": "http://teamcoco.com/italy/conan-jordan-schlansky-hit-the-streets-of-florence",
            "only_matching": True,
        },
        {
            "url": "http://teamcoco.com/haiti/conan-s-haitian-history-lesson",
            "only_matching": True,
        },
        {
            "url": "http://teamcoco.com/israel/conan-hits-the-streets-beaches-of-tel-aviv",
            "only_matching": True,
        },
        {
            "url": "https://conan25.teamcoco.com/video/ice-cube-kevin-hart-conan-share-lyft",
            "only_matching": True,
        },
    ]
    # GraphQL field selection for a video record; spliced into the queries
    # below, so the text must stay valid GraphQL.
    _RECORD_TEMPL = """id
title
teaser
publishOn
thumb {
preview
}
tags {
name
}
duration
turnerMediaId
turnerMediaAuthToken"""
    def _graphql_call(self, query_template, object_type, object_id):
        """POST a GraphQL query built from 'query_template' and return the
        'find<object_type>' payload of the response."""
        find_object = "find" + object_type
        return self._download_json(
            "https://teamcoco.com/graphql",
            object_id,
            data=json.dumps(
                {"query": query_template % (find_object, object_id)}
            ).encode(),
            headers={
                "Content-Type": "application/json",
            },
        )["data"][find_object]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        # Resolve the slug: the response is either a record (video), a page
        # wrapping a child id, or a not-found status.
        response = self._graphql_call(
            """{
%%s(slug: "%%s") {
... on RecordSlug {
record {
%s
}
}
... on PageSlug {
child {
id
}
}
... on NotFoundSlug {
status
}
}
}"""
            % self._RECORD_TEMPL,
            "Slug",
            display_id,
        )
        if response.get("status"):
            raise ExtractorError("This video is no longer available.", expected=True)
        child = response.get("child")
        if child:
            # Page slug: fetch the actual video record by its child id.
            record = self._graphql_call(
                """{
%%s(id: "%%s") {
... on Video {
%s
}
}
}"""
                % self._RECORD_TEMPL,
                "Record",
                child["id"],
            )
        else:
            record = response["record"]
        video_id = record["id"]
        info = {
            "id": video_id,
            "display_id": display_id,
            "title": record["title"],
            "thumbnail": record.get("thumb", {}).get("preview"),
            "description": record.get("teaser"),
            "duration": parse_duration(record.get("duration")),
            "timestamp": parse_iso8601(record.get("publishOn")),
        }
        media_id = record.get("turnerMediaId")
        if media_id:
            # Turner-hosted media: delegate format extraction to NGTV.
            self._initialize_geo_bypass(
                {
                    "countries": ["US"],
                }
            )
            info.update(
                self._extract_ngtv_info(
                    media_id,
                    {
                        "accessToken": record["turnerMediaAuthToken"],
                        "accessTokenType": "jws",
                    },
                )
            )
        else:
            # Self-hosted media: enumerate the site's own source list.
            video_sources = self._download_json(
                "https://teamcoco.com/_truman/d/" + video_id, video_id
            )["meta"]["src"]
            if isinstance(video_sources, dict):
                video_sources = video_sources.values()
            formats = []
            get_quality = qualities(["low", "sd", "hd", "uhd"])
            for src in video_sources:
                if not isinstance(src, dict):
                    continue
                src_url = src.get("src")
                if not src_url:
                    continue
                format_id = src.get("label")
                ext = determine_ext(src_url, mimetype2ext(src.get("type")))
                if format_id == "hls" or ext == "m3u8":
                    # compat_urllib_parse.urljoin does not work here
                    if src_url.startswith("/"):
                        src_url = "http://ht.cdn.turner.com/tbs/big/teamcoco" + src_url
                    formats.extend(
                        self._extract_m3u8_formats(
                            src_url, video_id, "mp4", m3u8_id=format_id, fatal=False
                        )
                    )
                else:
                    if src_url.startswith("/mp4:protected/"):
                        # TODO Correct extraction for these files
                        continue
                    tbr = int_or_none(
                        self._search_regex(r"(\d+)k\.mp4", src_url, "tbr", default=None)
                    )
                    formats.append(
                        {
                            "url": src_url,
                            "ext": ext,
                            "tbr": tbr,
                            "format_id": format_id,
                            "quality": get_quality(format_id),
                        }
                    )
            self._sort_formats(formats)
            info["formats"] = formats
        return info
|
saveddata | queries | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import sys
import eos.config
from eos.db import saveddata_session, sd_lock
from eos.db.saveddata.fit import fits_table, projectedFits_table
from eos.db.util import processEager, processWhere
from eos.saveddata.character import Character
from eos.saveddata.damagePattern import DamagePattern
from eos.saveddata.fit import Fit, FitLite
from eos.saveddata.implantSet import ImplantSet
from eos.saveddata.miscData import MiscData
from eos.saveddata.module import Module
from eos.saveddata.override import Override
from eos.saveddata.price import Price
from eos.saveddata.ssocharacter import SsoCharacter
from eos.saveddata.targetProfile import TargetProfile
from eos.saveddata.user import User
from sqlalchemy import desc, func, select
from sqlalchemy.sql import and_
# Saved-data query caching.  Behaviour is selected by eos.config.saveddataCache:
#   True      -> use the weakref-based cache defined below
#   callable  -> delegate to the externally supplied implementation
#   otherwise -> install transparent no-op stand-ins (every query hits the DB)
configVal = getattr(eos.config, "saveddataCache", None)
if configVal is True:
    import weakref
    # Per-entity caches: itemCache maps entity class -> {ID: live object}
    # (weak values, so cached rows die with their last strong reference);
    # queryCache maps entity class -> per-function query-result cache.
    itemCache = {}
    queryCache = {}
    # NOTE(review): parameter 'type' shadows the builtin; kept for
    # backward compatibility with existing callers.
    def cachedQuery(type, amount, *keywords):
        itemCache[type] = localItemCache = weakref.WeakValueDictionary()
        queryCache[type] = typeQueryCache = {}
        def deco(function):
            localQueryCache = typeQueryCache[function] = {}
            def setCache(cacheKey, args, kwargs):
                # Run the real query and remember which item IDs it returned;
                # only the IDs are kept per-query, the objects live (weakly)
                # in localItemCache.
                items = function(*args, **kwargs)
                IDs = set()
                localQueryCache[cacheKey] = (isinstance(items, list), IDs)
                stuff = items if isinstance(items, list) else (items,)
                for item in stuff:
                    ID = getattr(item, "ID", None)
                    if ID is None:
                        # Some uncachable data, don't cache this query
                        del localQueryCache[cacheKey]
                        break
                    localItemCache[ID] = item
                    IDs.add(ID)
                return items
            def checkAndReturn(*args, **kwargs):
                # Cache key = positional args + the values of the declared
                # keyword parameters.  useCache=False forces a refresh.
                useCache = kwargs.pop("useCache", True)
                cacheKey = []
                items = None
                cacheKey.extend(args)
                for keyword in keywords:
                    cacheKey.append(kwargs.get(keyword))
                cacheKey = tuple(cacheKey)
                info = localQueryCache.get(cacheKey)
                if info is None or not useCache:
                    items = setCache(cacheKey, args, kwargs)
                else:
                    l, IDs = info
                    if l:
                        items = []
                        for ID in IDs:
                            data = localItemCache.get(ID)
                            if data is None:
                                # Fuck, some of our stuff isn't cached it seems.
                                items = setCache(cacheKey, args, kwargs)
                                break
                            items.append(data)
                    else:
                        for ID in IDs:
                            items = localItemCache.get(ID)
                            if items is None:
                                items = setCache(cacheKey, args, kwargs)
                                break
                return items
            return checkAndReturn
        return deco
    def removeCachedEntry(type, ID):
        # Invalidate every cached query result that referenced this item ID,
        # then evict the item itself.
        if type not in queryCache:
            return
        functionCache = queryCache[type]
        for _, localCache in functionCache.items():
            toDelete = set()
            for cacheKey, info in localCache.items():
                IDs = info[1]
                if ID in IDs:
                    toDelete.add(cacheKey)
            for cacheKey in toDelete:
                del localCache[cacheKey]
        if ID in itemCache[type]:
            del itemCache[type][ID]
elif callable(configVal):
    # External cache implementation supplied through eos.config.
    cachedQuery, removeCachedEntry = eos.config.gamedataCache
else:
    # Caching disabled: pass-through decorator and no-op invalidator.
    def cachedQuery(amount, *keywords):
        def deco(function):
            def checkAndReturn(*args, **kwargs):
                return function(*args, **kwargs)
            return checkAndReturn
        return deco
    def removeCachedEntry(*args, **kwargs):
        return
def sqlizeString(line):
    """Escape a user-supplied pattern for use in a SQL LIKE clause.

    Backslashes are escaped first (they act as the escape character in
    queries), then the SQL wildcards '%' and '_', and finally the generic
    '*' wildcard is translated into the SQL '%' wildcard.
    """
    substitutions = (
        ("\\", "\\\\"),
        ("%", "\\%"),
        ("_", "\\_"),
        ("*", "%"),
    )
    for old, new in substitutions:
        line = line.replace(old, new)
    return line
@cachedQuery(User, 1, "lookfor")
def getUser(lookfor, eager=None):
    """Fetch a User by integer ID or by username string.

    Raises TypeError for any other lookup type.
    """
    if isinstance(lookfor, int):
        if eager is None:
            # Plain primary-key lookup, no eager-loading options required.
            with sd_lock:
                user = saveddata_session.query(User).get(lookfor)
        else:
            eager = processEager(eager)
            with sd_lock:
                query = saveddata_session.query(User).options(*eager)
                user = query.filter(User.ID == lookfor).first()
    elif isinstance(lookfor, str):
        eager = processEager(eager)
        with sd_lock:
            query = saveddata_session.query(User).options(*eager)
            user = query.filter(User.username == lookfor).first()
    else:
        raise TypeError("Need integer or string as argument")
    return user
@cachedQuery(Character, 1, "lookfor")
def getCharacter(lookfor, eager=None):
    """Fetch a Character by integer ID or by savedName string.

    Raises TypeError for any other lookup type.
    """
    if isinstance(lookfor, int):
        if eager is None:
            # Plain primary-key lookup, no eager-loading options required.
            with sd_lock:
                character = saveddata_session.query(Character).get(lookfor)
        else:
            eager = processEager(eager)
            with sd_lock:
                character = (
                    saveddata_session.query(Character)
                    .options(*eager)
                    .filter(Character.ID == lookfor)
                    .first()
                )
    elif isinstance(lookfor, str):
        eager = processEager(eager)
        with sd_lock:
            character = (
                saveddata_session.query(Character)
                .options(*eager)
                .filter(Character.savedName == lookfor)
                .first()
            )
    else:
        raise TypeError("Need integer or string as argument")
    return character
def getCharacterList(eager=None):
    """Return every saved Character, with optional eager-loading options."""
    loadOptions = processEager(eager)
    with sd_lock:
        return saveddata_session.query(Character).options(*loadOptions).all()
def getCharactersForUser(lookfor, eager=None):
    """Return all Characters owned by the user with integer ID ``lookfor``."""
    if isinstance(lookfor, int):
        eager = processEager(eager)
        with sd_lock:
            characters = (
                saveddata_session.query(Character)
                .options(*eager)
                .filter(Character.ownerID == lookfor)
                .all()
            )
    else:
        raise TypeError("Need integer as argument")
    return characters
@cachedQuery(Fit, 1, "lookfor")
def getFit(lookfor, eager=None):
    """Fetch a Fit by integer ID.

    Fits flagged ``isInvalid`` are purged from the database and ``None`` is
    returned instead.
    """
    if isinstance(lookfor, int):
        if eager is None:
            with sd_lock:
                fit = saveddata_session.query(Fit).get(lookfor)
        else:
            eager = processEager(eager)
            with sd_lock:
                fit = (
                    saveddata_session.query(Fit)
                    .options(*eager)
                    .filter(Fit.ID == lookfor)
                    .first()
                )
    else:
        raise TypeError("Need integer as argument")
    # Lazily clean up fits that have become invalid (e.g. item removed from
    # the game data) instead of handing them to callers.
    if fit and fit.isInvalid:
        with sd_lock:
            removeInvalid([fit])
        return None
    return fit
def getFitsWithShip(shipID, ownerID=None, where=None, eager=None):
    """
    Get all the fits using a certain ship.
    If no user is passed, do this for all users.

    Raises TypeError if shipID is not an int or ownerID is neither None
    nor an int.
    """
    if isinstance(shipID, int):
        if ownerID is not None and not isinstance(ownerID, int):
            raise TypeError("OwnerID must be integer")
        # Renamed from ``filter`` to avoid shadowing the builtin.
        fitFilter = Fit.shipID == shipID
        if ownerID is not None:
            fitFilter = and_(fitFilter, Fit.ownerID == ownerID)
        fitFilter = processWhere(fitFilter, where)
        eager = processEager(eager)
        with sd_lock:
            fits = removeInvalid(
                saveddata_session.query(Fit).options(*eager).filter(fitFilter).all()
            )
    else:
        raise TypeError("ShipID must be integer")
    return fits
def getRecentFits(ownerID=None, where=None, eager=None):
    """Return lightweight rows for the 50 most recently modified fits.

    NOTE(review): ``ownerID``, ``where`` and the processed ``eager`` value
    are currently accepted but never applied to the query — confirm whether
    filtering was intended here.
    """
    eager = processEager(eager)
    with sd_lock:
        q = (
            select(
                (
                    Fit.ID,
                    Fit.shipID,
                    Fit.name,
                    Fit.modified,
                    Fit.created,
                    Fit.timestamp,
                    Fit.notes,
                )
            )
            .order_by(desc(Fit.modified), desc(Fit.timestamp))
            .limit(50)
        )
        fits = eos.db.saveddata_session.execute(q).fetchall()
    return fits
def getFitsWithModules(typeIDs, eager=None):
    """
    Get all the fits that have typeIDs fitted to them
    """
    # Accept a single typeID as well as any iterable of them.
    if not hasattr(typeIDs, "__iter__"):
        typeIDs = (typeIDs,)
    eager = processEager(eager)
    with sd_lock:
        fits = removeInvalid(
            saveddata_session.query(Fit)
            .join(Module)
            .options(*eager)
            .filter(Module.itemID.in_(typeIDs))
            .all()
        )
    return fits
def countAllFits():
    """Return the total number of stored fits."""
    with sd_lock:
        return saveddata_session.query(Fit).count()
def countFitGroupedByShip():
    """Return (shipID, fit count) pairs, one per distinct ship in use."""
    with sd_lock:
        count = (
            eos.db.saveddata_session.query(Fit.shipID, func.count(Fit.shipID))
            .group_by(Fit.shipID)
            .all()
        )
    return count
def countFitsWithShip(lookfor, ownerID=None, where=None, eager=None):
    """
    Count the fits using a certain ship (or any ship in a list of IDs).
    If no user is passed, do this for all users.

    Raises TypeError if lookfor is neither an int nor a list, or if
    ownerID is neither None nor an int.
    """
    if ownerID is not None and not isinstance(ownerID, int):
        raise TypeError("OwnerID must be integer")
    # Renamed from ``filter`` to avoid shadowing the builtin.
    if isinstance(lookfor, int):
        fitFilter = Fit.shipID == lookfor
    elif isinstance(lookfor, list):
        if len(lookfor) == 0:
            # Empty ship list can't match anything; skip the query.
            return 0
        fitFilter = Fit.shipID.in_(lookfor)
    else:
        # Fixed previously-garbled message.
        raise TypeError("Need integer or list as argument")
    if ownerID is not None:
        fitFilter = and_(fitFilter, Fit.ownerID == ownerID)
    fitFilter = processWhere(fitFilter, where)
    eager = processEager(eager)
    with sd_lock:
        count = saveddata_session.query(Fit).options(*eager).filter(fitFilter).count()
    return count
def getFitList(eager=None):
    """Return every stored fit, pruning any that have become invalid."""
    loadOptions = processEager(eager)
    with sd_lock:
        allFits = saveddata_session.query(Fit).options(*loadOptions).all()
        return removeInvalid(allFits)
def getFitListLite():
    """Return FitLite stubs (ID, name, shipID) for every fit.

    Uses a bare table select instead of the ORM to avoid loading full Fit
    objects.
    """
    with sd_lock:
        stmt = select([fits_table.c.ID, fits_table.c.name, fits_table.c.shipID])
        data = eos.db.saveddata_session.execute(stmt).fetchall()
        fits = []
        for fitID, fitName, shipID in data:
            fit = FitLite(id=fitID, name=fitName, shipID=shipID)
            fits.append(fit)
        return fits
@cachedQuery(Price, 1, "typeID")
def getPrice(typeID):
    """Fetch the cached Price row for an item typeID (integer only)."""
    if not isinstance(typeID, int):
        raise TypeError("Need integer as argument")
    with sd_lock:
        return saveddata_session.query(Price).get(typeID)
def clearPrices():
    """Delete every cached Price row, commit, and return the row count."""
    with sd_lock:
        removed = saveddata_session.query(Price).delete()
    commit()
    return removed
def getMiscData(field):
    """Fetch the MiscData row for a string ``field`` name."""
    if isinstance(field, str):
        with sd_lock:
            data = saveddata_session.query(MiscData).get(field)
    else:
        raise TypeError("Need string as argument")
    return data
def getDamagePatternList(eager=None):
    """Return every stored DamagePattern, with optional eager loading."""
    loadOptions = processEager(eager)
    with sd_lock:
        return saveddata_session.query(DamagePattern).options(*loadOptions).all()
def clearDamagePatterns():
    """Delete all damage patterns except the builtin "Uniform" one.

    Returns the number of deleted rows.
    """
    with sd_lock:
        deleted_rows = (
            saveddata_session.query(DamagePattern)
            .filter(DamagePattern.name != "Uniform")
            .delete()
        )
    commit()
    return deleted_rows
def getTargetProfileList(eager=None):
    """Return every stored TargetProfile, with optional eager loading."""
    loadOptions = processEager(eager)
    with sd_lock:
        return saveddata_session.query(TargetProfile).options(*loadOptions).all()
def clearTargetProfiles():
    """Delete every TargetProfile row, commit, and return the row count."""
    with sd_lock:
        removed = saveddata_session.query(TargetProfile).delete()
    commit()
    return removed
def getImplantSetList(eager=None):
    """Return every stored ImplantSet, with optional eager loading."""
    loadOptions = processEager(eager)
    with sd_lock:
        return saveddata_session.query(ImplantSet).options(*loadOptions).all()
@cachedQuery(DamagePattern, 1, "lookfor")
def getDamagePattern(lookfor, eager=None):
    """Fetch a DamagePattern by integer ID or by rawName string."""
    if isinstance(lookfor, int):
        if eager is None:
            with sd_lock:
                pattern = saveddata_session.query(DamagePattern).get(lookfor)
        else:
            eager = processEager(eager)
            with sd_lock:
                pattern = (
                    saveddata_session.query(DamagePattern)
                    .options(*eager)
                    .filter(DamagePattern.ID == lookfor)
                    .first()
                )
    elif isinstance(lookfor, str):
        eager = processEager(eager)
        with sd_lock:
            pattern = (
                saveddata_session.query(DamagePattern)
                .options(*eager)
                .filter(DamagePattern.rawName == lookfor)
                .first()
            )
    else:
        raise TypeError("Need integer or string as argument")
    return pattern
@cachedQuery(TargetProfile, 1, "lookfor")
def getTargetProfile(lookfor, eager=None):
    """Fetch a TargetProfile by integer ID or by rawName string."""
    if isinstance(lookfor, int):
        if eager is None:
            with sd_lock:
                pattern = saveddata_session.query(TargetProfile).get(lookfor)
        else:
            eager = processEager(eager)
            with sd_lock:
                pattern = (
                    saveddata_session.query(TargetProfile)
                    .options(*eager)
                    .filter(TargetProfile.ID == lookfor)
                    .first()
                )
    elif isinstance(lookfor, str):
        eager = processEager(eager)
        with sd_lock:
            pattern = (
                saveddata_session.query(TargetProfile)
                .options(*eager)
                .filter(TargetProfile.rawName == lookfor)
                .first()
            )
    else:
        raise TypeError("Need integer or string as argument")
    return pattern
@cachedQuery(ImplantSet, 1, "lookfor")
def getImplantSet(lookfor, eager=None):
    """Fetch an ImplantSet by integer ID or by name string.

    Bugfix: the eager/string branches previously filtered on
    ``TargetProfile.ID`` / ``TargetProfile.name`` (copy-paste from
    getTargetProfile), querying ImplantSet with a filter on an unrelated
    table. They now filter on the ImplantSet columns.
    """
    if isinstance(lookfor, int):
        if eager is None:
            with sd_lock:
                pattern = saveddata_session.query(ImplantSet).get(lookfor)
        else:
            eager = processEager(eager)
            with sd_lock:
                pattern = (
                    saveddata_session.query(ImplantSet)
                    .options(*eager)
                    .filter(ImplantSet.ID == lookfor)
                    .first()
                )
    elif isinstance(lookfor, str):
        eager = processEager(eager)
        with sd_lock:
            pattern = (
                saveddata_session.query(ImplantSet)
                .options(*eager)
                .filter(ImplantSet.name == lookfor)
                .first()
            )
    else:
        raise TypeError("Improper argument")
    return pattern
def searchFits(nameLike, where=None, eager=None):
    """Return up to 100 valid fits whose name contains ``nameLike``.

    The search term is wildcard-escaped via sqlizeString and wrapped in
    '%...%' for a substring LIKE match.
    """
    if not isinstance(nameLike, str):
        raise TypeError("Need string as argument")
    # Prepare our string for request
    nameLike = "%{0}%".format(sqlizeString(nameLike))
    # Add any extra components to the search to our where clause
    filter = processWhere(Fit.name.like(nameLike, escape="\\"), where)
    eager = processEager(eager)
    with sd_lock:
        fits = removeInvalid(
            saveddata_session.query(Fit).options(*eager).filter(filter).limit(100).all()
        )
    return fits
def getProjectedFits(fitID):
    """Return the fits that fit ``fitID`` is projected onto."""
    if not isinstance(fitID, int):
        raise TypeError("Need integer as argument")
    with sd_lock:
        # Join the projection link table to the victim fits.
        linkFilter = and_(
            projectedFits_table.c.sourceID == fitID,
            Fit.ID == projectedFits_table.c.victimID,
        )
        return saveddata_session.query(Fit).filter(linkFilter).all()
def getSsoCharacters(clientHash, eager=None):
    """Return all SsoCharacters registered for the given client hash."""
    eager = processEager(eager)
    with sd_lock:
        characters = (
            saveddata_session.query(SsoCharacter)
            .filter(SsoCharacter.client == clientHash)
            .options(*eager)
            .all()
        )
    return characters
@cachedQuery(SsoCharacter, 1, "lookfor", "clientHash")
def getSsoCharacter(lookfor, clientHash, eager=None):
    """Fetch one SsoCharacter for a client, by integer ID or name string."""
    # Always scope the lookup to the requesting client.
    filter = SsoCharacter.client == clientHash
    if isinstance(lookfor, int):
        filter = and_(filter, SsoCharacter.ID == lookfor)
    elif isinstance(lookfor, str):
        filter = and_(filter, SsoCharacter.characterName == lookfor)
    else:
        raise TypeError("Need integer or string as argument")
    eager = processEager(eager)
    with sd_lock:
        character = (
            saveddata_session.query(SsoCharacter).options(*eager).filter(filter).first()
        )
    return character
def getOverrides(itemID, eager=None):
    """Return all attribute Overrides for an item typeID.

    NOTE(review): unlike the sibling queries this does not take sd_lock,
    and ``eager`` is accepted but never applied — confirm whether that is
    intentional.
    """
    if isinstance(itemID, int):
        return saveddata_session.query(Override).filter(Override.itemID == itemID).all()
    else:
        raise TypeError("Need integer as argument")
def clearOverrides():
    """Delete every Override row, commit, and return the row count."""
    with sd_lock:
        removed = saveddata_session.query(Override).delete()
    commit()
    return removed
def getAllOverrides(eager=None):
    """Return every Override row.

    NOTE(review): ``eager`` is accepted but unused, and no sd_lock is taken —
    confirm against the other query helpers.
    """
    return saveddata_session.query(Override).all()
def removeInvalid(fits):
    """Strip fits flagged ``isInvalid`` from ``fits`` and delete them.

    Mutates and returns the passed-in list; deletions are committed
    immediately.
    """
    invalids = [f for f in fits if f.isInvalid]
    if invalids:
        list(map(fits.remove, invalids))
        list(map(saveddata_session.delete, invalids))
        saveddata_session.commit()
    return fits
def add(stuff):
    """Add an object to the saveddata session under the session lock."""
    with sd_lock:
        saveddata_session.add(stuff)
def save(stuff):
    """Add an object to the session and commit immediately."""
    add(stuff)
    commit()
def remove(stuff):
    """Delete an object from the database and evict it from the caches."""
    # Invalidate cached queries/items before the row disappears.
    removeCachedEntry(type(stuff), stuff.ID)
    with sd_lock:
        saveddata_session.delete(stuff)
    commit()
def commit():
    """Commit the saveddata session, rolling back on failure.

    KeyboardInterrupt/SystemExit propagate untouched; any other exception
    triggers a rollback and is re-raised with its original traceback.
    """
    with sd_lock:
        try:
            saveddata_session.commit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            saveddata_session.rollback()
            # Bugfix: the old ``raise exc_info[0](exc_info[1]).with_traceback(...)``
            # constructed a *new* exception whose sole argument was the old
            # exception object, mangling the message. A bare ``raise``
            # re-raises the original with its traceback intact.
            raise
def flush():
    """Flush the saveddata session, rolling back on failure.

    KeyboardInterrupt/SystemExit propagate untouched; any other exception
    triggers a rollback and is re-raised with its original traceback.
    """
    with sd_lock:
        try:
            saveddata_session.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            saveddata_session.rollback()
            # Bugfix: bare ``raise`` instead of rebuilding the exception from
            # sys.exc_info(), which wrapped the old exception as the new
            # one's message.
            raise
|
saveddata | miscData | # ===============================================================================
# Copyright (C) 2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from eos.db import saveddata_meta
from eos.saveddata.miscData import MiscData
from sqlalchemy import Column, String, Table
from sqlalchemy.orm import mapper
# Simple key/value store for assorted saveddata metadata.
miscdata_table = Table(
    "miscdata",
    saveddata_meta,
    Column("fieldName", String, primary_key=True),
    Column("fieldValue", String),
)

# Classic-style SQLAlchemy mapping of MiscData onto the table.
mapper(MiscData, miscdata_table)
|
extractor | shared | from __future__ import unicode_literals
from ..compat import compat_b64decode, compat_urllib_parse_unquote_plus
from ..utils import (
KNOWN_EXTENSIONS,
ExtractorError,
determine_ext,
int_or_none,
js_to_json,
parse_filesize,
rot47,
url_or_none,
urlencode_postdata,
)
from .common import InfoExtractor
class SharedBaseIE(InfoExtractor):
    """Shared extraction flow for shared.sx-style file hosts.

    Subclasses provide _VALID_URL, _FILE_NOT_FOUND and the
    _extract_video_url() hook; title/filesize extraction may be overridden.
    """

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage, urlh = self._download_webpage_handle(url, video_id)

        if self._FILE_NOT_FOUND in webpage:
            raise ExtractorError("Video %s does not exist" % video_id, expected=True)

        video_url = self._extract_video_url(webpage, video_id, url)

        title = self._extract_title(webpage)
        filesize = int_or_none(self._extract_filesize(webpage))

        return {
            "id": video_id,
            "url": video_url,
            "ext": "mp4",
            "filesize": filesize,
            "title": title,
        }

    def _extract_title(self, webpage):
        # Title is stored base64-encoded in a <meta full:title> tag.
        return compat_b64decode(
            self._html_search_meta("full:title", webpage, "title")
        ).decode("utf-8")

    def _extract_filesize(self, webpage):
        return self._html_search_meta("full:size", webpage, "file size", fatal=False)
class SharedIE(SharedBaseIE):
    """Extractor for shared.sx download pages."""

    IE_DESC = "shared.sx"
    _VALID_URL = r"https?://shared\.sx/(?P<id>[\da-z]{10})"
    _FILE_NOT_FOUND = ">File does not exist<"

    _TEST = {
        "url": "http://shared.sx/0060718775",
        "md5": "106fefed92a8a2adb8c98e6a0652f49b",
        "info_dict": {
            "id": "0060718775",
            "ext": "mp4",
            "title": "Bmp4",
            "filesize": 1720110,
        },
    }

    def _extract_video_url(self, webpage, video_id, url):
        # The real URL is only revealed after POSTing the page's hidden form
        # back to the same address.
        download_form = self._hidden_inputs(webpage)

        video_page = self._download_webpage(
            url,
            video_id,
            "Downloading video page",
            data=urlencode_postdata(download_form),
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Referer": url,
            },
        )

        video_url = self._html_search_regex(
            r'data-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
            video_page,
            "video URL",
            group="url",
        )
        return video_url
class VivoIE(SharedBaseIE):
    """Extractor for vivo.sx / vivo.st download pages."""

    IE_DESC = "vivo.sx"
    _VALID_URL = r"https?://vivo\.s[xt]/(?P<id>[\da-z]{10})"
    _FILE_NOT_FOUND = ">The file you have requested does not exists or has been removed"

    _TESTS = [
        {
            "url": "http://vivo.sx/d7ddda0e78",
            "md5": "15b3af41be0b4fe01f4df075c2678b2c",
            "info_dict": {
                "id": "d7ddda0e78",
                "ext": "mp4",
                "title": "Chicken",
                "filesize": 515659,
            },
        },
        {
            "url": "http://vivo.st/d7ddda0e78",
            "only_matching": True,
        },
    ]

    def _extract_title(self, webpage):
        title = self._html_search_regex(
            r'data-name\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
            webpage,
            "title",
            default=None,
            group="title",
        )
        if title:
            # Strip a trailing media extension (e.g. "Chicken.mp4" -> "Chicken").
            ext = determine_ext(title)
            if ext.lower() in KNOWN_EXTENSIONS:
                title = title.rpartition("." + ext)[0]
            return title
        return self._og_search_title(webpage)

    def _extract_filesize(self, webpage):
        # Page shows a human-readable size like "(503.57 KB)".
        return parse_filesize(
            self._search_regex(
                r'data-type=["\']video["\'][^>]*>Watch.*?<strong>\s*\((.+?)\)',
                webpage,
                "filesize",
                fatal=False,
            )
        )

    def _extract_video_url(self, webpage, video_id, url):
        # Older pages embed a base64-encoded stream URL directly.
        def decode_url_old(encoded_url):
            return compat_b64decode(encoded_url).decode("utf-8")

        stream_url = self._search_regex(
            r'data-stream\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage,
            "stream url",
            default=None,
            group="url",
        )
        if stream_url:
            stream_url = url_or_none(decode_url_old(stream_url))
        if stream_url:
            return stream_url

        # Newer pages pass a rot47 + URL-quoted source to InitializeStream().
        def decode_url(encoded_url):
            return rot47(compat_urllib_parse_unquote_plus(encoded_url))

        return decode_url(
            self._parse_json(
                self._search_regex(
                    r"(?s)InitializeStream\s*\(\s*({.+?})\s*\)\s*;", webpage, "stream"
                ),
                video_id,
                transform_source=js_to_json,
            )["source"]
        )
|
versions | 078_ae821876532a_remove_old_authz_model | # encoding: utf-8
"""078 Remove old authz model
Revision ID: ae821876532a
Revises: 51171a04d86d
Create Date: 2018-09-04 18:49:15.812926
"""
import sqlalchemy as sa
from alembic import op
from ckan.migration import skip_based_on_legacy_engine_version
# revision identifiers, used by Alembic.
revision = "ae821876532a"
down_revision = "51171a04d86d"
branch_labels = None
depends_on = None
# Indexes that existed on the legacy authz tables; recreated verbatim when
# this migration is downgraded. Each entry is (index name, table, columns).
indexes = (
    ("idx_uor_id", "user_object_role", ["id"]),
    ("idx_uor_user_id", "user_object_role", ["user_id"]),
    ("idx_uor_context", "user_object_role", ["context"]),
    ("idx_uor_role", "user_object_role", ["role"]),
    ("idx_uor_user_id_role", "user_object_role", ["user_id", "role"]),
    ("idx_ra_role", "role_action", ["role"]),
    ("idx_ra_action", "role_action", ["action"]),
    ("idx_ra_role_action", "role_action", ["action", "role"]),
)
def upgrade():
    """Drop the pre-2.x role-based authorization tables.

    Dependent tables are dropped before user_object_role, which they
    reference via foreign keys.
    """
    if skip_based_on_legacy_engine_version(op, __name__):
        return
    op.drop_table("role_action")
    op.drop_table("package_role")
    op.drop_table("group_role")
    op.drop_table("system_role")
    op.drop_table("authorization_group_role")
    op.drop_table("user_object_role")
def downgrade():
    """Recreate the legacy authz tables dropped by upgrade().

    user_object_role is created first since the *_role tables reference it;
    the legacy indexes are restored at the end.
    """
    op.create_table(
        "user_object_role",
        sa.Column("id", sa.UnicodeText, primary_key=True),
        sa.Column("user_id", sa.UnicodeText, sa.ForeignKey("user.id")),
        sa.Column("context", sa.UnicodeText, nullable=False),
        sa.Column("role", sa.UnicodeText),
    )
    op.create_table(
        "authorization_group_role",
        sa.Column(
            "user_object_role_id",
            sa.UnicodeText,
            sa.ForeignKey("user_object_role.id"),
            primary_key=True,
        ),
        sa.Column(
            "authorization_group_id",
            sa.UnicodeText,
            sa.ForeignKey("authorization_group.id"),
        ),
    )
    op.create_table(
        "system_role",
        sa.Column(
            "user_object_role_id",
            sa.UnicodeText,
            sa.ForeignKey("user_object_role.id"),
            primary_key=True,
        ),
    )
    op.create_table(
        "group_role",
        sa.Column(
            "user_object_role_id",
            sa.UnicodeText,
            sa.ForeignKey("user_object_role.id"),
            primary_key=True,
        ),
        sa.Column("group_id", sa.UnicodeText, sa.ForeignKey("group.id")),
    )
    op.create_table(
        "package_role",
        sa.Column(
            "user_object_role_id",
            sa.UnicodeText,
            sa.ForeignKey("user_object_role.id"),
            primary_key=True,
        ),
        sa.Column("package_id", sa.UnicodeText, sa.ForeignKey("package.id")),
    )
    op.create_table(
        "role_action",
        sa.Column("id", sa.UnicodeText, primary_key=True),
        sa.Column("role", sa.UnicodeText),
        sa.Column("context", sa.UnicodeText, nullable=False),
        sa.Column("action", sa.UnicodeText),
    )
    op.add_column(
        "user_object_role",
        sa.Column(
            "authorized_group_id",
            sa.UnicodeText,
            sa.ForeignKey("authorization_group.id"),
            nullable=True,
        ),
    )
    for name, table, columns in indexes:
        op.create_index(name, table, columns)
|
plugins | smartlink | # Smart Link Plugin
# by Jack Miller
# v1.0
# Allow links to be fetched to disk and then run a smarter handler on it
# Use :fetch instead of :goto to use these handlers.
# HANDLERS is a list of handlers, specified like:
HANDLERS = [
{
"match-url": ".*\\.mp3$",
"handler": "mplayer",
"regex": True,
},
{
"match-file": "image data",
"handler": "feh",
},
{
"match-file": "PDF",
"handler": "evince",
},
]
# Each handler will either have a 'match-url' setting or a 'match-file' setting.
# These will be considered regexes if 'regex' is True, it defaults to False,
# which will use a basic string search.
# 'match-url' should be used for network capable helpers, like mplayer, and if
# a match-url match is found, canto-curses won't download the file.
# If nothing matches ANY 'match-url' handler, then the file is downloaded and
# `file` is run on it. 'match-file' handlers are then checked against that
# output.
# If no handlers match, then your default browser is used, as if you had used
# :goto instead of :fetch.
# Matches are searched in order (except url matches always come before file
# matches). So if you want to, say, open PNGs in one sort of handler, and all
# other images in another handler, you could list something like
# { "match-file" : "PNG image data",
# "handler" : "png-helper",
# },
# { "match-file" : "image data",
# "handler" : "other-helper",
# }
# You may also specify a "pause" setting if you want to use other terminal
# programs without canto getting in the way.
# { "match-file" : "Audio data",
# "handler" : "mocp",
# "pause" : True,
# }
# If you have no match-file handlers listed, the file will not be downloaded.
from canto_next.plugins import check_program
check_program("canto-curses")
import logging
import os
import re
import shlex
import subprocess
import sys
import tempfile
import urllib
from threading import Thread
from canto_curses.command import register_commands
from canto_curses.reader import ReaderPlugin
from canto_curses.taglist import TagListPlugin
from canto_next.hooks import on_hook
log = logging.getLogger("SMARTLINK")
class SmartLinkThread(Thread):
    """Background worker that resolves one link through HANDLERS and opens it.

    URL handlers are tried first (no download); otherwise the file is
    fetched, identified with `file`, and matched against file handlers.
    If nothing matches, the configured default browser is used.
    """

    def __init__(self, base_obj, href):
        Thread.__init__(self, name="Smart Link: %s" % href)
        self.base_obj = base_obj
        self.href = href
        self.daemon = True

    def run(self):
        got_handler = None
        file_handlers = []

        for handler in HANDLERS:
            # If there's no handler defined, we don't care if it's a match
            if "handler" not in handler:
                log.error("No handler binary defined for: %s" % handler)
                continue

            # Normalize the optional 'regex' flag to a strict boolean.
            if "regex" in handler and handler["regex"] == True:
                handler["regex"] = True
            else:
                handler["regex"] = False

            if "match-url" in handler:
                got_handler = self.try_handler(handler, "url", self.href)
                if got_handler:
                    break
            elif "match-file" in handler:
                file_handlers.append(handler)
            else:
                log.error("No match-url or match-file in handler %s" % handler)
        else:
            # We didn't find a matching URL handler.
            # No file_handlers present, don't bother to download, create
            # a default browser handler.
            if file_handlers:
                try:
                    tmpnam = self.grab_it()
                except Exception as e:
                    log.error("Couldn't download file: %s" % e)
                    # If we couldn't get the file, skip all of these
                    file_handlers = []
                else:
                    try:
                        fileoutput = subprocess.check_output(
                            "file %s" % shlex.quote(tmpnam), shell=True
                        )
                        fileoutput = fileoutput.decode()
                    except Exception as e:
                        log.error("Couldn't get file output: %s" % e)
                        # If we couldn't get the `file` output, also skip
                        file_handlers = []

            for f_handler in file_handlers:
                log.debug("f_handler: %s", f_handler)
                got_handler = self.try_handler(f_handler, "file", fileoutput)
                if got_handler:
                    # Hand the local temp file to the handler, not the URL.
                    self.href = tmpnam
                    break
            else:
                # No file handler matched (or none were defined): fall back
                # to the configured default browser.
                conf = self.base_obj.callbacks["get_conf"]()
                got_handler = {
                    "handler": conf["browser"]["path"],
                    "text": conf["browser"]["text"],
                }

        # Okay, so at this point we have self.href, which is either the URL or
        # the temporary file path, and got_handler telling us what to invoke and
        # how.
        log.info("Opening %s with %s" % (self.href, got_handler["handler"]))

        # Make sure that we quote href such that malicious URLs like
        # "http://example.com & rm -rf ~/" won't be interpreted by the shell.
        href = shlex.quote(self.href)

        pause = False
        if "pause" in got_handler and got_handler["pause"]:
            self.base_obj.callbacks["pause_interface"]()
            pause = True

        path = got_handler["handler"]
        if "%u" in path:
            path = path.replace("%u", href)
        elif href:
            path = path + " " + href

        pid = os.fork()
        if not pid:
            # A lot of programs don't appreciate having their fds closed, so
            # instead we dup them to /dev/null.
            fd = os.open("/dev/null", os.O_RDWR)
            os.dup2(fd, sys.stderr.fileno())

            if not pause:
                os.setpgid(os.getpid(), os.getpid())
                os.dup2(fd, sys.stdout.fileno())
                os.dup2(fd, sys.stdin.fileno())

            os.execv("/bin/sh", ["/bin/sh", "-c", path])

            # Just in case.
            sys.exit(0)

        # Parent process only cares if we should wait for the process to finish
        elif pause:
            os.waitpid(pid, 0)
            self.base_obj.callbacks["unpause_interface"]()

    def try_handler(self, handler, suffix, content):
        """Return ``handler`` if its match-<suffix> pattern matches content."""
        got_match = False
        element = "match-" + suffix

        # We know these elements exist, from above
        if not handler["regex"]:
            got_match = handler[element] in content
        else:
            try:
                got_match = re.match(handler[element], content)
            except Exception as e:
                # Bugfix: the format string previously read "$s" with two
                # args for one "%s", raising TypeError instead of logging.
                log.error("Failed to do %s match: %s" % (suffix, e))

        if got_match:
            return handler

    def grab_it(self):
        """Download self.href into a temp dir and return the local path."""
        # Prepare temporary files
        # Get a base filename (sans query strings, etc.) from the URL
        tmppath = urllib.parse.urlparse(self.href).path
        fname = os.path.basename(tmppath)

        # Grab a temporary directory. This allows us to create a file with
        # an unperturbed filename so scripts can freely use regex /
        # extension matching in addition to mimetype detection.
        tmpdir = tempfile.mkdtemp(prefix="canto-")
        tmpnam = tmpdir + "/" + fname

        log.debug("Downloading %s to %s", self.href, tmpnam)

        # Clean up the download when the UI exits.
        on_hook("curses_exit", lambda: (os.unlink(tmpnam)))
        on_hook("curses_exit", lambda: (os.rmdir(tmpdir)))

        tmp = open(tmpnam, "w+b")

        # Set these because some sites think python's urllib is a scraper and
        # 403 it.
        extra_headers = {"User-Agent": "Canto/0.9.0 + http://codezen.org/canto-ng"}
        request = urllib.request.Request(self.href, headers=extra_headers)

        # Grab the HTTP info / prepare to read.
        response = urllib.request.urlopen(request)

        # Grab in kilobyte chunks to avoid wasting memory on something
        # that's going to be immediately written to disk.
        while True:
            r = response.read(1024)
            if not r:
                break
            tmp.write(r)

        response.close()
        tmp.close()

        return tmpnam
class TagListSmartLink(TagListPlugin):
    """Adds a :fetch command (bound to 'f') to the taglist view."""

    def __init__(self, taglist):
        self.taglist = taglist
        self.plugin_attrs = {}

        cmds = {
            "fetch": (self.cmd_fetch_link, ["item-list"], "Fetch link"),
        }

        register_commands(taglist, cmds)
        taglist.bind("f", "fetch")

    def cmd_fetch_link(self, items):
        # Only the first selected item's link is fetched, in the background.
        SmartLinkThread(self.taglist, items[0].content["link"]).start()
class ReaderSmartLink(ReaderPlugin):
    """Adds a :fetch command (bound to 'f') to the reader view."""

    def __init__(self, reader):
        self.reader = reader
        self.plugin_attrs = {}

        cmds = {
            "fetch": (self.cmd_fetch_link, ["link-list"], "Fetch link"),
        }

        register_commands(reader, cmds)
        reader.bind("f", "fetch")

    def cmd_fetch_link(self, link):
        # link is a list of (type, href) pairs; fetch the first href.
        SmartLinkThread(self.reader, link[0][1]).start()
|
utils | encoding | """
raven.utils.encoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
import warnings
from raven.utils.compat import PY2, binary_type, integer_types, string_types, text_type
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    import datetime
    # Bugfix: ``import Decimal`` raised ImportError — Decimal is a class in
    # the ``decimal`` module, not a module itself.
    from decimal import Decimal

    return isinstance(
        obj,
        integer_types
        + (type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time),
    )
def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first, saves 30-40% when s is an instance of
    # text_type. This function gets called often in that setting.
    if isinstance(s, text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, string_types):
            if hasattr(s, "__unicode__"):
                s = s.__unicode__()
            else:
                if not PY2:
                    if isinstance(s, bytes):
                        s = text_type(s, encoding, errors)
                    else:
                        s = text_type(s)
                else:
                    # Python 2: go through bytes() to pick up __str__.
                    s = text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise UnicodeDecodeError(*e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = " ".join([force_text(arg, encoding, strings_only, errors) for arg in s])
    return s
def transform(value):
    """Deprecated shim; forwards to raven.utils.serializer.transform."""
    from raven.utils.serializer import transform

    warnings.warn(
        "You should switch to raven.utils.serializer." "transform", DeprecationWarning
    )
    return transform(value)
def to_unicode(value):
    """Best-effort text conversion that never raises.

    Falls back to the repr of the value's type, and finally to a fixed
    placeholder string if even that fails.
    """
    try:
        value = text_type(force_text(value))
    except (UnicodeEncodeError, UnicodeDecodeError):
        value = "(Error decoding value)"
    except Exception:  # in some cases we get a different exception
        try:
            value = text_type(force_text(repr(type(value))))
        except Exception:
            value = "(Error decoding value)"
    return value
def to_string(value):
    """Best-effort conversion of ``value`` to UTF-8 encoded bytes."""
    try:
        # Fast path: value is already valid UTF-8 bytes.
        return binary_type(value.decode("utf-8").encode("utf-8"))
    except Exception:
        return to_unicode(value).encode("utf-8")
|
heartbeat | models | import logging
import typing
from urllib.parse import urljoin
from common.public_primary_keys import (
generate_public_primary_key,
increase_public_primary_key_length,
)
from django.conf import settings
from django.core.validators import MinLengthValidator
from django.db import models
from django.utils import timezone
logger = logging.getLogger(__name__)
def generate_public_primary_key_for_integration_heart_beat():
    """Generate a unique public primary key (prefix "B") for a heartbeat.

    On collision the key length is increased until a free key is found;
    ``failure_counter`` tracks how many attempts were needed.
    """
    prefix = "B"
    new_public_primary_key = generate_public_primary_key(prefix)

    failure_counter = 0
    while IntegrationHeartBeat.objects.filter(
        public_primary_key=new_public_primary_key
    ).exists():
        new_public_primary_key = increase_public_primary_key_length(
            failure_counter=failure_counter,
            prefix=prefix,
            model_name="IntegrationHeartBeat",
        )
        failure_counter += 1

    return new_public_primary_key
class IntegrationHeartBeat(models.Model):
    """Tracks periodic heartbeat signals for one alert receive channel.

    The integration is considered alive while signals keep arriving within
    ``timeout_seconds`` of each other.
    """

    TIMEOUT_CHOICES = (
        (60, "1 minute"),
        (120, "2 minutes"),
        (180, "3 minutes"),
        (300, "5 minutes"),
        (600, "10 minutes"),
        (900, "15 minutes"),
        (1800, "30 minutes"),
        (3600, "1 hour"),
        (43200, "12 hours"),
        (86400, "1 day"),
    )

    created_at = models.DateTimeField(auto_now_add=True)
    timeout_seconds = models.IntegerField(default=0)

    last_heartbeat_time = models.DateTimeField(default=None, null=True)
    """
    Stores the latest received heartbeat signal time
    """

    last_checkup_task_time = models.DateTimeField(default=None, null=True)
    """
    Deprecated. This field is not used. TODO: remove it
    """

    actual_check_up_task_id = models.CharField(max_length=100)
    """
    Deprecated. Stored the latest scheduled `integration_heartbeat_checkup` task id. TODO: remove it
    """

    previous_alerted_state_was_life = models.BooleanField(default=True)
    """
    Last status of the heartbeat. Determines if integration was alive on latest checkup
    """

    public_primary_key = models.CharField(
        max_length=20,
        validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
        unique=True,
        default=generate_public_primary_key_for_integration_heart_beat,
    )

    alert_receive_channel = models.OneToOneField(
        "alerts.AlertReceiveChannel",
        on_delete=models.CASCADE,
        related_name="integration_heartbeat",
    )

    @property
    def is_expired(self) -> bool:
        if self.last_heartbeat_time is None:
            # else heartbeat flow was not received, so heartbeat can't expire.
            return False
        # if heartbeat signal was received check timeout
        return (
            self.last_heartbeat_time + timezone.timedelta(seconds=self.timeout_seconds)
            < timezone.now()
        )

    @property
    def status(self) -> bool:
        """
        Return bool indicates heartbeat status.
        True if first heartbeat signal was sent and flow is ok else False.
        If first heartbeat signal was not send it means that configuration was not finished and status not ok.
        """
        if self.last_heartbeat_time is None:
            return False
        return not self.is_expired

    @property
    def link(self) -> str:
        # URL the external system pings to register a heartbeat.
        return urljoin(self.alert_receive_channel.integration_url, "heartbeat/")

    # Insight logs
    @property
    def insight_logs_type_verbal(self) -> str:
        return "integration_heartbeat"

    @property
    def insight_logs_verbal(self) -> str:
        return f"Integration Heartbeat for {self.alert_receive_channel.insight_logs_verbal}"

    @property
    def insight_logs_serialized(self) -> typing.Dict[str, str | int]:
        return {
            "timeout": self.timeout_seconds,
        }

    @property
    def insight_logs_metadata(self) -> typing.Dict[str, str]:
        return {
            "integration": self.alert_receive_channel.insight_logs_verbal,
            "integration_id": self.alert_receive_channel.public_primary_key,
        }
|
lastfm | client | # code under public domain
"""
The main client API you'll be working with most often. You'll need to
configure a lastfm.session.LastfmSession for this to work, but otherwise
it's fairly self-explanatory.
"""
from __future__ import absolute_import
import re
try:
import json
except ImportError:
import simplejson as json
from .rest import ErrorResponse, RESTClient
def format_path(path):
    """Normalize path for use with the Lastfm API.

    This function turns multiple adjacent slashes into single
    slashes, then ensures that there's a leading slash but
    not a trailing slash.

    Args:
        path: The path string to normalize; may be None or empty.

    Returns:
        The normalized path. None and "" are returned unchanged; a bare
        root ("/" or any run of slashes) collapses to the empty string.
    """
    if not path:
        # Covers both None and "": nothing to normalize.
        return path
    path = re.sub(r"/+", "/", path)
    if path == "/":
        # BUGFIX: the original `return "" if isinstance(path, unicode) else ""`
        # had two identical branches and raised NameError on Python 3
        # (`unicode` is undefined). A bare root is simply the empty string.
        return ""
    return "/" + path.strip("/")
# see http://www.last.fm/api/scrobbling
class LastfmClient(object):
    """
    The main access point of doing REST calls on Lastfm. You should
    first create and configure a lastfm.session.LastfmSession object,
    and then pass it into LastfmClient's constructor. LastfmClient
    then does all the work of properly calling each API method
    with the correct OAuth authentication.

    You should be aware that any of these methods can raise a
    rest.ErrorResponse exception if the server returns a non-200
    or invalid HTTP response. Note that a 401 return status at any
    point indicates that the user needs to be reauthenticated.
    """

    def __init__(self, session, rest_client=RESTClient):
        """Initialize the LastfmClient object.

        Args:
            session: A lastfm.session.LastfmSession object to use for making requests.
            rest_client: A lastfm.rest.RESTClient-like object to use for making requests. [optional]
        """
        self.session = session
        self.rest_client = rest_client

    def request(self, target, params=None, method="POST"):
        """Make an HTTP request to a target API method.

        This is an internal method used to properly craft the url, headers, and
        params for a Lastfm API request. It is exposed for you in case you
        need craft other API calls not in this library or if you want to debug it.

        Args:
            target: The target URL with leading slash (e.g. '/files')
            params: A dictionary of parameters to add to the request
            method: An HTTP method (e.g. 'GET' or 'POST')

        Returns:
            A tuple of (url, params, headers) that should be used to make the request.
            OAuth authentication information will be added as needed within these fields.
        """
        assert method in [
            "GET",
            "POST",
            "PUT",
        ], "Only 'GET', 'POST', and 'PUT' are allowed."
        if params is None:
            params = {}
        host = self.session.API_HOST
        # The session may fold auth data into the headers and rewrite the
        # params, so compute headers before building the final URL.
        headers, params = self.session.build_access_headers(params=params)
        if method in ("GET", "PUT"):
            # GET/PUT carry their parameters in the query string.
            url = self.session.build_url(host, target, params)
        else:
            # POST sends its parameters in the request body.
            url = self.session.build_url(host, target)
        return url, params, headers

    # http://www.last.fm/api/show/track.updateNowPlaying
    def updateNowPlaying(self, artist, track, duration=None):
        """Notify Last.fm that `artist` - `track` is playing now.

        Args:
            artist: The artist name.
            track: The track title.
            duration: Track length in seconds; only sent when positive. [optional]

        Returns:
            The decoded API response from the REST client.
        """
        params = {
            "method": "track.updateNowPlaying",
            "artist": artist,
            "track": track,
        }
        if duration and duration > 0:
            params["duration"] = str(int(duration))
        url, params, headers = self.request("/", method="POST", params=params)
        return self.rest_client.POST(url, headers=headers, params=params)

    # http://www.last.fm/api/show/track.scrobble
    def scrobble(self, artist, track, duration=None, timestamp=None):
        """Scrobble a played track.

        Args:
            artist: The artist name.
            track: The track title.
            duration: Track length in seconds; also used to estimate the
                start time when no timestamp is given. [optional]
            timestamp: Unix time at which the track started playing. When
                omitted it is estimated as now minus the duration (or minus
                5 minutes when the duration is unknown). [optional]

        Returns:
            The decoded API response from the REST client.
        """
        if not timestamp:
            import time

            # Estimate when the track started: scrobbles are timestamped at
            # the start of playback, not when the call is made.
            timestamp = int(time.time())
            if duration:
                timestamp -= duration
            else:
                timestamp -= 5 * 60  # assume the song was 5min long
        timestamp = str(int(timestamp))
        params = {
            "method": "track.scrobble",
            "artist": artist,
            "track": track,
            "timestamp": timestamp,
        }
        if duration:
            params["duration"] = str(int(duration))
        url, params, headers = self.request("/", method="POST", params=params)
        return self.rest_client.POST(url, headers=headers, params=params)
|
meshes | mesh_rc_wall_2d_tria6 | def create_nodes(femmesh):
# nodes
femmesh.addNode(0.0, 0.0, 0.0, 1)
femmesh.addNode(0.0, 2000.0, 0.0, 2)
femmesh.addNode(500.0, 0.0, 0.0, 3)
femmesh.addNode(4000.0, 2000.0, 0.0, 4)
femmesh.addNode(3500.0, 0.0, 0.0, 5)
femmesh.addNode(4000.0, 0.0, 0.0, 6)
femmesh.addNode(0.0, -2000.0, 0.0, 7)
femmesh.addNode(500.0, -2000.0, 0.0, 8)
femmesh.addNode(3500.0, -2000.0, 0.0, 9)
femmesh.addNode(4000.0, -2000.0, 0.0, 10)
femmesh.addNode(0.0, 250.0, 0.0, 11)
femmesh.addNode(0.0, 500.0, 0.0, 12)
femmesh.addNode(0.0, 750.0, 0.0, 13)
femmesh.addNode(0.0, 1000.0, 0.0, 14)
femmesh.addNode(0.0, 1250.0, 0.0, 15)
femmesh.addNode(0.0, 1500.0, 0.0, 16)
femmesh.addNode(0.0, 1750.0, 0.0, 17)
femmesh.addNode(0.0, 125.0, 0.0, 18)
femmesh.addNode(0.0, 375.0, 0.0, 19)
femmesh.addNode(0.0, 625.0, 0.0, 20)
femmesh.addNode(0.0, 875.0, 0.0, 21)
femmesh.addNode(0.0, 1125.0, 0.0, 22)
femmesh.addNode(0.0, 1375.0, 0.0, 23)
femmesh.addNode(0.0, 1625.0, 0.0, 24)
femmesh.addNode(0.0, 1875.0, 0.0, 25)
femmesh.addNode(250.0, 0.0, 0.0, 26)
femmesh.addNode(125.0, 0.0, 0.0, 27)
femmesh.addNode(375.0, 0.0, 0.0, 28)
femmesh.addNode(250.0, 2000.0, 0.0, 29)
femmesh.addNode(500.0, 2000.0, 0.0, 30)
femmesh.addNode(750.0, 2000.0, 0.0, 31)
femmesh.addNode(1000.0, 2000.0, 0.0, 32)
femmesh.addNode(1250.0, 2000.0, 0.0, 33)
femmesh.addNode(1500.0, 2000.0, 0.0, 34)
femmesh.addNode(1750.0, 2000.0, 0.0, 35)
femmesh.addNode(2000.0, 2000.0, 0.0, 36)
femmesh.addNode(2250.0, 2000.0, 0.0, 37)
femmesh.addNode(2500.0, 2000.0, 0.0, 38)
femmesh.addNode(2750.0, 2000.0, 0.0, 39)
femmesh.addNode(3000.0, 2000.0, 0.0, 40)
femmesh.addNode(3250.0, 2000.0, 0.0, 41)
femmesh.addNode(3500.0, 2000.0, 0.0, 42)
femmesh.addNode(3750.0, 2000.0, 0.0, 43)
femmesh.addNode(125.0, 2000.0, 0.0, 44)
femmesh.addNode(375.0, 2000.0, 0.0, 45)
femmesh.addNode(625.0, 2000.0, 0.0, 46)
femmesh.addNode(875.0, 2000.0, 0.0, 47)
femmesh.addNode(1125.0, 2000.0, 0.0, 48)
femmesh.addNode(1375.0, 2000.0, 0.0, 49)
femmesh.addNode(1625.0, 2000.0, 0.0, 50)
femmesh.addNode(1875.0, 2000.0, 0.0, 51)
femmesh.addNode(2125.0, 2000.0, 0.0, 52)
femmesh.addNode(2375.0, 2000.0, 0.0, 53)
femmesh.addNode(2625.0, 2000.0, 0.0, 54)
femmesh.addNode(2875.0, 2000.0, 0.0, 55)
femmesh.addNode(3125.0, 2000.0, 0.0, 56)
femmesh.addNode(3375.0, 2000.0, 0.0, 57)
femmesh.addNode(3625.0, 2000.0, 0.0, 58)
femmesh.addNode(3875.0, 2000.0, 0.0, 59)
femmesh.addNode(750.0, 0.0, 0.0, 60)
femmesh.addNode(1000.0, 0.0, 0.0, 61)
femmesh.addNode(1250.0, 0.0, 0.0, 62)
femmesh.addNode(1500.0, 0.0, 0.0, 63)
femmesh.addNode(1750.0, 0.0, 0.0, 64)
femmesh.addNode(2000.0, 0.0, 0.0, 65)
femmesh.addNode(2250.0, 0.0, 0.0, 66)
femmesh.addNode(2500.0, 0.0, 0.0, 67)
femmesh.addNode(2750.0, 0.0, 0.0, 68)
femmesh.addNode(3000.0, 0.0, 0.0, 69)
femmesh.addNode(3250.0, 0.0, 0.0, 70)
femmesh.addNode(625.0, 0.0, 0.0, 71)
femmesh.addNode(875.0, 0.0, 0.0, 72)
femmesh.addNode(1125.0, 0.0, 0.0, 73)
femmesh.addNode(1375.0, 0.0, 0.0, 74)
femmesh.addNode(1625.0, 0.0, 0.0, 75)
femmesh.addNode(1875.0, 0.0, 0.0, 76)
femmesh.addNode(2125.0, 0.0, 0.0, 77)
femmesh.addNode(2375.0, 0.0, 0.0, 78)
femmesh.addNode(2625.0, 0.0, 0.0, 79)
femmesh.addNode(2875.0, 0.0, 0.0, 80)
femmesh.addNode(3125.0, 0.0, 0.0, 81)
femmesh.addNode(3375.0, 0.0, 0.0, 82)
femmesh.addNode(4000.0, 250.0, 0.0, 83)
femmesh.addNode(4000.0, 500.0, 0.0, 84)
femmesh.addNode(4000.0, 750.0, 0.0, 85)
femmesh.addNode(4000.0, 1000.0, 0.0, 86)
femmesh.addNode(4000.0, 1250.0, 0.0, 87)
femmesh.addNode(4000.0, 1500.0, 0.0, 88)
femmesh.addNode(4000.0, 1750.0, 0.0, 89)
femmesh.addNode(4000.0, 125.0, 0.0, 90)
femmesh.addNode(4000.0, 375.0, 0.0, 91)
femmesh.addNode(4000.0, 625.0, 0.0, 92)
femmesh.addNode(4000.0, 875.0, 0.0, 93)
femmesh.addNode(4000.0, 1125.0, 0.0, 94)
femmesh.addNode(4000.0, 1375.0, 0.0, 95)
femmesh.addNode(4000.0, 1625.0, 0.0, 96)
femmesh.addNode(4000.0, 1875.0, 0.0, 97)
femmesh.addNode(3750.0, 0.0, 0.0, 98)
femmesh.addNode(3625.0, 0.0, 0.0, 99)
femmesh.addNode(3875.0, 0.0, 0.0, 100)
femmesh.addNode(0.0, -1750.0, 0.0, 101)
femmesh.addNode(0.0, -1500.0, 0.0, 102)
femmesh.addNode(0.0, -1250.0, 0.0, 103)
femmesh.addNode(0.0, -1000.0, 0.0, 104)
femmesh.addNode(0.0, -750.0, 0.0, 105)
femmesh.addNode(0.0, -500.0, 0.0, 106)
femmesh.addNode(0.0, -250.0, 0.0, 107)
femmesh.addNode(0.0, -1875.0, 0.0, 108)
femmesh.addNode(0.0, -1625.0, 0.0, 109)
femmesh.addNode(0.0, -1375.0, 0.0, 110)
femmesh.addNode(0.0, -1125.0, 0.0, 111)
femmesh.addNode(0.0, -875.0, 0.0, 112)
femmesh.addNode(0.0, -625.0, 0.0, 113)
femmesh.addNode(0.0, -375.0, 0.0, 114)
femmesh.addNode(0.0, -125.0, 0.0, 115)
femmesh.addNode(250.0, -2000.0, 0.0, 116)
femmesh.addNode(125.0, -2000.0, 0.0, 117)
femmesh.addNode(375.0, -2000.0, 0.0, 118)
femmesh.addNode(500.0, -1750.0, 0.0, 119)
femmesh.addNode(500.0, -1500.0, 0.0, 120)
femmesh.addNode(500.0, -1250.0, 0.0, 121)
femmesh.addNode(500.0, -1000.0, 0.0, 122)
femmesh.addNode(500.0, -750.0, 0.0, 123)
femmesh.addNode(500.0, -500.0, 0.0, 124)
femmesh.addNode(500.0, -250.0, 0.0, 125)
femmesh.addNode(500.0, -1875.0, 0.0, 126)
femmesh.addNode(500.0, -1625.0, 0.0, 127)
femmesh.addNode(500.0, -1375.0, 0.0, 128)
femmesh.addNode(500.0, -1125.0, 0.0, 129)
femmesh.addNode(500.0, -875.0, 0.0, 130)
femmesh.addNode(500.0, -625.0, 0.0, 131)
femmesh.addNode(500.0, -375.0, 0.0, 132)
femmesh.addNode(500.0, -125.0, 0.0, 133)
femmesh.addNode(3500.0, -1750.0, 0.0, 134)
femmesh.addNode(3500.0, -1500.0, 0.0, 135)
femmesh.addNode(3500.0, -1250.0, 0.0, 136)
femmesh.addNode(3500.0, -1000.0, 0.0, 137)
femmesh.addNode(3500.0, -750.0, 0.0, 138)
femmesh.addNode(3500.0, -500.0, 0.0, 139)
femmesh.addNode(3500.0, -250.0, 0.0, 140)
femmesh.addNode(3500.0, -1875.0, 0.0, 141)
femmesh.addNode(3500.0, -1625.0, 0.0, 142)
femmesh.addNode(3500.0, -1375.0, 0.0, 143)
femmesh.addNode(3500.0, -1125.0, 0.0, 144)
femmesh.addNode(3500.0, -875.0, 0.0, 145)
femmesh.addNode(3500.0, -625.0, 0.0, 146)
femmesh.addNode(3500.0, -375.0, 0.0, 147)
femmesh.addNode(3500.0, -125.0, 0.0, 148)
femmesh.addNode(3750.0, -2000.0, 0.0, 149)
femmesh.addNode(3625.0, -2000.0, 0.0, 150)
femmesh.addNode(3875.0, -2000.0, 0.0, 151)
femmesh.addNode(4000.0, -1750.0, 0.0, 152)
femmesh.addNode(4000.0, -1500.0, 0.0, 153)
femmesh.addNode(4000.0, -1250.0, 0.0, 154)
femmesh.addNode(4000.0, -1000.0, 0.0, 155)
femmesh.addNode(4000.0, -750.0, 0.0, 156)
femmesh.addNode(4000.0, -500.0, 0.0, 157)
femmesh.addNode(4000.0, -250.0, 0.0, 158)
femmesh.addNode(4000.0, -1875.0, 0.0, 159)
femmesh.addNode(4000.0, -1625.0, 0.0, 160)
femmesh.addNode(4000.0, -1375.0, 0.0, 161)
femmesh.addNode(4000.0, -1125.0, 0.0, 162)
femmesh.addNode(4000.0, -875.0, 0.0, 163)
femmesh.addNode(4000.0, -625.0, 0.0, 164)
femmesh.addNode(4000.0, -375.0, 0.0, 165)
femmesh.addNode(4000.0, -125.0, 0.0, 166)
femmesh.addNode(1151.2155027143724, 991.1595002148733, 0.0, 167)
femmesh.addNode(2345.552182966278, 992.5521461426597, 0.0, 168)
femmesh.addNode(3153.541159295079, 1199.5398685297591, 0.0, 169)
femmesh.addNode(1750.0, 695.3125, 0.0, 170)
femmesh.addNode(1681.25, 1347.65625, 0.0, 171)
femmesh.addNode(2881.435948455562, 621.2099677847261, 0.0, 172)
femmesh.addNode(606.0449604906953, 635.4082348210767, 0.0, 173)
femmesh.addNode(657.2976568366603, 1353.3222602999324, 0.0, 174)
femmesh.addNode(3445.1692479821986, 652.3366992631719, 0.0, 175)
femmesh.addNode(2674.4322928661404, 1434.2147374924186, 0.0, 176)
femmesh.addNode(2185.539194525211, 1473.115044788804, 0.0, 177)
femmesh.addNode(1273.2425124792012, 510.71781405990015, 0.0, 178)
femmesh.addNode(2215.6598753905773, 520.7910541541594, 0.0, 179)
femmesh.addNode(1115.670904262049, 1485.8186742572047, 0.0, 180)
femmesh.addNode(3519.3278588807784, 1519.3278588807786, 0.0, 181)
femmesh.addNode(388.34501905880387, 1026.9203157654522, 0.0, 182)
femmesh.addNode(3614.4372160624785, 1020.2250489643089, 0.0, 183)
femmesh.addNode(3105.4482841646395, 1593.0462413828066, 0.0, 184)
femmesh.addNode(2818.493830446949, 990.0975624837915, 0.0, 185)
femmesh.addNode(1959.2775916812711, 1064.8265300145413, 0.0, 186)
femmesh.addNode(1546.3069778764673, 1011.232872978363, 0.0, 187)
femmesh.addNode(2591.0841961277283, 363.09805979524884, 0.0, 188)
femmesh.addNode(900.8498339227292, 372.74375656728824, 0.0, 189)
femmesh.addNode(3159.4114197567123, 364.99779587913827, 0.0, 190)
femmesh.addNode(390.87897826169245, 345.62828737063484, 0.0, 191)
femmesh.addNode(413.062332090631, 1650.5023139707077, 0.0, 192)
femmesh.addNode(1599.1990546119257, 331.5972138397307, 0.0, 193)
femmesh.addNode(1495.152113940753, 1619.2944541079414, 0.0, 194)
femmesh.addNode(1863.5454857801321, 1658.2389001513154, 0.0, 195)
femmesh.addNode(778.2580283531327, 931.2966961410348, 0.0, 196)
femmesh.addNode(3674.5511265257346, 338.9844927685506, 0.0, 197)
femmesh.addNode(3172.7789481491654, 832.1374169291745, 0.0, 198)
femmesh.addNode(784.7082624860842, 1653.7264044731728, 0.0, 199)
femmesh.addNode(2554.739497815064, 712.365487790745, 0.0, 200)
femmesh.addNode(1943.1567961046958, 330.9370445232647, 0.0, 201)
femmesh.addNode(981.3704439777061, 695.0014769598008, 0.0, 202)
femmesh.addNode(2453.2517051286445, 1672.8747213645538, 0.0, 203)
femmesh.addNode(907.6249789310948, 1264.8279056360939, 0.0, 204)
femmesh.addNode(1342.8357754800325, 1305.982903143703, 0.0, 205)
femmesh.addNode(301.68045436748366, 1278.0614848964112, 0.0, 206)
femmesh.addNode(321.4019295935336, 712.8743177284927, 0.0, 207)
femmesh.addNode(2426.6601885758864, 1313.9261459477857, 0.0, 208)
femmesh.addNode(2867.9421371538983, 263.92048664402716, 0.0, 209)
femmesh.addNode(2046.149001386518, 725.1823701492324, 0.0, 210)
femmesh.addNode(2854.1111443273658, 1741.083702840498, 0.0, 211)
femmesh.addNode(3695.4452236106526, 1279.177794976091, 0.0, 212)
femmesh.addNode(3685.7292713722686, 751.032843822758, 0.0, 213)
femmesh.addNode(624.8652096522102, 258.70949318509355, 0.0, 214)
femmesh.addNode(1466.6263015417012, 719.2247946131447, 0.0, 215)
femmesh.addNode(3719.1293832862143, 1719.129383286214, 0.0, 216)
femmesh.addNode(2945.872854070339, 1363.49471903432, 0.0, 217)
femmesh.addNode(3344.6356052877895, 1737.2354549848008, 0.0, 218)
femmesh.addNode(1942.1605314429821, 1315.7220947444227, 0.0, 219)
femmesh.addNode(2093.97656761822, 1743.5731735527538, 0.0, 220)
femmesh.addNode(2337.404696236552, 259.86552863747835, 0.0, 221)
femmesh.addNode(1136.4178616265353, 256.9280165667089, 0.0, 222)
femmesh.addNode(3436.9983781650485, 1261.4961473823978, 0.0, 223)
femmesh.addNode(3406.2893160086924, 251.07304786541548, 0.0, 224)
femmesh.addNode(2215.636485028124, 1177.419912858442, 0.0, 225)
femmesh.addNode(1126.6678439977832, 1759.6448940404007, 0.0, 226)
femmesh.addNode(2335.9075546219287, 765.8443516431239, 0.0, 227)
femmesh.addNode(623.3677071526471, 1794.4665663127212, 0.0, 228)
femmesh.addNode(1183.6674808624416, 743.4840698227251, 0.0, 229)
femmesh.addNode(676.4452885380255, 1103.2769600640684, 0.0, 230)
femmesh.addNode(1747.334612316145, 938.7081036305343, 0.0, 231)
femmesh.addNode(1403.1870558121493, 254.37888532175242, 0.0, 232)
femmesh.addNode(3314.847513203245, 987.8243030056708, 0.0, 233)
femmesh.addNode(1640.5623138995034, 1785.4652572759182, 0.0, 234)
femmesh.addNode(2694.302594697601, 1191.5326623293304, 0.0, 235)
femmesh.addNode(3249.075663998931, 1405.1235272166903, 0.0, 236)
femmesh.addNode(3194.7171333591295, 605.4257226147388, 0.0, 237)
femmesh.addNode(3787.3499723629325, 540.6602594003787, 0.0, 238)
femmesh.addNode(185.04426967129564, 1086.6700079903421, 0.0, 239)
femmesh.addNode(3046.934155784866, 1000.6169552050135, 0.0, 240)
femmesh.addNode(1969.4468096267958, 523.2169628726109, 0.0, 241)
femmesh.addNode(2129.5909553861516, 192.09252274746703, 0.0, 242)
femmesh.addNode(1136.6994499219468, 1211.2946783273387, 0.0, 243)
femmesh.addNode(2597.352264247392, 1808.9701221137507, 0.0, 244)
femmesh.addNode(3819.1744978371717, 1088.9949533284503, 0.0, 245)
femmesh.addNode(2584.2256241005716, 935.7579854262561, 0.0, 246)
femmesh.addNode(3125.0, 1815.2509614904245, 0.0, 247)
femmesh.addNode(3777.6248611905694, 1504.537741959381, 0.0, 248)
femmesh.addNode(588.8115450690148, 859.6463157782961, 0.0, 249)
femmesh.addNode(2143.2683627665697, 936.9869149561475, 0.0, 250)
femmesh.addNode(2754.7204686764762, 780.5893488071242, 0.0, 251)
femmesh.addNode(178.93890438705535, 903.482590983227, 0.0, 252)
femmesh.addNode(1747.982107629327, 1148.2128944222316, 0.0, 253)
femmesh.addNode(1666.5850159712468, 1563.4019198648652, 0.0, 254)
femmesh.addNode(851.0519754307722, 1495.0619168135465, 0.0, 255)
femmesh.addNode(3481.0728671542547, 837.1465204467868, 0.0, 256)
femmesh.addNode(2443.601401640797, 530.7979808791658, 0.0, 257)
femmesh.addNode(205.94642509722854, 499.4693567890037, 0.0, 258)
femmesh.addNode(206.07459453719548, 1499.857298977669, 0.0, 259)
femmesh.addNode(834.6180802078031, 567.491198370818, 0.0, 260)
femmesh.addNode(1766.7929131536002, 212.0366200123763, 0.0, 261)
femmesh.addNode(1372.868119293763, 1786.5908878427776, 0.0, 262)
femmesh.addNode(217.93466600459956, 214.41626585864105, 0.0, 263)
femmesh.addNode(211.16821383061668, 1789.0339304012352, 0.0, 264)
femmesh.addNode(1346.4369194052335, 1104.0442859810998, 0.0, 265)
femmesh.addNode(2625.0, 168.47319683138994, 0.0, 266)
femmesh.addNode(875.0000000000001, 168.27854442880616, 0.0, 267)
femmesh.addNode(3124.9999999999995, 166.32624951132104, 0.0, 268)
femmesh.addNode(2725.0455335697234, 531.4266954931735, 0.0, 269)
femmesh.addNode(1522.1536134860664, 504.98683031131714, 0.0, 270)
femmesh.addNode(3802.9015660741943, 197.09843392580635, 0.0, 271)
femmesh.addNode(1505.6503066232626, 1428.2716333977896, 0.0, 272)
femmesh.addNode(1051.6943688460692, 524.3943887245401, 0.0, 273)
femmesh.addNode(3359.4878194613807, 477.16337384383746, 0.0, 274)
femmesh.addNode(1528.4103478357204, 1224.233473320531, 0.0, 275)
femmesh.addNode(3535.438527288789, 1801.7330078935504, 0.0, 276)
femmesh.addNode(434.67162780804375, 542.3401347012916, 0.0, 277)
femmesh.addNode(451.9072533191967, 1434.6301952460099, 0.0, 278)
femmesh.addNode(2981.795085118534, 448.0055367511777, 0.0, 279)
femmesh.addNode(2978.1800808120247, 805.0128289707615, 0.0, 280)
femmesh.addNode(935.1388659912074, 1056.3754597693553, 0.0, 281)
femmesh.addNode(1754.4192556759315, 451.41573980441683, 0.0, 282)
femmesh.addNode(2361.398753416636, 1810.7310101916673, 0.0, 283)
femmesh.addNode(2422.5421676233855, 1516.583426074803, 0.0, 284)
femmesh.addNode(880.47047742822, 1807.7722912938007, 0.0, 285)
femmesh.addNode(630.2325312193318, 1563.618276186015, 0.0, 286)
femmesh.addNode(444.11067943331193, 184.20265254642115, 0.0, 287)
femmesh.addNode(399.0489951122346, 1843.0986785146765, 0.0, 288)
femmesh.addNode(1326.4741772662737, 1564.2672411316362, 0.0, 289)
femmesh.addNode(1873.5987279934327, 1848.0564063658549, 0.0, 290)
femmesh.addNode(2857.8120840696374, 1538.7765004950957, 0.0, 291)
femmesh.addNode(3615.420858496385, 153.92013164518346, 0.0, 292)
femmesh.addNode(1986.4322329809784, 1519.8742672628473, 0.0, 293)
femmesh.addNode(795.7903110696158, 761.2330506728068, 0.0, 294)
femmesh.addNode(3827.887082573013, 916.2471888959267, 0.0, 295)
femmesh.addNode(2931.8289188589665, 1149.0563535164429, 0.0, 296)
femmesh.addNode(3857.0724217345637, 373.58588179238143, 0.0, 297)
femmesh.addNode(1603.8358047155352, 159.6025438347719, 0.0, 298)
femmesh.addNode(2632.697761232849, 1624.134712542077, 0.0, 299)
femmesh.addNode(981.8039384323351, 874.5293213047062, 0.0, 300)
femmesh.addNode(680.2099424162964, 475.33856352911357, 0.0, 301)
femmesh.addNode(3559.7627922855345, 501.8751194940187, 0.0, 302)
femmesh.addNode(974.4233579675891, 1639.6167774462617, 0.0, 303)
femmesh.addNode(495.13513442403405, 1239.242243254375, 0.0, 304)
femmesh.addNode(1377.2739687571882, 899.7018057699061, 0.0, 305)
femmesh.addNode(3867.2823458215535, 1867.2823458215535, 0.0, 306)
femmesh.addNode(3343.8770189511474, 1557.6498333315424, 0.0, 307)
femmesh.addNode(1824.8290135155744, 1476.484523517676, 0.0, 308)
femmesh.addNode(1577.5083720983002, 852.8360153983897, 0.0, 309)
femmesh.addNode(1914.9321930628041, 875.9179316201704, 0.0, 310)
femmesh.addNode(2284.573533854269, 1640.5742507791397, 0.0, 311)
femmesh.addNode(2119.0518265489545, 365.3806225869961, 0.0, 312)
femmesh.addNode(2453.275415073692, 1122.2377705408946, 0.0, 313)
femmesh.addNode(3322.401080864958, 774.4507864829744, 0.0, 314)
femmesh.addNode(666.8714726873429, 1228.2996101820004, 0.0, 315)
femmesh.addNode(792.0351337345601, 1184.052432850081, 0.0, 316)
femmesh.addNode(782.4613178838775, 1309.0750829680132, 0.0, 317)
femmesh.addNode(108.96733300229978, 232.20813292932053, 0.0, 318)
femmesh.addNode(211.94054555091407, 356.9428113238224, 0.0, 319)
femmesh.addNode(102.97321254861427, 374.7346783945019, 0.0, 320)
femmesh.addNode(208.62140418390607, 1644.4456146894522, 0.0, 321)
femmesh.addNode(105.58410691530834, 1769.5169652006175, 0.0, 322)
femmesh.addNode(103.03729726859774, 1624.9286494888345, 0.0, 323)
femmesh.addNode(1854.974854629148, 271.4868322678205, 0.0, 324)
femmesh.addNode(1883.3964565768001, 106.01831000618814, 0.0, 325)
femmesh.addNode(1971.578398052348, 165.46852226163236, 0.0, 326)
femmesh.addNode(2802.055572163683, 1870.541851420249, 0.0, 327)
femmesh.addNode(2927.055572163683, 1870.541851420249, 0.0, 328)
femmesh.addNode(2684.367443781871, 1312.8736999108746, 0.0, 329)
femmesh.addNode(2820.08772438397, 1277.5136906818252, 0.0, 330)
femmesh.addNode(2810.15257346824, 1398.8547282633695, 0.0, 331)
femmesh.addNode(3888.8124305952847, 1377.2688709796905, 0.0, 332)
femmesh.addNode(3736.535042400611, 1391.8577684677362, 0.0, 333)
femmesh.addNode(3847.7226118053263, 1264.5888974880454, 0.0, 334)
femmesh.addNode(3086.4486070855774, 705.2192757927501, 0.0, 335)
femmesh.addNode(2929.8080146337934, 713.1113983777439, 0.0, 336)
femmesh.addNode(3038.0765409073456, 613.3178451997325, 0.0, 337)
femmesh.addNode(2989.555572163683, 1778.1673321654612, 0.0, 338)
femmesh.addNode(3062.5, 1907.6254807452124, 0.0, 339)
femmesh.addNode(2924.8686111362163, 355.96301169760244, 0.0, 340)
femmesh.addNode(2853.420309344129, 489.7161161221756, 0.0, 341)
femmesh.addNode(2796.493835361811, 397.67359106860033, 0.0, 342)
femmesh.addNode(2813.0657567782837, 1170.2945079228866, 0.0, 343)
femmesh.addNode(2938.850886464653, 1256.2755362753815, 0.0, 344)
femmesh.addNode(102.97321254861427, 499.7346783945019, 0.0, 345)
femmesh.addNode(103.03729726859774, 1499.9286494888345, 0.0, 346)
femmesh.addNode(2658.064864848726, 447.26237764421114, 0.0, 347)
femmesh.addNode(2729.5131666408133, 313.509273219638, 0.0, 348)
femmesh.addNode(529.768418730127, 400.5248139431926, 0.0, 349)
femmesh.addNode(412.7753030348681, 443.9842110359632, 0.0, 350)
femmesh.addNode(507.8720939569513, 302.1688902778642, 0.0, 351)
femmesh.addNode(652.5375760342533, 367.0240283571036, 0.0, 352)
femmesh.addNode(557.44078511217, 508.8393491152026, 0.0, 353)
femmesh.addNode(3888.8124305952847, 1502.2688709796905, 0.0, 354)
femmesh.addNode(2064.7954776930756, 96.04626137373351, 0.0, 355)
femmesh.addNode(2036.3738757454237, 261.51478363536586, 0.0, 356)
femmesh.addNode(983.3614398464106, 1490.4402955353758, 0.0, 357)
femmesh.addNode(879.3384771809335, 1379.9449112248203, 0.0, 358)
femmesh.addNode(1011.6479415965719, 1375.3232899466493, 0.0, 359)
femmesh.addNode(102.97321254861427, 624.7346783945019, 0.0, 360)
femmesh.addNode(263.67417734538105, 606.1718372587482, 0.0, 361)
femmesh.addNode(160.7009647967668, 731.4371588642464, 0.0, 362)
femmesh.addNode(253.87752445233957, 1388.95939193704, 0.0, 363)
femmesh.addNode(103.03729726859774, 1374.9286494888345, 0.0, 364)
femmesh.addNode(150.84022718374183, 1264.0307424482057, 0.0, 365)
femmesh.addNode(2569.482560957818, 824.0617366085005, 0.0, 366)
femmesh.addNode(2460.0665893612504, 850.8011685346901, 0.0, 367)
femmesh.addNode(2445.323526218496, 739.1049197169344, 0.0, 368)
femmesh.addNode(2464.888903533425, 964.155065784458, 0.0, 369)
femmesh.addNode(2340.7298687941034, 879.1982488928918, 0.0, 370)
femmesh.addNode(3748.377122238392, 1611.8335626227977, 0.0, 371)
femmesh.addNode(3888.8124305952847, 1627.2688709796905, 0.0, 372)
femmesh.addNode(3859.564691643107, 1734.564691643107, 0.0, 373)
femmesh.addNode(2200.5878397766674, 1325.2674788236232, 0.0, 374)
femmesh.addNode(2321.1483368020054, 1245.673029403114, 0.0, 375)
femmesh.addNode(2306.0996915505484, 1393.5205953682948, 0.0, 376)
femmesh.addNode(1436.4340596468815, 1893.2954439213888, 0.0, 377)
femmesh.addNode(1506.7152165966331, 1786.028072559348, 0.0, 378)
femmesh.addNode(1570.2811569497517, 1892.7326286379591, 0.0, 379)
femmesh.addNode(2933.971068576949, 131.96024332201358, 0.0, 380)
femmesh.addNode(2808.971068576949, 131.96024332201358, 0.0, 381)
femmesh.addNode(2931.6155167870484, 534.6077522679519, 0.0, 382)
femmesh.addNode(3088.256109238832, 526.7156296829583, 0.0, 383)
femmesh.addNode(2548.487230244763, 1475.3990817836107, 0.0, 384)
femmesh.addNode(2424.601178099636, 1415.2547860112943, 0.0, 385)
femmesh.addNode(2550.5462407210134, 1374.0704417201023, 0.0, 386)
femmesh.addNode(1226.5710106320284, 1661.9560675860184, 0.0, 387)
femmesh.addNode(1349.6711482800183, 1675.429064487207, 0.0, 388)
femmesh.addNode(1249.767981645773, 1773.1178909415892, 0.0, 389)
femmesh.addNode(1022.1622144265208, 1238.0612919817163, 0.0, 390)
femmesh.addNode(1126.185177091998, 1348.5566762922717, 0.0, 391)
femmesh.addNode(805.7920772646164, 1079.8262099167118, 0.0, 392)
femmesh.addNode(921.3819224611511, 1160.6016827027247, 0.0, 393)
femmesh.addNode(1239.7676127009895, 1258.6387907355208, 0.0, 394)
femmesh.addNode(1229.2533398710407, 1395.9007887004539, 0.0, 395)
femmesh.addNode(597.428252779855, 747.5272752996864, 0.0, 396)
femmesh.addNode(455.1067373312742, 786.2603167533944, 0.0, 397)
femmesh.addNode(463.72344504211446, 674.1412762747847, 0.0, 398)
femmesh.addNode(488.57828206390934, 943.2833157718742, 0.0, 399)
femmesh.addNode(354.8734743261688, 869.8973167469724, 0.0, 400)
femmesh.addNode(687.432604826105, 129.35474659254677, 0.0, 401)
femmesh.addNode(562.432604826105, 129.35474659254677, 0.0, 402)
femmesh.addNode(1334.6549763731532, 1435.1250721376696, 0.0, 403)
femmesh.addNode(1221.0725407641612, 1525.0429576944205, 0.0, 404)
femmesh.addNode(1494.389957513884, 612.1058124622309, 0.0, 405)
femmesh.addNode(1636.076806743033, 600.1496651556586, 0.0, 406)
femmesh.addNode(1608.3131507708506, 707.2686473065723, 0.0, 407)
femmesh.addNode(754.1748161337163, 1424.1920885567395, 0.0, 408)
femmesh.addNode(1462.6703346491079, 379.6828578165348, 0.0, 409)
femmesh.addNode(1397.698062982634, 507.8523221856086, 0.0, 410)
femmesh.addNode(1338.2147841456754, 382.5483496908263, 0.0, 411)
femmesh.addNode(378.03677870078866, 627.6072262148921, 0.0, 412)
femmesh.addNode(520.3582941493695, 588.8741847611841, 0.0, 413)
femmesh.addNode(1269.8024587193422, 255.65345094423066, 0.0, 414)
femmesh.addNode(1204.8301870528683, 383.8229153133045, 0.0, 415)
femmesh.addNode(3736.5396218676005, 645.8465516115683, 0.0, 416)
femmesh.addNode(3893.674986181466, 645.3301297001893, 0.0, 417)
femmesh.addNode(3842.8646356861345, 750.516421911379, 0.0, 418)
femmesh.addNode(1638.286434580999, 478.201285057867, 0.0, 419)
femmesh.addNode(1752.2096278379659, 573.3641199022084, 0.0, 420)
femmesh.addNode(3115.2241420823198, 1704.1486014366155, 0.0, 421)
femmesh.addNode(2979.7797142460026, 1667.0649721116524, 0.0, 422)
femmesh.addNode(2653.565027049495, 1529.174725017248, 0.0, 423)
femmesh.addNode(2527.6199644281173, 1570.35906930844, 0.0, 424)
femmesh.addNode(2007.797905506657, 624.1996665109216, 0.0, 425)
femmesh.addNode(2092.5533425086865, 522.0040085133851, 0.0, 426)
femmesh.addNode(2130.9044383885475, 622.9867121516959, 0.0, 427)
femmesh.addNode(1434.010116617258, 1702.9426709753595, 0.0, 428)
femmesh.addNode(1567.8572139201283, 1702.3798556919298, 0.0, 429)
femmesh.addNode(1005.7089308132677, 212.6032804977575, 0.0, 430)
femmesh.addNode(937.5, 84.13927221440308, 0.0, 431)
femmesh.addNode(1068.2089308132677, 128.46400828335445, 0.0, 432)
femmesh.addNode(1121.169374129916, 1622.7317841488027, 0.0, 433)
femmesh.addNode(3297.3178026438945, 1868.6177274924003, 0.0, 434)
femmesh.addNode(3422.3178026438945, 1868.6177274924003, 0.0, 435)
femmesh.addNode(2562.5, 84.23659841569497, 0.0, 436)
femmesh.addNode(2481.202348118276, 214.16936273443415, 0.0, 437)
femmesh.addNode(2418.702348118276, 129.93276431873917, 0.0, 438)
femmesh.addNode(283.64196172292964, 965.2014533743396, 0.0, 439)
femmesh.addNode(250.17041699029448, 808.1784543558599, 0.0, 440)
femmesh.addNode(3566.221800887851, 1270.3369711792443, 0.0, 441)
femmesh.addNode(3525.7177971137635, 1140.8605981733533, 0.0, 442)
femmesh.addNode(3654.9412198365653, 1149.7014219702, 0.0, 443)
femmesh.addNode(3478.1631185229135, 1390.4120031315883, 0.0, 444)
femmesh.addNode(3607.3865412457153, 1399.2528269284348, 0.0, 445)
femmesh.addNode(92.52213483564782, 1168.335003995171, 0.0, 446)
femmesh.addNode(243.36236201938965, 1182.3657464433768, 0.0, 447)
femmesh.addNode(1193.2089308132677, 128.46400828335445, 0.0, 448)
femmesh.addNode(2293.702348118276, 129.93276431873917, 0.0, 449)
femmesh.addNode(2078.898508235553, 1246.5710038014324, 0.0, 450)
femmesh.addNode(2063.8498629840965, 1394.4185697666135, 0.0, 451)
femmesh.addNode(2430.6993767083177, 1905.3655050958337, 0.0, 452)
femmesh.addNode(2479.375508832014, 1809.850566152709, 0.0, 453)
femmesh.addNode(2548.6761321236963, 1904.4850610568753, 0.0, 454)
femmesh.addNode(3617.1569594056346, 420.42980613128464, 0.0, 455)
femmesh.addNode(3483.0260541471134, 376.47408367971707, 0.0, 456)
femmesh.addNode(3540.4202212672135, 295.028770316983, 0.0, 457)
femmesh.addNode(3459.6253058734574, 489.5192466689281, 0.0, 458)
femmesh.addNode(3382.8885677350363, 364.11821085462645, 0.0, 459)
femmesh.addNode(2803.2407410126425, 576.3183316389498, 0.0, 460)
femmesh.addNode(2407.32522927264, 1741.8028657781106, 0.0, 461)
femmesh.addNode(2525.3019846880184, 1740.9224217391522, 0.0, 462)
femmesh.addNode(1117.6809248542554, 633.9392292736327, 0.0, 463)
femmesh.addNode(1162.4684406626352, 517.5561013922202, 0.0, 464)
femmesh.addNode(1228.4549966708214, 627.1009419413126, 0.0, 465)
femmesh.addNode(3075.479514480595, 818.575122949968, 0.0, 466)
femmesh.addNode(3183.7480407541475, 718.7815697719566, 0.0, 467)
femmesh.addNode(3100.2376575399726, 1100.0784118673864, 0.0, 468)
femmesh.addNode(3180.8908344940555, 994.2206291053421, 0.0, 469)
femmesh.addNode(3234.194336249162, 1093.6820857677149, 0.0, 470)
femmesh.addNode(3109.8565519670155, 916.377186067094, 0.0, 471)
femmesh.addNode(3243.8132306762054, 909.9808599674227, 0.0, 472)
femmesh.addNode(89.46945219352767, 826.7412954916135, 0.0, 473)
femmesh.addNode(286.6946443650497, 1056.795161877897, 0.0, 474)
femmesh.addNode(345.0127367131438, 1152.4909003309317, 0.0, 475)
femmesh.addNode(2275.7837150062533, 643.3177028986416, 0.0, 476)
femmesh.addNode(2191.0282780042235, 745.5133608961781, 0.0, 477)
femmesh.addNode(441.740076741419, 1133.0812795099137, 0.0, 478)
femmesh.addNode(398.40779439575886, 1258.651864075393, 0.0, 479)
femmesh.addNode(751.9190922904336, 1801.119428803261, 0.0, 480)
femmesh.addNode(815.23523871411, 1903.8861456469003, 0.0, 481)
femmesh.addNode(686.6838535763236, 1897.2332831563606, 0.0, 482)
femmesh.addNode(1758.3964565768001, 106.01831000618814, 0.0, 483)
femmesh.addNode(1016.5324064118877, 609.6979328421705, 0.0, 484)
femmesh.addNode(1082.5189624200739, 719.242773391263, 0.0, 485)
femmesh.addNode(2304.040681074298, 1494.8492354318034, 0.0, 486)
femmesh.addNode(3453.144658004346, 125.53652393270774, 0.0, 487)
femmesh.addNode(3328.144658004346, 125.53652393270774, 0.0, 488)
femmesh.addNode(3201.308411647005, 1302.3316978732246, 0.0, 489)
femmesh.addNode(3097.474259034635, 1384.3091231255053, 0.0, 490)
femmesh.addNode(3049.707006682709, 1281.5172937820396, 0.0, 491)
femmesh.addNode(3177.2619740817854, 1499.0848842997484, 0.0, 492)
femmesh.addNode(3025.660569117489, 1478.2704802085632, 0.0, 493)
femmesh.addNode(856.6984471721701, 993.836077955195, 0.0, 494)
femmesh.addNode(727.351658445579, 1017.2868281025516, 0.0, 495)
femmesh.addNode(3265.6446580043457, 208.69964868836826, 0.0, 496)
femmesh.addNode(3187.5, 83.16312475566052, 0.0, 497)
femmesh.addNode(534.487944542761, 221.45607286575733, 0.0, 498)
femmesh.addNode(472.05533971665596, 92.10132627321057, 0.0, 499)
femmesh.addNode(2746.471068576949, 216.19684173770855, 0.0, 500)
femmesh.addNode(2687.5, 84.23659841569497, 0.0, 501)
femmesh.addNode(3893.674986181466, 520.3301297001893, 0.0, 502)
femmesh.addNode(2608.042098063864, 265.7856283133194, 0.0, 503)
femmesh.addNode(3062.5, 83.16312475566052, 0.0, 504)
femmesh.addNode(2996.4710685769487, 215.12336807767412, 0.0, 505)
femmesh.addNode(3142.205709878356, 265.66202269522967, 0.0, 506)
femmesh.addNode(3013.6767784553053, 314.4591412615827, 0.0, 507)
femmesh.addNode(887.9249169613647, 270.5111504980472, 0.0, 508)
femmesh.addNode(1018.6338477746323, 314.83588656699857, 0.0, 509)
femmesh.addNode(3384.2017614398546, 1462.2256930487345, 0.0, 510)
femmesh.addNode(3343.0370210819897, 1333.3098372995441, 0.0, 511)
femmesh.addNode(2560.481391636744, 1252.729404138558, 0.0, 512)
femmesh.addNode(3431.602438915963, 1538.4888461061605, 0.0, 513)
femmesh.addNode(3296.476341475039, 1481.3866802741163, 0.0, 514)
femmesh.addNode(2464.24444618214, 311.4817942163636, 0.0, 515)
femmesh.addNode(2087.4570383546975, 1121.1232214364918, 0.0, 516)
femmesh.addNode(1950.7190615621266, 1190.2743123794821, 0.0, 517)
femmesh.addNode(2399.413799019985, 1057.394958341777, 0.0, 518)
femmesh.addNode(2334.4559500509076, 1149.8288416996684, 0.0, 519)
femmesh.addNode(2280.594333997201, 1084.986029500551, 0.0, 520)
femmesh.addNode(1325.1468912020714, 731.3544322179349, 0.0, 521)
femmesh.addNode(1369.9344070104512, 614.9713043365224, 0.0, 522)
femmesh.addNode(561.6838535763236, 1897.2332831563606, 0.0, 523)
femmesh.addNode(3756.8081769726405, 833.6400163593423, 0.0, 524)
femmesh.addNode(3721.1621493177454, 968.2361189301178, 0.0, 525)
femmesh.addNode(3650.0832437173735, 885.6289463935334, 0.0, 526)
femmesh.addNode(2439.967801824789, 1218.08195824434, 0.0, 527)
femmesh.addNode(3070.6032524376233, 406.501666315158, 0.0, 528)
femmesh.addNode(3440.037066288289, 1769.4842314391756, 0.0, 529)
femmesh.addNode(3517.7192636443942, 1900.866503946775, 0.0, 530)
femmesh.addNode(2046.98828380911, 1871.786586776377, 0.0, 531)
femmesh.addNode(2171.98828380911, 1871.786586776377, 0.0, 532)
femmesh.addNode(312.1152729606238, 1719.7681221859716, 0.0, 533)
femmesh.addNode(309.56846331391324, 1575.1798064741884, 0.0, 534)
femmesh.addNode(304.406822133146, 280.0222766146379, 0.0, 535)
femmesh.addNode(298.4127016794605, 422.5488220798193, 0.0, 536)
femmesh.addNode(3375.9229456841467, 1124.6602251940344, 0.0, 537)
femmesh.addNode(3464.6423646328617, 1004.0246759849899, 0.0, 538)
femmesh.addNode(3295.269768730064, 1230.5180079560785, 0.0, 539)
femmesh.addNode(1747.658359972736, 1043.4604990263829, 0.0, 540)
femmesh.addNode(1647.144542752897, 1079.7228837002972, 0.0, 541)
femmesh.addNode(1646.820795096306, 974.9704883044487, 0.0, 542)
femmesh.addNode(832.589369957152, 1730.7493478834867, 0.0, 543)
femmesh.addNode(704.0379848193656, 1724.096485392947, 0.0, 544)
femmesh.addNode(3642.7192636443942, 1900.866503946775, 0.0, 545)
femmesh.addNode(3627.2839552875016, 1760.4311955898822, 0.0, 546)
femmesh.addNode(3734.564691643107, 1859.564691643107, 0.0, 547)
femmesh.addNode(3757.309860723912, 1184.0863741522708, 0.0, 548)
femmesh.addNode(3909.587248918586, 1169.4974766642251, 0.0, 549)
femmesh.addNode(1045.047131114819, 1562.7177258517331, 0.0, 550)
femmesh.addNode(912.7376666991806, 1567.3393471299041, 0.0, 551)
femmesh.addNode(632.6284168035202, 981.4616379211823, 0.0, 552)
femmesh.addNode(532.3951537984146, 1065.0986379147603, 0.0, 553)
femmesh.addNode(683.5347867110737, 895.4715059596654, 0.0, 554)
femmesh.addNode(2517.3427988842627, 446.94802033720737, 0.0, 555)
femmesh.addNode(2390.5030489386745, 395.33175475832206, 0.0, 556)
femmesh.addNode(2329.630638515687, 525.7945175166626, 0.0, 557)
femmesh.addNode(2276.5322858135646, 390.32829139581884, 0.0, 558)
femmesh.addNode(1638.1962277325238, 1186.2231838713813, 0.0, 559)
femmesh.addNode(1537.3586628560938, 1117.7331731494469, 0.0, 560)
femmesh.addNode(1859.7234048133978, 609.2647314363055, 0.0, 561)
femmesh.addNode(1898.0745006932589, 710.2474350746162, 0.0, 562)
femmesh.addNode(3648.4763600356737, 1511.9328004200797, 0.0, 563)
femmesh.addNode(1326.5935279060745, 127.18944266087621, 0.0, 564)
femmesh.addNode(3282.8503678827024, 308.03542187227686, 0.0, 565)
femmesh.addNode(1980.540597224661, 800.5501508847015, 0.0, 566)
femmesh.addNode(1832.466096531402, 785.6152158100851, 0.0, 567)
femmesh.addNode(3502.4660201338666, 577.1059093785952, 0.0, 568)
femmesh.addNode(3622.7460318289013, 626.4539816583883, 0.0, 569)
femmesh.addNode(3565.4492596772334, 701.6847715429649, 0.0, 570)
femmesh.addNode(3527.3831930847837, 1660.5304333871645, 0.0, 571)
femmesh.addNode(3619.2286210834964, 1619.2286210834964, 0.0, 572)
femmesh.addNode(1560.676334048996, 418.29202207552396, 0.0, 573)
femmesh.addNode(1501.1930552120375, 292.9880495807416, 0.0, 574)
femmesh.addNode(749.9326048261051, 213.49401880694984, 0.0, 575)
femmesh.addNode(762.8575217874698, 315.7266248761909, 0.0, 576)
femmesh.addNode(812.5, 84.13927221440308, 0.0, 577)
femmesh.addNode(3431.981732084284, 1628.2816569327897, 0.0, 578)
femmesh.addNode(2044.249318087875, 444.2987927298035, 0.0, 579)
femmesh.addNode(2167.355850969766, 443.08583837057773, 0.0, 580)
femmesh.addNode(2673.6761321236963, 1904.4850610568753, 0.0, 581)
femmesh.addNode(2725.731704287379, 1775.0269124771244, 0.0, 582)
femmesh.addNode(108.96733300229978, 107.20813292932053, 0.0, 583)
femmesh.addNode(233.96733300229977, 107.20813292932053, 0.0, 584)
femmesh.addNode(105.58410691530834, 1894.5169652006175, 0.0, 585)
femmesh.addNode(230.58410691530833, 1894.5169652006175, 0.0, 586)
femmesh.addNode(2855.9616141985016, 1639.9301016677969, 0.0, 587)
femmesh.addNode(2981.6301841171385, 1565.9113709389512, 0.0, 588)
femmesh.addNode(521.6474316549813, 1607.0602950783614, 0.0, 589)
femmesh.addNode(626.8001191859894, 1679.042421249368, 0.0, 590)
femmesh.addNode(518.215019621639, 1722.4844401417145, 0.0, 591)
femmesh.addNode(1063.3339219988916, 1879.8224470202003, 0.0, 592)
femmesh.addNode(1188.3339219988916, 1879.8224470202003, 0.0, 593)
femmesh.addNode(1848.7880258903137, 391.1763921638408, 0.0, 594)
femmesh.addNode(1760.606084414766, 331.72617990839655, 0.0, 595)
femmesh.addNode(3547.755041608367, 928.6857847055478, 0.0, 596)
femmesh.addNode(3583.401069263262, 794.0896821347724, 0.0, 597)
femmesh.addNode(3463.1210575682267, 744.7416098549793, 0.0, 598)
femmesh.addNode(3716.8058569498253, 1054.6100011463795, 0.0, 599)
femmesh.addNode(417.49482884750216, 264.915469958528, 0.0, 600)
femmesh.addNode(1410.8131456035135, 1591.7808476197888, 0.0, 601)
femmesh.addNode(2639.264109399086, 1063.6453238777933, 0.0, 602)
femmesh.addNode(2701.35972727376, 962.9277739550239, 0.0, 603)
femmesh.addNode(2756.398212572275, 1090.815112406561, 0.0, 604)
femmesh.addNode(1924.9888593805554, 1589.0565837070812, 0.0, 605)
femmesh.addNode(2040.2044002995992, 1631.7237204078006, 0.0, 606)
femmesh.addNode(1978.761026699176, 1700.9060368520345, 0.0, 607)
femmesh.addNode(541.0698922692643, 1499.1242357160124, 0.0, 608)
femmesh.addNode(432.48479270491384, 1542.5662546083588, 0.0, 609)
femmesh.addNode(347.05533971665596, 92.10132627321057, 0.0, 610)
femmesh.addNode(331.02267271895573, 199.30945920253112, 0.0, 611)
femmesh.addNode(1451.5935279060745, 127.18944266087621, 0.0, 612)
femmesh.addNode(1663.7541860491501, 774.0742576991948, 0.0, 613)
femmesh.addNode(1522.0673368200007, 786.0304050057672, 0.0, 614)
femmesh.addNode(790.5298881695128, 424.04116004820094, 0.0, 615)
femmesh.addNode(2085.9857137530944, 1496.4946560258256, 0.0, 616)
femmesh.addNode(2139.7578810717155, 1608.344109170779, 0.0, 617)
femmesh.addNode(3913.9435412865064, 833.1235944479633, 0.0, 618)
femmesh.addNode(1695.2811569497517, 1892.7326286379591, 0.0, 619)
femmesh.addNode(2898.3369556294865, 897.5551957272764, 0.0, 620)
femmesh.addNode(3012.5571182984454, 902.8148920878875, 0.0, 621)
femmesh.addNode(2932.713993115907, 995.3572588444025, 0.0, 622)
femmesh.addNode(1983.7876478058265, 1795.8147899593043, 0.0, 623)
femmesh.addNode(1868.5721068867824, 1753.1476532585853, 0.0, 624)
femmesh.addNode(3823.530790205092, 1002.6210711121885, 0.0, 625)
femmesh.addNode(2542.9747331807466, 1648.5047169533154, 0.0, 626)
femmesh.addNode(2437.896936376015, 1594.7290737196784, 0.0, 627)
femmesh.addNode(1936.7993639967162, 1924.0282031829274, 0.0, 628)
femmesh.addNode(3042.685039077023, 1174.298111023101, 0.0, 629)
femmesh.addNode(2573.789004885646, 1156.8852164351124, 0.0, 630)
femmesh.addNode(2518.7505195871317, 1028.9978779835753, 0.0, 631)
femmesh.addNode(1845.0713195361545, 1231.9674945833272, 0.0, 632)
femmesh.addNode(1853.629849655299, 1106.5197122183863, 0.0, 633)
femmesh.addNode(1714.6160538146635, 1247.9345722111157, 0.0, 634)
femmesh.addNode(1811.705265721491, 1331.6891723722115, 0.0, 635)
femmesh.addNode(1094.0561152363023, 390.6612026456245, 0.0, 636)
femmesh.addNode(976.2721013843992, 448.56907264591416, 0.0, 637)
femmesh.addNode(92.52213483564782, 1043.335003995171, 0.0, 638)
femmesh.addNode(3933.6411729107767, 1808.6411729107767, 0.0, 639)
femmesh.addNode(3793.2058645538837, 1793.2058645538837, 0.0, 640)
femmesh.addNode(3808.6411729107767, 1933.6411729107767, 0.0, 641)
femmesh.addNode(2866.4502747442502, 792.8010888889428, 0.0, 642)
femmesh.addNode(2786.6071495617125, 885.3434556454579, 0.0, 643)
femmesh.addNode(3901.450783037097, 98.54921696290317, 0.0, 644)
femmesh.addNode(3776.450783037097, 98.54921696290317, 0.0, 645)
femmesh.addNode(3901.450783037097, 223.5492169629032, 0.0, 646)
femmesh.addNode(3187.5, 1907.6254807452124, 0.0, 647)
femmesh.addNode(3234.8178026438945, 1776.2432082376126, 0.0, 648)
femmesh.addNode(3225.0419447262148, 1665.1408481838037, 0.0, 649)
femmesh.addNode(2179.4524238973468, 1057.2034139072948, 0.0, 650)
femmesh.addNode(2244.410272866424, 964.7695305494036, 0.0, 651)
femmesh.addNode(1676.8091551439286, 391.50647682207375, 0.0, 652)
femmesh.addNode(1682.995983882763, 271.8169169260535, 0.0, 653)
femmesh.addNode(3909.587248918586, 1044.4974766642251, 0.0, 654)
femmesh.addNode(181.9915870291755, 995.0762994867846, 0.0, 655)
femmesh.addNode(3344.2563121194685, 1647.4426441581716, 0.0, 656)
femmesh.addNode(2233.4978258113515, 225.9790256924727, 0.0, 657)
femmesh.addNode(2189.7954776930756, 96.04626137373351, 0.0, 658)
femmesh.addNode(2901.8424690699885, 1451.1356097647079, 0.0, 659)
femmesh.addNode(2051.2729772239204, 1000.9067224853444, 0.0, 660)
femmesh.addNode(2389.7544781313627, 648.3211662611449, 0.0, 661)
femmesh.addNode(2499.1704497279306, 621.5817343349554, 0.0, 662)
femmesh.addNode(3177.064276557921, 485.2117592469385, 0.0, 663)
femmesh.addNode(940.23523871411, 1903.8861456469003, 0.0, 664)
femmesh.addNode(1003.5691607130016, 1783.7085926671007, 0.0, 665)
femmesh.addNode(3224.6626515578937, 1575.3480373571745, 0.0, 666)
femmesh.addNode(2766.122188467889, 1486.495618993757, 0.0, 667)
femmesh.addNode(3673.5563823242337, 521.2676894471987, 0.0, 668)
femmesh.addNode(2228.228261392753, 312.62307561223724, 0.0, 669)
femmesh.addNode(2305.6993767083177, 1905.3655050958337, 0.0, 670)
femmesh.addNode(1311.4340596468815, 1893.2954439213888, 0.0, 671)
femmesh.addNode(2875.1613746529574, 1069.5769580001172, 0.0, 672)
femmesh.addNode(2639.8925156923938, 621.8960916419592, 0.0, 673)
femmesh.addNode(2739.8830011231, 656.0080221501489, 0.0, 674)
femmesh.addNode(2654.72998324577, 746.4774182989346, 0.0, 675)
femmesh.addNode(1883.4947724792783, 1396.1033091310494, 0.0, 676)
femmesh.addNode(1753.0395067577872, 1412.0703867588381, 0.0, 677)
femmesh.addNode(1964.2963822119802, 1417.798181003635, 0.0, 678)
femmesh.addNode(2239.587958694249, 851.4156332996357, 0.0, 679)
femmesh.addNode(2094.708682076544, 831.08464255269, 0.0, 680)
femmesh.addNode(2235.0563641897397, 1556.8446477839718, 0.0, 681)
femmesh.addNode(2189.2750507362443, 1692.0737121659467, 0.0, 682)
femmesh.addNode(1477.3911704277443, 876.2689105841479, 0.0, 683)
femmesh.addNode(1421.9501351494446, 809.4633001915254, 0.0, 684)
femmesh.addNode(1561.9076749873839, 932.0344441883764, 0.0, 685)
femmesh.addNode(1461.7904733168277, 955.4673393741346, 0.0, 686)
femmesh.addNode(1035.919157956577, 1133.8350690483471, 0.0, 687)
femmesh.addNode(1043.17718435279, 1023.7674799921143, 0.0, 688)
femmesh.addNode(1143.9574763181595, 1101.227089271106, 0.0, 689)
femmesh.addNode(2227.687660517428, 1777.1520918722106, 0.0, 690)
femmesh.addNode(2818.0782085660194, 700.8996582959252, 0.0, 691)
femmesh.addNode(3510.8550872525384, 202.49658975529945, 0.0, 692)
femmesh.addNode(3644.9859925110595, 246.45231220686702, 0.0, 693)
femmesh.addNode(3557.7104292481927, 76.96006582259173, 0.0, 694)
femmesh.addNode(1066.5097205733537, 932.8444107597898, 0.0, 695)
femmesh.addNode(1082.7357096473884, 809.0066955637157, 0.0, 696)
femmesh.addNode(1167.441491788407, 867.3217850187992, 0.0, 697)
femmesh.addNode(981.5871912050206, 784.7653991322535, 0.0, 698)
femmesh.addNode(927.4469176979046, 1723.6945343700313, 0.0, 699)
femmesh.addNode(1050.5456009826862, 1699.6308357433313, 0.0, 700)
femmesh.addNode(585.7902114810297, 1171.2596016592215, 0.0, 701)
femmesh.addNode(888.7971247509754, 817.8811859887564, 0.0, 702)
femmesh.addNode(888.5803775236609, 728.1172638163039, 0.0, 703)
femmesh.addNode(3259.4496196090467, 421.08058486148786, 0.0, 704)
femmesh.addNode(2989.3815373219163, 1074.836654360728, 0.0, 705)
femmesh.addNode(1861.9330326513636, 487.31635133851387, 0.0, 706)
femmesh.addNode(1956.3018028657457, 427.0770036979378, 0.0, 707)
femmesh.addNode(643.1274514534958, 555.3733991750951, 0.0, 708)
femmesh.addNode(2031.1043113268252, 348.1588335551304, 0.0, 709)
femmesh.addNode(1280.4707248098148, 821.5929377963156, 0.0, 710)
femmesh.addNode(1264.2447357357803, 945.4306529923897, 0.0, 711)
femmesh.addNode(89.46945219352767, 951.7412954916135, 0.0, 712)
femmesh.addNode(2743.4044527801075, 1682.6092076912876, 0.0, 713)
femmesh.addNode(2745.2549226512433, 1581.4556065185864, 0.0, 714)
femmesh.addNode(880.0309833927339, 902.9130087228705, 0.0, 715)
femmesh.addNode(787.0241697113743, 846.2648734069207, 0.0, 716)
femmesh.addNode(1676.9179023577676, 79.80127191738595, 0.0, 717)
femmesh.addNode(1551.9179023577676, 79.80127191738595, 0.0, 718)
femmesh.addNode(1831.1334026894747, 907.3130176253524, 0.0, 719)
femmesh.addNode(1748.6673061580725, 817.0103018152672, 0.0, 720)
femmesh.addNode(879.5658102268367, 1646.6715909597174, 0.0, 721)
femmesh.addNode(817.8801189584283, 1574.3941606433596, 0.0, 722)
femmesh.addNode(1437.423633620477, 1164.1388796508154, 0.0, 723)
femmesh.addNode(1435.6230616578764, 1265.1081882321168, 0.0, 724)
femmesh.addNode(1344.636347442633, 1205.0135945624015, 0.0, 725)
femmesh.addNode(1446.3719486408504, 1057.6385794797316, 0.0, 726)
femmesh.addNode(1853.306101998708, 1001.7673168225379, 0.0, 727)
femmesh.addNode(2669.473046388524, 858.1736671166902, 0.0, 728)
femmesh.addNode(943.1562245269362, 545.942793547679, 0.0, 729)
femmesh.addNode(867.7339570652662, 470.11747746905314, 0.0, 730)
femmesh.addNode(907.9942620927546, 631.2463376653094, 0.0, 731)
femmesh.addNode(324.5244975561173, 1921.5493392573383, 0.0, 732)
femmesh.addNode(449.5244975561173, 1921.5493392573383, 0.0, 733)
femmesh.addNode(3247.590014507062, 803.2941017060745, 0.0, 734)
femmesh.addNode(3318.624297034102, 881.1375447443227, 0.0, 735)
femmesh.addNode(1937.1048923720377, 970.3722308173558, 0.0, 736)
femmesh.addNode(2029.100277914687, 906.452423288159, 0.0, 737)
femmesh.addNode(1653.573664935375, 1674.4335885703917, 0.0, 738)
femmesh.addNode(1765.0652508756893, 1610.8204100080902, 0.0, 739)
femmesh.addNode(1752.0538998398179, 1721.8520787136167, 0.0, 740)
femmesh.addNode(2584.3234676052602, 531.1123381861696, 0.0, 741)
femmesh.addNode(3397.96019017875, 912.4854117262288, 0.0, 742)
femmesh.addNode(3913.9435412865064, 958.1235944479633, 0.0, 743)
femmesh.addNode(1580.8685649559998, 1591.3481869864033, 0.0, 744)
femmesh.addNode(511.20835113244084, 1818.7826224136988, 0.0, 745)
femmesh.addNode(406.0556636014328, 1746.8004962426921, 0.0, 746)
femmesh.addNode(1757.080520946468, 1816.7608318208866, 0.0, 747)
femmesh.addNode(1811.7993639967162, 1924.0282031829274, 0.0, 748)
femmesh.addNode(707.4703968527081, 1608.672340329594, 0.0, 749)
femmesh.addNode(2124.3213909675533, 278.73657266723154, 0.0, 750)
femmesh.addNode(3933.6411729107767, 1933.6411729107767, 0.0, 751)
femmesh.addNode(3682.7104292481927, 76.96006582259173, 0.0, 752)
femmesh.addNode(1361.8554440812109, 1001.873045875503, 0.0, 753)
femmesh.addNode(1248.826211059803, 1047.6018930979867, 0.0, 754)
femmesh.addNode(1241.5681846635903, 1157.6694821542192, 0.0, 755)
femmesh.addNode(1604.83017391786, 1285.9448616602654, 0.0, 756)
femmesh.addNode(1517.0303272294914, 1326.2525533591602, 0.0, 757)
femmesh.addNode(1593.4501533116313, 1387.9639416988948, 0.0, 758)
femmesh.addNode(1424.2430410516477, 1367.1272682707463, 0.0, 759)
femmesh.addNode(815.2041956387095, 664.3621245218123, 0.0, 760)
femmesh.addNode(3928.5362108672816, 436.7929408961907, 0.0, 761)
femmesh.addNode(3928.5362108672816, 311.7929408961907, 0.0, 762)
femmesh.addNode(3402.3285337217894, 564.7500365535047, 0.0, 763)
femmesh.addNode(3401.7369740096065, 805.7986534648805, 0.0, 764)
femmesh.addNode(1662.4214922072226, 895.772059514462, 0.0, 765)
femmesh.addNode(3277.102476410255, 541.2945482292881, 0.0, 766)
femmesh.addNode(3319.943190670664, 628.8812109389553, 0.0, 767)
femmesh.addNode(757.4140113120498, 521.4148809499658, 0.0, 768)
femmesh.addNode(643.765094027996, 1458.4702682429738, 0.0, 769)
femmesh.addNode(554.6024550779284, 1393.976227772971, 0.0, 770)
femmesh.addNode(1685.3143589345677, 185.8195819235741, 0.0, 771)
femmesh.addNode(1503.5114302638422, 206.99071457826216, 0.0, 772)
femmesh.addNode(1601.5174296637306, 245.5998788372513, 0.0, 773)
femmesh.addNode(376.7938538433402, 1356.3458400712107, 0.0, 774)
femmesh.addNode(328.9909239281961, 1467.2437471118394, 0.0, 775)
femmesh.addNode(320.30902645263615, 520.9047457451477, 0.0, 776)
femmesh.addNode(576.2163956303472, 1296.2822517771538, 0.0, 777)
femmesh.addNode(2615.0250127401205, 1716.5524173279139, 0.0, 778)
femmesh.addNode(740.642253325052, 1529.3400964997809, 0.0, 779)
femmesh.addNode(305.1086044714256, 1816.066304457956, 0.0, 780)
femmesh.addNode(3765.8117741301494, 356.285187280466, 0.0, 781)
femmesh.addNode(3822.211197048748, 457.12307059638005, 0.0, 782)
femmesh.addNode(3730.950549444334, 439.8223760844646, 0.0, 783)
femmesh.addNode(1586.1176612972547, 1495.8367766313274, 0.0, 784)
femmesh.addNode(1673.9175079856234, 1455.5290849324326, 0.0, 785)
femmesh.addNode(1500.401210282008, 1523.7830437528655, 0.0, 786)
femmesh.addNode(1416.0622419447682, 1496.269437264713, 0.0, 787)
femmesh.addNode(692.3009280693152, 810.4396832255514, 0.0, 788)
femmesh.addNode(700.9176357801555, 698.3206427469418, 0.0, 789)
femmesh.addNode(3383.7851644235784, 713.3937428730732, 0.0, 790)
femmesh.addNode(3258.559107112044, 689.9382545488565, 0.0, 791)
femmesh.addNode(958.4714022117712, 965.4523905370307, 0.0, 792)
femmesh.addNode(1745.7070147434106, 1519.9432216912705, 0.0, 793)
femmesh.addNode(1844.1872496478532, 1567.3617118344957, 0.0, 794)
femmesh.addNode(3709.1612122852894, 175.50928278549492, 0.0, 795)
femmesh.addNode(473.52119387161537, 1336.9362192501924, 0.0, 796)
femmesh.addNode(720.3315203492492, 601.4497165959474, 0.0, 797)
femmesh.addNode(3829.9869939043792, 285.3421578590939, 0.0, 798)
femmesh.addNode(3738.7263462999645, 268.0414633471785, 0.0, 799)
femmesh.addNode(1905.6306232482764, 1498.1793953902616, 0.0, 800)
femmesh.addNode(2322.986143635452, 1725.6526304854035, 0.0, 801)
femmesh.addNode(2368.912619491457, 1656.7244860718467, 0.0, 802)
femmesh.addNode(2353.5578507388273, 1578.5788384269713, 0.0, 803)
femmesh.addNode(251.59438775510205, -1642.5382653061224, 0.0, 804)
femmesh.addNode(250.0, -1125.0, 0.0, 805)
femmesh.addNode(244.79166666666666, -621.527777777778, 0.0, 806)
femmesh.addNode(253.65823412698398, -349.26835317460336, 0.0, 807)
femmesh.addNode(250.26573129251702, -1377.9230442176872, 0.0, 808)
femmesh.addNode(249.1319444444444, -874.4212962962965, 0.0, 809)
femmesh.addNode(183.5156907411028, -1822.3611654157708, 0.0, 810)
femmesh.addNode(175.62426452602392, -169.8009014101267, 0.0, 811)
femmesh.addNode(340.96396806154064, -151.6577109724219, 0.0, 812)
femmesh.addNode(337.022015699241, -1842.9798861443787, 0.0, 813)
femmesh.addNode(375.1328656462585, -1313.9615221088436, 0.0, 814)
femmesh.addNode(375.1328656462585, -1438.9615221088436, 0.0, 815)
femmesh.addNode(374.5659722222222, -812.2106481481483, 0.0, 816)
femmesh.addNode(374.5659722222222, -937.2106481481483, 0.0, 817)
femmesh.addNode(250.1328656462585, -1251.4615221088436, 0.0, 818)
femmesh.addNode(375.0, -1187.5, 0.0, 819)
femmesh.addNode(250.93005952380952, -1510.2306547619048, 0.0, 820)
femmesh.addNode(375.797193877551, -1571.2691326530612, 0.0, 821)
femmesh.addNode(249.5659722222222, -999.7106481481483, 0.0, 822)
femmesh.addNode(375.0, -1062.5, 0.0, 823)
femmesh.addNode(246.96180555555554, -747.9745370370374, 0.0, 824)
femmesh.addNode(372.3958333333333, -685.7638888888891, 0.0, 825)
femmesh.addNode(249.2249503968253, -485.3980654761908, 0.0, 826)
femmesh.addNode(126.82911706349199, -424.6341765873017, 0.0, 827)
femmesh.addNode(122.39583333333333, -560.7638888888891, 0.0, 828)
femmesh.addNode(376.829117063492, -424.6341765873017, 0.0, 829)
femmesh.addNode(372.3958333333333, -560.7638888888891, 0.0, 830)
femmesh.addNode(126.82911706349199, -299.6341765873017, 0.0, 831)
femmesh.addNode(376.829117063492, -299.6341765873017, 0.0, 832)
femmesh.addNode(125.79719387755102, -1696.2691326530612, 0.0, 833)
femmesh.addNode(125.79719387755102, -1571.2691326530612, 0.0, 834)
femmesh.addNode(125.0, -1187.5, 0.0, 835)
femmesh.addNode(125.0, -1062.5, 0.0, 836)
femmesh.addNode(122.39583333333333, -685.7638888888891, 0.0, 837)
femmesh.addNode(375.797193877551, -1696.2691326530612, 0.0, 838)
femmesh.addNode(124.5659722222222, -937.2106481481483, 0.0, 839)
femmesh.addNode(125.13286564625851, -1313.9615221088436, 0.0, 840)
femmesh.addNode(124.5659722222222, -812.2106481481483, 0.0, 841)
femmesh.addNode(125.13286564625851, -1438.9615221088436, 0.0, 842)
femmesh.addNode(294.30820172717154, -1742.7590757252506, 0.0, 843)
femmesh.addNode(418.5110078496205, -1796.4899430721894, 0.0, 844)
femmesh.addNode(91.7578453705514, -1911.1805827078854, 0.0, 845)
femmesh.addNode(91.7578453705514, -1786.1805827078854, 0.0, 846)
femmesh.addNode(216.7578453705514, -1911.1805827078854, 0.0, 847)
femmesh.addNode(217.55503924810242, -1732.4497153609466, 0.0, 848)
femmesh.addNode(420.4819840307703, -200.82885548621107, 0.0, 849)
femmesh.addNode(297.3111010942623, -250.46303207351275, 0.0, 850)
femmesh.addNode(214.64124932650395, -259.53462729236503, 0.0, 851)
femmesh.addNode(87.81213226301196, -209.90045070506335, 0.0, 852)
femmesh.addNode(87.81213226301196, -84.90045070506335, 0.0, 853)
femmesh.addNode(212.81213226301196, -84.90045070506335, 0.0, 854)
femmesh.addNode(260.2688532201719, -1832.6705257800747, 0.0, 855)
femmesh.addNode(295.4819840307703, -75.82885548621107, 0.0, 856)
femmesh.addNode(420.4819840307703, -75.82885548621107, 0.0, 857)
femmesh.addNode(418.5110078496205, -1921.4899430721894, 0.0, 858)
femmesh.addNode(293.5110078496205, -1921.4899430721894, 0.0, 859)
femmesh.addNode(258.2941162937823, -160.7293061912742, 0.0, 860)
femmesh.addNode(3751.594387755102, -1642.5382653061224, 0.0, 861)
femmesh.addNode(3750.0, -1125.0, 0.0, 862)
femmesh.addNode(3744.7916666666665, -621.527777777778, 0.0, 863)
femmesh.addNode(3753.658234126984, -349.26835317460336, 0.0, 864)
femmesh.addNode(3750.265731292517, -1377.9230442176872, 0.0, 865)
femmesh.addNode(3749.1319444444443, -874.4212962962965, 0.0, 866)
femmesh.addNode(3683.515690741103, -1822.3611654157708, 0.0, 867)
femmesh.addNode(3675.624264526024, -169.8009014101267, 0.0, 868)
femmesh.addNode(3840.9639680615405, -151.6577109724219, 0.0, 869)
femmesh.addNode(3837.022015699241, -1842.9798861443787, 0.0, 870)
femmesh.addNode(3875.1328656462583, -1313.9615221088436, 0.0, 871)
femmesh.addNode(3875.1328656462583, -1438.9615221088436, 0.0, 872)
femmesh.addNode(3874.565972222222, -812.2106481481483, 0.0, 873)
femmesh.addNode(3874.565972222222, -937.2106481481483, 0.0, 874)
femmesh.addNode(3750.1328656462583, -1251.4615221088436, 0.0, 875)
femmesh.addNode(3875.0, -1187.5, 0.0, 876)
femmesh.addNode(3746.9618055555557, -747.9745370370374, 0.0, 877)
femmesh.addNode(3872.395833333333, -685.7638888888891, 0.0, 878)
femmesh.addNode(3750.9300595238096, -1510.2306547619048, 0.0, 879)
femmesh.addNode(3875.797193877551, -1571.2691326530612, 0.0, 880)
femmesh.addNode(3749.565972222222, -999.7106481481483, 0.0, 881)
femmesh.addNode(3875.0, -1062.5, 0.0, 882)
femmesh.addNode(3876.829117063492, -424.6341765873017, 0.0, 883)
femmesh.addNode(3749.224950396825, -485.3980654761908, 0.0, 884)
femmesh.addNode(3872.395833333333, -560.7638888888891, 0.0, 885)
femmesh.addNode(3626.829117063492, -424.6341765873017, 0.0, 886)
femmesh.addNode(3622.395833333333, -560.7638888888891, 0.0, 887)
femmesh.addNode(3626.829117063492, -299.6341765873017, 0.0, 888)
femmesh.addNode(3876.829117063492, -299.6341765873017, 0.0, 889)
femmesh.addNode(3625.797193877551, -1696.2691326530612, 0.0, 890)
femmesh.addNode(3625.797193877551, -1571.2691326530612, 0.0, 891)
femmesh.addNode(3625.0, -1187.5, 0.0, 892)
femmesh.addNode(3625.0, -1062.5, 0.0, 893)
femmesh.addNode(3622.395833333333, -685.7638888888891, 0.0, 894)
femmesh.addNode(3875.797193877551, -1696.2691326530612, 0.0, 895)
femmesh.addNode(3624.565972222222, -937.2106481481483, 0.0, 896)
femmesh.addNode(3625.1328656462583, -1438.9615221088436, 0.0, 897)
femmesh.addNode(3625.1328656462583, -1313.9615221088436, 0.0, 898)
femmesh.addNode(3624.565972222222, -812.2106481481483, 0.0, 899)
femmesh.addNode(3794.3082017271718, -1742.7590757252506, 0.0, 900)
femmesh.addNode(3918.5110078496205, -1796.4899430721894, 0.0, 901)
femmesh.addNode(3591.7578453705514, -1911.1805827078854, 0.0, 902)
femmesh.addNode(3591.7578453705514, -1786.1805827078854, 0.0, 903)
femmesh.addNode(3716.7578453705514, -1911.1805827078854, 0.0, 904)
femmesh.addNode(3717.5550392481027, -1732.4497153609466, 0.0, 905)
femmesh.addNode(3920.48198403077, -200.82885548621107, 0.0, 906)
femmesh.addNode(3797.311101094262, -250.46303207351275, 0.0, 907)
femmesh.addNode(3714.6412493265043, -259.53462729236503, 0.0, 908)
femmesh.addNode(3587.8121322630122, -209.90045070506335, 0.0, 909)
femmesh.addNode(3587.8121322630122, -84.90045070506335, 0.0, 910)
femmesh.addNode(3712.8121322630122, -84.90045070506335, 0.0, 911)
femmesh.addNode(3760.268853220172, -1832.6705257800747, 0.0, 912)
femmesh.addNode(3795.48198403077, -75.82885548621107, 0.0, 913)
femmesh.addNode(3920.48198403077, -75.82885548621107, 0.0, 914)
femmesh.addNode(3918.5110078496205, -1921.4899430721894, 0.0, 915)
femmesh.addNode(3793.5110078496205, -1921.4899430721894, 0.0, 916)
femmesh.addNode(3758.2941162937823, -160.7293061912742, 0.0, 917)
return True
def create_elements(femmesh):
    """Populate *femmesh* with the 2D face elements of this generated mesh.

    Auto-generated companion to create_nodes(): each call adds one
    six-node (quadratic) triangular face as addFace([n1..n6], element_id),
    where the node ids refer to the nodes created by create_nodes().
    Returns True on completion.
    """
    # elements
    femmesh.addFace([174, 230, 204, 315, 316, 317], 21)
    femmesh.addFace([11, 263, 258, 318, 319, 320], 22)
    femmesh.addFace([259, 264, 17, 321, 322, 323], 23)
    femmesh.addFace([201, 261, 65, 324, 325, 326], 24)
    femmesh.addFace([39, 211, 40, 327, 328, 55], 25)
    femmesh.addFace([176, 235, 217, 329, 330, 331], 26)
    femmesh.addFace([87, 248, 212, 332, 333, 334], 27)
    femmesh.addFace([237, 280, 172, 335, 336, 337], 28)
    femmesh.addFace([211, 247, 40, 338, 339, 328], 29)
    femmesh.addFace([209, 279, 269, 340, 341, 342], 30)
    femmesh.addFace([235, 296, 217, 343, 344, 330], 31)
    femmesh.addFace([11, 258, 12, 320, 345, 19], 32)
    femmesh.addFace([16, 259, 17, 346, 323, 24], 33)
    femmesh.addFace([209, 269, 188, 342, 347, 348], 34)
    femmesh.addFace([214, 277, 191, 349, 350, 351], 35)
    femmesh.addFace([214, 301, 277, 352, 353, 349], 36)
    femmesh.addFace([88, 248, 87, 354, 332, 95], 37)
    femmesh.addFace([65, 242, 201, 355, 356, 326], 38)
    femmesh.addFace([180, 255, 204, 357, 358, 359], 39)
    femmesh.addFace([13, 258, 207, 360, 361, 362], 40)
    femmesh.addFace([206, 259, 15, 363, 364, 365], 41)
    femmesh.addFace([200, 246, 227, 366, 367, 368], 42)
    femmesh.addFace([227, 246, 168, 367, 369, 370], 43)
    femmesh.addFace([216, 248, 89, 371, 372, 373], 44)
    femmesh.addFace([177, 225, 208, 374, 375, 376], 45)
    femmesh.addFace([34, 262, 234, 377, 378, 379], 46)
    femmesh.addFace([69, 209, 68, 380, 381, 80], 47)
    femmesh.addFace([172, 279, 237, 382, 383, 337], 48)
    femmesh.addFace([176, 284, 208, 384, 385, 386], 49)
    femmesh.addFace([226, 289, 262, 387, 388, 389], 50)
    femmesh.addFace([204, 243, 180, 390, 391, 359], 51)
    femmesh.addFace([230, 281, 204, 392, 393, 316], 52)
    femmesh.addFace([89, 248, 88, 372, 354, 96], 53)
    femmesh.addFace([180, 243, 205, 391, 394, 395], 54)
    femmesh.addFace([173, 249, 207, 396, 397, 398], 55)
    femmesh.addFace([207, 249, 182, 397, 399, 400], 56)
    femmesh.addFace([60, 214, 3, 401, 402, 71], 57)
    femmesh.addFace([205, 289, 180, 403, 404, 395], 58)
    femmesh.addFace([215, 270, 170, 405, 406, 407], 59)
    femmesh.addFace([204, 255, 174, 358, 408, 317], 60)
    femmesh.addFace([232, 270, 178, 409, 410, 411], 61)
    femmesh.addFace([207, 277, 173, 412, 413, 398], 62)
    femmesh.addFace([222, 232, 178, 414, 411, 415], 63)
    femmesh.addFace([213, 238, 85, 416, 417, 418], 64)
    femmesh.addFace([270, 282, 170, 419, 420, 406], 65)
    femmesh.addFace([184, 247, 211, 421, 338, 422], 66)
    femmesh.addFace([176, 299, 284, 423, 424, 384], 67)
    femmesh.addFace([210, 241, 179, 425, 426, 427], 68)
    femmesh.addFace([234, 262, 194, 378, 428, 429], 69)
    femmesh.addFace([222, 267, 61, 430, 431, 432], 70)
    femmesh.addFace([180, 289, 226, 404, 387, 433], 71)
    femmesh.addFace([41, 218, 42, 434, 435, 57], 72)
    femmesh.addFace([67, 266, 221, 436, 437, 438], 73)
    femmesh.addFace([182, 252, 207, 439, 440, 400], 74)
    femmesh.addFace([212, 223, 183, 441, 442, 443], 75)
    femmesh.addFace([181, 223, 212, 444, 441, 445], 76)
    femmesh.addFace([15, 239, 206, 446, 447, 365], 77)
    femmesh.addFace([62, 222, 61, 448, 432, 73], 78)
    femmesh.addFace([67, 221, 66, 438, 449, 78], 79)
    femmesh.addFace([219, 225, 177, 450, 374, 451], 80)
    femmesh.addFace([38, 283, 244, 452, 453, 454], 81)
    femmesh.addFace([197, 302, 224, 455, 456, 457], 82)
    femmesh.addFace([224, 302, 274, 456, 458, 459], 83)
    femmesh.addFace([269, 279, 172, 341, 382, 460], 84)
    femmesh.addFace([244, 283, 203, 453, 461, 462], 85)
    femmesh.addFace([229, 273, 178, 463, 464, 465], 86)
    femmesh.addFace([198, 280, 237, 466, 335, 467], 87)
    femmesh.addFace([169, 240, 233, 468, 469, 470], 88)
    femmesh.addFace([233, 240, 198, 469, 471, 472], 89)
    femmesh.addFace([12, 258, 13, 345, 360, 20], 90)
    femmesh.addFace([15, 259, 16, 364, 346, 23], 91)
    femmesh.addFace([207, 252, 13, 440, 473, 362], 92)
    femmesh.addFace([206, 239, 182, 447, 474, 475], 93)
    femmesh.addFace([179, 227, 210, 476, 477, 427], 94)
    femmesh.addFace([182, 304, 206, 478, 479, 475], 95)
    femmesh.addFace([228, 285, 31, 480, 481, 482], 96)
    femmesh.addFace([65, 261, 64, 325, 483, 76], 97)
    femmesh.addFace([202, 273, 229, 484, 463, 485], 98)
    femmesh.addFace([208, 284, 177, 385, 486, 376], 99)
    femmesh.addFace([5, 224, 70, 487, 488, 82], 100)
    femmesh.addFace([169, 236, 217, 489, 490, 491], 101)
    femmesh.addFace([217, 236, 184, 490, 492, 493], 102)
    femmesh.addFace([196, 281, 230, 494, 392, 495], 103)
    femmesh.addFace([224, 268, 70, 496, 497, 488], 104)
    femmesh.addFace([214, 287, 3, 498, 499, 402], 105)
    femmesh.addFace([209, 266, 68, 500, 501, 381], 106)
    femmesh.addFace([85, 238, 84, 417, 502, 92], 107)
    femmesh.addFace([188, 266, 209, 503, 500, 348], 108)
    femmesh.addFace([69, 268, 209, 504, 505, 380], 109)
    femmesh.addFace([209, 268, 190, 505, 506, 507], 110)
    femmesh.addFace([189, 267, 222, 508, 430, 509], 111)
    femmesh.addFace([181, 236, 223, 510, 511, 444], 112)
    femmesh.addFace([208, 235, 176, 512, 329, 386], 113)
    femmesh.addFace([181, 307, 236, 513, 514, 510], 114)
    femmesh.addFace([221, 266, 188, 437, 503, 515], 115)
    femmesh.addFace([186, 225, 219, 516, 450, 517], 116)
    femmesh.addFace([168, 313, 225, 518, 519, 520], 117)
    femmesh.addFace([215, 229, 178, 521, 465, 522], 118)
    femmesh.addFace([30, 228, 31, 523, 482, 46], 119)
    femmesh.addFace([213, 295, 183, 524, 525, 526], 120)
    femmesh.addFace([225, 313, 208, 519, 527, 375], 121)
    femmesh.addFace([190, 279, 209, 528, 340, 507], 122)
    femmesh.addFace([218, 276, 42, 529, 530, 435], 123)
    femmesh.addFace([36, 220, 37, 531, 532, 52], 124)
    femmesh.addFace([192, 264, 259, 533, 321, 534], 125)
    femmesh.addFace([258, 263, 191, 319, 535, 536], 126)
    femmesh.addFace([223, 233, 183, 537, 538, 442], 127)
    femmesh.addFace([169, 233, 223, 470, 537, 539], 128)
    femmesh.addFace([231, 253, 187, 540, 541, 542], 129)
    femmesh.addFace([199, 285, 228, 543, 480, 544], 130)
    femmesh.addFace([43, 276, 216, 545, 546, 547], 131)
    femmesh.addFace([212, 245, 87, 548, 549, 334], 132)
    femmesh.addFace([180, 303, 255, 550, 551, 357], 133)
    femmesh.addFace([182, 249, 230, 399, 552, 553], 134)
    femmesh.addFace([230, 249, 196, 552, 554, 495], 135)
    femmesh.addFace([188, 257, 221, 555, 556, 515], 136)
    femmesh.addFace([221, 257, 179, 556, 557, 558], 137)
    femmesh.addFace([253, 275, 187, 559, 560, 541], 138)
    femmesh.addFace([170, 241, 210, 561, 425, 562], 139)
    femmesh.addFace([212, 248, 181, 333, 563, 445], 140)
    femmesh.addFace([62, 232, 222, 564, 414, 448], 141)
    femmesh.addFace([190, 268, 224, 506, 496, 565], 142)
    femmesh.addFace([210, 310, 170, 566, 567, 562], 143)
    femmesh.addFace([175, 302, 213, 568, 569, 570], 144)
    femmesh.addFace([216, 276, 181, 546, 571, 572], 145)
    femmesh.addFace([193, 270, 232, 573, 409, 574], 146)
    femmesh.addFace([214, 267, 189, 575, 508, 576], 147)
    femmesh.addFace([60, 267, 214, 577, 575, 401], 148)
    femmesh.addFace([181, 276, 218, 571, 529, 578], 149)
    femmesh.addFace([241, 312, 179, 579, 580, 426], 150)
    femmesh.addFace([39, 244, 211, 581, 582, 327], 151)
    femmesh.addFace([1, 263, 11, 583, 318, 18], 152)
    femmesh.addFace([26, 263, 1, 584, 583, 27], 153)
    femmesh.addFace([17, 264, 2, 322, 585, 25], 154)
    femmesh.addFace([2, 264, 29, 585, 586, 44], 155)
    femmesh.addFace([211, 291, 184, 587, 588, 422], 156)
    femmesh.addFace([192, 286, 228, 589, 590, 591], 157)
    femmesh.addFace([32, 226, 33, 592, 593, 48], 158)
    femmesh.addFace([181, 248, 216, 563, 371, 572], 159)
    femmesh.addFace([201, 282, 261, 594, 595, 324], 160)
    femmesh.addFace([183, 256, 213, 596, 597, 526], 161)
    femmesh.addFace([213, 256, 175, 597, 598, 570], 162)
    femmesh.addFace([183, 245, 212, 599, 548, 443], 163)
    femmesh.addFace([191, 287, 214, 600, 498, 351], 164)
    femmesh.addFace([262, 289, 194, 388, 601, 428], 165)
    femmesh.addFace([42, 276, 43, 530, 545, 58], 166)
    femmesh.addFace([235, 246, 185, 602, 603, 604], 167)
    femmesh.addFace([195, 293, 220, 605, 606, 607], 168)
    femmesh.addFace([278, 286, 192, 608, 589, 609], 169)
    femmesh.addFace([26, 287, 263, 610, 611, 584], 170)
    femmesh.addFace([63, 232, 62, 612, 564, 74], 171)
    femmesh.addFace([170, 309, 215, 613, 614, 407], 172)
    femmesh.addFace([189, 301, 214, 615, 352, 576], 173)
    femmesh.addFace([220, 293, 177, 606, 616, 617], 174)
    femmesh.addFace([85, 295, 213, 618, 524, 418], 175)
    femmesh.addFace([34, 234, 35, 379, 619, 50], 176)
    femmesh.addFace([185, 280, 240, 620, 621, 622], 177)
    femmesh.addFace([220, 290, 195, 623, 624, 607], 178)
    femmesh.addFace([183, 295, 245, 525, 625, 599], 179)
    femmesh.addFace([284, 299, 203, 424, 626, 627], 180)
    femmesh.addFace([3, 287, 26, 499, 610, 28], 181)
    femmesh.addFace([36, 290, 220, 628, 623, 531], 182)
    femmesh.addFace([217, 296, 169, 344, 629, 491], 183)
    femmesh.addFace([235, 313, 246, 630, 631, 602], 184)
    femmesh.addFace([219, 253, 186, 632, 633, 517], 185)
    femmesh.addFace([171, 253, 219, 634, 632, 635], 186)
    femmesh.addFace([222, 273, 189, 636, 637, 509], 187)
    femmesh.addFace([178, 273, 222, 464, 636, 415], 188)
    femmesh.addFace([14, 239, 15, 638, 446, 22], 189)
    femmesh.addFace([89, 306, 216, 639, 640, 373], 190)
    femmesh.addFace([216, 306, 43, 640, 641, 547], 191)
    femmesh.addFace([251, 280, 185, 642, 620, 643], 192)
    femmesh.addFace([6, 271, 98, 644, 645, 100], 193)
    femmesh.addFace([83, 271, 6, 646, 644, 90], 194)
    femmesh.addFace([41, 247, 218, 647, 648, 434], 195)
    femmesh.addFace([218, 247, 184, 648, 421, 649], 196)
    femmesh.addFace([223, 236, 169, 511, 489, 539], 197)
    femmesh.addFace([225, 250, 168, 650, 651, 520], 198)
    femmesh.addFace([261, 282, 193, 595, 652, 653], 199)
    femmesh.addFace([87, 245, 86, 549, 654, 94], 200)
    femmesh.addFace([239, 252, 182, 655, 439, 474], 201)
    femmesh.addFace([218, 307, 181, 656, 513, 578], 202)
    femmesh.addFace([221, 242, 66, 657, 658, 449], 203)
    femmesh.addFace([184, 291, 217, 588, 659, 493], 204)
    femmesh.addFace([186, 250, 225, 660, 650, 516], 205)
    femmesh.addFace([179, 257, 227, 557, 661, 476], 206)
    femmesh.addFace([178, 270, 215, 410, 405, 522], 207)
    femmesh.addFace([227, 257, 200, 661, 662, 368], 208)
    femmesh.addFace([237, 279, 190, 383, 528, 663], 209)
    femmesh.addFace([32, 285, 226, 664, 665, 592], 210)
    femmesh.addFace([184, 307, 218, 666, 656, 649], 211)
    femmesh.addFace([217, 291, 176, 659, 667, 331], 212)
    femmesh.addFace([213, 302, 238, 569, 668, 416], 213)
    femmesh.addFace([179, 312, 221, 580, 669, 558], 214)
    femmesh.addFace([37, 283, 38, 670, 452, 53], 215)
    femmesh.addFace([33, 262, 34, 671, 377, 49], 216)
    femmesh.addFace([185, 296, 235, 672, 343, 604], 217)
    femmesh.addFace([200, 269, 251, 673, 674, 675], 218)
    femmesh.addFace([219, 308, 171, 676, 677, 635], 219)
    femmesh.addFace([177, 293, 219, 616, 678, 451], 220)
    femmesh.addFace([168, 250, 227, 651, 679, 370], 221)
    femmesh.addFace([227, 250, 210, 679, 680, 477], 222)
    femmesh.addFace([177, 311, 220, 681, 682, 617], 223)
    femmesh.addFace([215, 309, 305, 614, 683, 684], 224)
    femmesh.addFace([305, 309, 187, 683, 685, 686], 225)
    femmesh.addFace([204, 281, 243, 393, 687, 390], 226)
    femmesh.addFace([243, 281, 167, 687, 688, 689], 227)
    femmesh.addFace([220, 283, 37, 690, 670, 532], 228)
    femmesh.addFace([251, 269, 172, 674, 460, 691], 229)
    femmesh.addFace([224, 292, 197, 692, 693, 457], 230)
    femmesh.addFace([5, 292, 224, 694, 692, 487], 231)
    femmesh.addFace([167, 300, 229, 695, 696, 697], 232)
    femmesh.addFace([229, 300, 202, 696, 698, 485], 233)
    femmesh.addFace([66, 242, 65, 658, 355, 77], 234)
    femmesh.addFace([285, 303, 226, 699, 700, 665], 235)
    femmesh.addFace([38, 244, 39, 454, 581, 54], 236)
    femmesh.addFace([230, 304, 182, 701, 478, 553], 237)
    femmesh.addFace([202, 300, 294, 698, 702, 703], 238)
    femmesh.addFace([224, 274, 190, 459, 704, 565], 239)
    femmesh.addFace([169, 296, 240, 629, 705, 468], 240)
    femmesh.addFace([226, 262, 33, 389, 671, 593], 241)
    femmesh.addFace([40, 247, 41, 339, 647, 56], 242)
    femmesh.addFace([241, 282, 201, 706, 594, 707], 243)
    femmesh.addFace([277, 301, 173, 353, 708, 413], 244)
    femmesh.addFace([201, 312, 241, 709, 579, 707], 245)
    femmesh.addFace([193, 282, 270, 652, 419, 573], 246)
    femmesh.addFace([229, 305, 167, 710, 711, 697], 247)
    femmesh.addFace([13, 252, 14, 473, 712, 21], 248)
    femmesh.addFace([211, 299, 291, 713, 714, 587], 249)
    femmesh.addFace([294, 300, 196, 702, 715, 716], 250)
    femmesh.addFace([64, 298, 63, 717, 718, 75], 251)
    femmesh.addFace([170, 310, 231, 567, 719, 720], 252)
    femmesh.addFace([226, 303, 180, 700, 550, 433], 253)
    femmesh.addFace([255, 303, 199, 551, 721, 722], 254)
    femmesh.addFace([265, 275, 205, 723, 724, 725], 255)
    femmesh.addFace([187, 275, 265, 560, 723, 726], 256)
    femmesh.addFace([186, 253, 231, 633, 540, 727], 257)
    femmesh.addFace([200, 251, 246, 675, 728, 366], 258)
    femmesh.addFace([189, 273, 260, 637, 729, 730], 259)
    femmesh.addFace([260, 273, 202, 729, 484, 731], 260)
    femmesh.addFace([68, 266, 67, 501, 436, 79], 261)
    femmesh.addFace([61, 267, 60, 431, 577, 72], 262)
    femmesh.addFace([29, 288, 30, 732, 733, 45], 263)
    femmesh.addFace([198, 314, 233, 734, 735, 472], 264)
    femmesh.addFace([291, 299, 176, 714, 423, 667], 265)
    femmesh.addFace([186, 310, 250, 736, 737, 660], 266)
    femmesh.addFace([250, 310, 210, 737, 566, 680], 267)
    femmesh.addFace([263, 287, 191, 611, 600, 535], 268)
    femmesh.addFace([234, 254, 195, 738, 739, 740], 269)
    femmesh.addFace([70, 268, 69, 497, 504, 81], 270)
    femmesh.addFace([257, 269, 200, 741, 673, 662], 271)
    femmesh.addFace([188, 269, 257, 347, 741, 555], 272)
    femmesh.addFace([233, 256, 183, 742, 596, 538], 273)
    femmesh.addFace([86, 295, 85, 743, 618, 93], 274)
    femmesh.addFace([194, 254, 234, 744, 738, 429], 275)
    femmesh.addFace([30, 288, 228, 733, 745, 523], 276)
    femmesh.addFace([228, 288, 192, 745, 746, 591], 277)
    femmesh.addFace([236, 307, 184, 514, 666, 492], 278)
    femmesh.addFace([234, 290, 35, 747, 748, 619], 279)
    femmesh.addFace([195, 290, 234, 624, 747, 740], 280)
    femmesh.addFace([14, 252, 239, 712, 655, 638], 281)
    femmesh.addFace([228, 286, 199, 590, 749, 544], 282)
    femmesh.addFace([221, 312, 242, 669, 750, 657], 283)
    femmesh.addFace([43, 306, 4, 641, 751, 59], 284)
    femmesh.addFace([4, 306, 89, 751, 639, 97], 285)
    femmesh.addFace([98, 292, 5, 752, 694, 99], 286)
    femmesh.addFace([167, 305, 265, 711, 753, 754], 287)
    femmesh.addFace([246, 251, 185, 728, 643, 603], 288)
    femmesh.addFace([167, 265, 243, 754, 755, 689], 289)
    femmesh.addFace([243, 265, 205, 755, 725, 394], 290)
    femmesh.addFace([31, 285, 32, 481, 664, 47], 291)
    femmesh.addFace([35, 290, 36, 748, 628, 51], 292)
    femmesh.addFace([171, 275, 253, 756, 559, 634], 293)
    femmesh.addFace([272, 275, 171, 757, 756, 758], 294)
    femmesh.addFace([205, 275, 272, 724, 757, 759], 295)
    femmesh.addFace([215, 305, 229, 684, 710, 521], 296)
    femmesh.addFace([202, 294, 260, 703, 760, 731], 297)
    femmesh.addFace([84, 297, 83, 761, 762, 91], 298)
    femmesh.addFace([274, 302, 175, 458, 568, 763], 299)
    femmesh.addFace([233, 314, 256, 735, 764, 742], 300)
    femmesh.addFace([187, 309, 231, 685, 765, 542], 301)
    femmesh.addFace([240, 280, 198, 621, 466, 471], 302)
    femmesh.addFace([237, 274, 175, 766, 763, 767], 303)
    femmesh.addFace([190, 274, 237, 704, 766, 663], 304)
    femmesh.addFace([260, 301, 189, 768, 615, 730], 305)
    femmesh.addFace([174, 286, 278, 769, 608, 770], 306)
    femmesh.addFace([261, 298, 64, 771, 717, 483], 307)
    femmesh.addFace([199, 303, 285, 721, 699, 543], 308)
    femmesh.addFace([63, 298, 232, 718, 772, 612], 309)
    femmesh.addFace([232, 298, 193, 772, 773, 574], 310)
    femmesh.addFace([206, 278, 259, 774, 775, 363], 311)
    femmesh.addFace([259, 278, 192, 775, 609, 534], 312)
    femmesh.addFace([191, 277, 258, 350, 776, 536], 313)
    femmesh.addFace([258, 277, 207, 776, 412, 361], 314)
    femmesh.addFace([174, 304, 230, 777, 701, 315], 315)
    femmesh.addFace([246, 313, 168, 631, 518, 369], 316)
    femmesh.addFace([244, 299, 211, 778, 713, 582], 317)
    femmesh.addFace([255, 286, 174, 779, 769, 408], 318)
    femmesh.addFace([199, 286, 255, 749, 779, 722], 319)
    femmesh.addFace([231, 310, 186, 719, 736, 727], 320)
    femmesh.addFace([231, 309, 170, 765, 613, 720], 321)
    femmesh.addFace([264, 288, 29, 780, 732, 586], 322)
    femmesh.addFace([197, 297, 238, 781, 782, 783], 323)
    femmesh.addFace([170, 282, 241, 420, 706, 561], 324)
    femmesh.addFace([254, 272, 171, 784, 758, 785], 325)
    femmesh.addFace([194, 272, 254, 786, 784, 744], 326)
    femmesh.addFace([272, 289, 205, 787, 403, 759], 327)
    femmesh.addFace([249, 294, 196, 788, 716, 554], 328)
    femmesh.addFace([173, 294, 249, 789, 788, 396], 329)
    femmesh.addFace([240, 296, 185, 705, 672, 622], 330)
    femmesh.addFace([208, 313, 235, 527, 630, 512], 331)
    femmesh.addFace([175, 314, 237, 790, 791, 767], 332)
    femmesh.addFace([237, 314, 198, 791, 734, 467], 333)
    femmesh.addFace([238, 297, 84, 782, 761, 502], 334)
    femmesh.addFace([172, 280, 251, 336, 642, 691], 335)
    femmesh.addFace([238, 302, 197, 668, 455, 783], 336)
    femmesh.addFace([245, 295, 86, 625, 743, 654], 337)
    femmesh.addFace([196, 300, 281, 715, 792, 494], 338)
    femmesh.addFace([203, 299, 244, 626, 778, 462], 339)
    femmesh.addFace([192, 288, 264, 746, 780, 533], 340)
    femmesh.addFace([254, 308, 195, 793, 794, 739], 341)
    femmesh.addFace([271, 292, 98, 795, 752, 645], 342)
    femmesh.addFace([242, 312, 201, 750, 709, 356], 343)
    femmesh.addFace([206, 304, 278, 479, 796, 774], 344)
    femmesh.addFace([278, 304, 174, 796, 777, 770], 345)
    femmesh.addFace([260, 294, 173, 760, 789, 797], 346)
    femmesh.addFace([271, 297, 197, 798, 781, 799], 347)
    femmesh.addFace([193, 298, 261, 773, 771, 653], 348)
    femmesh.addFace([194, 289, 272, 601, 787, 786], 349)
    femmesh.addFace([197, 292, 271, 693, 795, 799], 350)
    femmesh.addFace([173, 301, 260, 708, 768, 797], 351)
    femmesh.addFace([171, 308, 254, 677, 793, 785], 352)
    femmesh.addFace([83, 297, 271, 762, 798, 646], 353)
    femmesh.addFace([256, 314, 175, 764, 790, 598], 354)
    femmesh.addFace([265, 305, 187, 753, 686, 726], 355)
    femmesh.addFace([293, 308, 219, 800, 676, 678], 356)
    femmesh.addFace([195, 308, 293, 794, 800, 605], 357)
    femmesh.addFace([281, 300, 167, 792, 695, 688], 358)
    femmesh.addFace([220, 311, 283, 682, 801, 690], 359)
    femmesh.addFace([283, 311, 203, 801, 802, 461], 360)
    femmesh.addFace([203, 311, 284, 802, 803, 627], 361)
    femmesh.addFace([284, 311, 177, 803, 681, 486], 362)
    femmesh.addFace([121, 808, 120, 814, 815, 128], 363)
    femmesh.addFace([123, 809, 122, 816, 817, 130], 364)
    femmesh.addFace([805, 808, 121, 818, 814, 819], 365)
    femmesh.addFace([120, 808, 804, 815, 820, 821], 366)
    femmesh.addFace([122, 809, 805, 817, 822, 823], 367)
    femmesh.addFace([806, 809, 123, 824, 816, 825], 368)
    femmesh.addFace([806, 807, 106, 826, 827, 828], 369)
    femmesh.addFace([124, 807, 806, 829, 826, 830], 370)
    femmesh.addFace([106, 807, 107, 827, 831, 114], 371)
    femmesh.addFace([125, 807, 124, 832, 829, 132], 372)
    femmesh.addFace([101, 804, 102, 833, 834, 109], 373)
    femmesh.addFace([103, 805, 104, 835, 836, 111], 374)
    femmesh.addFace([105, 806, 106, 837, 828, 113], 375)
    femmesh.addFace([120, 804, 119, 821, 838, 127], 376)
    femmesh.addFace([122, 805, 121, 823, 819, 129], 377)
    femmesh.addFace([124, 806, 123, 830, 825, 131], 378)
    femmesh.addFace([805, 809, 104, 822, 839, 836], 379)
    femmesh.addFace([103, 808, 805, 840, 818, 835], 380)
    femmesh.addFace([105, 809, 806, 841, 824, 837], 381)
    femmesh.addFace([804, 808, 102, 820, 842, 834], 382)
    femmesh.addFace([102, 808, 103, 842, 840, 110], 383)
    femmesh.addFace([104, 809, 105, 839, 841, 112], 384)
    femmesh.addFace([804, 813, 119, 843, 844, 838], 385)
    femmesh.addFace([7, 810, 101, 845, 846, 108], 386)
    femmesh.addFace([116, 810, 7, 847, 845, 117], 387)
    femmesh.addFace([101, 810, 804, 846, 848, 833], 388)
    femmesh.addFace([125, 812, 807, 849, 850, 832], 389)
    femmesh.addFace([807, 811, 107, 851, 852, 831], 390)
    femmesh.addFace([1, 811, 26, 853, 854, 27], 391)
    femmesh.addFace([107, 811, 1, 852, 853, 115], 392)
    femmesh.addFace([810, 813, 804, 855, 843, 848], 393)
    femmesh.addFace([26, 812, 3, 856, 857, 28], 394)
    femmesh.addFace([3, 812, 125, 857, 849, 133], 395)
    femmesh.addFace([8, 813, 116, 858, 859, 118], 396)
    femmesh.addFace([119, 813, 8, 844, 858, 126], 397)
    femmesh.addFace([807, 812, 811, 850, 860, 851], 398)
    femmesh.addFace([116, 813, 810, 859, 855, 847], 399)
    femmesh.addFace([811, 812, 26, 860, 856, 854], 400)
    femmesh.addFace([154, 865, 153, 871, 872, 161], 401)
    femmesh.addFace([156, 866, 155, 873, 874, 163], 402)
    femmesh.addFace([862, 865, 154, 875, 871, 876], 403)
    femmesh.addFace([863, 866, 156, 877, 873, 878], 404)
    femmesh.addFace([153, 865, 861, 872, 879, 880], 405)
    femmesh.addFace([155, 866, 862, 874, 881, 882], 406)
    femmesh.addFace([157, 864, 863, 883, 884, 885], 407)
    femmesh.addFace([863, 864, 139, 884, 886, 887], 408)
    femmesh.addFace([139, 864, 140, 886, 888, 147], 409)
    femmesh.addFace([158, 864, 157, 889, 883, 165], 410)
    femmesh.addFace([134, 861, 135, 890, 891, 142], 411)
    femmesh.addFace([136, 862, 137, 892, 893, 144], 412)
    femmesh.addFace([138, 863, 139, 894, 887, 146], 413)
    femmesh.addFace([153, 861, 152, 880, 895, 160], 414)
    femmesh.addFace([155, 862, 154, 882, 876, 162], 415)
    femmesh.addFace([157, 863, 156, 885, 878, 164], 416)
    femmesh.addFace([862, 866, 137, 881, 896, 893], 417)
    femmesh.addFace([861, 865, 135, 879, 897, 891], 418)
    femmesh.addFace([136, 865, 862, 898, 875, 892], 419)
    femmesh.addFace([138, 866, 863, 899, 877, 894], 420)
    femmesh.addFace([135, 865, 136, 897, 898, 143], 421)
    femmesh.addFace([137, 866, 138, 896, 899, 145], 422)
    femmesh.addFace([861, 870, 152, 900, 901, 895], 423)
    femmesh.addFace([9, 867, 134, 902, 903, 141], 424)
    femmesh.addFace([149, 867, 9, 904, 902, 150], 425)
    femmesh.addFace([134, 867, 861, 903, 905, 890], 426)
    femmesh.addFace([158, 869, 864, 906, 907, 889], 427)
    femmesh.addFace([864, 868, 140, 908, 909, 888], 428)
    femmesh.addFace([5, 868, 98, 910, 911, 99], 429)
    femmesh.addFace([140, 868, 5, 909, 910, 148], 430)
    femmesh.addFace([867, 870, 861, 912, 900, 905], 431)
    femmesh.addFace([98, 869, 6, 913, 914, 100], 432)
    femmesh.addFace([6, 869, 158, 914, 906, 166], 433)
    femmesh.addFace([10, 870, 149, 915, 916, 151], 434)
    femmesh.addFace([152, 870, 10, 901, 915, 159], 435)
    femmesh.addFace([864, 869, 868, 907, 917, 908], 436)
    femmesh.addFace([149, 870, 867, 916, 912, 904], 437)
    femmesh.addFace([868, 869, 98, 917, 913, 911], 438)
    return True
|
engines | reddit | # SPDX-License-Identifier: AGPL-3.0-or-later
"""
Reddit
"""
import json
from datetime import datetime
from urllib.parse import urlencode, urljoin, urlparse
# about
about = {
    "website": "https://www.reddit.com/",
    "wikidata_id": "Q1136",
    "official_api_documentation": "https://www.reddit.com/dev/api",
    "use_official_api": True,
    "require_api_key": False,
    "results": "JSON",
}
# engine dependent config
categories = ["general", "images", "news", "social media"]
# number of results requested per page from the Reddit search API
page_size = 25
# search-url
base_url = "https://www.reddit.com/"
# the {query} placeholder is filled with an urlencoded q/limit pair in request()
search_url = base_url + "search.json?{query}"
# do search-request
def request(query, params):
    """Fill in params['url'] with the Reddit search URL for *query*.

    Returns the (mutated) *params* dict, as searx engines expect.
    """
    encoded_query = urlencode({"q": query, "limit": page_size})
    params["url"] = search_url.format(query=encoded_query)
    return params
# get response from search-request
def response(resp):
    """Parse the Reddit search JSON response into searx result dicts.

    Posts whose 'thumbnail' field holds a real URL (non-empty netloc and
    path) become image results; everything else becomes a text result with
    the selftext truncated to 500 characters. Image results are listed
    before text results. Returns [] when the payload has no 'data' key.
    """
    img_results = []
    text_results = []

    search_results = json.loads(resp.text)

    # return empty array if there are no results
    if "data" not in search_results:
        return []

    posts = search_results.get("data", {}).get("children", [])

    # process results
    for post in posts:
        data = post["data"]

        # extract post information
        params = {"url": urljoin(base_url, data["permalink"]), "title": data["title"]}

        # 'thumbnail' is optional and may hold sentinels like "self" or
        # "default" instead of a URL; .get avoids a KeyError when absent
        thumbnail = data.get("thumbnail", "")
        url_info = urlparse(thumbnail)
        # only a value with both netloc & path is a real thumbnail URL
        if url_info[1] != "" and url_info[2] != "":
            params["img_src"] = data["url"]
            params["thumbnail_src"] = thumbnail
            params["template"] = "images.html"
            img_results.append(params)
        else:
            created = datetime.fromtimestamp(data["created_utc"])
            # 'selftext' is empty/missing for link posts; default to ""
            content = data.get("selftext", "")
            if len(content) > 500:
                content = content[:500] + "..."
            params["content"] = content
            params["publishedDate"] = created
            text_results.append(params)

    # show images first and text results second
    return img_results + text_results
|
utils | epy_block_io | import collections
import inspect
TYPE_MAP = {
"complex64": "complex",
"complex": "complex",
"float32": "float",
"float": "float",
"int32": "int",
"uint32": "int",
"int16": "short",
"uint16": "short",
"int8": "byte",
"uint8": "byte",
}
BlockIO = collections.namedtuple(
"BlockIO", "name cls params sinks sources doc callbacks"
)
def _ports(sigs, msgs):
ports = list()
for i, dtype in enumerate(sigs):
port_type = TYPE_MAP.get(dtype.base.name, None)
if not port_type:
raise ValueError("Can't map {0!r} to GRC port type".format(dtype))
vlen = dtype.shape[0] if len(dtype.shape) > 0 else 1
ports.append((str(i), port_type, vlen))
for msg_key in msgs:
if msg_key == "system":
continue
ports.append((msg_key, "message", 1))
return ports
def _find_block_class(source_code, cls):
ns = {}
try:
exec(source_code, ns)
except Exception as e:
raise ValueError("Can't interpret source code: " + str(e))
for var in ns.values():
if inspect.isclass(var) and issubclass(var, cls):
return var
raise ValueError("No python block class found in code")
def extract(cls):
    """Instantiate *cls* (a gateway block class, or a source-code string
    containing one) and describe its GRC interface as a BlockIO tuple.

    Raises EnvironmentError when GNU Radio is unavailable, ValueError when
    any __init__ argument lacks a default, and RuntimeError when the block
    cannot be instantiated.
    """
    try:
        import pmt
        from gnuradio import gr
    except ImportError:
        raise EnvironmentError("Can't import GNU Radio")

    if not inspect.isclass(cls):
        cls = _find_block_class(cls, gr.gateway.gateway_block)

    spec = inspect.getfullargspec(cls.__init__)
    init_args = spec.args[1:]
    defaults = [repr(value) for value in (spec.defaults or ())]
    doc = cls.__doc__ or cls.__init__.__doc__ or ""
    cls_name = cls.__name__

    # every __init__ argument after self must carry a default value
    if len(spec.args) != len(defaults) + 1:
        raise ValueError("Need all __init__ arguments to have default values")

    try:
        instance = cls()
    except Exception as exc:
        raise RuntimeError("Can't create an instance of your block: " + str(exc))

    name = instance.name()
    params = list(zip(init_args, defaults))

    def has_setter(attr):
        try:
            # a property counts only when it exposes a setter
            return callable(getattr(cls, attr).fset)
        except AttributeError:
            # plain attribute: look at the instance dict, not dir()
            return attr in instance.__dict__

    callbacks = [attr for attr in dir(instance) if attr in init_args and has_setter(attr)]

    sinks = _ports(instance.in_sig(), pmt.to_python(instance.message_ports_in()))
    sources = _ports(instance.out_sig(), pmt.to_python(instance.message_ports_out()))
    return BlockIO(name, cls_name, params, sinks, sources, doc, callbacks)
if __name__ == "__main__":
    # Demo: describe the interface of a sample embedded Python block given
    # as source code (exercises _find_block_class + extract end to end).
    blk_code = """
import numpy as np
from gnuradio import gr
import pmt


class blk(gr.sync_block):
    def __init__(self, param1=None, param2=None, param3=None):
        "Test Docu"
        gr.sync_block.__init__(
            self,
            name='Embedded Python Block',
            in_sig = (np.float32,),
            out_sig = (np.float32,np.complex64,),
        )
        self.message_port_register_in(pmt.intern('msg_in'))
        self.message_port_register_out(pmt.intern('msg_out'))
        self.param1 = param1
        self._param2 = param2
        self._param3 = param3

    @property
    def param2(self):
        return self._param2

    @property
    def param3(self):
        return self._param3

    @param3.setter
    def param3(self, value):
        self._param3 = value

    def work(self, inputs_items, output_items):
        return 10
"""
    from pprint import pprint

    pprint(dict(extract(blk_code)._asdict()))
|
Tools | pythondoc | #! python
# -*- coding: utf-8 -*-
# (c) 2007 Werner Mayer LGPL
# Create HTML documentation from FreeCAD's Python modules and classes.
import os
import pkgutil
import pydoc
import sys
import zipfile
import dircache
def generateDoc():
    """Generate pydoc HTML pages for FreeCAD and its Mod packages, bundle
    them into doc/docs.zip, then remove the loose HTML files."""
    # Get the path to the FreeCAD module relative to this directory
    toolspath = os.path.dirname(__file__)
    homepath = toolspath + "/../../"
    homepath = os.path.realpath(homepath)
    binpath = os.path.join(homepath, "bin")
    docpath = os.path.join(homepath, "doc")
    modpath = os.path.join(homepath, "Mod")

    # Change to the doc directory, creating it when missing
    cwd = os.getcwd()
    print("Change to " + docpath)
    os.chdir(homepath)
    if not os.path.exists("doc"):
        os.mkdir("doc")
    os.chdir("doc")

    # Add the bin path to the system path so FreeCAD binaries resolve
    if os.name == "nt":
        os.environ["PATH"] = os.environ["PATH"] + ";" + binpath
    else:
        os.environ["PATH"] = os.environ["PATH"] + ":" + binpath

    # Import FreeCAD module
    sys.path.append(binpath)
    print("Write documentation for module 'FreeCAD'")
    pydoc.writedoc("FreeCAD")
    print("")

    # Module directory.  os.listdir replaces the Python-2-only dircache
    # module (removed in Python 3); the top-level 'import dircache' is now
    # unused and can be dropped.
    ModDirs = os.listdir(modpath)
    # Walk through the module paths and try loading the modules to create HTML files
    for Dir in ModDirs:
        dest = os.path.join(modpath, Dir)
        print("Write documentation for module '" + Dir + "'")
        if Dir != "__init__.py":
            writedocs(dest)
        print("")

    # Create a ZIP archive from all HTML files; 'with' guarantees the
    # archive is closed even if adding a file fails ('archive' instead of
    # 'zip'/'file' to avoid shadowing builtins)
    print("Creating ZIP archive 'docs.zip'...")
    with zipfile.ZipFile("docs.zip", "w") as archive:
        for entry in os.listdir("."):
            if not os.path.isdir(entry) and entry.find(".html") > 0:
                print(" Adding file " + entry + " to archive")
                archive.write(entry)
    print("done.")

    # Remove all loose HTML files now that they are archived
    print("Cleaning up HTML files...")
    for entry in os.listdir("."):
        if not os.path.isdir(entry) and entry.find(".html") > 0:
            print(" Removing " + entry)
            os.remove(entry)
    os.chdir(cwd)
    print("done.")
def writedocs(dir, pkgpath=""):
    """Write out HTML documentation for all modules in a directory tree."""
    for _importer, modname, _ispkg in pkgutil.walk_packages([dir], pkgpath):
        # Ignore all debug modules (names ending in "_d")
        if not modname.endswith("_d"):
            pydoc.writedoc(modname)
    return
if __name__ == "__main__":
generateDoc()
|
extractor | dlf | # coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_str
from ..utils import (
determine_ext,
extract_attributes,
int_or_none,
merge_dicts,
traverse_obj,
url_or_none,
variadic,
)
from .common import InfoExtractor
class DLFBaseIE(InfoExtractor):
    # Common URL prefix for all Deutschlandfunk pages
    _VALID_URL_BASE = r"https?://(?:www\.)?deutschlandfunk\.de/"
    # The "Anhören" (listen) <button> whose data-* attributes carry the
    # audio metadata
    _BUTTON_REGEX = r'(<button[^>]+alt="Anhören"[^>]+data-audio-diraid[^>]*>)'

    def _parse_button_attrs(self, button, audio_id=None):
        """Build an info dict from the data-* attributes of a listen button.

        *button* is the raw <button ...> HTML; *audio_id* falls back to the
        button's data-audio-diraid attribute when not given.
        """
        attrs = extract_attributes(button)
        audio_id = audio_id or attrs["data-audio-diraid"]
        # First usable audio URL among several candidate attributes,
        # preferring the direct download source
        url = traverse_obj(
            attrs,
            "data-audio-download-src",
            "data-audio",
            "data-audioreference",
            "data-audio-src",
            expected_type=url_or_none,
        )
        ext = determine_ext(url)
        # HLS manifests are expanded into their variant formats; anything
        # else is treated as a single audio-only file
        formats = (
            self._extract_m3u8_formats(url, audio_id, fatal=False)
            if ext == "m3u8"
            else [{"url": url, "ext": ext, "vcodec": "none"}]
        )
        self._sort_formats(formats)

        def traverse_attrs(path):
            # path may end with a callable used as expected_type filter
            path = list(variadic(path))
            t = path.pop() if callable(path[-1]) else None
            return traverse_obj(attrs, path, expected_type=t, get_all=False)

        def txt_or_none(v, default=None):
            # normalize empty/whitespace-only strings to *default*
            return default if v is None else (compat_str(v).strip() or default)

        # merge_dicts keeps the first non-empty value; reversed() makes the
        # id/formats dict win over the attribute-derived metadata
        return merge_dicts(
            *reversed(
                [
                    {
                        "id": audio_id,
                        # 'extractor_key': DLFIE.ie_key(),
                        # 'extractor': DLFIE.IE_NAME,
                        "formats": formats,
                    },
                    dict(
                        (k, traverse_attrs(v))
                        for k, v in {
                            "title": (
                                (
                                    "data-audiotitle",
                                    "data-audio-title",
                                    "data-audio-download-tracking-title",
                                ),
                                txt_or_none,
                            ),
                            "duration": (
                                ("data-audioduration", "data-audio-duration"),
                                int_or_none,
                            ),
                            "thumbnail": ("data-audioimage", url_or_none),
                            "uploader": "data-audio-producer",
                            "series": "data-audio-series",
                            "channel": "data-audio-origin-site-name",
                            "webpage_url": (
                                "data-audio-download-tracking-path",
                                url_or_none,
                            ),
                        }.items()
                    ),
                ]
            )
        )
class DLFIE(DLFBaseIE):
    """Extractor for a single Deutschlandfunk audio page."""

    IE_NAME = "dlf"
    # single-audio pages end in -dlf-<8 hex digits>-100.html
    _VALID_URL = DLFBaseIE._VALID_URL_BASE + r"[\w-]+-dlf-(?P<id>[\da-f]{8})-100\.html"
    _TESTS = [
        # Audio as an HLS stream
        {
            "url": "https://www.deutschlandfunk.de/tanz-der-saiteninstrumente-das-wild-strings-trio-aus-slowenien-dlf-03a3eb19-100.html",
            "info_dict": {
                "id": "03a3eb19",
                "title": r"re:Tanz der Saiteninstrumente [-/] Das Wild Strings Trio aus Slowenien",
                "ext": "m4a",
                "duration": 3298,
                "thumbnail": "https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673",
                "uploader": "Deutschlandfunk",
                "series": "On Stage",
                "channel": "deutschlandfunk",
            },
            "params": {"skip_download": "m3u8"},
            "skip": "This webpage no longer exists",
        },
        {
            "url": "https://www.deutschlandfunk.de/russische-athleten-kehren-zurueck-auf-die-sportbuehne-ein-gefaehrlicher-tueroeffner-dlf-d9cc1856-100.html",
            "info_dict": {
                "id": "d9cc1856",
                "title": "Russische Athleten kehren zurück auf die Sportbühne: Ein gefährlicher Türöffner",
                "ext": "mp3",
                "duration": 291,
                "thumbnail": "https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673",
                "uploader": "Deutschlandfunk",
                "series": "Kommentare und Themen der Woche",
                "channel": "deutschlandfunk",
            },
        },
    ]

    def _real_extract(self, url):
        # Fetch the page and parse the single listen button it contains
        audio_id = self._match_id(url)
        webpage = self._download_webpage(url, audio_id)

        return self._parse_button_attrs(
            self._search_regex(self._BUTTON_REGEX, webpage, "button"), audio_id
        )
class DLFCorpusIE(DLFBaseIE):
    """Extractor for Deutschlandfunk multi-feed archive pages (playlists of audio buttons)."""

    IE_NAME = "dlf:corpus"
    IE_DESC = "DLF Multi-feed Archives"
    # Negative lookahead keeps single-audio pages (handled by DLFIE) out of this extractor.
    _VALID_URL = (
        DLFBaseIE._VALID_URL_BASE + r"(?P<id>(?![\w-]+-dlf-[\da-f]{8})[\w-]+-\d+)\.html"
    )
    _TESTS = [
        # Recorded news broadcast with referrals to related broadcasts
        {
            "url": "https://www.deutschlandfunk.de/fechten-russland-belarus-ukraine-protest-100.html",
            "info_dict": {
                "id": "fechten-russland-belarus-ukraine-protest-100",
                "title": r"re:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet",
                "description": "md5:91340aab29c71aa7518ad5be13d1e8ad",
            },
            "playlist_mincount": 5,
            "playlist": [
                {
                    "info_dict": {
                        "id": "1fc5d64a",
                        "title": r"re:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet",
                        "ext": "mp3",
                        "duration": 252,
                        "thumbnail": "https://assets.deutschlandfunk.de/aad16241-6b76-4a09-958b-96d0ee1d6f57/512x512.jpg?t=1679480020313",
                        "uploader": "Deutschlandfunk",
                        "series": "Sport",
                        "channel": "deutschlandfunk",
                    }
                },
                {
                    "info_dict": {
                        "id": "2ada145f",
                        "title": r"re:(?:Sportpolitik / )?Fechtverband votiert für Rückkehr russischer Athleten",
                        "ext": "mp3",
                        "duration": 336,
                        "thumbnail": "https://assets.deutschlandfunk.de/FILE_93982766f7317df30409b8a184ac044a/512x512.jpg?t=1678547581005",
                        "uploader": "Deutschlandfunk",
                        "series": "Deutschlandfunk Nova",
                        "channel": "deutschlandfunk-nova",
                    }
                },
                {
                    "info_dict": {
                        "id": "5e55e8c9",
                        "title": r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
                        "ext": "mp3",
                        "duration": 187,
                        "thumbnail": "https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412",
                        "uploader": "Deutschlandfunk",
                        "series": "Sport am Samstag",
                        "channel": "deutschlandfunk",
                    }
                },
                {
                    "info_dict": {
                        "id": "47e1a096",
                        "title": r're:Rückkehr Russlands im Fechten [-/] "Fassungslos, dass es einfach so passiert ist"',
                        "ext": "mp3",
                        "duration": 602,
                        "thumbnail": "https://assets.deutschlandfunk.de/da4c494a-21cc-48b4-9cc7-40e09fd442c2/512x512.jpg?t=1678562155770",
                        "uploader": "Deutschlandfunk",
                        "series": "Sport am Samstag",
                        "channel": "deutschlandfunk",
                    }
                },
                {
                    "info_dict": {
                        "id": "5e55e8c9",
                        "title": r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
                        "ext": "mp3",
                        "duration": 187,
                        "thumbnail": "https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412",
                        "uploader": "Deutschlandfunk",
                        "series": "Sport am Samstag",
                        "channel": "deutschlandfunk",
                    }
                },
            ],
        },
        # Podcast feed with tag buttons, playlist count fluctuates
        {
            "url": "https://www.deutschlandfunk.de/kommentare-und-themen-der-woche-100.html",
            "info_dict": {
                "id": "kommentare-und-themen-der-woche-100",
                "title": "Meinung - Kommentare und Themen der Woche",
                "description": "md5:2901bbd65cd2d45e116d399a099ce5d5",
            },
            "playlist_mincount": 10,
        },
        # Podcast feed with no description
        {
            "url": "https://www.deutschlandfunk.de/podcast-tolle-idee-100.html",
            "info_dict": {
                "id": "podcast-tolle-idee-100",
                "title": "Wissenschaftspodcast - Tolle Idee! - Was wurde daraus?",
            },
            "playlist_mincount": 11,
        },
    ]

    def _real_extract(self, url):
        """Parse every player button on the archive page into a playlist entry."""
        playlist_id = self._match_id(url)
        page = self._download_webpage(url, playlist_id)
        entries = [
            self._parse_button_attrs(attrs)
            for attrs in re.findall(self._BUTTON_REGEX, page)
        ]
        title = self._html_search_meta(["og:title", "twitter:title"], page, default=None)
        description = self._html_search_meta(
            ["description", "og:description", "twitter:description"],
            page,
            default=None,
        )
        return self.playlist_result(entries, playlist_id, title, description)
|
cmd | tag | from __future__ import absolute_import
import sys
from bup import git, options
from bup.compat import argv_bytes
from bup.helpers import debug1, log
from bup.io import byte_stream, path_msg
# FIXME: review for safe writes.
optspec = """
bup tag
bup tag [-f] <tag name> <commit>
bup tag [-f] -d <tag name>
--
d,delete= Delete a tag
f,force Overwrite existing tag, or ignore missing tag when deleting
"""
def main(argv):
    """Entry point for ``bup tag``: list, create, or delete tags.

    With no positional arguments, prints all existing tag names.
    With ``-d <name>`` deletes a tag; with ``<name> <commit>`` creates one.
    Exits the process directly (0 on success, 1/2 on errors); never returns.
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    git.check_repo_or_die()

    # git.tags() maps hashes to lists of names; flatten to just the names.
    tags = [t for sublist in git.tags().values() for t in sublist]

    if opt.delete:
        # git.delete_ref() doesn't complain if a ref doesn't exist.  We
        # could implement this verification but we'd need to read in the
        # contents of the tag file and pass the hash, and we already know
        # about the tag's existence via "tags".
        tag_name = argv_bytes(opt.delete)
        if not opt.force and tag_name not in tags:
            log("error: tag '%s' doesn't exist\n" % path_msg(tag_name))
            sys.exit(1)
        tag_file = b"refs/tags/%s" % tag_name
        git.delete_ref(tag_file)
        sys.exit(0)

    if not extra:
        # No arguments: list all tags, one per line.
        # Flush once and create the byte stream once (previously both were
        # redone on every loop iteration).
        sys.stdout.flush()
        out = byte_stream(sys.stdout)
        for t in tags:
            out.write(t)
            out.write(b"\n")
        sys.exit(0)
    elif len(extra) != 2:
        # FIX: message previously said "commit ref and hash", which does not
        # match the documented usage "bup tag [-f] <tag name> <commit>".
        o.fatal("expected tag name and commit")

    tag_name, commit = map(argv_bytes, extra[:2])
    if not tag_name:
        o.fatal("tag name must not be empty.")
    debug1(
        "args: tag name = %s; commit = %s\n"
        % (path_msg(tag_name), commit.decode("ascii"))
    )
    if tag_name in tags and not opt.force:
        log("bup: error: tag '%s' already exists\n" % path_msg(tag_name))
        sys.exit(1)
    if tag_name.startswith(b"."):
        o.fatal("'%s' is not a valid tag name." % path_msg(tag_name))

    try:
        # Renamed from "hash" to avoid shadowing the builtin.
        commit_hash = git.rev_parse(commit)
    except git.GitError as e:
        # FIX: this message previously lacked the trailing newline every
        # other log() call in this command emits.
        log("bup: error: %s\n" % e)
        sys.exit(2)
    if not commit_hash:
        log("bup: error: commit %s not found.\n" % commit.decode("ascii"))
        sys.exit(2)

    # Make sure the object actually exists in the local pack indexes.
    with git.PackIdxList(git.repo(b"objects/pack")) as pL:
        if not pL.exists(commit_hash):
            log("bup: error: commit %s not found.\n" % commit.decode("ascii"))
            sys.exit(2)

    git.update_ref(b"refs/tags/" + tag_name, commit_hash, None, force=True)
|
PyObjCTest | test_nsspellchecker | from AppKit import *
from PyObjCTools.TestSupport import *
# Python 2/3 compatibility shim: Python 3 has no builtin ``unicode``, so the
# isinstance() checks in the tests below fall back to ``str`` there.
try:
    unicode
except NameError:
    unicode = str
class TestNSSpellChecker(TestCase):
    """Verify the PyObjC bridge metadata for AppKit's NSSpellChecker.

    These tests don't spell-check anything: they assert that method
    signatures were annotated correctly (BOOL results/arguments,
    by-reference "out" arguments, struct argument types, block
    signatures) and that the expected string constants exist.
    """

    def testMethods(self):
        # Metadata available on all supported OS versions.
        self.assertResultIsBOOL(NSSpellChecker.sharedSpellCheckerExists)
        # Argument 3 ("wrap") is a BOOL; argument 5 ("wordCount") is returned
        # by reference.
        self.assertArgIsBOOL(
            NSSpellChecker.checkSpellingOfString_startingAt_language_wrap_inSpellDocumentWithTag_wordCount_,
            3,
        )
        self.assertArgIsOut(
            NSSpellChecker.checkSpellingOfString_startingAt_language_wrap_inSpellDocumentWithTag_wordCount_,
            5,
        )
        self.assertResultIsBOOL(NSSpellChecker.setLanguage_)
        self.assertArgHasType(
            NSSpellChecker.completionsForPartialWordRange_inString_language_inSpellDocumentWithTag_,
            0,
            NSRange.__typestr__,
        )

    @min_os_level("10.5")
    def testMethods10_5(self):
        # Grammar-checking API introduced in 10.5.
        self.assertArgIsBOOL(
            NSSpellChecker.checkGrammarOfString_startingAt_language_wrap_inSpellDocumentWithTag_details_,
            3,
        )
        self.assertArgIsOut(
            NSSpellChecker.checkGrammarOfString_startingAt_language_wrap_inSpellDocumentWithTag_details_,
            5,
        )
        self.assertResultIsBOOL(NSSpellChecker.hasLearnedWord_)

    @min_os_level("10.6")
    def testMethods10_6(self):
        # Unified text-checking API introduced in 10.6.
        self.assertArgHasType(
            NSSpellChecker.checkString_range_types_options_inSpellDocumentWithTag_orthography_wordCount_,
            1,
            NSRange.__typestr__,
        )
        self.assertArgIsOut(
            NSSpellChecker.checkString_range_types_options_inSpellDocumentWithTag_orthography_wordCount_,
            5,
        )
        self.assertArgIsOut(
            NSSpellChecker.checkString_range_types_options_inSpellDocumentWithTag_orthography_wordCount_,
            6,
        )
        self.assertArgHasType(
            NSSpellChecker.requestCheckingOfString_range_types_options_inSpellDocumentWithTag_completionHandler_,
            1,
            NSRange.__typestr__,
        )
        # Completion handler block: void (NSInteger, id, id, NSInteger).
        self.assertArgIsBlock(
            NSSpellChecker.requestCheckingOfString_range_types_options_inSpellDocumentWithTag_completionHandler_,
            5,
            b"v" + objc._C_NSInteger + b"@@" + objc._C_NSInteger,
        )
        self.assertArgHasType(
            NSSpellChecker.menuForResult_string_options_atLocation_inView_,
            3,
            NSPoint.__typestr__,
        )
        self.assertArgHasType(
            NSSpellChecker.guessesForWordRange_inString_language_inSpellDocumentWithTag_,
            0,
            NSRange.__typestr__,
        )
        self.assertResultIsBOOL(NSSpellChecker.automaticallyIdentifiesLanguages)
        self.assertArgIsBOOL(NSSpellChecker.setAutomaticallyIdentifiesLanguages_, 0)

    @min_os_level("10.7")
    def testMethods10_7(self):
        # Correction-indicator block takes a single object argument.
        self.assertArgIsBlock(
            NSSpellChecker.showCorrectionIndicatorOfType_primaryString_alternativeStrings_forStringInRect_view_completionHandler_,
            5,
            b"v@",
        )
        self.assertResultIsBOOL(NSSpellChecker.isAutomaticTextReplacementEnabled)
        self.assertResultIsBOOL(NSSpellChecker.isAutomaticSpellingCorrectionEnabled)

    @min_os_level("10.6")
    def testConstants10_6(self):
        # Text-checking option keys must exist and be strings.
        self.assertIsInstance(NSTextCheckingOrthographyKey, unicode)
        self.assertIsInstance(NSTextCheckingQuotesKey, unicode)
        self.assertIsInstance(NSTextCheckingReplacementsKey, unicode)
        self.assertIsInstance(NSTextCheckingReferenceDateKey, unicode)
        self.assertIsInstance(NSTextCheckingReferenceTimeZoneKey, unicode)
        self.assertIsInstance(NSTextCheckingDocumentURLKey, unicode)
        self.assertIsInstance(NSTextCheckingDocumentTitleKey, unicode)
        self.assertIsInstance(NSTextCheckingDocumentAuthorKey, unicode)

    @min_os_level("10.7")
    def testConstants10_7(self):
        self.assertIsInstance(NSTextCheckingRegularExpressionsKey, unicode)
        self.assertIsInstance(
            NSSpellCheckerDidChangeAutomaticSpellingCorrectionNotification, unicode
        )
        self.assertIsInstance(
            NSSpellCheckerDidChangeAutomaticTextReplacementNotification, unicode
        )
# Run the test suite when executed directly.  ``main`` is not defined in this
# file — presumably it comes from one of the star imports above
# (PyObjCTools.TestSupport); verify against that module.
if __name__ == "__main__":
    main()
|
frescobaldi-app | tabbar | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The tab bar with the documents.
"""
import platform
import app
import document
import documentcontextmenu
import documenticon
import engrave
import icons
import util
from PyQt5.QtCore import QSettings, Qt, QUrl, pyqtSignal
from PyQt5.QtWidgets import QMenu, QTabBar
class TabBar(QTabBar):
    """The tabbar above the editor window.

    Keeps an internal list ``self.docs`` that mirrors the tab order:
    ``docs[i]`` is the document shown by tab ``i``.  The bar stays in sync
    with the application via the app-level document signals connected in
    ``__init__``.
    """

    # Emitted when the user activates a tab; carries that tab's document.
    currentDocumentChanged = pyqtSignal(document.Document)

    def __init__(self, parent=None):
        """Create the tab bar and wire it to the application's document signals."""
        super().__init__(parent)
        self.setFocusPolicy(Qt.NoFocus)
        self.setMovable(True)  # TODO: make configurable
        self.setExpanding(False)
        self.setUsesScrollButtons(True)
        self.setElideMode(Qt.ElideNone)
        mainwin = self.window()
        # docs[i] corresponds to tab index i; kept in sync by the slots below.
        self.docs = []
        for doc in app.documents:
            self.addDocument(doc)
            if doc is mainwin.currentDocument():
                self.setCurrentDocument(doc)
        # React to application-wide document lifecycle/status events.
        app.documentCreated.connect(self.addDocument)
        app.documentClosed.connect(self.removeDocument)
        app.documentUrlChanged.connect(self.setDocumentStatus)
        app.documentModificationChanged.connect(self.setDocumentStatus)
        app.jobStarted.connect(self.setDocumentStatus)
        app.jobFinished.connect(self.setDocumentStatus)
        app.settingsChanged.connect(self.readSettings)
        engrave.engraver(mainwin).stickyChanged.connect(self.setDocumentStatus)
        mainwin.currentDocumentChanged.connect(self.setCurrentDocument)
        # Our own widget signals.
        self.currentChanged.connect(self.slotCurrentChanged)
        self.tabMoved.connect(self.slotTabMoved)
        self.tabCloseRequested.connect(self.slotTabCloseRequested)
        self.readSettings()
        # Custom stylesheet, applied on macOS only (see below).
        style = """
            QTabBar::tab {
                background: white;
                border-style: solid;
                border-width: 1px 0px;
                border-color: #ACACAC;
                min-width: 8ex;
                padding: 2px 4px 2px 2px;
            }
            QTabBar::tab:selected:active {
                border-color: #045FFF;
                background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
                    stop: 0 #69B1FA, stop: 1 #0C80FF);
                color: white;
            }
            QTabBar::tab:selected:!active {
                background: #E5E5E5;
            }
            QTabBar::tab:first,
            QTabBar::tab:only-one {
                border-left-width: 1px;
                border-top-left-radius: 4px;
                border-bottom-left-radius: 4px;
            }
            QTabBar::tab:!first:!selected:!previous-selected {
                border-left-color: #E5E5E5;
                border-left-width: 1px;
            }
            QTabBar::tab:last,
            QTabBar::tab:only-one {
                border-right-width: 1px;
                border-top-right-radius: 4px;
                border-bottom-right-radius: 4px;
            }
        """
        if platform.system() == "Darwin":
            self.setStyleSheet(style)

    def readSettings(self):
        """Called on init, and when the user changes the settings."""
        s = QSettings()
        self.setTabsClosable(s.value("tabs_closable", True, bool))

    def documents(self):
        """Return a copy of the document list, in tab order."""
        return list(self.docs)

    def addDocument(self, doc):
        """Append a tab for doc (no-op if it already has one)."""
        if doc not in self.docs:
            self.docs.append(doc)
            # Block signals so adding the tab doesn't emit currentChanged.
            self.blockSignals(True)
            self.addTab("")
            self.blockSignals(False)
            self.setDocumentStatus(doc)

    def removeDocument(self, doc):
        """Remove the tab belonging to doc (no-op if absent)."""
        if doc in self.docs:
            index = self.docs.index(doc)
            self.docs.remove(doc)
            self.blockSignals(True)
            self.removeTab(index)
            self.blockSignals(False)

    def setDocumentStatus(self, doc):
        """Update the tab's text, tooltip and icon for doc."""
        if doc in self.docs:
            index = self.docs.index(doc)
            # Escape '&' so it isn't interpreted as a mnemonic marker.
            text = doc.documentName().replace("&", "&&")
            if self.tabText(index) != text:
                self.setTabText(index, text)
            tooltip = util.path(doc.url())
            self.setTabToolTip(index, tooltip)
            self.setTabIcon(index, documenticon.icon(doc, self.window()))

    def setCurrentDocument(self, doc):
        """Raise the tab belonging to this document."""
        if doc in self.docs:
            index = self.docs.index(doc)
            # Programmatic change: suppress slotCurrentChanged re-emission.
            self.blockSignals(True)
            self.setCurrentIndex(index)
            self.blockSignals(False)

    def slotCurrentChanged(self, index):
        """Called when the user clicks a tab."""
        self.currentDocumentChanged.emit(self.docs[index])

    def slotTabCloseRequested(self, index):
        """Called when the user clicks the close button."""
        self.window().closeDocument(self.docs[index])

    def slotTabMoved(self, index_from, index_to):
        """Called when the user moved a tab."""
        doc = self.docs.pop(index_from)
        self.docs.insert(index_to, doc)

    def nextDocument(self):
        """Switches to the next document."""
        index = self.currentIndex() + 1
        if index == self.count():
            index = 0  # wrap around to the first tab
        self.setCurrentIndex(index)

    def previousDocument(self):
        """Switches to the previous document (wrapping to the last tab)."""
        index = self.currentIndex() - 1
        if index < 0:
            index = self.count() - 1
        self.setCurrentIndex(index)

    def contextMenuEvent(self, ev):
        """Show the document context menu for the tab under the cursor."""
        index = self.tabAt(ev.pos())
        if index >= 0:
            self.contextMenu().exec_(self.docs[index], ev.globalPos())

    def contextMenu(self):
        """Return the context menu, creating it on first use."""
        try:
            return self._contextMenu
        except AttributeError:
            # NOTE(review): documentcontextmenu is already imported at file
            # top; this local import looks redundant — verify before removing.
            import documentcontextmenu

            self._contextMenu = documentcontextmenu.DocumentContextMenu(self.window())
        return self._contextMenu
|
mako | pygen | # mako/pygen.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re
from mako import exceptions
class PythonPrinter(object):
    """Write generated Python source to a stream, managing indentation.

    Lines passed to writeline()/writelines() are re-indented according to
    an internal counter that tracks compound statements (if/try/def/...).
    Pre-indented blocks buffered via write_indented_block() are re-aligned
    and flushed on the next writeline() or close().
    """

    def __init__(self, stream):
        # indentation counter
        self.indent = 0

        # a stack storing information about why we incremented
        # the indentation counter, to help us determine if we
        # should decrement it
        self.indent_detail = []

        # the string of whitespace multiplied by the indent
        # counter to produce a line
        # NOTE(review): shown as a single space here; upstream mako uses
        # four spaces — verify this wasn't mangled.
        self.indentstring = " "

        # the stream we are writing to
        self.stream = stream

        # current line number
        self.lineno = 1

        # a list of lines that represents a buffered "block" of code,
        # which can be later printed relative to an indent level
        self.line_buffer = []

        self.in_indent_lines = False

        self._reset_multi_line_flags()

        # mapping of generated python lines to template
        # source lines
        self.source_map = {}

    def _update_lineno(self, num):
        # Advance the output line counter by num emitted lines.
        self.lineno += num

    def start_source(self, lineno):
        # Record the first template line that maps to the current output line.
        if self.lineno not in self.source_map:
            self.source_map[self.lineno] = lineno

    def write_blanks(self, num):
        # Emit num blank lines.
        self.stream.write("\n" * num)
        self._update_lineno(num)

    def write_indented_block(self, block):
        """print a line or lines of python which already contain indentation.

        The indentation of the total block of lines will be adjusted to that of
        the current indent level."""
        self.in_indent_lines = False
        for l in re.split(r"\r?\n", block):
            self.line_buffer.append(l)
            self._update_lineno(1)

    def writelines(self, *lines):
        """print a series of lines of python."""
        for line in lines:
            self.writeline(line)

    def writeline(self, line):
        """print a line of python, indenting it according to the current
        indent level.

        this also adjusts the indentation counter according to the
        content of the line.
        """
        if not self.in_indent_lines:
            # Flush any buffered pre-indented block before switching modes.
            self._flush_adjusted_lines()
            self.in_indent_lines = True

        # Blank lines and comment-only lines carry no indentation meaning.
        if line is None or re.match(r"^\s*#", line) or re.match(r"^\s*$", line):
            hastext = False
        else:
            hastext = True

        is_comment = line and len(line) and line[0] == "#"

        # see if this line should decrease the indentation level
        if not is_comment and (not hastext or self._is_unindentor(line)):
            if self.indent > 0:
                self.indent -= 1
                # if the indent_detail stack is empty, the user
                # probably put extra closures - the resulting
                # module wont compile.
                if len(self.indent_detail) == 0:
                    raise exceptions.SyntaxException("Too many whitespace closures")
                self.indent_detail.pop()

        if line is None:
            return

        # write the line
        self.stream.write(self._indent_line(line) + "\n")
        self._update_lineno(len(line.split("\n")))

        # see if this line should increase the indentation level.
        # note that a line can both decrase (before printing) and
        # then increase (after printing) the indentation level.
        if re.search(r":[ \t]*(?:#.*)?$", line):
            # increment indentation count, and also
            # keep track of what the keyword was that indented us,
            # if it is a python compound statement keyword
            # where we might have to look for an "unindent" keyword
            match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
            if match:
                # its a "compound" keyword, so we will check for "unindentors"
                indentor = match.group(1)
                self.indent += 1
                self.indent_detail.append(indentor)
            else:
                indentor = None
                # its not a "compound" keyword.  but lets also
                # test for valid Python keywords that might be indenting us,
                # else assume its a non-indenting line
                m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line)
                if m2:
                    self.indent += 1
                    # Note: indentor is None here, so _is_unindentor() will
                    # not treat later else/except lines as unindentors.
                    self.indent_detail.append(indentor)

    def close(self):
        """close this printer, flushing any remaining lines."""
        self._flush_adjusted_lines()

    def _is_unindentor(self, line):
        """return true if the given line is an 'unindentor',
        relative to the last 'indent' event received.
        """

        # no indentation detail has been pushed on; return False
        if len(self.indent_detail) == 0:
            return False

        indentor = self.indent_detail[-1]

        # the last indent keyword we grabbed is not a
        # compound statement keyword; return False
        if indentor is None:
            return False

        # if the current line doesnt have one of the "unindentor" keywords,
        # return False
        match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
        if not match:
            return False

        # whitespace matches up, we have a compound indentor,
        # and this line has an unindentor, this
        # is probably good enough
        return True

        # should we decide that its not good enough, heres
        # more stuff to check.
        # keyword = match.group(1)

        # match the original indent keyword
        # for crit in [
        #    (r'if|elif', r'else|elif'),
        #    (r'try', r'except|finally|else'),
        #    (r'while|for', r'else'),
        # ]:
        #    if re.match(crit[0], indentor) and re.match(crit[1], keyword):
        #        return True

        # return False

    def _indent_line(self, line, stripspace=""):
        """indent the given line according to the current indent level.

        stripspace is a string of space that will be truncated from the
        start of the line before indenting."""
        return re.sub(r"^%s" % stripspace, self.indentstring * self.indent, line)

    def _reset_multi_line_flags(self):
        """reset the flags which would indicate we are in a backslashed
        or triple-quoted section."""
        self.backslashed, self.triplequoted = False, False

    def _in_multi_line(self, line):
        """return true if the given line is part of a multi-line block,
        via backslash or triple-quote."""

        # we are only looking for explicitly joined lines here, not
        # implicit ones (i.e. brackets, braces etc.).  this is just to
        # guard against the possibility of modifying the space inside of
        # a literal multiline string with unfortunately placed
        # whitespace

        current_state = self.backslashed or self.triplequoted

        if re.search(r"\\$", line):
            self.backslashed = True
        else:
            self.backslashed = False

        # An odd number of triple-quote delimiters toggles the in-string
        # state.  (The "== 1" test is subsumed by the odd-count test.)
        triples = len(re.findall(r"\"\"\"|\'\'\'", line))
        if triples == 1 or triples % 2 != 0:
            self.triplequoted = not self.triplequoted

        return current_state

    def _flush_adjusted_lines(self):
        # Re-align the buffered pre-indented block to the current indent
        # level, leaving multi-line string contents untouched.
        stripspace = None
        self._reset_multi_line_flags()

        for entry in self.line_buffer:
            if self._in_multi_line(entry):
                self.stream.write(entry + "\n")
            else:
                entry = entry.expandtabs()
                # The first non-blank, non-comment line defines the margin
                # to strip from the whole block.
                if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
                    stripspace = re.match(r"^([ \t]*)", entry).group(1)
                self.stream.write(self._indent_line(entry, stripspace) + "\n")

        self.line_buffer = []
        self._reset_multi_line_flags()
def adjust_whitespace(text):
    """remove the left-whitespace margin of a block of Python code.

    The margin is taken from the first line that contains real code (not
    blank, not comment-only).  Lines that are inside a backslash-continued
    or triple-quoted region are passed through untouched so that string
    literals keep their internal whitespace.
    """
    # Mutable scanner state shared with the nested helper:
    #   "triplequoted" holds the opening delimiter string while inside a
    #   triple-quoted literal (so it can double as the closing pattern),
    #   or False when outside one.
    state = {"backslashed": False, "triplequoted": False}

    def consume(pattern, remainder):
        # Chew `pattern` off the front of `remainder`; return (match, rest).
        found = re.match(pattern, remainder)
        if found:
            return found, remainder[len(found.group(0)):]
        return None, remainder

    def in_multi_line(line):
        # Report whether `line` STARTED inside a multi-line construct,
        # while updating the state for the following line.
        start_state = state["backslashed"] or state["triplequoted"]
        state["backslashed"] = bool(re.search(r"\\$", line))
        while line:
            if state["triplequoted"]:
                found, line = consume(r"%s" % state["triplequoted"], line)
                if found:
                    state["triplequoted"] = False
                else:
                    found, line = consume(
                        r".*?(?=%s|$)" % state["triplequoted"], line
                    )
            else:
                found, line = consume(r"#", line)
                if found:
                    # Rest of the line is a comment; nothing more to scan.
                    return start_state
                found, line = consume(r"\"\"\"|\'\'\'", line)
                if found:
                    state["triplequoted"] = found.group(0)
                    continue
                found, line = consume(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
        return start_state

    margin = None
    adjusted = []
    for line in re.split(r"\r?\n", text):
        if in_multi_line(line):
            adjusted.append(line)
        else:
            line = line.expandtabs()
            if margin is None and re.search(r"^[ \t]*[^# \t]", line):
                margin = re.match(r"^([ \t]*)", line).group(1)
            # Before any code line is seen, margin is None and the pattern
            # "^None" matches nothing — blank/comment lines pass unchanged
            # (same behavior as the original implementation).
            adjusted.append(re.sub(r"^%s" % margin, "", line))
    return "\n".join(adjusted)
|
gui | VariableEditor | """
Copyright 2015, 2016 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from gi.repository import Gdk, GObject, Gtk
from . import Actions, Constants, Utils
# Tree-store column indices: column 0 holds the block object (or None for
# category rows), column 1 holds the displayed ID / category label.
BLOCK_INDEX = 0
ID_INDEX = 1
class VariableEditorContextMenu(Gtk.Menu):
    """A simple context menu for our variable editor."""

    def __init__(self, var_edit):
        Gtk.Menu.__init__(self)

        def make_item(label, action):
            # Build one menu item wired to the editor's central action handler.
            item = Gtk.MenuItem(label=label)
            item.connect("activate", var_edit.handle_action, action)
            self.add(item)
            return item

        self.imports = make_item("Add _Import", var_edit.ADD_IMPORT)
        self.variables = make_item("Add _Variable", var_edit.ADD_VARIABLE)
        self.add(Gtk.SeparatorMenuItem())
        self.enable = make_item("_Enable", var_edit.ENABLE_BLOCK)
        self.disable = make_item("_Disable", var_edit.DISABLE_BLOCK)
        self.add(Gtk.SeparatorMenuItem())
        self.delete = make_item("_Delete", var_edit.DELETE_BLOCK)
        self.add(Gtk.SeparatorMenuItem())
        self.properties = make_item("_Properties...", var_edit.OPEN_PROPERTIES)
        self.show_all()

    def update_sensitive(self, selected, enabled=False):
        """Grey out the actions that don't apply to the current selection."""
        for item, sensitive in (
            (self.delete, selected),
            (self.properties, selected),
            (self.enable, selected and not enabled),
            (self.disable, selected and enabled),
        ):
            item.set_sensitive(sensitive)
class VariableEditor(Gtk.VBox):
# Actions that are handled by the editor
ADD_IMPORT = 0
ADD_VARIABLE = 1
OPEN_PROPERTIES = 2
DELETE_BLOCK = 3
DELETE_CONFIRM = 4
ENABLE_BLOCK = 5
DISABLE_BLOCK = 6
__gsignals__ = {
"create_new_block": (GObject.SignalFlags.RUN_FIRST, None, (str,)),
"remove_block": (GObject.SignalFlags.RUN_FIRST, None, (str,)),
}
def __init__(self):
    """Build the variable editor pane: tree view, columns, and context menu."""
    Gtk.VBox.__init__(self)
    config = Gtk.Application.get_default().config
    self._block = None
    self._mouse_button_pressed = False
    self._imports = []
    self._variables = []

    # Only use the model to store the block reference and name.
    # Generate everything else dynamically
    self.treestore = Gtk.TreeStore(
        GObject.TYPE_PYOBJECT,  # block reference (None for category rows)
        GObject.TYPE_STRING,  # category label or block id
    )
    self.treeview = Gtk.TreeView(model=self.treestore)
    self.treeview.set_enable_search(False)
    self.treeview.set_search_column(-1)
    # self.treeview.set_enable_search(True)
    # self.treeview.set_search_column(ID_INDEX)
    self.treeview.get_selection().set_mode(Gtk.SelectionMode.SINGLE)
    self.treeview.set_headers_visible(True)
    self.treeview.connect("button-press-event", self._handle_mouse_button_press)
    self.treeview.connect("button-release-event", self._handle_mouse_button_release)
    self.treeview.connect("motion-notify-event", self._handle_motion_notify)
    self.treeview.connect("key-press-event", self._handle_key_button_press)

    # Block Name or Category
    self.id_cell = Gtk.CellRendererText()
    self.id_cell.connect("edited", self._handle_name_edited_cb)
    id_column = Gtk.TreeViewColumn("ID", self.id_cell, text=ID_INDEX)
    id_column.set_name("id")
    id_column.set_resizable(True)
    id_column.set_max_width(Utils.scale_scalar(300))
    id_column.set_min_width(Utils.scale_scalar(80))
    id_column.set_fixed_width(Utils.scale_scalar(120))
    id_column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
    id_column.set_cell_data_func(self.id_cell, self.set_properties)
    self.id_column = id_column
    self.treeview.append_column(id_column)
    self.treestore.set_sort_column_id(ID_INDEX, Gtk.SortType.ASCENDING)
    # For forcing resize
    self._col_width = 0

    # Block Value
    self.value_cell = Gtk.CellRendererText()
    self.value_cell.connect("edited", self._handle_value_edited_cb)
    value_column = Gtk.TreeViewColumn("Value", self.value_cell)
    value_column.set_name("value")
    value_column.set_resizable(False)
    value_column.set_expand(True)
    value_column.set_min_width(Utils.scale_scalar(100))
    value_column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
    value_column.set_cell_data_func(self.value_cell, self.set_value)
    self.value_column = value_column
    self.treeview.append_column(value_column)

    # Block Actions (Add, Remove) — rendered as a pixbuf in the value column.
    self.action_cell = Gtk.CellRendererPixbuf()
    value_column.pack_start(self.action_cell, False)
    value_column.set_cell_data_func(self.action_cell, self.set_icon)

    # Make the scrolled window to hold the tree view
    scrolled_window = Gtk.ScrolledWindow()
    scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
    scrolled_window.add(self.treeview)
    scrolled_window.set_size_request(Constants.DEFAULT_BLOCKS_WINDOW_WIDTH, -1)
    self.pack_start(scrolled_window, True, True, 0)

    # Context menus
    self._context_menu = VariableEditorContextMenu(self)
    self._confirm_delete = config.variable_editor_confirm_delete()
# Sets cell contents
def set_icon(self, col, cell, model, iter, data):
    """Cell-data func: block rows get a close (remove) icon, category rows an add icon."""
    row_block = model.get_value(iter, BLOCK_INDEX)
    icon = "window-close" if row_block else "list-add"
    cell.set_property("icon-name", icon)
def set_value(self, col, cell, model, iter, data):
    """Cell-data func for the Value column.

    Shows the block's value (import string, variable value, or an
    '<Open Properties>' placeholder for other block types) and surfaces
    block errors as the widget's tooltip.
    """
    sp = cell.set_property
    block = model.get_value(iter, BLOCK_INDEX)

    # Set the default properties for this column first.
    # Some set in set_properties() may be overridden (editable for advanced variable blocks)
    self.set_properties(col, cell, model, iter, data)

    # Set defaults
    value = None
    self.set_tooltip_text(None)

    # Block specific values
    if block:
        if block.key == "import":
            value = block.params["imports"].get_value()
        elif block.key != "variable":
            # Advanced block types are edited through the properties dialog,
            # not inline.
            value = "<Open Properties>"
            sp("editable", False)
            sp("foreground", "#0D47A1")
        else:
            value = block.params["value"].get_value()

        # Check if there are errors in the blocks.
        # Show the block error as a tooltip
        error_message = block.get_error_messages()
        if len(error_message) > 0:
            # Set the error message to the last error in the list.
            # This should be the first message generated
            self.set_tooltip_text(error_message[-1])
        else:
            # Evaluate and show the value (if it is a variable)
            if block.is_variable:
                value = str(block.evaluate(block.value))
    # Always set the text value.
    sp("text", value)
def set_properties(self, col, cell, model, iter, data):
    """Cell-data func: choose editability and text color for a row.

    Category rows (no block) are read-only with default colors; block rows
    are editable, drawn gray when disabled and red when they have errors.
    """
    block = model.get_value(iter, BLOCK_INDEX)
    # Defaults (apply to category rows).
    cell.set_property("sensitive", True)
    cell.set_property("editable", False)
    cell.set_property("foreground", None)
    if not block:
        return
    # Rows backed by a block are always editable, even when disabled.
    cell.set_property("editable", True)
    if not block.enabled:
        # Disabled blocks are drawn gray (errors are not highlighted).
        cell.set_property("foreground", "gray")
    elif block.get_error_messages():
        cell.set_property("foreground", "red")
def update_gui(self, blocks):
    """Refresh the editor's contents from the given collection of blocks."""
    self._imports = [blk for blk in blocks if blk.is_import]
    self._variables = [blk for blk in blocks if blk.is_variable]
    self._rebuild()
    self.treeview.expand_all()
def _rebuild(self, *args):
    """Repopulate the tree store with the Imports and Variables categories."""
    self.treestore.clear()
    for label, blocks in (
        ("Imports", self._imports),
        ("Variables", sorted(self._variables, key=lambda v: v.name)),
    ):
        parent = self.treestore.append(None, [None, label])
        for block in blocks:
            self.treestore.append(parent, [block, block.params["id"].get_value()])
def _handle_name_edited_cb(self, cell, path, new_text):
    """Commit an edited block ID back to the block, then refresh the editor."""
    edited_block = self.treestore[path][BLOCK_INDEX]
    edited_block.params["id"].set_value(new_text)
    Actions.VARIABLE_EDITOR_UPDATE()
def _handle_value_edited_cb(self, cell, path, new_text):
    """Commit an edited value: import blocks edit 'import', others edit 'value'."""
    edited_block = self.treestore[path][BLOCK_INDEX]
    param_key = "import" if edited_block.is_import else "value"
    edited_block.params[param_key].set_value(new_text)
    Actions.VARIABLE_EDITOR_UPDATE()
def handle_action(self, item, key, event=None):
    """
    Single handler for the different actions that can be triggered by the context menu,
    key presses or mouse clicks. Also triggers an update of the flow graph and editor.
    """
    if key == self.ADD_IMPORT:
        self.emit("create_new_block", "import")
    elif key == self.ADD_VARIABLE:
        self.emit("create_new_block", "variable")
    elif key == self.OPEN_PROPERTIES:
        # TODO: This probably isn't working because the action doesn't expect a parameter
        # Actions.BLOCK_PARAM_MODIFY()
        pass
    elif key == self.DELETE_BLOCK:
        self.emit("remove_block", self._block.name)
    elif key == self.DELETE_CONFIRM:
        if self._confirm_delete:
            # Create a context menu to confirm the delete operation
            confirmation_menu = Gtk.Menu()
            # Escape underscores so they are not treated as mnemonics
            # in the menu-item label.
            block_id = self._block.params["id"].get_value().replace("_", "__")
            confirm = Gtk.MenuItem(label="Delete {}".format(block_id))
            confirm.connect("activate", self.handle_action, self.DELETE_BLOCK)
            confirmation_menu.add(confirm)
            confirmation_menu.show_all()
            confirmation_menu.popup(
                None, None, None, None, event.button, event.time
            )
        else:
            # Confirmation disabled in config: delete immediately.
            self.handle_action(None, self.DELETE_BLOCK, None)
    elif key == self.ENABLE_BLOCK:
        self._block.state = "enabled"
    elif key == self.DISABLE_BLOCK:
        self._block.state = "disabled"
    # Refresh the flow graph and this editor after every action.
    Actions.VARIABLE_EDITOR_UPDATE()
def _handle_mouse_button_press(self, widget, event):
    """
    Handles mouse button for several different events:
    - Double Click to open properties for advanced blocks
    - Click to add/remove blocks

    Returns True to stop further handling of the event, False to propagate.
    """
    # Save the column width to see if it changes on button_release
    self._mouse_button_pressed = True
    self._col_width = self.id_column.get_width()
    path = widget.get_path_at_pos(int(event.x), int(event.y))
    if path:
        # If there is a valid path, then get the row, column and block selected.
        row = self.treestore[path[0]]
        col = path[1]
        self._block = row[BLOCK_INDEX]
        if event.button == 1 and col.get_name() == "value":
            # Make sure this has a block (not the import/variable rows)
            if self._block and event.type == Gdk.EventType._2BUTTON_PRESS:
                # Open the advanced dialog if it is a gui variable
                if self._block.key not in ("variable", "import"):
                    self.handle_action(None, self.OPEN_PROPERTIES, event=event)
                    return True
            if event.type == Gdk.EventType.BUTTON_PRESS:
                # User is adding/removing blocks
                # Make sure this is the action cell (Add/Remove Icons)
                if path[2] > col.cell_get_position(self.action_cell)[0]:
                    if row[1] == "Imports":
                        # Add a new import block.
                        self.handle_action(None, self.ADD_IMPORT, event=event)
                    elif row[1] == "Variables":
                        # Add a new variable block
                        self.handle_action(None, self.ADD_VARIABLE, event=event)
                    else:
                        # A concrete block row: ask for delete confirmation.
                        self.handle_action(None, self.DELETE_CONFIRM, event=event)
                    return True
        elif event.button == 3 and event.type == Gdk.EventType.BUTTON_PRESS:
            # Right click: show the context menu, sensitive only when a
            # block row is under the cursor.
            if self._block:
                self._context_menu.update_sensitive(
                    True, enabled=self._block.enabled
                )
            else:
                self._context_menu.update_sensitive(False)
            self._context_menu.popup(
                None, None, None, None, event.button, event.time
            )
    # Null handler. Stops the treeview from handling double click events.
    if event.type == Gdk.EventType._2BUTTON_PRESS:
        return True
    return False
def _handle_mouse_button_release(self, widget, event):
    """Clear the pressed flag used by _handle_motion_notify; never consumes the event."""
    self._mouse_button_pressed = False
    return False
def _handle_motion_notify(self, widget, event):
    """While dragging a column edge, resize the value column to match."""
    # Check to see if the column size has changed
    if self._mouse_button_pressed and self.id_column.get_width() != self._col_width:
        self.value_column.queue_resize()
    return False
def _handle_key_button_press(self, widget, event):
    """Toggle the selected block with 'd' (disable) / 'e' (enable) keys."""
    model, path = self.treeview.get_selection().get_selected_rows()
    if not path or not self._block:
        return False
    currently_enabled = self._block.enabled
    if currently_enabled and event.string == "d":
        self.handle_action(None, self.DISABLE_BLOCK, None)
        return True
    if not currently_enabled and event.string == "e":
        self.handle_action(None, self.ENABLE_BLOCK, None)
        return True
    return False
|
PyObjCTest | test_IKFilterBrowserPanel | from PyObjCTools.TestSupport import *
from Quartz import *
try:
    unicode
except NameError:
    # Python 3: the `unicode` builtin is gone; alias it to `str` so the
    # isinstance checks below work on both major versions.
    unicode = str
class TestIKFilterBrowserPanel(TestCase):
    """Bindings checks for the ImageKit IKFilterBrowserPanel class and its
    related notification/option-key string constants."""

    @min_os_level("10.5")
    def testMethods(self):
        # Both begin* methods take a didEndSelector; assert its signature is
        # v@:@<NSInteger>^v (void return, object, NSInteger returnCode,
        # void* contextInfo).
        # NOTE(review): `objc` is not imported explicitly in this file;
        # presumably it is re-exported by the star imports above — confirm.
        self.assertArgIsSEL(
            IKFilterBrowserPanel.beginWithOptions_modelessDelegate_didEndSelector_contextInfo_,
            2,
            b"v@:@" + objc._C_NSInteger + b"^v",
        )
        self.assertArgIsSEL(
            IKFilterBrowserPanel.beginSheetWithOptions_modalForWindow_modalDelegate_didEndSelector_contextInfo_,
            3,
            b"v@:@" + objc._C_NSInteger + b"^v",
        )

    @min_os_level("10.5")
    def testConstants(self):
        # All framework string constants must bridge to Python strings.
        self.assertIsInstance(IKFilterBrowserFilterSelectedNotification, unicode)
        self.assertIsInstance(IKFilterBrowserFilterDoubleClickNotification, unicode)
        self.assertIsInstance(IKFilterBrowserWillPreviewFilterNotification, unicode)
        self.assertIsInstance(IKFilterBrowserShowCategories, unicode)
        self.assertIsInstance(IKFilterBrowserShowPreview, unicode)
        self.assertIsInstance(IKFilterBrowserExcludeCategories, unicode)
        self.assertIsInstance(IKFilterBrowserExcludeFilters, unicode)
        self.assertIsInstance(IKFilterBrowserDefaultInputImage, unicode)
if __name__ == "__main__":
    # main() is presumably provided by the PyObjCTools.TestSupport star
    # import (standard pyobjc test-runner entry point).
    main()
|
ci | workflow_generate | #!/usr/bin/env python
import shlex
import textwrap
import jinja2
import yaml
class GithubActionsYamlLoader(yaml.SafeLoader):
    """SafeLoader variant that rejects YAML anchors and aliases.

    GitHub Actions' workflow parser does not support them, so loading the
    generated workflow with this loader catches accidental use.
    """

    @staticmethod
    def _unsupported(kind, token):
        # Build (not raise) the error so the fetch_* overrides can raise it.
        return SyntaxError(
            "Github Actions does not support %s:\n%s" % (kind, token.start_mark)
        )

    def fetch_anchor(self):
        # Let the base class consume the token, then reject it.
        super().fetch_anchor()
        raise self._unsupported("anchors", self.tokens[0])

    def fetch_alias(self):
        super().fetch_alias()
        raise self._unsupported("aliases", self.tokens[0])
# Jinja2 environment using "<% %>"/"<@ @>" delimiters so the rendered
# workflow may contain GitHub Actions' own "${{ }}" expressions literally.
environment = jinja2.Environment(
    block_start_string="<%",
    block_end_string="%>",
    variable_start_string="<@",
    variable_end_string="@>",
    comment_start_string="<#",
    comment_end_string="#>",
    lstrip_blocks=True,
    trim_blocks=True,
)

# Load the rendering context and the workflow template from the repo.
with open(".github/workflows/ci/workflow_context.yml") as fp:
    context = yaml.load(fp, Loader=yaml.SafeLoader)
with open(".github/workflows/ci/workflow_template.yml") as fp:
    template = environment.from_string(fp.read())

# Derive per-job fields consumed by the template.
for j in context["jobs"]:
    base_type = j["type"].split("_")[0]
    # e.g. type "test_x" + variant "Ubuntu 20.04" -> id "test_ubuntu_2004".
    j["id"] = "%s_%s" % (
        base_type,
        j["variant"].lower().replace(" ", "_").replace(".", ""),
    )
    j["name"] = "%s (%s)" % (base_type.capitalize(), j["variant"])
    j["needs"] = j.get("needs", [])
    j["reqs"] = ["reqs/%s.txt" % r for r in j["reqs"]]
    j["cache_extra_deps"] = j.get("cache_extra_deps", [])
    if context["skippy_enabled"]:
        # Path to the "skip_cache" file.
        j["skip_cache_path"] = ".skip_cache_{id}".format(**j)
        # Name of the "skip_cache" (name + tree SHA1 = key).
        # NOTE(review): cache_epoch is passed to format() but not referenced
        # by the format string — confirm whether it should be part of the key.
        j["skip_cache_name"] = "skip_{id}_py-{python}_{platform}".format(
            cache_epoch=context["cache_epoch"], **j
        )
    # Flatten the job dict into shell variable assignments (lists become
    # shell arrays) for use inside script steps of the template.
    shell_definition = []
    for k, v in sorted(j.items()):
        if isinstance(v, list):
            v = "(" + " ".join(map(shlex.quote, v)) + ")"
        else:
            v = shlex.quote(v)
        shell_definition.append("job_%s=%s" % (k, v))
    j["shell_definition"] = "; ".join(shell_definition)

# Render template.
workflow = template.render(context)

# Save result.
with open(".github/workflows/ci.yml", "w") as fp:
    fp.write(
        textwrap.dedent(
            """
            #
            # DO NOT MODIFY! AUTO-GENERATED FROM:
            # .github/workflows/ci/workflow_template.yml
            #
            """
        ).lstrip()
    )
    fp.write(workflow)

# And try parsing it to check it's valid YAML,
# and ensure anchors/aliases are not used.
GithubActionsYamlLoader(workflow).get_single_data()
|
migrations | 0001_initial | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Migration files are frozen history: only comments may be changed here.
    # Creates the DepositRecord model, mapped onto the pre-existing
    # "papers_depositrecord" table (see options below).

    dependencies = [
        ("upload", "0002_uploadedpdf_num_pages"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("papers", "0001_squashed_0059_remove_django_geojson"),
    ]

    operations = [
        migrations.CreateModel(
            name="DepositRecord",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("request", models.TextField(null=True, blank=True)),
                ("identifier", models.CharField(max_length=512, null=True, blank=True)),
                ("pdf_url", models.URLField(max_length=1024, null=True, blank=True)),
                # auto_now: updated on every save.
                ("date", models.DateTimeField(auto_now=True)),
                ("upload_type", models.FileField(upload_to="deposits")),
                (
                    "file",
                    models.ForeignKey(
                        to="upload.UploadedPDF", on_delete=models.CASCADE
                    ),
                ),
                (
                    "paper",
                    models.ForeignKey(to="papers.Paper", on_delete=models.CASCADE),
                ),
                (
                    "user",
                    models.ForeignKey(
                        to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE
                    ),
                ),
            ],
            options={
                "db_table": "papers_depositrecord",
            },
            bases=(models.Model,),
        ),
    ]
|
mako | util | # mako/util.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import collections
import operator
import os
import re
from mako import compat
def update_wrapper(decorated, fn):
    """Copy *fn*'s name onto *decorated* and record the wrapped callable.

    A minimal stand-in for functools.update_wrapper; returns *decorated*.
    """
    decorated.__name__ = fn.__name__
    decorated.__wrapped__ = fn
    return decorated
class PluginLoader(object):
    """Load named implementations from a setuptools entry-point group.

    Loaded (or registered) implementations are cached in ``self.impls`` as
    zero-argument factories keyed by name.
    """

    def __init__(self, group):
        self.group = group
        self.impls = {}

    def load(self, name):
        """Return the implementation for *name*, resolving it from the
        entry-point group on first use; raises RuntimeException if absent."""
        factory = self.impls.get(name)
        if factory is not None:
            return factory()
        import pkg_resources

        for impl in pkg_resources.iter_entry_points(self.group, name):
            # Cache the loader so subsequent calls skip pkg_resources.
            self.impls[name] = impl.load
            return impl.load()
        from mako import exceptions

        raise exceptions.RuntimeException(
            "Can't load plugin %s %s" % (self.group, name)
        )

    def register(self, name, modulepath, objname):
        """Register a lazy loader returning ``objname`` from ``modulepath``."""

        def load():
            mod = __import__(modulepath)
            for token in modulepath.split(".")[1:]:
                mod = getattr(mod, token)
            return getattr(mod, objname)

        self.impls[name] = load
def verify_directory(dir):
    """create and/or verify a filesystem directory.

    Retries up to 5 times to tolerate a race where another process creates
    (or removes) the directory between the exists() check and makedirs().
    """
    tries = 0
    while not os.path.exists(dir):
        try:
            tries += 1
            # 0o775: group-writable so co-operating processes can share
            # generated module directories (was compat.octal("0775")).
            os.makedirs(dir, 0o775)
        except OSError:
            # Narrowed from a bare `except:` — makedirs failures (including
            # FileExistsError from a concurrent creator) are OSError; a bare
            # except would also swallow KeyboardInterrupt/SystemExit.
            if tries > 5:
                raise
def to_list(x, default=None):
    """Coerce *x* into a list-ish value.

    None becomes *default*; lists/tuples pass through unchanged; any other
    value is wrapped in a one-element list.
    """
    if x is None:
        return default
    return x if isinstance(x, (list, tuple)) else [x]
class memoized_property(object):
    """A read-only @property that is only evaluated once.

    The first access stores the computed value in the instance ``__dict__``
    under the property's name, so later attribute lookups bypass this
    descriptor entirely.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: expose the descriptor.
            return self
        value = self.fget(obj)
        obj.__dict__[self.__name__] = value
        return value
class memoized_instancemethod(object):
    """Decorate a method memoize its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self

        def oneshot(*args, **kw):
            # First call: compute the value, then shadow this bound method
            # on the instance with a constant-returning stub.
            value = self.fget(obj, *args, **kw)

            def memo(*a, **kw):
                return value

            memo.__name__ = self.__name__
            memo.__doc__ = self.__doc__
            obj.__dict__[self.__name__] = memo
            return value

        oneshot.__name__ = self.__name__
        oneshot.__doc__ = self.__doc__
        return oneshot
class SetLikeDict(dict):
    """a dictionary that has some setlike methods on it"""

    def union(self, other):
        """produce a 'union' of this dict and another (at the key level).

        values in the second dict take precedence over that of the first"""
        merged = SetLikeDict(**self)
        merged.update(other)
        return merged
class FastEncodingBuffer(object):
    """a very rudimentary buffer that is faster than StringIO,
    but doesn't crash on unicode data like cStringIO."""

    def __init__(self, encoding=None, errors="strict", as_unicode=False):
        self.data = collections.deque()
        self.encoding = encoding
        self.delim = compat.u("") if as_unicode else ""
        self.as_unicode = as_unicode
        self.errors = errors
        # Bind append directly so write() is a single attribute lookup.
        self.write = self.data.append

    def truncate(self):
        """Drop all buffered fragments and rebind write() to the new deque."""
        self.data = collections.deque()
        self.write = self.data.append

    def getvalue(self):
        """Join the fragments; encode to bytes when an encoding was given."""
        joined = self.delim.join(self.data)
        if self.encoding:
            return joined.encode(self.encoding, self.errors)
        return joined
class LRUCache(dict):
    """A dictionary-like object that stores a limited number of items,
    discarding lesser used items periodically.

    this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
    paradigm so that synchronization is not really needed.  the size management
    is inexact.
    """

    class _Item(object):
        # Internal wrapper pairing a value with its last-access timestamp.
        def __init__(self, key, value):
            self.key = key
            self.value = value
            self.timestamp = compat.time_func()

        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity, threshold=0.5):
        # Pruning triggers once len > capacity * (1 + threshold) and trims
        # back to `capacity` items (see _manage_size).
        self.capacity = capacity
        self.threshold = threshold

    def __getitem__(self, key):
        # Refresh the timestamp on every read so recently used entries
        # survive pruning.
        item = dict.__getitem__(self, key)
        item.timestamp = compat.time_func()
        return item.value

    def values(self):
        # Unwrap _Item objects; returns a list, not a view.
        return [i.value for i in dict.values(self)]

    def setdefault(self, key, value):
        if key in self:
            return self[key]
        else:
            self[key] = value
            return value

    def __setitem__(self, key, value):
        item = dict.get(self, key)
        if item is None:
            item = self._Item(key, value)
            dict.__setitem__(self, key, item)
        else:
            # Existing entry: update in place, keeping the old timestamp.
            item.value = value
        self._manage_size()

    def _manage_size(self):
        # Prune oldest entries once the overflow allowance is exceeded;
        # deliberately tolerant of concurrent mutation (see class docstring).
        while len(self) > self.capacity + self.capacity * self.threshold:
            bytime = sorted(
                dict.values(self), key=operator.attrgetter("timestamp"), reverse=True
            )
            for item in bytime[self.capacity :]:
                try:
                    del self[item.key]
                except KeyError:
                    # if we couldn't find a key, most likely some other thread
                    # broke in on us. loop around and try again
                    break
# Regexp to match python magic encoding line
_PYTHON_MAGIC_COMMENT_re = re.compile(
r"[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)", re.VERBOSE
)
def parse_encoding(fp):
    """Deduce the encoding of a Python source file (binary mode) from magic
    comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object in binary mode.
    Returns the encoding name, "utf_8" for a BOM-only file, or None.
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8) :]
        m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode("ascii", "ignore"))
        if not m:
            # The magic comment may also be on line 2, but only if line 1 is
            # complete standalone Python (otherwise line 2 continues line 1).
            # NOTE(review): the `parser` module was removed in Python 3.10;
            # there the ImportError branch silently skips the line-2 check —
            # confirm that behavior is acceptable.
            try:
                import parser

                parser.suite(line1.decode("ascii", "ignore"))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source
                # is not valid python source, or line2 is a continuation of
                # line1, in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = _PYTHON_MAGIC_COMMENT_re.match(line2.decode("ascii", "ignore"))
        if has_bom:
            if m:
                raise SyntaxError(
                    "python refuses to compile code with both a UTF8"
                    " byte-order-mark and a magic encoding comment"
                )
            return "utf_8"
        elif m:
            return m.group(1)
        else:
            return None
    finally:
        # Always restore the caller's file position.
        fp.seek(pos)
def sorted_dict_repr(d):
    """repr() a dictionary with the keys in order.

    Used by the lexer unit test to compare parse trees based on strings.
    """
    pairs = ("%r: %r" % (k, d[k]) for k in sorted(d))
    return "{" + ", ".join(pairs) + "}"
def restore__ast(_ast):
    """Attempt to restore the required classes to the _ast module if it
    appears to be missing them

    Compiles a snippet exercising every syntactic construct mako needs and
    recovers the node classes from the resulting AST by position.
    """
    if hasattr(_ast, "AST"):
        # Module already provides the classes; nothing to do.
        return
    _ast.PyCF_ONLY_AST = 2 << 9
    # NOTE: the string below is runtime data; each statement's position is
    # relied upon by the index-based assignments that follow.
    m = compile(
        """\
def foo(): pass
class Bar(object): pass
if False: pass
baz = 'mako'
1 + 2 - 3 * 4 / 5
6 // 7 % 8 << 9 >> 10
11 & 12 ^ 13 | 14
15 and 16 or 17
-baz + (not +18) - ~17
baz and 'foo' or 'bar'
(mako is baz == baz) is not baz != mako
mako > baz < mako >= baz <= mako
mako in baz not in mako""",
        "<unknown>",
        "exec",
        _ast.PyCF_ONLY_AST,
    )
    _ast.Module = type(m)

    # Walk the MRO to recover the abstract base classes.
    for cls in _ast.Module.__mro__:
        if cls.__name__ == "mod":
            _ast.mod = cls
        elif cls.__name__ == "AST":
            _ast.AST = cls

    # Recover concrete node/operator classes from specific positions in the
    # compiled snippet above (body[N] is the N-th statement).
    _ast.FunctionDef = type(m.body[0])
    _ast.ClassDef = type(m.body[1])
    _ast.If = type(m.body[2])

    _ast.Name = type(m.body[3].targets[0])
    _ast.Store = type(m.body[3].targets[0].ctx)
    _ast.Str = type(m.body[3].value)

    _ast.Sub = type(m.body[4].value.op)
    _ast.Add = type(m.body[4].value.left.op)
    _ast.Div = type(m.body[4].value.right.op)
    _ast.Mult = type(m.body[4].value.right.left.op)

    _ast.RShift = type(m.body[5].value.op)
    _ast.LShift = type(m.body[5].value.left.op)
    _ast.Mod = type(m.body[5].value.left.left.op)
    _ast.FloorDiv = type(m.body[5].value.left.left.left.op)

    _ast.BitOr = type(m.body[6].value.op)
    _ast.BitXor = type(m.body[6].value.left.op)
    _ast.BitAnd = type(m.body[6].value.left.left.op)

    _ast.Or = type(m.body[7].value.op)
    _ast.And = type(m.body[7].value.values[0].op)

    _ast.Invert = type(m.body[8].value.right.op)
    _ast.Not = type(m.body[8].value.left.right.op)
    _ast.UAdd = type(m.body[8].value.left.right.operand.op)
    _ast.USub = type(m.body[8].value.left.left.op)

    _ast.Or = type(m.body[9].value.op)
    _ast.And = type(m.body[9].value.values[0].op)

    _ast.IsNot = type(m.body[10].value.ops[0])
    _ast.NotEq = type(m.body[10].value.ops[1])
    _ast.Is = type(m.body[10].value.left.ops[0])
    _ast.Eq = type(m.body[10].value.left.ops[1])

    _ast.Gt = type(m.body[11].value.ops[0])
    _ast.Lt = type(m.body[11].value.ops[1])
    _ast.GtE = type(m.body[11].value.ops[2])
    _ast.LtE = type(m.body[11].value.ops[3])

    _ast.In = type(m.body[12].value.ops[0])
    _ast.NotIn = type(m.body[12].value.ops[1])
def read_file(path, mode="rb"):
    """Read and return the full contents of *path*.

    :param path: filesystem path to read
    :param mode: open mode; "rb" (default) yields bytes, "r" yields text
    """
    # `with` replaces the original try/finally + explicit close().
    with open(path, mode) as fp:
        return fp.read()
def read_python_file(path):
    """Read *path* as Python source, honoring its magic encoding comment.

    Returns decoded text when an encoding is declared (via parse_encoding),
    otherwise the raw bytes.
    """
    # `with` replaces the original try/finally + explicit close().
    with open(path, "rb") as fp:
        encoding = parse_encoding(fp)
        data = fp.read()
        if encoding:
            data = data.decode(encoding)
        return data
|
views | add_to_timeline_treeview | """
@file
@brief This file contains the add to timeline file treeview
@author Jonathan Thomas <jonathan@openshot.org>
@section LICENSE
Copyright (c) 2008-2018 OpenShot Studios, LLC
(http://www.openshotstudios.com). This file is part of
OpenShot Video Editor (http://www.openshot.org), an open-source project
dedicated to delivering high quality video editing and animation solutions
to the world.
OpenShot Video Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenShot Video Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
"""
from classes import info
from classes.app import get_app
from PyQt5.QtCore import QSize
from PyQt5.QtWidgets import QAbstractItemView, QTreeView
from windows.models.add_to_timeline_model import TimelineModel
class TimelineTreeView(QTreeView):
    """A TreeView QWidget used on the add to timeline window"""

    def currentChanged(self, selected, deselected):
        """Remember the most recent selection change."""
        # Get selected item
        self.selected = selected
        self.deselected = deselected

        # Get translation object
        _ = self.app._tr

    def contextMenuEvent(self, event):
        # Ignore event, propagate to parent
        event.ignore()

    def mousePressEvent(self, event):
        # Ignore event, propagate to parent
        event.ignore()
        super().mousePressEvent(event)

    def refresh_view(self):
        """Reload the model data and keep the internal column hidden."""
        self.timeline_model.update_model()
        self.hideColumn(2)

    def __init__(self, *args):
        # Invoke parent init
        QTreeView.__init__(self, *args)

        # Get a reference to the window object
        self.app = get_app()
        self.win = args[0]

        # Get Model data
        self.timeline_model = TimelineModel()

        # Keep track of mouse press start position to determine when to start drag
        self.selected = None
        self.deselected = None

        # Setup header columns
        self.setModel(self.timeline_model.model)
        self.setIconSize(info.TREE_ICON_SIZE)
        self.setIndentation(0)
        # Fix: the original called setSelectionBehavior twice with the same
        # inherited flag (QTreeView.SelectRows IS QAbstractItemView.SelectRows);
        # one call suffices.
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setWordWrap(True)
        self.setStyleSheet("QTreeView::item { padding-top: 2px; }")

        # Refresh view
        self.refresh_view()
|
docbrowser | sourceviewer | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
A dialog to view LilyPond source.
"""
import app
import highlighter
import qutil
import textformats
from PyQt5.QtCore import QSettings, QSize, Qt
from PyQt5.QtWidgets import QDialog, QLabel, QSizePolicy, QTextBrowser, QVBoxLayout
class SourceViewer(QDialog):
    """Dialog that displays LilyPond source fetched via a network reply,
    using the editor's color scheme and syntax highlighting."""

    def __init__(self, browser):
        super().__init__(browser.parentWidget())
        layout = QVBoxLayout()
        layout.setContentsMargins(4, 4, 4, 4)
        self.setLayout(layout)
        self.urlLabel = QLabel(wordWrap=True)
        layout.addWidget(self.urlLabel)
        self.textbrowser = QTextBrowser()
        layout.addWidget(self.textbrowser)
        self.urlLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        self.textbrowser.setLineWrapMode(QTextBrowser.NoWrap)
        # Re-style whenever the application settings change.
        app.settingsChanged.connect(self.readSettings)
        self.readSettings()
        app.translateUI(self)
        # Persist/restore the dialog size under this settings key.
        qutil.saveDialogSize(self, "helpbrowser/sourceviewer/size", QSize(400, 300))

    def translateUI(self):
        # NOTE(review): `_` is presumably the translation function installed
        # globally by the app's i18n setup — confirm.
        self.setWindowTitle(app.caption(_("LilyPond Source")))

    def readSettings(self):
        # Apply the editor color scheme and font, then re-highlight.
        data = textformats.formatData("editor")
        self.textbrowser.setPalette(data.palette())
        self.textbrowser.setFont(data.font)
        highlighter.highlight(self.textbrowser.document())

    def showReply(self, reply):
        """Take ownership of a network reply, show its URL immediately; the
        body is rendered by loadingFinished once the reply completes."""
        reply.setParent(self)
        self.urlLabel.setText(reply.url().toString())
        self._reply = reply
        reply.finished.connect(self.loadingFinished)
        self.textbrowser.clear()
        self.show()

    def loadingFinished(self):
        # Read everything, then release the reply object.
        data = self._reply.readAll()
        self._reply.close()
        self._reply.deleteLater()
        del self._reply
        self.textbrowser.clear()
        # Decode as UTF-8, substituting undecodable bytes.
        self.textbrowser.setText(str(data, "utf-8", "replace"))
        highlighter.highlight(self.textbrowser.document())
|
Path | InitGui | # ***************************************************************************
# * Copyright (c) 2014 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
class PathCommandGroup:
    """Groups several Path commands under one FreeCAD toolbar drop-down."""

    def __init__(self, cmdlist, menu, tooltip=None):
        self.cmdlist = cmdlist
        self.menu = menu
        # The tooltip defaults to the menu text itself.
        self.tooltip = menu if tooltip is None else tooltip

    def GetCommands(self):
        """Return the grouped command names as a tuple."""
        return tuple(self.cmdlist)

    def GetResources(self):
        """Return the resource dict FreeCAD uses to render the group."""
        return {"MenuText": self.menu, "ToolTip": self.tooltip}

    def IsActive(self):
        """Active only when the active document contains a Job object."""
        doc = FreeCAD.ActiveDocument
        if doc is not None:
            return any(o.Name[:3] == "Job" for o in doc.Objects)
        return False
class PathWorkbench(Workbench):
    "Path workbench"

    def __init__(self):
        # Icon/labels are set on the class because FreeCAD reads them from
        # the workbench type before instantiation completes.
        self.__class__.Icon = (
            FreeCAD.getResourceDir() + "Mod/Path/Resources/icons/PathWorkbench.svg"
        )
        self.__class__.MenuText = "Path"
        self.__class__.ToolTip = "Path workbench"

    def Initialize(self):
        # Builds all toolbars, menus and preference pages for the workbench.
        global PathCommandGroup
        # Add preferences pages - before loading PathGui to properly order pages of Path group
        import Path.Dressup.Gui.Preferences as PathPreferencesPathDressup
        import Path.Main.Gui.PreferencesJob as PathPreferencesPathJob

        translate = FreeCAD.Qt.translate

        # load the builtin modules
        import Path
        import PathGui
        import PathScripts
        from PySide import QtCore, QtGui

        FreeCADGui.addLanguagePath(":/translations")
        FreeCADGui.addIconPath(":/icons")

        import subprocess

        import Path.GuiInit
        import PathCommands
        from packaging.version import Version, parse
        from Path.Main.Gui import JobCmd as PathJobCmd
        from Path.Tool.Gui import BitCmd as PathToolBitCmd
        from Path.Tool.Gui import BitLibraryCmd as PathToolBitLibraryCmd
        from PySide.QtCore import QT_TRANSLATE_NOOP

        FreeCADGui.addPreferencePage(
            PathPreferencesPathJob.JobPreferencesPage,
            QT_TRANSLATE_NOOP("QObject", "Path"),
        )
        FreeCADGui.addPreferencePage(
            PathPreferencesPathDressup.DressupPreferencesPage,
            QT_TRANSLATE_NOOP("QObject", "Path"),
        )
        Path.GuiInit.Startup()

        # build commands list
        projcmdlist = ["Path_Job", "Path_Post", "Path_Sanity"]
        toolcmdlist = [
            "Path_Inspect",
            "Path_Simulator",
            "Path_SelectLoop",
            "Path_OpActiveToggle",
        ]
        prepcmdlist = [
            "Path_Fixture",
            "Path_Comment",
            "Path_Stop",
            "Path_Custom",
            "Path_Probe",
        ]
        twodopcmdlist = [
            "Path_Profile",
            "Path_Pocket_Shape",
            "Path_Drilling",
            "Path_MillFace",
            "Path_Helix",
            "Path_Adaptive",
        ]
        threedopcmdlist = ["Path_Pocket3D"]
        engravecmdlist = ["Path_Engrave", "Path_Deburr", "Path_Vcarve"]
        modcmdlist = ["Path_OperationCopy", "Path_Array", "Path_SimpleCopy"]
        dressupcmdlist = [
            "Path_DressupAxisMap",
            "Path_DressupPathBoundary",
            "Path_DressupDogbone",
            "Path_DressupDragKnife",
            "Path_DressupLeadInOut",
            "Path_DressupRampEntry",
            "Path_DressupTag",
            "Path_DressupZCorrect",
        ]
        extracmdlist = []
        # modcmdmore = ["Path_Hop",]
        # remotecmdlist = ["Path_Remote"]
        specialcmdlist = []

        toolcmdlist.extend(PathToolBitLibraryCmd.BarList)
        toolbitcmdlist = PathToolBitLibraryCmd.MenuList

        # Group the engraving commands under a single drop-down.
        engravecmdgroup = ["Path_EngraveTools"]
        FreeCADGui.addCommand(
            "Path_EngraveTools",
            PathCommandGroup(
                engravecmdlist,
                QT_TRANSLATE_NOOP("Path_EngraveTools", "Engraving Operations"),
            ),
        )

        threedcmdgroup = threedopcmdlist
        if Path.Preferences.experimentalFeaturesEnabled():
            prepcmdlist.append("Path_Shape")
            extracmdlist.extend(["Path_Area", "Path_Area_Workplane"])
            specialcmdlist.append("Path_ThreadMilling")
            twodopcmdlist.append("Path_Slot")

        if Path.Preferences.advancedOCLFeaturesEnabled():
            # CAMotics integration: only offered when camotics >= 1.2.2 is
            # found on PATH (its --version prints to stderr).
            try:
                r = subprocess.run(
                    ["camotics", "--version"], capture_output=True, text=True
                ).stderr.strip()
                v = parse(r)
                if v >= Version("1.2.2"):
                    toolcmdlist.append("Path_Camotics")
            except (FileNotFoundError, ModuleNotFoundError):
                pass
            # OpenCamLib-based 3D ops (Surface/Waterline), if ocl importable.
            try:
                try:
                    import ocl  # pylint: disable=unused-variable
                except ImportError:
                    import opencamlib as ocl
                from Path.Op.Gui import Surface, Waterline

                threedopcmdlist.extend(["Path_Surface", "Path_Waterline"])
                threedcmdgroup = ["Path_3dTools"]
                FreeCADGui.addCommand(
                    "Path_3dTools",
                    PathCommandGroup(
                        threedopcmdlist,
                        QT_TRANSLATE_NOOP("Path_3dTools", "3D Operations"),
                    ),
                )
            except ImportError:
                if not Path.Preferences.suppressOpenCamLibWarning():
                    FreeCAD.Console.PrintError("OpenCamLib is not working!\n")

        # Toolbars.
        self.appendToolbar(QT_TRANSLATE_NOOP("Workbench", "Project Setup"), projcmdlist)
        self.appendToolbar(QT_TRANSLATE_NOOP("Workbench", "Tool Commands"), toolcmdlist)
        self.appendToolbar(
            QT_TRANSLATE_NOOP("Workbench", "New Operations"),
            twodopcmdlist + engravecmdgroup + threedcmdgroup,
        )
        self.appendToolbar(
            QT_TRANSLATE_NOOP("Workbench", "Path Modification"), modcmdlist
        )
        if extracmdlist:
            self.appendToolbar(
                QT_TRANSLATE_NOOP("Workbench", "Helpful Tools"), extracmdlist
            )

        # Menus.
        self.appendMenu(
            [QT_TRANSLATE_NOOP("Workbench", "&Path")],
            projcmdlist
            + ["Path_ExportTemplate", "Separator"]
            + toolcmdlist
            + toolbitcmdlist
            + ["Separator"]
            + twodopcmdlist
            + engravecmdlist
            + ["Separator"]
            + threedopcmdlist
            + ["Separator"],
        )
        self.appendMenu(
            [
                QT_TRANSLATE_NOOP("Workbench", "&Path"),
                QT_TRANSLATE_NOOP("Workbench", "Path Dressup"),
            ],
            dressupcmdlist,
        )
        self.appendMenu(
            [
                QT_TRANSLATE_NOOP("Workbench", "&Path"),
                QT_TRANSLATE_NOOP("Workbench", "Supplemental Commands"),
            ],
            prepcmdlist,
        )
        self.appendMenu(
            [
                QT_TRANSLATE_NOOP("Workbench", "&Path"),
                QT_TRANSLATE_NOOP("Workbench", "Path Modification"),
            ],
            modcmdlist,
        )
        if specialcmdlist:
            self.appendMenu(
                [
                    QT_TRANSLATE_NOOP("Workbench", "&Path"),
                    QT_TRANSLATE_NOOP("Workbench", "Specialty Operations"),
                ],
                specialcmdlist,
            )
        if extracmdlist:
            self.appendMenu([QT_TRANSLATE_NOOP("Workbench", "&Path")], extracmdlist)
        self.appendMenu([QT_TRANSLATE_NOOP("Workbench", "&Path")], ["Separator"])
        self.appendMenu(
            [
                QT_TRANSLATE_NOOP("Workbench", "&Path"),
                QT_TRANSLATE_NOOP("Workbench", "Utils"),
            ],
            ["Path_PropertyBag"],
        )
        # Used later by ContextMenu to offer the dressup commands.
        self.dressupcmds = dressupcmdlist

        curveAccuracy = Path.Preferences.defaultLibAreaCurveAccuracy()
        if curveAccuracy:
            Path.Area.setDefaultParams(Accuracy=curveAccuracy)

        # keep this one the last entry in the preferences
        import Path.Base.Gui.PreferencesAdvanced as PathPreferencesAdvanced
        from Path.Preferences import preferences

        FreeCADGui.addPreferencePage(
            PathPreferencesAdvanced.AdvancedPreferencesPage,
            QT_TRANSLATE_NOOP("QObject", "Path"),
        )
        Log("Loading Path workbench... done\n")

        # Warn user if current schema doesn't use minute for time in velocity
        if not Path.Preferences.suppressVelocity():
            velString = FreeCAD.Units.Quantity(
                1, FreeCAD.Units.Velocity
            ).getUserPreferred()[2][3:]
            if velString != "min":
                current_schema = FreeCAD.Units.listSchemas(FreeCAD.Units.getSchema())
                msg = translate(
                    "Path",
                    "The currently selected unit schema: \n '{}'\n Does not use 'minutes' for velocity values. \n \nCNC machines require feed rate to be expressed in \nunit/minute. To ensure correct G-code: \nSelect a minute-based schema in preferences.\nFor example:\n 'Metric, Small Parts & CNC'\n 'US Customary'\n 'Imperial Decimal'",
                ).format(current_schema)
                header = translate("Path", "Warning")
                msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Warning, header, msg)
                msgbox.addButton(translate("Path", "Ok"), QtGui.QMessageBox.AcceptRole)
                msgbox.addButton(
                    translate("Path", "Don't Show This Anymore"),
                    QtGui.QMessageBox.ActionRole,
                )
                # Button index 1 == "Don't Show This Anymore".
                if msgbox.exec_() == 1:
                    preferences().SetBool("WarningSuppressVelocity", True)

    def GetClassName(self):
        return "Gui::PythonWorkbench"

    def Activated(self):
        # update the translation engine
        FreeCADGui.updateLocale()
        # Msg("Path workbench activated\n")

    def Deactivated(self):
        # Msg("Path workbench deactivated\n")
        pass

    def ContextMenu(self, recipient):
        # Populate the document-tree context menu for Path objects.
        # NOTE(review): `obj.Proxy` is accessed unguarded and
        # `self.toolbitctxmenu` is never assigned in this class as visible
        # here — confirm both are provided elsewhere.
        import PathScripts

        menuAppended = False
        if len(FreeCADGui.Selection.getSelection()) == 1:
            obj = FreeCADGui.Selection.getSelection()[0]
            if obj.isDerivedFrom("Path::Feature"):
                self.appendContextMenu("", "Separator")
                self.appendContextMenu("", ["Path_Inspect"])
                selectedName = obj.Name
                if "Remote" in selectedName:
                    self.appendContextMenu("", ["Refresh_Path"])
                if "Job" in selectedName:
                    self.appendContextMenu(
                        "", ["Path_ExportTemplate"] + self.toolbitctxmenu
                    )
                menuAppended = True
            if isinstance(obj.Proxy, Path.Op.Base.ObjectOp):
                self.appendContextMenu(
                    "", ["Path_OperationCopy", "Path_OpActiveToggle"]
                )
                menuAppended = True
            if obj.isDerivedFrom("Path::Feature"):
                if (
                    "Profile" in selectedName
                    or "Contour" in selectedName
                    or "Dressup" in selectedName
                ):
                    self.appendContextMenu("", "Separator")
                    # self.appendContextMenu("", ["Set_StartPoint"])
                    # self.appendContextMenu("", ["Set_EndPoint"])
                    for cmd in self.dressupcmds:
                        self.appendContextMenu("", [cmd])
                    menuAppended = True
            if isinstance(obj.Proxy, Path.Tool.Bit.ToolBit):
                self.appendContextMenu("", ["Path_ToolBitSave", "Path_ToolBitSaveAs"])
                menuAppended = True
        if menuAppended:
            self.appendContextMenu("", "Separator")
# Register the workbench with the FreeCAD GUI and declare the G-code file
# extensions that open via PathGui.
Gui.addWorkbench(PathWorkbench())
FreeCAD.addImportType("GCode (*.nc *.gc *.ncc *.ngc *.cnc *.tap *.gcode)", "PathGui")
|
gui | external_editor | """
Copyright 2015 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
import os
import subprocess
import sys
import tempfile
import threading
import time
class ExternalEditor(threading.Thread):
    """Watches a temp file opened in an external editor and reports changes.

    The file is seeded with *value*; whenever its mtime advances, the new
    contents are passed to *callback*. The file is removed when the monitor
    thread exits.
    """

    def __init__(self, editor, name, value, callback):
        threading.Thread.__init__(self)
        self.daemon = True
        self._stop_event = threading.Event()
        self.editor = editor
        self.callback = callback
        self.filename = self._create_tempfile(name, value)

    def _create_tempfile(self, name, value):
        # delete=False so the external editor can reopen the file by path.
        with tempfile.NamedTemporaryFile(
            mode="wb",
            prefix=name + "_",
            suffix=".py",
            delete=False,
        ) as fp:
            fp.write(value.encode("utf-8"))
            return fp.name

    def open_editor(self):
        """Launch the configured editor on the temp file; returns the Popen."""
        proc = subprocess.Popen(args=(self.editor, self.filename))
        proc.poll()
        return proc

    def stop(self):
        """Ask the monitor loop to exit on its next poll."""
        self._stop_event.set()

    def run(self):
        path = self.filename
        last_mtime = os.path.getmtime(path)
        try:
            while not self._stop_event.is_set():
                current = os.path.getmtime(path)
                if current > last_mtime:
                    # File was saved since the last poll: push new contents.
                    last_mtime = current
                    with open(path, "rb") as fp:
                        self.callback(fp.read().decode("utf-8"))
                time.sleep(1)
        except Exception as e:
            print("file monitor crashed:", str(e), file=sys.stderr)
        finally:
            # Best-effort cleanup of the temp file.
            try:
                os.remove(path)
            except OSError:
                pass
if __name__ == "__main__":
    # Manual smoke test: open gedit on a seeded temp file, monitor it for
    # 15 seconds (printing any saved changes), then shut down cleanly.
    e = ExternalEditor("/usr/bin/gedit", "test", "content", print)
    e.open_editor()
    e.start()
    time.sleep(15)
    e.stop()
    e.join()
|
utils | pystone | #!/usr/bin/python3 -OO
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
# Default number of benchmark passes (overridable on the command line).
LOOPS = 50000

from time import perf_counter

__version__ = "1.1"

# Enumeration constants (1..5) used by the Record/Proc logic below.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
    """Mutable record type mirroring the Ada/C benchmark's record struct."""

    def __init__(self, PtrComp=None, Discr=0, EnumComp=0, IntComp=0, StringComp=0):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        """Return a shallow field-by-field copy of this record."""
        duplicate = Record()
        duplicate.PtrComp = self.PtrComp
        duplicate.Discr = self.Discr
        duplicate.EnumComp = self.EnumComp
        duplicate.IntComp = self.IntComp
        duplicate.StringComp = self.StringComp
        return duplicate
# C-style boolean aliases kept for fidelity with the original benchmark.
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
    """Run the benchmark and report elapsed time and pystones/second."""
    elapsed, rate = pystones(loops)
    print("Pystone(%s) time for %d passes = %g" % (__version__, loops, elapsed))
    print("This machine benchmarks at %g pystones/second" % rate)
def pystones(loops=LOOPS):
    """Return (benchmark_seconds, pystones_per_second) for *loops* passes."""
    return Proc0(loops)
# Global benchmark state, mirroring the C version's globals.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = "\0"
Char2Glob = "\0"
Array1Glob = [0] * 51
# 51x51 matrix: each row is an independent copy of Array1Glob.
Array2Glob = [x[:] for x in [Array1Glob] * 51]
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
    """Main benchmark driver: returns (benchtime, loops/benchtime).

    Empty-loop overhead (nulltime) is measured first and subtracted from
    the timed benchmark loop.
    """
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext
    # Measure empty-loop overhead so it can be subtracted later.
    starttime = perf_counter()
    for i in range(loops):
        pass
    nulltime = perf_counter() - starttime
    # Set up the global record chain and arrays used by the Procs.
    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10
    starttime = perf_counter()
    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = "A"
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, "C"):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex) + 1)
        IntLoc3 = IntLoc2 * IntLoc1
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)
    benchtime = perf_counter() - starttime - nulltime
    if benchtime == 0.0:
        # Guard against division by zero on extremely fast/coarse clocks.
        loopsPerBenchtime = 0.0
    else:
        loopsPerBenchtime = loops / benchtime
    return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
    """Exercise record copying/aliasing; returns the (possibly replaced) record."""
    # NextRecord aliases the freshly attached copy of PtrGlb.
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Zap the self-reference to avoid leaking memory (v1.1 fix, see header).
    NextRecord.PtrComp = None
    return PtrParIn
def Proc2(IntParIO):
    """Loop until EnumLoc becomes Ident1; returns IntLoc - IntGlob.

    NOTE(review): EnumLoc is only bound when Char1Glob == "A"; Proc5 sets
    Char1Glob to "A" before each Proc0 iteration, so in the benchmark the
    first pass always binds it -- confirm before calling out of context.
    """
    IntLoc = IntParIO + 10
    while 1:
        if Char1Glob == "A":
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO
def Proc3(PtrParOut):
    """Return PtrGlb.PtrComp and refresh PtrGlb.IntComp via Proc7."""
    global IntGlob
    if PtrGlb is not None:
        PtrParOut = PtrGlb.PtrComp
    else:
        # Faithful to the C original: this branch would fail on the next
        # statement (PtrGlb is None), but the benchmark never takes it.
        IntGlob = 100
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
def Proc4():
    """Set Char2Glob to "B"; the boolean read mirrors the C original."""
    global Char2Glob
    # Result intentionally unused, as in the original benchmark.
    BoolLoc = (Char1Glob == "A") or BoolGlob
    Char2Glob = "B"
def Proc5():
    """Reset Char1Glob to "A" and BoolGlob to FALSE."""
    global Char1Glob
    global BoolGlob
    Char1Glob = "A"
    BoolGlob = FALSE
def Proc6(EnumParIn):
    """Map one enumeration value to another per the Dhrystone table."""
    EnumParOut = EnumParIn
    # Non-Ident3 inputs default to Ident4 before the specific mapping below.
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + (IntParI1 + 2)."""
    return IntParI2 + (IntParI1 + 2)
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Write benchmark patterns into the 1-D and 2-D arrays; sets IntGlob = 5."""
    global IntGlob
    base = IntParI1 + 5
    Array1Par[base] = IntParI2
    Array1Par[base + 1] = Array1Par[base]
    Array1Par[base + 30] = base
    # Touch a small window of the 2-D array around the base row.
    row = Array2Par[base]
    for col in range(base, base + 2):
        row[col] = base
    row[base - 1] += 1
    Array2Par[base + 20][base] = Array1Par[base]
    IntGlob = 5
def Func1(CharPar1, CharPar2):
    """Return Ident1 when the two characters differ, Ident2 when they match."""
    return Ident1 if CharPar1 != CharPar2 else Ident2
def Func2(StrParI1, StrParI2):
    """String-comparison kernel; returns TRUE/FALSE.

    NOTE(review): CharLoc is only bound when the Func1 branch inside the
    loop is taken; with the strings Proc0 passes in, it always is.
    """
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc + 1]) == Ident1:
            CharLoc = "A"
            IntLoc = IntLoc + 1
    if "W" <= CharLoc <= "Z":
        IntLoc = 7
    if CharLoc == "X":
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
def Func3(EnumParIn):
    """Return TRUE when the argument equals Ident3, else FALSE."""
    return TRUE if EnumParIn == Ident3 else FALSE
if __name__ == "__main__":
    # Command-line entry point: optional single argument = number of loops.
    import sys

    def error(msg):
        # Print the error plus usage, then exit with status 100.
        print(msg, end=" ", file=sys.stderr)
        print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
        sys.exit(100)

    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        try:
            loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
|
torrentlist | queue_mode | #
# Copyright (C) 2011 Nick Lanham <nick@afternight.org>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from deluge.ui.client import client
from deluge.ui.console.utils import curses_util as util
from deluge.ui.console.widgets.popup import MessagePopup, SelectablePopup
from . import ACTION
try:
import curses
except ImportError:
pass
# Maps curses navigation keys to queue actions.
# NOTE(review): handle_read also accepts KEY_NPAGE/KEY_PPAGE, which have no
# entry here -- confirm the intended mapping before relying on those keys.
key_to_action = {
    curses.KEY_HOME: ACTION.QUEUE_TOP,
    curses.KEY_UP: ACTION.QUEUE_UP,
    curses.KEY_DOWN: ACTION.QUEUE_DOWN,
    curses.KEY_END: ACTION.QUEUE_BOTTOM,
}
# Help text shown in the popup opened with 'h' (Deluge console markup).
QUEUE_MODE_HELP_STR = """
Change queue position of selected torrents
{!info!}'+'{!normal!} - {|indent_pos:|}Move up
{!info!}'-'{!normal!} - {|indent_pos:|}Move down
{!info!}'Home'{!normal!} - {|indent_pos:|}Move to top
{!info!}'End'{!normal!} - {|indent_pos:|}Move to bottom
"""
class QueueMode:
    """Minor torrentlist mode that changes the queue position of torrents.

    Attributes:
        torrentslist: the owning torrent list component.
        torrentview: the torrent view whose cursor/selection is updated.
        torrent_ids: ids of the torrents the queue actions apply to.
    """

    def __init__(self, torrentslist, torrent_ids):
        self.torrentslist = torrentslist
        self.torrentview = torrentslist.torrentview
        self.torrent_ids = torrent_ids

    def set_statusbar_args(self, statusbar_args):
        """Describe this mode in the status bar."""
        statusbar_args[
            "bottombar"
        ] = "{!black,white!}Queue mode: change queue position of selected torrents."
        statusbar_args["bottombar_help"] = " Press [h] for help"

    def update_cursor(self):
        """No cursor handling needed in queue mode."""
        pass

    def update_colors(self, tidx, colors):
        """No per-row color overrides in queue mode."""
        pass

    def handle_read(self, c):
        """Dispatch a key press: Esc/Ctrl-g leaves the mode, 'h' shows help,
        keys present in key_to_action trigger the matching queue move."""
        if c in [util.KEY_ESC, util.KEY_BELL]:  # If Escape key or CTRL-g, we abort
            self.torrentslist.set_minor_mode(None)
        elif c == ord("h"):
            popup = MessagePopup(
                self.torrentslist,
                "Help",
                QUEUE_MODE_HELP_STR,
                width_req=0.65,
                border_off_west=1,
            )
            self.torrentslist.push_popup(popup, clear=True)
        elif c in key_to_action:
            # BUGFIX: the previous key list also contained KEY_NPAGE and
            # KEY_PPAGE, which have no entry in key_to_action and therefore
            # raised KeyError. Dispatching on dict membership keeps mapped
            # keys working and makes unmapped keys a harmless no-op.
            self.do_queue(key_to_action[c])

    def move_selection(self, cb_arg, qact):
        """Keep cursor and marked rows on the moved torrents after the
        daemon confirms a queue change (callback from do_queue)."""
        if self.torrentslist.config["torrentview"]["move_selection"] is False:
            return
        queue_length = 0
        selected_num = 0
        for tid in self.torrentview.curstate:
            tq = self.torrentview.curstate[tid]["queue"]
            if tq != -1:
                queue_length += 1
                if tq in self.torrentview.marked:
                    selected_num += 1
        if qact == ACTION.QUEUE_TOP:
            if self.torrentview.marked:
                self.torrentview.cursel = 1 + sorted(self.torrentview.marked).index(
                    self.torrentview.cursel
                )
            else:
                self.torrentview.cursel = 1
            self.torrentview.marked = list(range(1, selected_num + 1))
        elif qact == ACTION.QUEUE_UP:
            self.torrentview.cursel = max(1, self.torrentview.cursel - 1)
            self.torrentview.marked = [marked - 1 for marked in self.torrentview.marked]
            self.torrentview.marked = [
                marked for marked in self.torrentview.marked if marked > 0
            ]
        elif qact == ACTION.QUEUE_DOWN:
            self.torrentview.cursel = min(queue_length, self.torrentview.cursel + 1)
            self.torrentview.marked = [marked + 1 for marked in self.torrentview.marked]
            self.torrentview.marked = [
                marked for marked in self.torrentview.marked if marked <= queue_length
            ]
        elif qact == ACTION.QUEUE_BOTTOM:
            if self.torrentview.marked:
                self.torrentview.cursel = (
                    queue_length
                    - selected_num
                    + 1
                    + sorted(self.torrentview.marked).index(self.torrentview.cursel)
                )
            else:
                self.torrentview.cursel = queue_length
            self.torrentview.marked = list(
                range(queue_length - selected_num + 1, queue_length + 1)
            )

    def do_queue(self, qact, *args, **kwargs):
        """Ask the daemon to move self.torrent_ids, then fix up the selection."""
        if qact == ACTION.QUEUE_TOP:
            client.core.queue_top(self.torrent_ids).addCallback(
                self.move_selection, qact
            )
        elif qact == ACTION.QUEUE_BOTTOM:
            client.core.queue_bottom(self.torrent_ids).addCallback(
                self.move_selection, qact
            )
        elif qact == ACTION.QUEUE_UP:
            client.core.queue_up(self.torrent_ids).addCallback(
                self.move_selection, qact
            )
        elif qact == ACTION.QUEUE_DOWN:
            client.core.queue_down(self.torrent_ids).addCallback(
                self.move_selection, qact
            )

    def popup(self, **kwargs):
        """Show a selectable popup with the four queue actions."""
        popup = SelectablePopup(
            self.torrentslist,
            "Queue Action",
            self.do_queue,
            cb_args=kwargs,
            border_off_west=1,
        )
        popup.add_line(ACTION.QUEUE_TOP, "_Top")
        popup.add_line(ACTION.QUEUE_UP, "_Up")
        popup.add_line(ACTION.QUEUE_DOWN, "_Down")
        popup.add_line(ACTION.QUEUE_BOTTOM, "_Bottom")
        self.torrentslist.push_popup(popup)
|
tagsources | freedb | from . import CDDB
CDDB.proto = 6 # utf8 instead of latin1
import time
from collections import defaultdict
from puddlestuff import audioinfo, version_string
from puddlestuff.tagsources import RetrievalError
from puddlestuff.util import to_string, translate
CLIENTINFO = {"client_name": "puddletag", "client_version": version_string}
def sumdigits(n):
    """Return the sum of the decimal digits of *n*."""
    return sum(int(digit) for digit in str(n))
def sort_func(key, default):
    """Build a sort key function for audio objects.

    The returned callable looks up *key* on the audio object (falling back
    to *default*) and returns it as an int when possible, so numeric tags
    such as track numbers sort numerically instead of lexically.
    """

    def func(audio):
        track = to_string(audio.get(key, [default]))
        try:
            return int(track)
        except (TypeError, ValueError):
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion failures
            # should fall back to string ordering.
            return track

    return func
def calculate_discid(album):
    """Compute a FreeDB/CDDB disc-id query for *album* (a list of tracks).

    Returns [discid, n_tracks, frame_offsets..., total_seconds] -- the
    argument list expected by CDDB.query.
    """
    # from quodlibet's cddb plugin by Michael Urman
    album = sorted(album, key=sort_func("__filename", ""))
    album = sorted(album, key=sort_func("track", "1"))
    lengths = [audioinfo.lnglength(to_string(song["__length"])) for song in album]
    total_time = 0
    offsets = []
    for length in lengths:
        offsets.append(total_time)
        total_time += length
    # Classic CDDB checksum: sum of digit-sums of the track start offsets.
    checksum = sum(map(sumdigits, offsets))
    discid = ((checksum % 0xFF) << 24) | (total_time << 8) | len(album)
    # Offsets are reported in CD frames (75 per second).
    return [discid, len(album)] + [75 * o for o in offsets] + [total_time]
def convert_info(info):
    """Normalise a CDDB result dict to puddletag tag names, in place.

    'category'/'disc_id'/'title' are renamed to '#category'/'#discid'/
    'album', other string values are decoded, and an "Artist / Album"
    title is split into separate fields. Returns the same dict.
    """
    keys = {"category": "#category", "disc_id": "#discid", "title": "album"}
    # Iterate over a snapshot of the keys since the dict is mutated below.
    for key in list(info.keys()):
        if key not in ["disc_id", "category"] and isinstance(info[key], str):
            info[key] = decode_str(info[key])
        if key in keys:
            info[keys[key]] = info[key]
            del info[key]
    if "artist" not in info and "album" in info:
        try:
            info["artist"], info["album"] = [
                z.strip() for z in info["album"].split(" / ", 1)
            ]
        except (TypeError, ValueError):
            # Album title was not of the "Artist / Album" form; keep as-is.
            pass
    if "#discid" in info:
        info["freedb_disc_id"] = decode_str(info["#discid"])
    if "#category" in info:
        info["freedb_category"] = decode_str(info["#category"])
    return info
def convert_tracks(disc):
    """Convert a CDDB {tracknum: title} mapping into a list of tag dicts.

    Track numbers are 0-based in the CDDB response and 1-based in the
    returned dicts; "Artist / Title" values are split into both fields.
    """
    tracks = []
    for tracknum, title in sorted(disc.items()):
        track = {"track": str(tracknum + 1)}
        if " / " in title:
            track["artist"], track["title"] = [
                z.strip() for z in decode_str(title).split(" / ", 1)
            ]
        else:
            # BUGFIX: decode in this branch too; previously a non-str title
            # was stored raw here, unlike the artist/title branch above.
            track["title"] = decode_str(title)
        tracks.append(track)
    return tracks
def decode_str(s):
    """Return *s* as str, decoding bytes as UTF-8 (bad bytes replaced)."""
    if isinstance(s, str):
        return s
    return s.decode("utf8", "replace")
def query(category, discid, xcode="utf8:utf8"):
    """Fetch one disc record from CDDB.

    Returns (discinfo_dict, {tracknum: title}), or None when the server
    does not answer with status 210. *xcode* re-encodes values through
    "from:to" codecs for servers with mis-tagged charsets.
    """
    # from quodlibet's cddb plugin by Michael Urman
    discinfo = {}
    tracktitles = {}
    read, info = CDDB.read(category, discid, **CLIENTINFO)
    if read != 210:
        return None
    xf, xt = xcode.split(":")
    for key, value in info.items():
        try:
            value = (
                value.decode("utf-8", "replace")
                .strip()
                .encode(xf, "replace")
                .decode(xt, "replace")
            )
        except AttributeError:
            # Already a str; nothing to re-code.
            pass
        if key.startswith("TTITLE"):
            # Track titles are keyed TTITLE<n>.
            try:
                tracktitles[int(key[6:])] = value
            except ValueError:
                pass
        elif key == "DGENRE":
            discinfo["genre"] = value
        elif key == "DTITLE":
            # DTITLE is conventionally "Artist / Album".
            dtitle = value.strip().split(" / ", 1)
            if len(dtitle) == 2:
                discinfo["artist"], discinfo["title"] = dtitle
            else:
                discinfo["title"] = dtitle[0].strip()
        elif key == "DYEAR":
            discinfo["year"] = value
    return discinfo, tracktitles
def retrieve(category, discid):
    """Fetch and convert a full disc record; returns (info, tracks).

    Raises RetrievalError on network/OS errors.
    NOTE(review): query() returns None when the server status is not 210,
    which makes the unpacking below raise TypeError -- confirm whether
    that case should instead raise RetrievalError.
    """
    try:
        info, tracks = query(category, discid)
    except EnvironmentError as e:
        raise RetrievalError(e.strerror)
    if "disc_id" not in info:
        info["disc_id"] = discid
    if "category" not in info:
        info["category"] = category
    return convert_info(info), convert_tracks(tracks)
def retrieve_from_info(info):
    """Retrieve a disc using the '#category'/'#discid' keys of *info*."""
    return retrieve(info["#category"], info["#discid"])
def search(tracklist):
    """Search CDDB for each album group in *tracklist*; returns all matches."""
    matches = []
    for album_tracks in split_by_tag(tracklist, "album", None).values():
        matches.extend(search_by_id(calculate_discid(album_tracks)))
    return matches
def search_by_id(discid):
    """Query CDDB with a precomputed discid query list.

    Returns a list of (info, []) pairs; empty when nothing matched.
    Raises RetrievalError on network/OS errors.
    """
    try:
        stat, discs = CDDB.query(discid, **CLIENTINFO)
    except EnvironmentError as e:
        raise RetrievalError(e.strerror)
    # 200 = exact match (single dict), 211 = inexact matches (list).
    if stat not in [200, 211]:
        return []
    if discs:
        if hasattr(discs, "items"):
            # Exact match: normalise the single dict into a list.
            discs = [discs]
        return [(convert_info(info), []) for info in discs]
    return []
def split_by_tag(tracks, main="artist", secondary="album"):
    """Group *tracks* by the value of *main* (and optionally *secondary*).

    With *secondary*, returns {main_value: {secondary_value: [track, ...]}};
    without it, returns {main_value: [track, ...]}.
    """
    # IDIOM FIX: the grouping used list comprehensions purely for their
    # side effects; plain loops express the intent and avoid building
    # throwaway lists.
    if secondary:
        grouped = defaultdict(lambda: defaultdict(list))
        for track in tracks:
            grouped[to_string(track.get(main))][
                to_string(track.get(secondary))
            ].append(track)
    else:
        grouped = defaultdict(list)
        for track in tracks:
            grouped[to_string(track.get(main))].append(track)
    return grouped
class FreeDB(object):
    """puddletag tag source backed by FreeDB/CDDB disc-id lookups."""

    name = "FreeDB"
    tooltip = translate("FreeDB", "<b>FreeDB does not support text-based searches.</b>")
    group_by = ["album", None]

    def __init__(self):
        object.__init__(self)
        # Cache of already retrieved discs, keyed by '#discid'.
        self.__retrieved = {}
        # Timestamp of the last request, used for crude rate limiting.
        self.__lasttime = time.time()

    def search(self, album, files):
        """Look up *files* (grouped as one album) by computed disc id.

        The first match is replaced by the fully retrieved disc.
        """
        # NOTE(review): throttle threshold is 1000 *seconds* -- confirm
        # whether 1000 ms (1 second) was intended.
        if time.time() - self.__lasttime < 1000:
            time.sleep(1)
        if files:
            results = search(files)
            self.__lasttime = time.time()
            if results:
                results[0] = self.retrieve(results[0][0])
            return results
        else:
            return []

    def retrieve(self, info):
        """Return (info, tracks) for a search result, caching by disc id."""
        if time.time() - self.__lasttime < 1000:
            time.sleep(1)
        discid = info["#discid"]
        if discid in self.__retrieved:
            return self.__retrieved[discid]
        else:
            info, tracks = retrieve_from_info(info)
            self.__retrieved[info["#discid"]] = [info, tracks]
            return info, tracks
# Entry point class used by puddletag's tag-source discovery.
info = FreeDB
if __name__ == "__main__":
    # return [({'#discid': '0200d001', '#category': 'soundtrack', 'album': 'German'}, [])]
    # Manual smoke test against a local music folder.
    import glob

    from .. import audioinfo

    files = list(
        map(audioinfo.Tag, glob.glob("/mnt/multimedia/Music/Ratatat - Classics/*.mp3"))
    )
    print(search(files))
|
extractor | ruutu | # coding: utf-8
from __future__ import unicode_literals
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
ExtractorError,
determine_ext,
find_xpath_attr,
int_or_none,
unified_strdate,
url_or_none,
xpath_attr,
xpath_text,
)
from .common import InfoExtractor
class RuutuIE(InfoExtractor):
    """Extractor for ruutu.fi and supla.fi (Nelonen Media) video/audio.

    Metadata and stream locations come from the media-xml-cache endpoint;
    stream URLs are upgraded to authenticated ones via auth/access when
    the server provides them.
    """

    _VALID_URL = r"""(?x)
        https?://
            (?:
                (?:www\.)?(?:ruutu|supla)\.fi/(?:video|supla|audio)/|
                static\.nelonenmedia\.fi/player/misc/embed_player\.html\?.*?\bnid=
            )
            (?P<id>\d+)
    """
    _TESTS = [
        {
            "url": "http://www.ruutu.fi/video/2058907",
            "md5": "ab2093f39be1ca8581963451b3c0234f",
            "info_dict": {
                "id": "2058907",
                "ext": "mp4",
                "title": "Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!",
                "description": "md5:cfc6ccf0e57a814360df464a91ff67d6",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 114,
                "age_limit": 0,
            },
        },
        {
            "url": "http://www.ruutu.fi/video/2057306",
            "md5": "065a10ae4d5b8cfd9d0c3d332465e3d9",
            "info_dict": {
                "id": "2057306",
                "ext": "mp4",
                "title": "Superpesis: katso koko kausi Ruudussa",
                "description": "md5:bfb7336df2a12dc21d18fa696c9f8f23",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 40,
                "age_limit": 0,
            },
        },
        {
            "url": "http://www.supla.fi/supla/2231370",
            "md5": "df14e782d49a2c0df03d3be2a54ef949",
            "info_dict": {
                "id": "2231370",
                "ext": "mp4",
                "title": "Osa 1: Mikael Jungner",
                "description": "md5:7d90f358c47542e3072ff65d7b1bcffe",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "age_limit": 0,
            },
        },
        # Episode where <SourceFile> is "NOT-USED", but has other
        # downloadable sources available.
        {
            "url": "http://www.ruutu.fi/video/3193728",
            "only_matching": True,
        },
        {
            # audio podcast
            "url": "https://www.supla.fi/supla/3382410",
            "md5": "b9d7155fed37b2ebf6021d74c4b8e908",
            "info_dict": {
                "id": "3382410",
                "ext": "mp3",
                "title": "Mikä ihmeen poltergeist?",
                "description": "md5:bbb6963df17dfd0ecd9eb9a61bf14b52",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "age_limit": 0,
            },
            "expected_warnings": [
                "HTTP Error 502: Bad Gateway",
                "Failed to download m3u8 information",
            ],
        },
        {
            "url": "http://www.supla.fi/audio/2231370",
            "only_matching": True,
        },
        {
            "url": "https://static.nelonenmedia.fi/player/misc/embed_player.html?nid=3618790",
            "only_matching": True,
        },
        {
            # episode
            "url": "https://www.ruutu.fi/video/3401964",
            "info_dict": {
                "id": "3401964",
                "ext": "mp4",
                "title": "Temptation Island Suomi - Kausi 5 - Jakso 17",
                "description": "md5:87cf01d5e1e88adf0c8a2937d2bd42ba",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 2582,
                "age_limit": 12,
                "upload_date": "20190508",
                "series": "Temptation Island Suomi",
                "season_number": 5,
                "episode_number": 17,
                "categories": [
                    "Reality ja tositapahtumat",
                    "Kotimaiset suosikit",
                    "Romantiikka ja parisuhde",
                ],
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            # premium
            "url": "https://www.ruutu.fi/video/3618715",
            "only_matching": True,
        },
    ]
    _API_BASE = "https://gatling.nelonenmedia.fi"

    def _real_extract(self, url):
        """Download the clip XML and assemble formats plus metadata."""
        video_id = self._match_id(url)
        video_xml = self._download_xml(
            "%s/media-xml-cache" % self._API_BASE, video_id, query={"id": video_id}
        )
        formats = []
        processed_urls = []

        def extract_formats(node):
            # Recursively walk *Files containers and collect *File entries.
            for child in node:
                if child.tag.endswith("Files"):
                    extract_formats(child)
                elif child.tag.endswith("File"):
                    video_url = child.text
                    # Skip empty, duplicate, or placeholder source entries.
                    if (
                        not video_url
                        or video_url in processed_urls
                        or any(p in video_url for p in ("NOT_USED", "NOT-USED"))
                    ):
                        continue
                    processed_urls.append(video_url)
                    ext = determine_ext(video_url)
                    # Best-effort swap-in of an authenticated stream URL.
                    auth_video_url = url_or_none(
                        self._download_webpage(
                            "%s/auth/access/v2" % self._API_BASE,
                            video_id,
                            note="Downloading authenticated %s stream URL" % ext,
                            fatal=False,
                            query={"stream": video_url},
                        )
                    )
                    if auth_video_url:
                        processed_urls.append(auth_video_url)
                        video_url = auth_video_url
                    if ext == "m3u8":
                        formats.extend(
                            self._extract_m3u8_formats(
                                video_url,
                                video_id,
                                "mp4",
                                entry_protocol="m3u8_native",
                                m3u8_id="hls",
                                fatal=False,
                            )
                        )
                    elif ext == "f4m":
                        formats.extend(
                            self._extract_f4m_formats(
                                video_url, video_id, f4m_id="hds", fatal=False
                            )
                        )
                    elif ext == "mpd":
                        # video-only and audio-only streams are of different
                        # duration resulting in out of sync issue
                        continue
                        # NOTE: DASH extraction is intentionally disabled by
                        # the `continue` above; the code below is kept for
                        # when the sync issue is resolved.
                        formats.extend(
                            self._extract_mpd_formats(
                                video_url, video_id, mpd_id="dash", fatal=False
                            )
                        )
                    elif ext == "mp3" or child.tag == "AudioMediaFile":
                        formats.append(
                            {
                                "format_id": "audio",
                                "url": video_url,
                                "vcodec": "none",
                            }
                        )
                    else:
                        # Plain progressive HTTP or RTMP source entries.
                        proto = compat_urllib_parse_urlparse(video_url).scheme
                        if not child.tag.startswith("HTTP") and proto != "rtmp":
                            continue
                        preference = -1 if proto == "rtmp" else 1
                        label = child.get("label")
                        tbr = int_or_none(child.get("bitrate"))
                        format_id = (
                            "%s-%s" % (proto, label if label else tbr)
                            if label or tbr
                            else proto
                        )
                        if not self._is_valid_url(video_url, video_id, format_id):
                            continue
                        width, height = [
                            int_or_none(x)
                            for x in child.get("resolution", "x").split("x")[:2]
                        ]
                        formats.append(
                            {
                                "format_id": format_id,
                                "url": video_url,
                                "width": width,
                                "height": height,
                                "tbr": tbr,
                                "preference": preference,
                            }
                        )

        extract_formats(video_xml.find("./Clip"))

        def pv(name):
            # Read a named passthrough variable from the clip XML; None if absent.
            node = find_xpath_attr(
                video_xml, "./Clip/PassthroughVariables/variable", "name", name
            )
            if node is not None:
                return node.get("value")

        if not formats:
            # No formats found: report DRM or non-free clips explicitly.
            drm = xpath_text(video_xml, "./Clip/DRM", default=None)
            if drm:
                raise ExtractorError("This video is DRM protected.", expected=True)
            ns_st_cds = pv("ns_st_cds")
            if ns_st_cds != "free":
                raise ExtractorError("This video is %s." % ns_st_cds, expected=True)

        self._sort_formats(formats)

        themes = pv("themes")

        return {
            "id": video_id,
            "title": xpath_attr(
                video_xml, ".//Behavior/Program", "program_name", "title", fatal=True
            ),
            "description": xpath_attr(
                video_xml, ".//Behavior/Program", "description", "description"
            ),
            "thumbnail": xpath_attr(
                video_xml, ".//Behavior/Startpicture", "href", "thumbnail"
            ),
            "duration": int_or_none(xpath_text(video_xml, ".//Runtime", "duration"))
            or int_or_none(pv("runtime")),
            "age_limit": int_or_none(xpath_text(video_xml, ".//AgeLimit", "age limit")),
            "upload_date": unified_strdate(pv("date_start")),
            "series": pv("series_name"),
            "season_number": int_or_none(pv("season_number")),
            "episode_number": int_or_none(pv("episode_number")),
            "categories": themes.split(",") if themes else [],
            "formats": formats,
        }
|
searx | query | # SPDX-License-Identifier: AGPL-3.0-or-later
import re
from abc import ABC, abstractmethod
from searx.engines import categories, engine_shortcuts, engines
from searx.external_bang import get_bang_definition_and_autocomplete
from searx.languages import language_codes
from searx.search import EngineRef
from searx.webutils import VALID_LANGUAGE_CODE
class QueryPartParser(ABC):
    """Base class for parsers handling one kind of special query prefix."""

    __slots__ = "raw_text_query", "enable_autocomplete"

    @staticmethod
    @abstractmethod
    def check(raw_value):
        """Check if raw_value can be parsed"""

    def __init__(self, raw_text_query, enable_autocomplete):
        self.raw_text_query = raw_text_query
        self.enable_autocomplete = enable_autocomplete

    @abstractmethod
    def __call__(self, raw_value):
        """Try to parse raw_value: set the self.raw_text_query properties
        return True if raw_value has been parsed
        self.raw_text_query.autocomplete_list is also modified
        if self.enable_autocomplete is True
        """

    def _add_autocomplete(self, value):
        """Append *value* to the autocomplete list, skipping duplicates."""
        suggestions = self.raw_text_query.autocomplete_list
        if value not in suggestions:
            suggestions.append(value)
class TimeoutParser(QueryPartParser):
    """Parses '<N' prefixes that set the query timeout."""

    @staticmethod
    def check(raw_value):
        return raw_value[0] == "<"

    def __call__(self, raw_value):
        value = raw_value[1:]
        parsed = bool(value) and self._parse(value)
        if self.enable_autocomplete and not value:
            self._autocomplete()
        return parsed

    def _parse(self, value):
        if not value.isdigit():
            return False
        limit = int(value)
        # Below 100 the number means seconds ('<3'); otherwise milliseconds ('<850').
        self.raw_text_query.timeout_limit = (
            float(limit) if limit < 100 else limit / 1000.0
        )
        return True

    def _autocomplete(self):
        for suggestion in ("<3", "<850"):
            self._add_autocomplete(suggestion)
class LanguageParser(QueryPartParser):
    """Parses ':lang' prefixes that set the search language(s)."""

    @staticmethod
    def check(raw_value):
        return raw_value[0] == ":"

    def __call__(self, raw_value):
        # Normalise: lowercase, underscores to dashes (':en_US' -> 'en-us').
        value = raw_value[1:].lower().replace("_", "-")
        found = self._parse(value) if len(value) > 0 else False
        if self.enable_autocomplete and not found:
            self._autocomplete(value)
        return found

    def _parse(self, value):
        found = False
        # check if any language-code is equal with
        # declared language-codes
        for lc in language_codes:
            lang_id, lang_name, country, english_name = map(str.lower, lc)
            # if correct language-code is found
            # set it as new search-language
            if (
                value == lang_id
                or value == lang_name
                or value == english_name
                or value.replace("-", " ") == country
            ) and value not in self.raw_text_query.languages:
                found = True
                lang_parts = lang_id.split("-")
                if len(lang_parts) == 2:
                    # Store as 'xx-YY' with an uppercase region code.
                    self.raw_text_query.languages.append(
                        lang_parts[0] + "-" + lang_parts[1].upper()
                    )
                else:
                    self.raw_text_query.languages.append(lang_id)
                # to ensure best match (first match is not necessarily the best one)
                if value == lang_id:
                    break
        # user may set a valid, yet not selectable language
        if VALID_LANGUAGE_CODE.match(value):
            lang_parts = value.split("-")
            if len(lang_parts) > 1:
                value = lang_parts[0].lower() + "-" + lang_parts[1].upper()
            if value not in self.raw_text_query.languages:
                self.raw_text_query.languages.append(value)
                found = True
        return found

    def _autocomplete(self, value):
        if not value:
            # show some example queries
            for lang in [":en", ":en_us", ":english", ":united_kingdom"]:
                self.raw_text_query.autocomplete_list.append(lang)
            return
        for lc in language_codes:
            lang_id, lang_name, country, english_name = map(str.lower, lc)
            # check if query starts with language-id
            if lang_id.startswith(value):
                if len(value) <= 2:
                    self._add_autocomplete(":" + lang_id.split("-")[0])
                else:
                    self._add_autocomplete(":" + lang_id)
            # check if query starts with language name
            if lang_name.startswith(value) or english_name.startswith(value):
                self._add_autocomplete(":" + lang_name)
            # check if query starts with country
            # here "new_zealand" is "new-zealand" (see __call__)
            if country.startswith(value.replace("-", " ")):
                self._add_autocomplete(":" + country.replace(" ", "_"))
class ExternalBangParser(QueryPartParser):
    """Parses '!!bang' prefixes that redirect to an external bang."""

    @staticmethod
    def check(raw_value):
        return raw_value.startswith("!!")

    def __call__(self, raw_value):
        value = raw_value[2:]
        if value:
            found, bang_ac_list = self._parse(value)
        else:
            found, bang_ac_list = False, []
        if self.enable_autocomplete:
            self._autocomplete(bang_ac_list)
        return found

    def _parse(self, value):
        bang_definition, bang_ac_list = get_bang_definition_and_autocomplete(value)
        if bang_definition is None:
            return False, bang_ac_list
        self.raw_text_query.external_bang = value
        return True, bang_ac_list

    def _autocomplete(self, bang_ac_list):
        # Fall back to a few popular bangs when nothing better is known.
        for external_bang in bang_ac_list or ["g", "ddg", "bing"]:
            self._add_autocomplete("!!" + external_bang)
class BangParser(QueryPartParser):
    """Parses '!x' / '?x' prefixes selecting an engine or category."""

    @staticmethod
    def check(raw_value):
        return raw_value[0] == "!" or raw_value[0] == "?"

    def __call__(self, raw_value):
        value = raw_value[1:].replace("-", " ").replace("_", " ")
        found = self._parse(value) if len(value) > 0 else False
        # Only '!' (not '?') restricts the query to the selected engines.
        if found and raw_value[0] == "!":
            self.raw_text_query.specific = True
        if self.enable_autocomplete:
            self._autocomplete(raw_value[0], value)
        return found

    def _parse(self, value):
        # check if prefix is equal with engine shortcut
        if value in engine_shortcuts:
            value = engine_shortcuts[value]
        # check if prefix is equal with engine name
        if value in engines:
            self.raw_text_query.enginerefs.append(EngineRef(value, "none"))
            return True
        # check if prefix is equal with category name
        if value in categories:
            # using all engines for that search, which
            # are declared under that category name
            self.raw_text_query.enginerefs.extend(
                EngineRef(engine.name, value)
                for engine in categories[value]
                if (engine.name, value) not in self.raw_text_query.disabled_engines
            )
            return True
        return False

    def _autocomplete(self, first_char, value):
        if not value:
            # show some example queries
            for suggestion in ["images", "wikipedia", "osm"]:
                if (
                    suggestion not in self.raw_text_query.disabled_engines
                    or suggestion in categories
                ):
                    self._add_autocomplete(first_char + suggestion)
            return
        # check if query starts with category name
        for category in categories:
            if category.startswith(value):
                self._add_autocomplete(first_char + category)
        # check if query starts with engine name
        for engine in engines:
            if engine.startswith(value):
                self._add_autocomplete(first_char + engine.replace(" ", "_"))
        # check if query starts with engine shortcut
        for engine_shortcut in engine_shortcuts:
            if engine_shortcut.startswith(value):
                self._add_autocomplete(first_char + engine_shortcut)
class RawTextQuery:
    """parse raw text query (the value from the html input)"""

    # Order matters: a part goes to the first parser whose check() accepts
    # it, and ExternalBangParser's '!!' must run before BangParser's '!'.
    PARSER_CLASSES = [
        TimeoutParser,  # this force the timeout
        LanguageParser,  # this force a language
        ExternalBangParser,  # external bang (must be before BangParser)
        BangParser,  # this force a engine or category
    ]

    def __init__(self, query, disabled_engines):
        assert isinstance(query, str)
        # input parameters
        self.query = query
        self.disabled_engines = disabled_engines if disabled_engines else []
        # parsed values
        self.enginerefs = []
        self.languages = []
        self.timeout_limit = None
        self.external_bang = None
        self.specific = False
        self.autocomplete_list = []
        # internal properties
        self.query_parts = []  # use self.getFullQuery()
        self.user_query_parts = []  # use self.getQuery()
        self.autocomplete_location = None
        self._parse_query()

    def _parse_query(self):
        """
        parse self.query, if tags are set, which
        change the search engine or search-language
        """
        # split query, including whitespaces
        raw_query_parts = re.split(r"(\s+)", self.query)
        last_index_location = None
        # Autocomplete is only enabled for the final part of the query.
        autocomplete_index = len(raw_query_parts) - 1
        for i, query_part in enumerate(raw_query_parts):
            # part does only contain spaces, skip
            if query_part.isspace() or query_part == "":
                continue
            # parse special commands
            special_part = False
            for parser_class in RawTextQuery.PARSER_CLASSES:
                if parser_class.check(query_part):
                    special_part = parser_class(self, i == autocomplete_index)(
                        query_part
                    )
                    break
            # append query part to query_part list
            qlist = self.query_parts if special_part else self.user_query_parts
            qlist.append(query_part)
            last_index_location = (qlist, len(qlist) - 1)
        self.autocomplete_location = last_index_location

    def get_autocomplete_full_query(self, text):
        """Return the full query with its last part replaced by *text*."""
        qlist, position = self.autocomplete_location
        qlist[position] = text
        return self.getFullQuery()

    def changeQuery(self, query):
        """Replace the user part of the query, keeping the special parts."""
        self.user_query_parts = query.strip().split()
        self.query = self.getFullQuery()
        self.autocomplete_location = (
            self.user_query_parts,
            len(self.user_query_parts) - 1,
        )
        self.autocomplete_list = []
        return self

    def getQuery(self):
        """Return only the user's search terms (special parts stripped)."""
        return " ".join(self.user_query_parts)

    def getFullQuery(self):
        """
        get full query including whitespaces
        """
        return "{0} {1}".format(" ".join(self.query_parts), self.getQuery()).strip()

    def __str__(self):
        return self.getFullQuery()

    def __repr__(self):
        return (
            f"<{self.__class__.__name__} "
            + f"query={self.query!r} "
            + f"disabled_engines={self.disabled_engines!r}\n "
            + f"languages={self.languages!r} "
            + f"timeout_limit={self.timeout_limit!r} "
            + f"external_bang={self.external_bang!r} "
            + f"specific={self.specific!r} "
            + f"enginerefs={self.enginerefs!r}\n "
            + f"autocomplete_list={self.autocomplete_list!r}\n "
            + f"query_parts={self.query_parts!r}\n "
            + f"user_query_parts={self.user_query_parts!r} >"
        )
|
XmlMaterialProfile | XmlMaterialUpgrader | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import xml.etree.ElementTree as ET
from UM.VersionUpgrade import VersionUpgrade
from .XmlMaterialProfile import XmlMaterialProfile
class XmlMaterialUpgrader(VersionUpgrade):
    """Upgrades serialized XML material profiles to version 1.3."""

    def getXmlVersion(self, serialized):
        """Read the material file version from its serialized form."""
        return XmlMaterialProfile.getVersionFromSerialized(serialized)

    def _xmlVersionToSettingVersion(self, xml_version: str) -> int:
        """Translate an XML version string into a setting version number."""
        return XmlMaterialProfile.xmlVersionToSettingVersion(xml_version)

    def upgradeMaterial(self, serialised, filename):
        """Bump the metadata version to 2 and the file version to 1.3."""
        root = ET.fromstring(serialised)
        namespaces = {"um": "http://www.ultimaker.com/material"}
        # update version
        for entry in root.iterfind("./um:metadata/*", namespaces):
            if _tag_without_namespace(entry) == "version":
                entry.text = "2"
                break
        root.attrib["version"] = "1.3"
        # this makes sure that the XML header states encoding="utf-8"
        new_serialised = ET.tostring(root, encoding="utf-8").decode("utf-8")
        return [filename], [new_serialised]
def _tag_without_namespace(element):
return element.tag[element.tag.rfind("}") + 1 :]
|
gtk3 | torrentview | #
# Copyright (C) 2007, 2008 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
"""The torrent view component that lists all torrents in the session."""
import logging
from locale import strcoll
import deluge.component as component
from deluge.common import decode_bytes
from deluge.ui.client import client
from gi.repository.Gdk import ModifierType, keyval_name
from gi.repository.GLib import idle_add
from gi.repository.GObject import TYPE_UINT64
from gi.repository.Gtk import EntryIconPosition
from twisted.internet import reactor
from . import torrentview_data_funcs as funcs
from .common import cmp
from .listview import ListView
from .removetorrentdialog import RemoveTorrentDialog
# Module-level logger for this view.
log = logging.getLogger(__name__)

# Combined Ctrl+Alt modifier mask used for keyboard handling.
try:
    CTRL_ALT_MASK = ModifierType.CONTROL_MASK | ModifierType.MOD1_MASK
except TypeError:
    # Sphinx AutoDoc has a mock issue with Gdk masks.
    pass
def str_nocase_sort(model, iter1, iter2, data):
    """Sort string column data using ISO 14651 in lowercase.

    Uses locale.strcoll which (allegedly) uses ISO 14651. Compares first
    value with second and returns -1, 0, 1 for where it should be placed.
    """

    def normalize(value):
        # The model may hold None; treat that as the empty string.
        return value.lower() if value else ""

    return strcoll(normalize(model[iter1][data]), normalize(model[iter2][data]))
def queue_peer_seed_sort_function(v1, v2):
    """Compare two queue/seed/peer values for a Gtk sort function.

    Negative values (meaning "not queued" / "unknown") always sort after
    real values. Returns -1, 0 or 1.
    """
    if v1 == v2:
        return 0
    # A negative number sorts behind any non-negative one.
    if v2 < 0:
        return -1
    if v1 < 0:
        return 1
    return 1 if v1 > v2 else -1
def queue_column_sort(model, iter1, iter2, data):
    """Gtk sort wrapper: compare two rows' queue-position values."""
    return queue_peer_seed_sort_function(model[iter1][data], model[iter2][data])
def eta_column_sort(model, iter1, iter2, data):
    """Gtk sort for the ETA column.

    A value of 0 (no ETA) sorts first; otherwise larger ETAs sort earlier
    (descending). Returns -1, 0 or 1.
    """
    left = model[iter1][data]
    right = model[iter2][data]
    if left == right:
        return 0
    if left == 0:
        return -1
    if right == 0:
        return 1
    return -1 if left > right else 1
def seed_peer_column_sort(model, iter1, iter2, data):
    """Sort by connected seeds/peers, tie-breaking on the adjacent
    "total seeds/peers" column."""
    connected1 = model[iter1][data]
    connected2 = model[iter2][data]
    if connected1 != connected2:
        return queue_peer_seed_sort_function(connected1, connected2)
    # Equal connected counts: the totals stored one column over decide.
    return queue_peer_seed_sort_function(
        model[iter1][data + 1], model[iter2][data + 1]
    )
def progress_sort(model, iter1, iter2, sort_column_id):
    """Sort by progress value, falling back to the torrent state column
    (stored immediately after progress) on ties."""
    p1 = model[iter1][sort_column_id]
    p2 = model[iter2][sort_column_id]
    if p1 != p2:
        return cmp(p1, p2)
    # Progress value is equal, so sort on state
    return cmp(model[iter1][sort_column_id + 1], model[iter2][sort_column_id + 1])
class SearchBox:
    """Search entry UI that filters the torrent view by torrent name.

    Works in two stages: it immediately toggles the liststore's 'filter'
    column locally ("prefiltering") for instant feedback, then schedules a
    full torrentview update via the Twisted reactor.
    """

    def __init__(self, torrentview):
        self.torrentview = torrentview
        mainwindow = component.get("MainWindow")
        main_builder = mainwindow.get_builder()
        self.visible = False
        # search_pending: DelayedCall handle for a scheduled view update.
        # prefiltered: torrent_ids whose 'filter' column we toggled locally,
        # so the toggle can be undone before the next real update.
        self.search_pending = self.prefiltered = None
        self.search_box = main_builder.get_object("search_box")
        self.search_torrents_entry = main_builder.get_object("search_torrents_entry")
        self.close_search_button = main_builder.get_object("close_search_button")
        self.match_search_button = main_builder.get_object("search_torrents_match")
        mainwindow.connect_signals(self)

    def show(self):
        """Show the search box and focus its text entry."""
        self.visible = True
        self.search_box.show_all()
        self.search_torrents_entry.grab_focus()

    def hide(self):
        """Hide the search box, clearing any active search."""
        self.visible = False
        self.clear_search()
        self.search_box.hide()
        self.search_pending = self.prefiltered = None

    def clear_search(self):
        """Undo local prefiltering, clear the entry and the 'name' filter,
        then schedule a torrentview update."""
        if self.search_pending and self.search_pending.active():
            self.search_pending.cancel()

        if self.prefiltered:
            filter_column = self.torrentview.columns["filter"].column_indices[0]
            torrent_id_column = self.torrentview.columns["torrent_id"].column_indices[0]
            for row in self.torrentview.liststore:
                torrent_id = row[torrent_id_column]
                if torrent_id in self.prefiltered:
                    # Reset to previous filter state
                    self.prefiltered.pop(self.prefiltered.index(torrent_id))
                    row[filter_column] = not row[filter_column]

        self.prefiltered = None

        self.search_torrents_entry.set_text("")
        if self.torrentview.filter and "name" in self.torrentview.filter:
            self.torrentview.filter.pop("name", None)
            self.search_pending = reactor.callLater(0.5, self.torrentview.update)

    def set_search_filter(self):
        """Install the entry's text as the 'name' filter and prefilter the
        visible rows immediately."""
        if self.search_pending and self.search_pending.active():
            self.search_pending.cancel()

        if self.torrentview.filter and "name" in self.torrentview.filter:
            self.torrentview.filter.pop("name", None)

        elif self.torrentview.filter is None:
            self.torrentview.filter = {}

        search_string = self.search_torrents_entry.get_text()
        if not search_string:
            self.clear_search()
        else:
            if self.match_search_button.get_active():
                # '::match' suffix requests case-sensitive matching in core.
                search_string += "::match"
            self.torrentview.filter["name"] = search_string
        self.prefilter_torrentview()

    def prefilter_torrentview(self):
        """Toggle the liststore 'filter' column locally so search results
        appear instantly, remembering which rows were changed."""
        filter_column = self.torrentview.columns["filter"].column_indices[0]
        torrent_id_column = self.torrentview.columns["torrent_id"].column_indices[0]
        torrent_name_column = self.torrentview.columns[_("Name")].column_indices[1]

        match_case = self.match_search_button.get_active()
        if match_case:
            search_string = self.search_torrents_entry.get_text()
        else:
            search_string = self.search_torrents_entry.get_text().lower()

        if self.prefiltered is None:
            self.prefiltered = []

        for row in self.torrentview.liststore:
            torrent_id = row[torrent_id_column]

            if torrent_id in self.prefiltered:
                # Reset to previous filter state
                self.prefiltered.pop(self.prefiltered.index(torrent_id))
                row[filter_column] = not row[filter_column]

            if not row[filter_column]:
                # Row is not visible(filtered out, but not by our filter), skip it
                continue

            if match_case:
                torrent_name = row[torrent_name_column]
            else:
                torrent_name = row[torrent_name_column].lower()

            if search_string in torrent_name and not row[filter_column]:
                row[filter_column] = True
                self.prefiltered.append(torrent_id)
            elif search_string not in torrent_name and row[filter_column]:
                row[filter_column] = False
                self.prefiltered.append(torrent_id)

    def on_close_search_button_clicked(self, widget):
        """Close-button handler: hide the search box."""
        self.hide()

    def on_search_filter_toggle(self, widget):
        """Toggle visibility of the search box."""
        if self.visible:
            self.hide()
        else:
            self.show()

    def on_search_torrents_match_toggled(self, widget):
        """Re-run the search when the case-match toggle changes."""
        if self.search_torrents_entry.get_text():
            self.set_search_filter()
            self.search_pending = reactor.callLater(0.7, self.torrentview.update)

    def on_search_torrents_entry_icon_press(self, entry, icon, event):
        """Clear the search when the entry's secondary (clear) icon is pressed."""
        if icon != EntryIconPosition.SECONDARY:
            return
        self.clear_search()

    def on_search_torrents_entry_changed(self, widget):
        """Apply the filter as the user types, debounced by 0.7s."""
        self.set_search_filter()
        self.search_pending = reactor.callLater(0.7, self.torrentview.update)
class TorrentView(ListView, component.Component):
    """TorrentView handles the listing of torrents."""

    def __init__(self):
        component.Component.__init__(
            self, "TorrentView", interval=2, depend=["SessionProxy"]
        )
        main_builder = component.get("MainWindow").get_builder()
        # Call the ListView constructor
        ListView.__init__(
            self, main_builder.get_object("torrent_view"), "torrentview.state"
        )
        log.debug("TorrentView Init..")

        # If we have gotten the state yet
        self.got_state = False

        # This is where status updates are put
        self.status = {}

        # We keep a copy of the previous status to compare for changes
        self.prev_status = {}

        # Register the columns menu with the listview so it gets updated accordingly.
        self.register_checklist_menu(main_builder.get_object("menu_columns"))

        # Add the columns to the listview
        self.add_text_column("torrent_id", hidden=True, unique=True)
        self.add_bool_column("dirty", hidden=True)
        self.add_func_column(
            "#",
            funcs.cell_data_queue,
            [int],
            status_field=["queue"],
            sort_func=queue_column_sort,
        )
        self.add_texticon_column(
            _("Name"),
            status_field=["state", "name"],
            function=funcs.cell_data_statusicon,
            sort_func=str_nocase_sort,
            default_sort=True,
        )
        self.add_func_column(
            _("Size"),
            funcs.cell_data_size,
            [TYPE_UINT64],
            status_field=["total_wanted"],
        )
        self.add_func_column(
            _("Downloaded"),
            funcs.cell_data_size,
            [TYPE_UINT64],
            status_field=["all_time_download"],
            default=False,
        )
        self.add_func_column(
            _("Uploaded"),
            funcs.cell_data_size,
            [TYPE_UINT64],
            status_field=["total_uploaded"],
            default=False,
        )
        self.add_func_column(
            _("Remaining"),
            funcs.cell_data_size,
            [TYPE_UINT64],
            status_field=["total_remaining"],
            default=False,
        )
        self.add_progress_column(
            _("Progress"),
            status_field=["progress", "state"],
            col_types=[float, str],
            function=funcs.cell_data_progress,
            sort_func=progress_sort,
        )
        self.add_func_column(
            _("Seeds"),
            funcs.cell_data_peer,
            [int, int],
            status_field=["num_seeds", "total_seeds"],
            sort_func=seed_peer_column_sort,
            default=False,
        )
        self.add_func_column(
            _("Peers"),
            funcs.cell_data_peer,
            [int, int],
            status_field=["num_peers", "total_peers"],
            sort_func=seed_peer_column_sort,
            default=False,
        )
        self.add_func_column(
            _("Seeds:Peers"),
            funcs.cell_data_ratio_seeds_peers,
            [float],
            status_field=["seeds_peers_ratio"],
            default=False,
        )
        self.add_func_column(
            _("Down Speed"),
            funcs.cell_data_speed_down,
            [int],
            status_field=["download_payload_rate"],
        )
        self.add_func_column(
            _("Up Speed"),
            funcs.cell_data_speed_up,
            [int],
            status_field=["upload_payload_rate"],
        )
        self.add_func_column(
            _("Down Limit"),
            funcs.cell_data_speed_limit_down,
            [float],
            status_field=["max_download_speed"],
            default=False,
        )
        self.add_func_column(
            _("Up Limit"),
            funcs.cell_data_speed_limit_up,
            [float],
            status_field=["max_upload_speed"],
            default=False,
        )
        self.add_func_column(
            _("ETA"),
            funcs.cell_data_time,
            [int],
            status_field=["eta"],
            sort_func=eta_column_sort,
        )
        self.add_func_column(
            _("Ratio"),
            funcs.cell_data_ratio_ratio,
            [float],
            status_field=["ratio"],
            default=False,
        )
        self.add_func_column(
            _("Avail"),
            funcs.cell_data_ratio_avail,
            [float],
            status_field=["distributed_copies"],
            default=False,
        )
        self.add_func_column(
            _("Added"),
            funcs.cell_data_date_added,
            [int],
            status_field=["time_added"],
            default=False,
        )
        self.add_func_column(
            _("Completed"),
            funcs.cell_data_date_completed,
            [int],
            status_field=["completed_time"],
            default=False,
        )
        self.add_func_column(
            _("Complete Seen"),
            funcs.cell_data_date_or_never,
            [int],
            status_field=["last_seen_complete"],
            default=False,
        )
        self.add_func_column(
            _("Last Transfer"),
            funcs.cell_data_time,
            [int],
            status_field=["time_since_transfer"],
            default=False,
        )
        self.add_texticon_column(
            _("Tracker"),
            function=funcs.cell_data_trackericon,
            status_field=["tracker_host", "tracker_host"],
            default=False,
        )
        self.add_text_column(
            _("Download Folder"), status_field=["download_location"], default=False
        )
        self.add_text_column(_("Owner"), status_field=["owner"], default=False)
        self.add_bool_column(
            _("Shared"),
            status_field=["shared"],
            default=False,
            tooltip=_("Torrent is shared between other Deluge users or not."),
        )
        self.restore_columns_order_from_state()

        # Set filter to None for now
        self.filter = None

        # Connect Signals #
        # Connect to the 'button-press-event' to know when to bring up the
        # torrent menu popup.
        self.treeview.connect("button-press-event", self.on_button_press_event)
        # Connect to the 'key-press-event' to know when the bring up the
        # torrent menu popup via keypress.
        # NOTE(review): on_key_press_event is connected to both
        # 'key-release-event' (here) and 'key-press-event' (below), so the
        # shortcut handlers may fire twice per keystroke — confirm intended.
        self.treeview.connect("key-release-event", self.on_key_press_event)
        # Connect to the 'changed' event of TreeViewSelection to get selection
        # changes.
        self.treeview.get_selection().connect("changed", self.on_selection_changed)

        self.treeview.connect("drag-drop", self.on_drag_drop)
        self.treeview.connect("drag_data_received", self.on_drag_data_received)
        self.treeview.connect("key-press-event", self.on_key_press_event)
        self.treeview.connect("columns-changed", self.on_columns_changed_event)

        self.search_box = SearchBox(self)
        # Status keys fetched with every request regardless of visible columns.
        self.permanent_status_keys = ["owner"]
        self.columns_to_update = []

    def start(self):
        """Start the torrentview"""
        # We need to get the core session state to know which torrents are in
        # the session so we can add them to our list.
        # Only get the status fields required for the visible columns
        status_fields = []
        for listview_column in self.columns.values():
            if listview_column.column.get_visible():
                if not listview_column.status_field:
                    continue
                status_fields.extend(listview_column.status_field)
        component.get("SessionProxy").get_torrents_status(
            {}, status_fields
        ).addCallback(self._on_session_state)

        client.register_event_handler(
            "TorrentStateChangedEvent", self.on_torrentstatechanged_event
        )
        client.register_event_handler("TorrentAddedEvent", self.on_torrentadded_event)
        client.register_event_handler(
            "TorrentRemovedEvent", self.on_torrentremoved_event
        )
        client.register_event_handler("SessionPausedEvent", self.on_sessionpaused_event)
        client.register_event_handler(
            "SessionResumedEvent", self.on_sessionresumed_event
        )
        client.register_event_handler(
            "TorrentQueueChangedEvent", self.on_torrentqueuechanged_event
        )

    def _on_session_state(self, state):
        """Populate the list from the initial core session state."""
        self.add_rows(state)
        self.got_state = True
        # Update the view right away with our status
        self.status = state
        self.set_columns_to_update()
        self.update_view(load_new_list=True)
        self.select_first_row()

    def stop(self):
        """Stops the torrentview"""
        client.deregister_event_handler(
            "TorrentStateChangedEvent", self.on_torrentstatechanged_event
        )
        client.deregister_event_handler("TorrentAddedEvent", self.on_torrentadded_event)
        client.deregister_event_handler(
            "TorrentRemovedEvent", self.on_torrentremoved_event
        )
        client.deregister_event_handler(
            "SessionPausedEvent", self.on_sessionpaused_event
        )
        client.deregister_event_handler(
            "SessionResumedEvent", self.on_sessionresumed_event
        )
        client.deregister_event_handler(
            "TorrentQueueChangedEvent", self.on_torrentqueuechanged_event
        )

        if self.treeview.get_selection():
            self.treeview.get_selection().unselect_all()

        # Save column state before clearing liststore
        # so column sort details are correctly saved.
        self.save_state()
        self.liststore.clear()
        self.prev_status = {}
        self.filter = None
        self.search_box.hide()

    def shutdown(self):
        """Called when GtkUi is exiting"""
        pass

    def save_state(self):
        """
        Saves the state of the torrent view.
        """
        if component.get("MainWindow").visible():
            ListView.save_state(self, "torrentview.state")

    def remove_column(self, header):
        """Removes the column with the name 'header' from the torrentview"""
        self.save_state()
        ListView.remove_column(self, header)

    def set_filter(self, filter_dict):
        """
        Sets filters for the torrentview..

        see: core.get_torrents_status
        """
        # Keep any active name search across filter changes.
        search_filter = self.filter and self.filter.get("name", None) or None
        self.filter = dict(filter_dict)  # Copied version of filter_dict.
        if search_filter and "name" not in filter_dict:
            self.filter["name"] = search_filter
        self.update(select_row=True)

    def set_columns_to_update(self, columns=None):
        """Determine which columns need updating and return the status keys
        (core status fields) required to update them."""
        status_keys = []
        self.columns_to_update = []

        if columns is None:
            # We need to iterate through all columns
            columns = list(self.columns)

        # Iterate through supplied list of columns to update
        for column in columns:
            # Make sure column is visible and has 'status_field' set.
            # If not, we can ignore it.
            if (
                self.columns[column].column.get_visible() is True
                and self.columns[column].hidden is False
                and self.columns[column].status_field is not None
            ):
                for field in self.columns[column].status_field:
                    status_keys.append(field)
                    self.columns_to_update.append(column)

        # Remove duplicates
        self.columns_to_update = list(set(self.columns_to_update))
        status_keys = list(set(status_keys + self.permanent_status_keys))
        return status_keys

    def send_status_request(self, columns=None, select_row=False):
        """Request torrent statuses from core for the visible columns."""
        # Store the 'status_fields' we need to send to core
        status_keys = self.set_columns_to_update(columns)

        # If there is nothing in status_keys then we must not continue.
        # BUGFIX: this used to be `status_keys is []`, an identity comparison
        # with a fresh list literal which is always False, so the guard
        # never triggered.
        if not status_keys:
            return

        # Remove duplicates from status_key list
        status_keys = list(set(status_keys))

        # Request the statuses for all these torrent_ids, this is async so we
        # will deal with the return in a signal callback.
        d = (
            component.get("SessionProxy")
            .get_torrents_status(self.filter, status_keys)
            .addCallback(self._on_get_torrents_status)
        )
        if select_row:
            d.addCallback(self.select_first_row)

    def select_first_row(self, ignored=None):
        """
        Set the first row in the list selected if a selection does
        not already exist
        """
        rows = self.treeview.get_selection().get_selected_rows()[1]
        # Only select row if no rows are selected
        if not rows:
            self.treeview.get_selection().select_path((0,))

    def update(self, select_row=False):
        """
        Sends a status request to core and updates the torrent list with the result.

        :param select_row: if the first row in the list should be selected if
                           no rows are already selected.
        :type select_row: boolean
        """
        if self.got_state:
            if (
                self.search_box.search_pending is not None
                and self.search_box.search_pending.active()
            ):
                # An update request is scheduled, let's wait for that one
                return
            # Send a status request
            idle_add(self.send_status_request, None, select_row)

    def update_view(self, load_new_list=False):
        """Update the torrent view model with data we've received."""
        filter_column = self.columns["filter"].column_indices[0]
        status = self.status

        if not load_new_list:
            # Freeze notifications while updating
            self.treeview.freeze_child_notify()

        # Get the columns to update from one of the torrents
        if status:
            torrent_id = list(status)[0]
            fields_to_update = []
            for column in self.columns_to_update:
                column_index = self.get_column_index(column)
                for i, status_field in enumerate(self.columns[column].status_field):
                    # Only use columns that the torrent has in the state
                    if status_field in status[torrent_id]:
                        fields_to_update.append((column_index[i], status_field))

        for row in self.liststore:
            torrent_id = row[self.columns["torrent_id"].column_indices[0]]
            # We expect the torrent_id to be in status and prev_status,
            # as it will be as long as the list isn't changed by the user

            torrent_id_in_status = False
            try:
                torrent_status = status[torrent_id]
                torrent_id_in_status = True
                if torrent_status == self.prev_status[torrent_id]:
                    # The status dict is the same, so do nothing to update for this torrent
                    continue
            except KeyError:
                pass

            if not torrent_id_in_status:
                if row[filter_column] is True:
                    row[filter_column] = False
            else:
                if row[filter_column] is False:
                    row[filter_column] = True

                # Find the fields to update
                to_update = []
                for i, status_field in fields_to_update:
                    row_value = status[torrent_id][status_field]
                    if decode_bytes(row[i]) != row_value:
                        to_update.append(i)
                        to_update.append(row_value)
                # Update fields in the liststore
                if to_update:
                    self.liststore.set(row.iter, *to_update)

        if load_new_list:
            # Create the model filter. This sets the model for the treeview and enables sorting.
            self.create_model_filter()
        else:
            self.treeview.thaw_child_notify()

        component.get("MenuBar").update_menu()
        self.prev_status = status

    def _on_get_torrents_status(self, status, select_row=False):
        """Callback function for get_torrents_status(). 'status' should be a
        dictionary of {torrent_id: {key, value}}."""
        self.status = status
        if self.search_box.prefiltered is not None:
            self.search_box.prefiltered = None
        if self.status == self.prev_status and self.prev_status:
            # We do not bother updating since the status hasn't changed
            self.prev_status = self.status
            return
        self.update_view()

    def add_rows(self, torrent_ids):
        """Accepts a list of torrent_ids to add to self.liststore"""
        torrent_id_column = self.columns["torrent_id"].column_indices[0]
        dirty_column = self.columns["dirty"].column_indices[0]
        filter_column = self.columns["filter"].column_indices[0]
        for torrent_id in torrent_ids:
            # Insert a new row to the liststore
            row = self.liststore.append()
            self.liststore.set(
                row,
                torrent_id_column,
                torrent_id,
                dirty_column,
                True,
                filter_column,
                True,
            )

    def remove_row(self, torrent_id):
        """Removes a row with torrent_id"""
        for row in self.liststore:
            if row[self.columns["torrent_id"].column_indices[0]] == torrent_id:
                self.liststore.remove(row.iter)
                # Force an update of the torrentview
                self.update(select_row=True)
                break

    def mark_dirty(self, torrent_id=None):
        """Flag one torrent (or all, if torrent_id is None) as needing a redraw."""
        for row in self.liststore:
            if (
                not torrent_id
                or row[self.columns["torrent_id"].column_indices[0]] == torrent_id
            ):
                # log.debug('marking %s dirty', torrent_id)
                row[self.columns["dirty"].column_indices[0]] = True
                if torrent_id:
                    break

    def get_selected_torrent(self):
        """Returns a torrent_id or None.  If multiple torrents are selected,
        it will return the torrent_id of the first one."""
        selected = self.get_selected_torrents()
        if selected:
            return selected[0]
        else:
            return selected

    def get_selected_torrents(self):
        """Returns a list of selected torrents or None"""
        torrent_ids = []
        try:
            paths = self.treeview.get_selection().get_selected_rows()[1]
        except AttributeError:
            # paths is likely None .. so lets return []
            return []
        try:
            for path in paths:
                try:
                    row = self.treeview.get_model().get_iter(path)
                except Exception as ex:
                    log.debug("Unable to get iter from path: %s", ex)
                    continue

                child_row = self.treeview.get_model().convert_iter_to_child_iter(row)
                child_row = (
                    self.treeview.get_model()
                    .get_model()
                    .convert_iter_to_child_iter(child_row)
                )

                if self.liststore.iter_is_valid(child_row):
                    try:
                        value = self.liststore.get_value(
                            child_row, self.columns["torrent_id"].column_indices[0]
                        )
                    except Exception as ex:
                        log.debug("Unable to get value from row: %s", ex)
                    else:
                        torrent_ids.append(value)
            if len(torrent_ids) == 0:
                return []

            return torrent_ids
        except (ValueError, TypeError):
            return []

    def get_torrent_status(self, torrent_id):
        """Returns data stored in self.status, it may not be complete"""
        try:
            return self.status[torrent_id]
        except KeyError:
            return {}

    def get_visible_torrents(self):
        """Return the torrent_ids currently present in self.status."""
        return list(self.status)

    # Callbacks #
    def on_button_press_event(self, widget, event):
        """This is a callback for showing the right-click context menu."""
        log.debug("on_button_press_event")
        # We only care about right-clicks
        if event.button == 3 and event.window == self.treeview.get_bin_window():
            x, y = event.get_coords()
            path = self.treeview.get_path_at_pos(int(x), int(y))
            if not path:
                return
            row = self.model_filter.get_iter(path[0])

            if self.get_selected_torrents():
                if (
                    self.model_filter.get_value(
                        row, self.columns["torrent_id"].column_indices[0]
                    )
                    not in self.get_selected_torrents()
                ):
                    self.treeview.get_selection().unselect_all()
                    self.treeview.get_selection().select_iter(row)
            else:
                self.treeview.get_selection().select_iter(row)
            torrentmenu = component.get("MenuBar").torrentmenu
            torrentmenu.popup(None, None, None, None, event.button, event.time)
            return True

    def on_selection_changed(self, treeselection):
        """This callback is called when the selection has changed."""
        log.debug("on_selection_changed")
        component.get("TorrentDetails").update()
        component.get("MenuBar").update_menu()

    def on_drag_drop(self, widget, drag_context, x, y, timestamp):
        """Swallow drag-drop so rows cannot be reordered by dragging."""
        widget.stop_emission("drag-drop")

    def on_drag_data_received(
        self, widget, drag_context, x, y, selection_data, info, timestamp
    ):
        """Swallow drag data; drops onto the list are not supported."""
        widget.stop_emission("drag_data_received")

    def on_columns_changed_event(self, treeview):
        """Persist column layout whenever columns are added/removed/reordered."""
        log.debug("Treeview Columns Changed")
        self.save_state()

    def on_torrentadded_event(self, torrent_id, from_state):
        """Core event: a torrent was added to the session."""
        self.add_rows([torrent_id])
        self.update(select_row=True)

    def on_torrentremoved_event(self, torrent_id):
        """Core event: a torrent was removed from the session."""
        self.remove_row(torrent_id)

    def on_torrentstatechanged_event(self, torrent_id, state):
        """Core event: update the state column(s) for the given torrent."""
        # Update the torrents state
        for row in self.liststore:
            if torrent_id != row[self.columns["torrent_id"].column_indices[0]]:
                continue

            for name in self.columns_to_update:
                if not self.columns[name].status_field:
                    continue
                for idx, status_field in enumerate(self.columns[name].status_field):
                    # Update all columns that use the state field to current state
                    if status_field != "state":
                        continue
                    row[self.get_column_index(name)[idx]] = state

            # ROBUSTNESS: self.filter may be None (set in __init__/stop), so
            # guard before calling .get() to avoid an AttributeError.
            if self.filter and self.filter.get("state", None) is not None:
                # We have a filter set, let's see if theres anything to hide
                # and remove from status
                if (
                    torrent_id in self.status
                    and self.status[torrent_id]["state"] != state
                ):
                    row[self.columns["filter"].column_indices[0]] = False
                    del self.status[torrent_id]

        self.mark_dirty(torrent_id)

    def on_sessionpaused_event(self):
        """Core event: session paused; refresh all rows."""
        self.mark_dirty()
        self.update()

    def on_sessionresumed_event(self):
        """Core event: session resumed; refresh all rows."""
        self.mark_dirty()
        self.update(select_row=True)

    def on_torrentqueuechanged_event(self):
        """Core event: queue order changed; refresh all rows."""
        self.mark_dirty()
        self.update()

    # Handle keyboard shortcuts
    def on_key_press_event(self, widget, event):
        """Dispatch key events to keypress_<keyname> handler methods."""
        keyname = keyval_name(event.keyval)
        if keyname is not None:
            func = getattr(self, "keypress_" + keyname.lower(), None)
            if func:
                return func(event)

    def keypress_up(self, event):
        """Handle any Up arrow keypresses"""
        log.debug("keypress_up")
        torrents = self.get_selected_torrents()
        if not torrents:
            return

        # Move queue position up with Ctrl+Alt or Ctrl+Alt+Shift
        if event.get_state() & CTRL_ALT_MASK:
            if event.get_state() & ModifierType.SHIFT_MASK:
                client.core.queue_top(torrents)
            else:
                client.core.queue_up(torrents)

    def keypress_down(self, event):
        """Handle any Down arrow keypresses"""
        log.debug("keypress_down")
        torrents = self.get_selected_torrents()
        if not torrents:
            return

        # Move queue position down with Ctrl+Alt or Ctrl+Alt+Shift
        if event.get_state() & CTRL_ALT_MASK:
            if event.get_state() & ModifierType.SHIFT_MASK:
                client.core.queue_bottom(torrents)
            else:
                client.core.queue_down(torrents)

    def keypress_delete(self, event):
        """Delete removes selection; Shift+Delete also deletes the data."""
        log.debug("keypress_delete")
        torrents = self.get_selected_torrents()
        if torrents:
            if event.get_state() & ModifierType.SHIFT_MASK:
                RemoveTorrentDialog(torrents, delete_files=True).run()
            else:
                RemoveTorrentDialog(torrents).run()

    def keypress_menu(self, event):
        """Menu key pops up the torrent context menu for the selection."""
        log.debug("keypress_menu")
        if not self.get_selected_torrent():
            return

        torrentmenu = component.get("MenuBar").torrentmenu
        torrentmenu.popup(None, None, None, None, 3, event.time)
        return True
|
picard | album | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006-2009, 2011-2012, 2014 Lukáš Lalinský
# Copyright (C) 2008 Gary van der Merwe
# Copyright (C) 2008 Hendrik van Antwerpen
# Copyright (C) 2008 ojnkpjg
# Copyright (C) 2008-2011, 2014, 2018-2023 Philipp Wolfer
# Copyright (C) 2009 Nikolai Prokoschenko
# Copyright (C) 2011-2012 Chad Wilson
# Copyright (C) 2011-2013, 2019 Michael Wiencek
# Copyright (C) 2012-2013, 2016-2017 Wieland Hoffmann
# Copyright (C) 2013, 2018 Calvin Walton
# Copyright (C) 2013-2015, 2017 Sophist-UK
# Copyright (C) 2013-2015, 2017-2022 Laurent Monin
# Copyright (C) 2016 Suhas
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Antonio Larrosa
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2019 Joel Lintunen
# Copyright (C) 2020-2021 Gabriel Ferreira
# Copyright (C) 2021 Petit Minion
# Copyright (C) 2022 skelly37
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import traceback
from collections import OrderedDict, defaultdict, namedtuple
from enum import IntEnum
from picard import log
from picard.cluster import Cluster
from picard.collection import add_release_to_user_collections
from picard.config import get_config
from picard.const import VARIOUS_ARTISTS_ID
from picard.dataobj import DataObject
from picard.file import File
from picard.mbjson import (
medium_to_metadata,
release_group_to_metadata,
release_to_metadata,
track_to_metadata,
)
from picard.metadata import (
Metadata,
run_album_metadata_processors,
run_track_metadata_processors,
)
from picard.plugin import PluginFunctions, PluginPriority
from picard.script import ScriptError, ScriptParser, enabled_tagger_scripts_texts
from picard.track import Track
from picard.ui.item import Item
from picard.util import find_best_match, format_time, mbid_validate
from picard.util.imagelist import (
add_metadata_images,
remove_metadata_images,
update_metadata_images,
)
from picard.util.textencoding import asciipunct
from PyQt5 import QtCore, QtNetwork
RECORDING_QUERY_LIMIT = 100
def _create_artist_node_dict(source_node):
return {x["artist"]["id"]: x["artist"] for x in source_node["artist-credit"]}
def _copy_artist_nodes(source, target_node):
for credit in target_node["artist-credit"]:
artist_node = source.get(credit["artist"]["id"])
if artist_node:
credit["artist"] = artist_node
class AlbumArtist(DataObject):
    """Data object representing a single album artist, identified by MBID."""

    def __init__(self, album_artist_id):
        super().__init__(album_artist_id)
class AlbumStatus(IntEnum):
    """Loading state of an Album (see Album.status)."""

    NONE = 0
    LOADING = 1
    ERROR = 2
    LOADED = 3
class ParseResult(IntEnum):
    """Outcome of Album._parse_release."""

    PARSED = 0
    # Release id redirected to an already-loaded album.
    REDIRECT = 1
    # Track-level relationships were not in the initial response.
    MISSING_TRACK_RELS = 2
class Album(DataObject, Item):
metadata_images_changed = QtCore.pyqtSignal()
    def __init__(self, album_id, discid=None):
        """Create an album for the given release MBID, optionally seeded
        with a disc id."""
        DataObject.__init__(self, album_id)
        self.metadata = Metadata()
        self.orig_metadata = Metadata()
        self.tracks = []
        self.loaded = False
        # Handle of the in-flight web-service request, if any.
        self.load_task = None
        self.release_group = None
        self._files_count = 0
        # Count of outstanding web-service requests.
        self._requests = 0
        self._tracks_loaded = False
        # Disc ids associated with this album.
        self._discids = set()
        self._recordings_map = {}
        if discid:
            self._discids.add(discid)
        # Callables to run once loading finishes.
        self._after_load_callbacks = []
        # Special cluster holding files matched to this album but not to a track.
        self.unmatched_files = Cluster(
            _("Unmatched Files"), special=True, related_album=self, hide_if_empty=True
        )
        self.unmatched_files.metadata_images_changed.connect(
            self.update_metadata_images
        )
        self.status = AlbumStatus.NONE
        self._album_artists = []
        # When False, metadata image updates are suppressed (see
        # enable_update_metadata_images).
        self.update_metadata_images_enabled = True
def __repr__(self):
return "<Album %s %r>" % (self.id, self.metadata["album"])
def iterfiles(self, save=False):
for track in self.tracks:
yield from track.iterfiles()
if not save:
yield from self.unmatched_files.iterfiles()
def iter_correctly_matched_tracks(self):
yield from (track for track in self.tracks if track.num_linked_files == 1)
    def enable_update_metadata_images(self, enabled):
        """Set the flag that allows or suppresses metadata image updates."""
        self.update_metadata_images_enabled = enabled
def append_album_artist(self, album_artist_id):
"""Append artist id to the list of album artists
and return an AlbumArtist instance"""
album_artist = AlbumArtist(album_artist_id)
self._album_artists.append(album_artist)
return album_artist
    def add_discid(self, discid):
        """Register a disc id with this album and propagate the matching
        disc ids to every track (and its files) whose medium lists them."""
        if not discid:
            return
        self._discids.add(discid)
        for track in self.tracks:
            # Disc ids the track's medium is associated with.
            medium_discids = track.metadata.getall("~musicbrainz_discids")
            track_discids = list(self._discids.intersection(medium_discids))
            if track_discids:
                track.metadata["musicbrainz_discid"] = track_discids
                track.update()
                for file in track.files:
                    file.metadata["musicbrainz_discid"] = track_discids
                    file.update()
def get_next_track(self, track):
try:
index = self.tracks.index(track)
return self.tracks[index + 1]
except (IndexError, ValueError):
return None
    def get_album_artists(self):
        """Returns the list of album artists (as AlbumArtist objects).

        Note: returns the internal list, not a copy.
        """
        return self._album_artists
    def _run_album_metadata_processors(self):
        """Run registered album metadata processor plugins on the freshly
        parsed metadata; plugin failures are recorded as album errors
        rather than propagated (hence the broad BaseException catch)."""
        try:
            run_album_metadata_processors(self, self._new_metadata, self._release_node)
        except BaseException:
            self.error_append(traceback.format_exc())
    def _parse_release(self, release_node):
        """Parse a release JSON node into this album's metadata.

        Handles MBID redirects (possibly merging into an already-loaded
        album), populates release-group and release metadata, and reports
        via ParseResult whether track relationships still need fetching.
        """
        log.debug("Loading release %r …", self.id)
        self._tracks_loaded = False
        release_id = release_node["id"]
        if release_id != self.id:
            # The requested MBID redirected to another release.
            self.tagger.mbid_redirects[self.id] = release_id
            album = self.tagger.albums.get(release_id)
            if album:
                # Target already loaded: hand over our files and retire self.
                log.debug("Release %r already loaded", release_id)
                album.match_files(self.unmatched_files.files)
                album.update()
                self.tagger.remove_album(self)
                return ParseResult.REDIRECT
            else:
                # Re-key this album under the new MBID.
                del self.tagger.albums[self.id]
                self.tagger.albums[release_id] = self
                self.id = release_id

        self._release_node = release_node

        # Make the release artist nodes available, since they may
        # contain supplementary data (aliases, tags, genres, ratings)
        # which aren't present in the release group, track, or
        # recording artist nodes. We can copy them into those places
        # wherever the IDs match, so that the data is shared and
        # available for use in mbjson.py and external plugins.
        self._release_artist_nodes = _create_artist_node_dict(release_node)

        # Get release metadata
        m = self._new_metadata
        m.length = 0

        rg_node = release_node["release-group"]
        rg = self.release_group = self.tagger.get_release_group_by_id(rg_node["id"])
        rg.loaded_albums.add(self.id)
        rg.refcount += 1

        _copy_artist_nodes(self._release_artist_nodes, rg_node)
        release_group_to_metadata(rg_node, rg.metadata, rg)
        m.copy(rg.metadata)
        release_to_metadata(release_node, m, album=self)

        config = get_config()

        # Custom VA name
        if m["musicbrainz_albumartistid"] == VARIOUS_ARTISTS_ID:
            m["albumartistsort"] = m["albumartist"] = config.setting["va_name"]

        # Convert Unicode punctuation
        if config.setting["convert_punctuation"]:
            m.apply_func(asciipunct)

        m["totaldiscs"] = len(release_node["media"])

        # Add album to collections
        add_release_to_user_collections(release_node)

        if config.setting["track_ars"]:
            # Detect if track relationships did not get loaded
            try:
                for medium_node in release_node["media"]:
                    if medium_node["track-count"]:
                        # Probe the first track's recording for relationships.
                        if "relations" in medium_node["tracks"][0]["recording"]:
                            return ParseResult.PARSED
                        else:
                            return ParseResult.MISSING_TRACK_RELS
            except KeyError:
                pass

        return ParseResult.PARSED
def _release_request_finished(self, document, http, error):
    """Handle completion of the main release web-service request.

    Args:
        document: parsed JSON release node (None/unused on error)
        http: the network reply the request was made on
        error: network error code; falsy on success
    """
    # A cancelled load clears load_task; ignore the stale reply.
    if self.load_task is None:
        return
    self.load_task = None
    parse_result = None
    try:
        if error:
            self.error_append(http.errorString())
            # Fix for broken NAT releases
            if error == QtNetwork.QNetworkReply.NetworkError.ContentNotFoundError:
                config = get_config()
                nats = False
                nat_name = config.setting["nat_name"]
                files = list(self.unmatched_files.files)
                # Files that belong to the NAT pseudo-album are re-homed to
                # their standalone recordings instead of failing the load.
                for file in files:
                    recordingid = file.metadata["musicbrainz_recordingid"]
                    if (
                        mbid_validate(recordingid)
                        and file.metadata["album"] == nat_name
                    ):
                        nats = True
                        self.tagger.move_file_to_nat(file, recordingid)
                        self.tagger.nats.update()
                # If every file was re-homed, drop this album and clear the
                # error so finalization does not mark the album as failed.
                if nats and not self.get_num_unmatched_files():
                    self.tagger.remove_album(self)
                    error = False
        else:
            try:
                parse_result = self._parse_release(document)
                # (An unused `config = get_config()` call was removed here.)
                if parse_result == ParseResult.MISSING_TRACK_RELS:
                    log.debug(
                        "Recording relationships not loaded in initial request for %r, issuing separate requests",
                        self,
                    )
                    self._request_recording_relationships()
                elif parse_result == ParseResult.PARSED:
                    self._run_album_metadata_processors()
                elif parse_result == ParseResult.REDIRECT:
                    # Release was merged into an already loaded album;
                    # nothing further to finalize here.
                    error = False
            except Exception:
                error = True
                self.error_append(traceback.format_exc())
    finally:
        self._requests -= 1
        # Finalize only on full success or outright failure;
        # MISSING_TRACK_RELS keeps further requests outstanding.
        if parse_result == ParseResult.PARSED or error:
            self._finalize_loading(error)
def _request_recording_relationships(self, offset=0, limit=RECORDING_QUERY_LIMIT):
    """Browse recording-level relationships for this release, one page at a time.

    Issued when the initial release request came back without recording
    relationships. Responses arrive in _recordings_request_finished, which
    re-issues this request with a higher offset until all pages are read.
    """
    # Relationship kinds to include in the browse request.
    inc = (
        "artist-rels",
        "recording-rels",
        "release-rels",
        "url-rels",
        "work-rels",
        "work-level-rels",
    )
    log.debug(
        "Loading recording relationships for %r (offset=%i, limit=%i)",
        self,
        offset,
        limit,
    )
    # Keep the outstanding-request counter in sync so _finalize_loading
    # waits for this request as well.
    self._requests += 1
    self.load_task = self.tagger.mb_api.browse_recordings(
        self._recordings_request_finished,
        inc=inc,
        release=self.id,
        limit=limit,
        offset=offset,
    )
def _recordings_request_finished(self, document, http, error):
    """Handle one page of the recording-relationships browse request.

    Accumulates recordings into _recordings_map, requests the next page
    while more remain, and on the final page merges the relationships
    into the release node and continues album finalization.
    """
    if error:
        self.error_append(http.errorString())
        self._requests -= 1
        self._finalize_loading(error)
    else:
        for recording in document.get("recordings", []):
            recording_id = recording.get("id")
            if recording_id:
                self._recordings_map[recording_id] = recording
        count = document.get("recording-count", 0)
        offset = document.get("recording-offset", 0)
        # NOTE(review): the offset is advanced by the constant
        # RECORDING_QUERY_LIMIT, which assumes every request was issued
        # with that limit — confirm if a custom limit is ever passed.
        next_offset = offset + RECORDING_QUERY_LIMIT
        if next_offset < count:
            self._requests -= 1
            self._request_recording_relationships(offset=next_offset)
        else:
            # Merge separately loaded recording relationships into release node
            self._merge_release_recording_relationships()
            self._run_album_metadata_processors()
            self._requests -= 1
            self._finalize_loading(error)
def _merge_recording_relationships(self, track_node):
if "relations" not in track_node["recording"]:
recording = self._recordings_map.get(track_node["recording"]["id"])
if recording:
track_node["recording"]["relations"] = recording.get("relations", [])
def _merge_release_recording_relationships(self):
for medium_node in self._release_node["media"]:
pregap_node = medium_node.get("pregap")
if pregap_node:
self._merge_recording_relationships(pregap_node)
for track_node in medium_node.get("tracks", []):
self._merge_recording_relationships(track_node)
for track_node in medium_node.get("data-tracks", []):
self._merge_recording_relationships(track_node)
self._recordings_map = {}
def _finalize_loading_track(self, track_node, metadata, artists, extra_metadata=None):
    """Create a Track for *track_node* and populate its metadata.

    Args:
        track_node: track JSON node from the release document
        metadata: medium-level Metadata used as the track's starting point
        artists: set collecting each track's artist (multi-artist detection)
        extra_metadata: optional extra tags applied after conversion
    Returns the new Track (also appended to self._new_tracks).
    """
    # As noted in `_parse_release` above, the release artist nodes
    # may contain supplementary data that isn't present in track
    # artist nodes. Similarly, the track artists may contain
    # information which the recording artists don't. Copy this
    # information across to wherever the artist IDs match.
    _copy_artist_nodes(self._release_artist_nodes, track_node)
    _copy_artist_nodes(self._release_artist_nodes, track_node["recording"])
    _copy_artist_nodes(
        _create_artist_node_dict(track_node), track_node["recording"]
    )
    track = Track(track_node["recording"]["id"], self)
    self._new_tracks.append(track)
    # Get track metadata
    tm = track.metadata
    tm.copy(metadata)
    track_to_metadata(track_node, track)
    track._customize_metadata()
    # Accumulate the album's total runtime from the individual tracks.
    self._new_metadata.length += tm.length
    artists.add(tm["artist"])
    if extra_metadata:
        tm.update(extra_metadata)
    # Run track metadata plugins
    try:
        run_track_metadata_processors(self, tm, track_node, self._release_node)
    except BaseException:
        # Plugins must never abort album loading; record and continue.
        self.error_append(traceback.format_exc())
    return track
def _load_tracks(self):
    """Build Track objects for every medium/track in the release node.

    Consumes self._release_node / self._release_artist_nodes (deleted at
    the end) and fills self._new_tracks and self._new_metadata.
    """
    artists = set()
    all_media = []
    absolutetracknumber = 0

    def _load_track(node, mm, artists, extra_metadata):
        # Number tracks sequentially across all media of the release.
        nonlocal absolutetracknumber
        absolutetracknumber += 1
        extra_metadata["~absolutetracknumber"] = absolutetracknumber
        self._finalize_loading_track(node, mm, artists, extra_metadata)

    va = self._new_metadata["musicbrainz_albumartistid"] == VARIOUS_ARTISTS_ID
    # Per-disc DJ-mixer credits; presumably attached to the metadata while
    # parsing the release relationships — confirm where _djmix_ars is set.
    djmix_ars = {}
    if hasattr(self._new_metadata, "_djmix_ars"):
        djmix_ars = self._new_metadata._djmix_ars
    for medium_node in self._release_node["media"]:
        # Medium metadata starts as a copy of the album-level metadata.
        mm = Metadata()
        mm.copy(self._new_metadata)
        medium_to_metadata(medium_node, mm)
        fmt = medium_node.get("format")
        if fmt:
            all_media.append(fmt)
        for dj in djmix_ars.get(mm["discnumber"], []):
            mm.add("djmixer", dj)
        if va:
            mm["compilation"] = "1"
        else:
            del mm["compilation"]
        if "discs" in medium_node:
            discids = [disc.get("id") for disc in medium_node["discs"]]
            mm["~musicbrainz_discids"] = discids
            mm["musicbrainz_discid"] = list(self._discids.intersection(discids))
        pregap_node = medium_node.get("pregap")
        if pregap_node:
            mm["~discpregap"] = "1"
            _load_track(pregap_node, mm, artists, {"~pregap": "1"})
        for track_node in medium_node.get("tracks", []):
            _load_track(track_node, mm, artists, {})
        for track_node in medium_node.get("data-tracks", []):
            _load_track(track_node, mm, artists, {"~datatrack": "1"})
    totalalbumtracks = absolutetracknumber
    self._new_metadata["~totalalbumtracks"] = totalalbumtracks
    # Generate a list of unique media, but keep order of first appearance
    self._new_metadata["media"] = " / ".join(list(OrderedDict.fromkeys(all_media)))
    multiartists = len(artists) > 1
    for track in self._new_tracks:
        track.metadata["~totalalbumtracks"] = totalalbumtracks
        if multiartists:
            track.metadata["~multiartist"] = "1"
    # The raw web-service nodes are no longer needed; free them.
    del self._release_node
    del self._release_artist_nodes
    self._tracks_loaded = True
def _finalize_loading_album(self):
    """Commit the freshly loaded metadata/tracks and re-match files.

    Runs the user's tagger scripts over every new track and the album,
    swaps the staged _new_metadata/_new_tracks into place, re-matches
    files, refreshes the UI and fires the after-load callbacks.
    """
    # Batch cover-art updates until the album state is fully swapped in.
    self.enable_update_metadata_images(False)
    for track in self._new_tracks:
        track.orig_metadata.copy(track.metadata)
        track.metadata_images_changed.connect(self.update_metadata_images)
    # Prepare parser for user's script
    for s_name, s_text in enabled_tagger_scripts_texts():
        parser = ScriptParser()
        for track in self._new_tracks:
            # Run tagger script for each track
            try:
                parser.eval(s_text, track.metadata)
            except ScriptError:
                log.exception("Failed to run tagger script %s on track", s_name)
            track.metadata.strip_whitespace()
            track.scripted_metadata.update(track.metadata)
        # Run tagger script for the album itself
        try:
            parser.eval(s_text, self._new_metadata)
        except ScriptError:
            log.exception("Failed to run tagger script %s on album", s_name)
        self._new_metadata.strip_whitespace()
    # Files currently attached to the (old) track list; they are
    # re-matched against the new tracks below.
    unmatched_files = [file for track in self.tracks for file in track.files]
    self.metadata = self._new_metadata
    self.orig_metadata.copy(self.metadata)
    self.orig_metadata.images.clear()
    self.tracks = self._new_tracks
    del self._new_metadata
    del self._new_tracks
    self.loaded = True
    self.status = AlbumStatus.LOADED
    self.match_files(unmatched_files + self.unmatched_files.files)
    self.enable_update_metadata_images(True)
    self.update_metadata_images()
    self.update()
    self.tagger.window.set_statusbar_message(
        N_("Album %(id)s loaded: %(artist)s - %(album)s"),
        {
            "id": self.id,
            "artist": self.metadata["albumartist"],
            "album": self.metadata["album"],
        },
        timeout=3000,
    )
    # On success every registered callback runs, regardless of `always`.
    for func, always in self._after_load_callbacks:
        func()
    self._after_load_callbacks = []
    if self.item.isSelected():
        self.tagger.window.refresh_metadatabox()
        self.tagger.window.cover_art_box.update_metadata()
def _finalize_loading(self, error):
    """Advance album loading after a web-service request completes.

    Called once per finished request; only proceeds to track loading and
    finalization when no requests remain outstanding.
    """
    if self.loaded:
        # This is not supposed to happen, _finalize_loading should only
        # be called once after all requests finished.
        import inspect
        # Include the caller's location in the warning to aid debugging.
        stack = inspect.stack()
        args = [self]
        msg = "Album._finalize_loading called for already loaded album %r"
        if len(stack) > 1:
            f = stack[1]
            msg += " at %s:%d in %s"
            args.extend((f.filename, f.lineno, f.function))
        log.warning(msg, *args)
        return
    if error:
        self.metadata.clear()
        self.status = AlbumStatus.ERROR
        self.update()
        if not self._requests:
            del self._new_metadata
            del self._new_tracks
            self.loaded = True
            # On failure only callbacks registered with always=True run.
            # NOTE(review): unlike _finalize_loading_album, the callback
            # list is not cleared here — confirm that is intentional.
            for func, always in self._after_load_callbacks:
                if always:
                    func()
        return
    if self._requests > 0:
        # More responses still pending; the last one finalizes.
        return
    if not self._tracks_loaded:
        self._load_tracks()
    if not self._requests:
        self._finalize_loading_album()
def load(self, priority=False, refresh=False):
    """Start (re)loading this album from the MusicBrainz web service.

    Args:
        priority: forwarded to the web-service layer (queue priority)
        refresh: forwarded to the web-service layer (bypass cache)
    """
    if self._requests:
        log.info("Not reloading, some requests are still active.")
        return
    self.tagger.window.set_statusbar_message(
        N_("Loading album %(id)s …"), {"id": self.id}
    )
    # Reset all load state before issuing the request.
    self.loaded = False
    self.status = AlbumStatus.LOADING
    if self.release_group:
        self.release_group.loaded = False
        self.release_group.genres.clear()
    self.metadata.clear()
    self.genres.clear()
    self.update(update_selection=False)
    self._new_metadata = Metadata()
    self._new_tracks = []
    # Counts this initial release request; adjusted as follow-up
    # requests are issued and finish.
    self._requests = 1
    self.clear_errors()
    config = get_config()
    require_authentication = False
    # Entities to inline into the release lookup response.
    inc = {
        "aliases",
        "annotation",
        "artist-credits",
        "artists",
        "collections",
        "discids",
        "isrcs",
        "labels",
        "media",
        "recordings",
        "release-groups",
    }
    if self.tagger.webservice.oauth_manager.is_authorized():
        require_authentication = True
        inc |= {"user-collections"}
    # Relationship data is only requested when the user enabled it.
    if config.setting["release_ars"] or config.setting["track_ars"]:
        inc |= {
            "artist-rels",
            "recording-rels",
            "release-group-level-rels",
            "release-rels",
            "series-rels",
            "url-rels",
            "work-rels",
        }
        if config.setting["track_ars"]:
            inc |= {
                "recording-level-rels",
                "work-level-rels",
            }
    # Genre-related inc parameters may also require a login.
    require_authentication = (
        self.set_genre_inc_params(inc, config) or require_authentication
    )
    if config.setting["enable_ratings"]:
        require_authentication = True
        inc |= {"user-ratings"}
    self.load_task = self.tagger.mb_api.get_release_by_id(
        self.id,
        self._release_request_finished,
        inc=inc,
        mblogin=require_authentication,
        priority=priority,
        refresh=refresh,
    )
def run_when_loaded(self, func, always=False):
    """Run *func* now if the album is loaded, otherwise defer it.

    Deferred callbacks registered with always=True also fire when
    loading ends in an error.
    """
    if not self.loaded:
        self._after_load_callbacks.append((func, always))
        return
    func()
def stop_loading(self):
    """Cancel the in-flight web-service request for this album, if any."""
    task = self.load_task
    if not task:
        return
    self.tagger.webservice.remove_task(task)
    self.load_task = None
def update(self, update_tracks=True, update_selection=True):
    """Refresh this album's tree-view item; no-op when not attached."""
    item = self.item
    if not item:
        return
    item.update(update_tracks, update_selection=update_selection)
def add_file(self, track, file, new_album=True):
    """Account for *file* being attached to *track* on this album.

    With new_album=True the view and the album's cover-art state are
    refreshed as well.
    """
    self._files_count += 1
    if not new_album:
        return
    self.update(update_tracks=False)
    add_metadata_images(self, [file])
def remove_file(self, track, file, new_album=True):
    """Account for *file* being detached from *track* on this album.

    With new_album=True the view and the album's cover-art state are
    refreshed as well.
    """
    self._files_count -= 1
    if not new_album:
        return
    self.update(update_tracks=False)
    remove_metadata_images(self, [file])
@staticmethod
def _match_files(files, tracks, unmatched_files, threshold=0):
    """Match files to tracks on this album, based on metadata similarity or recordingid.

    Yields (file, target) pairs, where target is either the matched
    track or *unmatched_files* when nothing clears *threshold*.
    """
    # Lazily built lookup from (mbid[, tracknumber[, discnumber]]) tuples
    # to tracks; missing keys yield None (defaultdict).
    tracks_cache = defaultdict(lambda: None)

    def build_tracks_cache():
        for track in tracks:
            tm_recordingid = track.orig_metadata["musicbrainz_recordingid"]
            tm_trackid = track.orig_metadata["musicbrainz_trackid"]
            tm_tracknumber = track.orig_metadata["tracknumber"]
            tm_discnumber = track.orig_metadata["discnumber"]
            # Register each track under increasingly loose keys so lookups
            # fall back from exact (id, track#, disc#) to the id alone.
            for tup in (
                (tm_recordingid, tm_tracknumber, tm_discnumber),
                (tm_recordingid, tm_tracknumber),
                (tm_recordingid,),
                (tm_trackid, tm_tracknumber, tm_discnumber),
                (tm_trackid, tm_tracknumber),
                (tm_trackid,),
            ):
                tracks_cache[tup] = track

    SimMatchAlbum = namedtuple("SimMatchAlbum", "similarity track")
    # Sentinel match: any real candidate (similarity >= 0) beats it, and
    # its "track" routes the file to the unmatched bucket.
    no_match = SimMatchAlbum(similarity=-1, track=unmatched_files)

    for file in list(files):
        if file.state == File.REMOVED:
            continue
        # if we have a recordingid or trackid to match against, use that in priority
        # if recordingid and trackid do point to different tracks, compare the file
        # and track durations to find the better match.
        recid = file.match_recordingid or file.metadata["musicbrainz_recordingid"]
        trackid = file.metadata["musicbrainz_trackid"]
        tracknumber = file.metadata["tracknumber"]
        discnumber = file.metadata["discnumber"]

        def mbid_candidates():
            for mbid in (recid, trackid):
                if mbid and mbid_validate(mbid):
                    # Build the cache on first use only.
                    if not tracks_cache:
                        build_tracks_cache()
                    track = (
                        tracks_cache[(mbid, tracknumber, discnumber)]
                        or tracks_cache[(mbid, tracknumber)]
                        or tracks_cache[(mbid,)]
                    )
                    if track:
                        # Rank candidates by how close the durations are.
                        similarity = track.metadata.length_score(
                            track.metadata.length, file.metadata.length
                        )
                        yield SimMatchAlbum(similarity=similarity, track=track)

        best_match = find_best_match(mbid_candidates(), no_match)
        if best_match.result != no_match:
            yield (file, best_match.result.track)
            continue

        # try to match by similarity
        def similarity_candidates():
            for track in tracks:
                similarity = track.metadata.compare(file.orig_metadata)
                if similarity >= threshold:
                    yield SimMatchAlbum(similarity=similarity, track=track)

        best_match = find_best_match(similarity_candidates(), no_match)
        yield (file, best_match.result.track)
def match_files(self, files):
    """Match and move files to tracks on this album, based on metadata similarity or recordingid."""
    if self.loaded:
        config = get_config()
        threshold = config.setting["track_matching_threshold"]
        moves = self._match_files(
            files, self.tracks, self.unmatched_files, threshold=threshold
        )
        # Suppress per-file metadata-box refreshes while moving in bulk.
        with self.tagger.window.metadata_box.ignore_updates:
            for file, target in moves:
                file.move(target)
    else:
        # Tracks aren't known yet: park everything in the unmatched
        # bucket; files get re-matched once loading finishes.
        with self.tagger.window.metadata_box.ignore_updates:
            for file in list(files):
                file.move(self.unmatched_files)
def can_save(self):
    """An album can be saved once at least one file is attached to it."""
    has_files = self._files_count > 0
    return has_files
def can_remove(self):
    """Albums can always be removed."""
    return True
def can_edit_tags(self):
    """Tag editing is always available for albums."""
    return True
def can_analyze(self):
    """Audio analysis applies to files, not albums."""
    return False
def can_autotag(self):
    """Auto-tagging applies to files/clusters, not albums."""
    return False
def can_refresh(self):
    """Regular albums can be reloaded from the web service."""
    return True
def can_view_info(self):
    """Info is viewable once the album loaded, or if errors were recorded."""
    if self.loaded:
        return True
    return bool(self.errors)
def is_album_like(self):
    """This object behaves like an album in the item tree."""
    return True
def get_num_matched_tracks(self):
    """Count tracks that currently have at least one file linked."""
    matched = [track for track in self.tracks if track.is_linked()]
    return len(matched)
def get_num_unmatched_files(self):
    """Number of files sitting in this album's "unmatched files" bucket."""
    return len(self.unmatched_files.files)
def get_num_total_files(self):
    """Total file count: files on tracks plus unmatched files."""
    unmatched = len(self.unmatched_files.files)
    return self._files_count + unmatched
def is_complete(self):
    """True when every track is complete and no unmatched files remain.

    An album with no tracks (e.g. not yet loaded) is never complete.
    """
    if not self.tracks:
        return False
    if not all(track.is_complete() for track in self.tracks):
        return False
    return not self.get_num_unmatched_files()
def is_modified(self):
    """True if any file on this album still has unsaved changes."""
    for _ in self._iter_unsaved_files():
        return True
    return False
def get_num_unsaved_files(self):
    """Count files on this album with unsaved metadata changes."""
    return len(list(self._iter_unsaved_files()))
def _iter_unsaved_files(self):
yield from (file for file in self.iterfiles(save=True) if not file.is_saved())
def column(self, column):
    """Return the display text for *column* in the album tree view."""
    if column == "title":
        # Title reflects the load status plus match/save statistics.
        if self.status == AlbumStatus.LOADING:
            title = _("[loading album information]")
        elif self.status == AlbumStatus.ERROR:
            title = _("[could not load album %s]") % self.id
        else:
            title = self.metadata["album"]
        if self.tracks:
            # "matched/total" tracks, plus "N?" unmatched and "N*" unsaved.
            elems = ["%d/%d" % (self.get_num_matched_tracks(), len(self.tracks))]
            unmatched = self.get_num_unmatched_files()
            if unmatched:
                elems.append("%d?" % (unmatched,))
            unsaved = self.get_num_unsaved_files()
            if unsaved:
                elems.append("%d*" % (unsaved,))
            ca_detailed = self.cover_art_description_detailed()
            if ca_detailed:
                elems.append(ca_detailed)
            # \u200E is the Unicode left-to-right mark — presumably to keep
            # the stats suffix ordered correctly next to RTL album titles.
            return "%s\u200E (%s)" % (title, "; ".join(elems))
        else:
            return title
    elif column == "~length":
        length = self.metadata.length
        if length:
            return format_time(length)
        else:
            return ""
    elif column == "artist":
        return self.metadata["albumartist"]
    elif column == "tracknumber":
        return self.metadata["~totalalbumtracks"]
    elif column == "discnumber":
        return self.metadata["totaldiscs"]
    elif column == "covercount":
        return self.cover_art_description()
    else:
        # Any other column maps straight to a metadata tag.
        return self.metadata[column]
def switch_release_version(self, mbid):
    """Switch this album object to a different release version (*mbid*).

    Files are detached first, then either handed over to an already
    loaded album for *mbid*, or this album is re-keyed and reloaded.
    """
    if mbid == self.id:
        return
    # Detach all files; they are re-matched after the switch.
    for file in list(self.iterfiles(True)):
        file.move(self.unmatched_files)
    album = self.tagger.albums.get(mbid)
    if album:
        # Target version already loaded: hand the files over and
        # dispose of this album object.
        album.match_files(self.unmatched_files.files)
        album.update()
        self.tagger.remove_album(self)
    else:
        # Re-register this object under the new MBID and reload it.
        del self.tagger.albums[self.id]
        self.release_group.loaded_albums.discard(self.id)
        self.id = mbid
        self.tagger.albums[mbid] = self
        self.load(priority=True, refresh=True)
def update_metadata_images(self):
    """Recompute this album's cover-art state and notify the UI."""
    if not self.update_metadata_images_enabled:
        return
    # Calls the module-level update_metadata_images() helper (which
    # shares this method's name); its truthy return gates a UI refresh.
    if update_metadata_images(self):
        self.update(False)
    self.metadata_images_changed.emit()
def keep_original_images(self):
    """Revert tracks and unmatched files to their original embedded images,
    batching the cover-art refresh until everything has been reverted."""
    self.enable_update_metadata_images(False)
    for trk in self.tracks:
        trk.keep_original_images()
    # Snapshot the file list: reverting may reshuffle the bucket.
    for fl in tuple(self.unmatched_files.files):
        fl.keep_original_images()
    self.enable_update_metadata_images(True)
    self.update_metadata_images()
class NatAlbum(Album):
    """Pseudo-album grouping "non-album tracks" (standalone recordings)."""

    def __init__(self):
        # Fixed id "NATS"; the tagger keeps a single instance of this album.
        super().__init__("NATS")
        self.loaded = True
        self.update()

    def update(self, update_tracks=True, update_selection=True):
        """Refresh the display, renaming the album to the configured NAT name."""
        config = get_config()
        self.enable_update_metadata_images(False)
        old_album_title = self.metadata["album"]
        self.metadata["album"] = config.setting["nat_name"]
        for track in self.tracks:
            # Propagate the rename only to tracks still carrying the old title.
            if old_album_title == track.metadata["album"]:
                track.metadata["album"] = self.metadata["album"]
            for file in track.files:
                track.update_file_metadata(file)
        self.enable_update_metadata_images(True)
        super().update(update_tracks, update_selection)

    def _finalize_loading(self, error):
        # NAT albums are never network-loaded; just refresh the display.
        self.update()

    def can_refresh(self):
        """NAT albums cannot be reloaded from the web service."""
        return False

    def can_browser_lookup(self):
        """There is no MusicBrainz page for the NAT pseudo-album."""
        return False

    def can_view_info(self):
        """The info dialog does not apply to the NAT pseudo-album."""
        return False
_album_post_removal_processors = PluginFunctions(label="album_post_removal_processors")
def register_album_post_removal_processor(function, priority=PluginPriority.NORMAL):
    """Register *function* to run after an album has been removed.

    Args:
        function: callable invoked with the removed album object
        priority: execution priority among registered processors
                  (PluginPriority.NORMAL by default)

    Returns:
        None
    """
    _album_post_removal_processors.register(function.__module__, function, priority)
def run_album_post_removal_processors(album_object):
    """Invoke all registered album-post-removal callbacks on *album_object*."""
    _album_post_removal_processors.run(album_object)
|
options | tags_compatibility_wave | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006 Lukáš Lalinský
# Copyright (C) 2019-2021 Philipp Wolfer
# Copyright (C) 2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.config import BoolOption, TextOption, get_config
from picard.formats.wav import WAVFile
from picard.ui.options import OptionsPage, register_options_page
from picard.ui.ui_options_tags_compatibility_wave import Ui_TagsCompatibilityOptionsPage
class TagsCompatibilityWaveOptionsPage(OptionsPage):
    """Options page configuring RIFF INFO tag compatibility for WAVE files."""

    NAME = "tags_compatibility_wave"
    TITLE = N_("WAVE")
    PARENT = "tags"
    SORT_ORDER = 60
    ACTIVE = True
    HELP_URL = "/config/options_tags_compatibility_wave.html"

    options = [
        BoolOption("setting", "write_wave_riff_info", True),
        BoolOption("setting", "remove_wave_riff_info", False),
        TextOption("setting", "wave_riff_info_encoding", "windows-1252"),
    ]

    def __init__(self, parent=None):
        super().__init__(parent)
        self.ui = Ui_TagsCompatibilityOptionsPage()
        self.ui.setupUi(self)

    def load(self):
        """Populate the widgets from the saved configuration."""
        setting = get_config().setting
        self.ui.write_wave_riff_info.setChecked(setting["write_wave_riff_info"])
        self.ui.remove_wave_riff_info.setChecked(setting["remove_wave_riff_info"])
        # The encoding is represented by a pair of radio buttons.
        if setting["wave_riff_info_encoding"] == "utf-8":
            self.ui.wave_riff_info_enc_utf8.setChecked(True)
        else:
            self.ui.wave_riff_info_enc_cp1252.setChecked(True)

    def save(self):
        """Write the widget state back to the configuration."""
        setting = get_config().setting
        setting["write_wave_riff_info"] = self.ui.write_wave_riff_info.isChecked()
        setting["remove_wave_riff_info"] = self.ui.remove_wave_riff_info.isChecked()
        if self.ui.wave_riff_info_enc_utf8.isChecked():
            setting["wave_riff_info_encoding"] = "utf-8"
        else:
            setting["wave_riff_info_encoding"] = "windows-1252"
# Only expose this options page when the WAV format implementation
# actually supports tagging.
if WAVFile.supports_tag("artist"):
    register_options_page(TagsCompatibilityWaveOptionsPage)
|
gadgets | drag | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Event-filtering objects and helper functions to drag things.
"""
from PyQt5.QtCore import QEvent, QFileInfo, QMimeData, QObject, Qt, QUrl
from PyQt5.QtGui import QDrag
from PyQt5.QtWidgets import QApplication, QFileIconProvider
class ComboDrag(QObject):
    """Enables dragging from a QComboBox.

    Instantiate this with a QComboBox as parent to enable dragging the
    current item.

    By default, drags a filename got from the current index under the
    Qt.EditRole. Change the role by changing the 'role' instance attribute.
    """

    # Model column and item-data role the dragged filename is read from.
    column = 0
    role = Qt.EditRole

    def __init__(self, combobox):
        super().__init__(combobox)
        self._dragpos = None
        combobox.installEventFilter(self)

    def eventFilter(self, combobox, ev):
        """Watch mouse events on the combobox; True eats the event."""
        if ev.type() == QEvent.MouseButtonPress and ev.button() == Qt.LeftButton:
            self._dragpos = ev.pos()
            # Press is consumed for non-editable boxes (presumably to delay
            # the popup until release — see the release branch below).
            return not combobox.isEditable()
        elif (
            ev.type() == QEvent.MouseMove
            and ev.buttons() & Qt.LeftButton
            and combobox.count() > 0
        ):
            return self.mouseMoved(combobox, ev.pos()) or False
        elif (
            ev.type() == QEvent.MouseButtonRelease
            and ev.button() == Qt.LeftButton
            and not combobox.isEditable()
        ):
            # Re-deliver the swallowed press so the combobox still reacts.
            combobox.mousePressEvent(ev)
            return False
        # Falling off the end returns None, which Qt treats as False.

    def mouseMoved(self, combobox, pos):
        """Start a drag once the cursor passed the platform drag threshold."""
        if (
            self._dragpos is not None
            and (pos - self._dragpos).manhattanLength()
            >= QApplication.startDragDistance()
        ):
            self.startDrag(combobox)
            return True

    def startDrag(self, combobox):
        """Drag the current item's filename (and icon) out of the box."""
        index = combobox.model().index(combobox.currentIndex(), self.column)
        filename = combobox.model().data(index, self.role)
        icon = combobox.model().data(index, Qt.DecorationRole)
        dragFile(combobox, filename, icon, Qt.CopyAction)
class Dragger(QObject):
    """Drags anything from any widget.

    Use dragger.installEventFilter(widget) to have it drag.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self._dragpos = None
        # Passing a parent also installs the filter on it, so a bare
        # Dragger(widget) works without a separate installEventFilter call.
        if parent:
            parent.installEventFilter(self)

    def eventFilter(self, widget, ev):
        """Track left-button presses and moves; True eats the event."""
        if ev.type() == QEvent.MouseButtonPress and ev.button() == Qt.LeftButton:
            self._dragpos = ev.pos()
            # NOTE(review): the press is always consumed here, so the
            # watched widget never sees left clicks — confirm intended.
            return True
        elif ev.type() == QEvent.MouseMove and ev.buttons() & Qt.LeftButton:
            return self.mouseMoved(widget, ev.pos()) or False
        return False

    def mouseMoved(self, widget, pos):
        """Start a drag once the cursor passed the platform drag threshold."""
        if (
            self._dragpos is not None
            and (pos - self._dragpos).manhattanLength()
            >= QApplication.startDragDistance()
        ):
            self.startDrag(widget)
            return True

    def startDrag(self, widget):
        """Reimplement to start a drag."""
class FileDragger(Dragger):
    """Dragger that drags a single local file, named by filename()."""

    def filename(self):
        """Return the filename to drag; a falsy value aborts the drag."""

    def startDrag(self, widget):
        """Start dragging the file if filename() yields one."""
        name = self.filename()
        if not name:
            return
        dragFile(widget, name)
def dragFile(widget, filename, icon=None, dropactions=Qt.CopyAction):
    """Starts dragging the given local file from the widget.

    icon: optional icon shown under the cursor; when missing or null,
    the platform's file-type icon is used instead.
    dropactions: allowed Qt drop actions (copy only by default).
    """
    if icon is None or icon.isNull():
        icon = QFileIconProvider().icon(QFileInfo(filename))
    drag = QDrag(widget)
    data = QMimeData()
    # The file is exposed to drop targets as a local-file URL.
    data.setUrls([QUrl.fromLocalFile(filename)])
    drag.setMimeData(data)
    drag.setPixmap(icon.pixmap(32))
    # QDrag.exec_ blocks until the drag-and-drop operation finishes.
    drag.exec_(dropactions)
|
postprocessor | metadatafromtitle | from __future__ import unicode_literals
import re
from .common import PostProcessor
class MetadataFromTitlePP(PostProcessor):
    """Parse metadata fields out of a video title using a format template.

    ``titleformat`` is either a template like ``'%(artist)s - %(title)s'``
    (converted to a named-group regex) or, when it contains no ``%(...)s``
    placeholders, a string used directly as a regular expression.
    """

    def __init__(self, downloader, titleformat):
        super(MetadataFromTitlePP, self).__init__(downloader)
        self._titleformat = titleformat
        # NOTE(review): a format without %(..)s placeholders is used
        # verbatim as a regex, so metacharacters in it are significant.
        self._titleregex = (
            self.format_to_regex(titleformat)
            if re.search(r"%\(\w+\)s", titleformat)
            else titleformat
        )

    def format_to_regex(self, fmt):
        r"""
        Converts a string like
           '%(title)s - %(artist)s'
        to a regex like
           '(?P<title>.+)\ \-\ (?P<artist>.+)'
        """
        lastpos = 0
        regex = ""
        # replace %(..)s with regex group and escape other string parts
        for match in re.finditer(r"%\((\w+)\)s", fmt):
            regex += re.escape(fmt[lastpos : match.start()])
            regex += r"(?P<" + match.group(1) + ">.+)"
            lastpos = match.end()
        if lastpos < len(fmt):
            regex += re.escape(fmt[lastpos:])
        return regex

    def run(self, info):
        """Match the title against the regex and copy named groups into info."""
        title = info["title"]
        match = re.match(self._titleregex, title)
        if match is None:
            self._downloader.to_screen(
                '[fromtitle] Could not interpret title of video as "%s"'
                % self._titleformat
            )
            return [], info
        for attribute, value in match.groupdict().items():
            if value is None:
                # Optional group did not participate in the match.
                continue
            info[attribute] = value
            # value cannot be None past the guard above, so the previous
            # `value if value is not None else "NA"` was dead code.
            self._downloader.to_screen(
                "[fromtitle] parsed %s: %s" % (attribute, value)
            )
        return [], info
|
common | path | # -*- Mode: Python; test-case-name: whipper.test.test_common_path -*-
# vi:si:et:sw=4:sts=4:ts=4
# Copyright (C) 2009 Thomas Vander Stichele
# This file is part of whipper.
#
# whipper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# whipper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with whipper. If not, see <http://www.gnu.org/licenses/>.
import re
class PathFilter:
    """Filter path components for safe storage on file systems."""

    def __init__(
        self, dot=True, posix=True, vfat=False, whitespace=False, printable=False
    ):
        """
        Init PathFilter.

        :param dot: whether to strip leading dot
        :param posix: whether to strip illegal chars in *nix OSes
        :param vfat: whether to strip illegal chars in VFAT filesystems
        :param whitespace: whether to strip all whitespace chars
        :param printable: whether to strip all non printable ASCII chars
        """
        self._dot = dot
        self._posix = posix
        self._vfat = vfat
        self._whitespace = whitespace
        self._printable = printable

    def filter(self, path):
        """Return *path* with every disallowed character replaced by '_'."""
        repl = "_"
        # A leading dot would hide the file on *nix; neutralize it first.
        if self._dot and path.startswith("."):
            path = repl + path[1:]
        # (enabled, pattern) pairs, applied in this order.
        rules = (
            (self._posix, r"[/\x00]"),
            (self._vfat, r"[\x00-\x1F\x7F\"*/:<>?\\|]"),
            (self._whitespace, r"\s"),
            (self._printable, r"[^\x20-\x7E]"),
        )
        for enabled, pattern in rules:
            if enabled:
                path = re.sub(pattern, repl, path)
        return path
|
aliceVision | PhotometricStereo | __version__ = "1.0"
from meshroom.core import desc
class PhotometricStereo(desc.CommandLineNode):
    # Meshroom node wrapping the aliceVision_photometricStereo CLI tool.
    commandLine = "aliceVision_photometricStereo {allParams}"
    category = "Photometry"
    documentation = """
Reconstruction using Photometric Stereo. A normal map is evaluated from several photographs taken from the same point of view, but under different lighting conditions.
The lighting conditions are assumed to be known.
"""

    inputs = [
        desc.File(
            name="inputPath",
            label="SfMData",
            description="Input SfMData file.",
            value="",
            uid=[0],
        ),
        desc.File(
            name="pathToJSONLightFile",
            label="Light File",
            description="Path to a JSON file containing the lighting information.\n"
            "If empty, .txt files are expected in the image folder.",
            value="defaultJSON.txt",
            uid=[0],
        ),
        desc.File(
            name="maskPath",
            label="Mask Folder Path",
            description="Path to a folder containing masks or to a mask directly.",
            value="",
            uid=[0],
        ),
        desc.ChoiceParam(
            name="SHOrder",
            label="Spherical Harmonics Order",
            # Typo fix: "ambiant" -> "ambient" in the displayed description.
            description="Order of the spherical harmonics:\n"
            " - 0: directional.\n"
            " - 1: directional + ambient.\n"
            " - 2: second order spherical harmonics.",
            values=["0", "1", "2"],
            value="0",
            exclusive=True,
            advanced=True,
            uid=[0],
        ),
        desc.BoolParam(
            # The historical misspelling is kept in `name`: it is the CLI
            # parameter key and renaming it would break the command line.
            name="removeAmbiant",
            label="Remove Ambient Light",
            description="True if the ambient light is to be removed on the PS images, false otherwise.",
            value=False,
            advanced=True,
            uid=[0],
        ),
        desc.BoolParam(
            name="isRobust",
            label="Use Robust Algorithm",
            description="True to use the robust algorithm, false otherwise.",
            value=False,
            advanced=True,
            uid=[0],
        ),
        desc.IntParam(
            name="downscale",
            label="Downscale Factor",
            description="Downscale factor for faster results.",
            value=1,
            range=(1, 10, 1),
            advanced=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name="verboseLevel",
            label="Verbose Level",
            description="Verbosity level (fatal, error, warning, info, debug, trace).",
            value="info",
            values=["fatal", "error", "warning", "info", "debug", "trace"],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="outputPath",
            label="Output Folder",
            description="Path to the output folder.",
            value=desc.Node.internalFolder,
            uid=[],
        ),
        desc.File(
            name="outputSfmData",
            label="SfMData",
            description="Output path for the SfMData file.",
            value=desc.Node.internalFolder + "/sfmData.sfm",
            uid=[],
            group="",  # remove from command line
        ),
        desc.File(
            name="outputSfmDataAlbedo",
            label="SfMData Albedo",
            description="Output SfMData file containing the albedo information.",
            value=desc.Node.internalFolder + "/albedoMaps.sfm",
            uid=[],
            group="",  # remove from command line
        ),
        desc.File(
            name="outputSfmDataNormal",
            label="SfMData Normal",
            description="Output SfMData file containing the normal maps information.",
            value=desc.Node.internalFolder + "/normalMaps.sfm",
            uid=[],
            group="",  # remove from command line
        ),
        # these attributes are only here to describe more accurately the output of the node
        # by specifying that it generates 2 sequences of images
        # (see in Viewer2D.qml how these attributes can be used)
        desc.File(
            name="normals",
            label="Normal Maps Camera",
            description="Generated normal maps in the camera coordinate system.",
            semantic="image",
            value=desc.Node.internalFolder + "<POSE_ID>_normals.exr",
            uid=[],
            group="",  # do not export on the command line
        ),
        desc.File(
            name="normalsWorld",
            label="Normal Maps World",
            description="Generated normal maps in the world coordinate system.",
            semantic="image",
            value=desc.Node.internalFolder + "<POSE_ID>_normals_w.exr",
            uid=[],
            group="",  # do not export on the command line
        ),
        desc.File(
            name="albedo",
            label="Albedo Maps",
            description="Generated albedo maps.",
            semantic="image",
            value=desc.Node.internalFolder + "<POSE_ID>_albedo.exr",
            uid=[],
            group="",  # do not export on the command line
        ),
    ]
|
PyObjCTest | test_nsprintpanel | from AppKit import *
from PyObjCTools.TestSupport import *
# Python 3 removed the ``unicode`` builtin; alias it to ``str`` so the
# isinstance checks in the tests below work on both major versions.
try:
    unicode
except NameError:
    unicode = str
class TestNSPrintPanel(TestCase):
    """Checks AppKit NSPrintPanel constants and method metadata via PyObjC."""

    def testConstants(self):
        # NSPrintPanelOptions flag values.
        self.assertEqual(NSPrintPanelShowsCopies, 0x01)
        self.assertEqual(NSPrintPanelShowsPageRange, 0x02)
        self.assertEqual(NSPrintPanelShowsPaperSize, 0x04)
        self.assertEqual(NSPrintPanelShowsOrientation, 0x08)
        self.assertEqual(NSPrintPanelShowsScaling, 0x10)
        self.assertEqual(NSPrintPanelShowsPageSetupAccessory, 0x100)
        self.assertEqual(NSPrintPanelShowsPreview, 0x20000)
        self.assertIsInstance(NSPrintPhotoJobStyleHint, unicode)

    @min_os_level("10.5")
    def testConstants10_5(self):
        # Accessory-view summary keys are only present from macOS 10.5 on.
        self.assertIsInstance(NSPrintPanelAccessorySummaryItemNameKey, unicode)
        self.assertIsInstance(NSPrintPanelAccessorySummaryItemDescriptionKey, unicode)

    def testMethods(self):
        # Argument 3 must be a selector with signature
        # void (id, SEL, id, NSInteger, void *); argument 4 is a raw
        # context pointer (^v).
        self.assertArgIsSEL(
            NSPrintPanel.beginSheetWithPrintInfo_modalForWindow_delegate_didEndSelector_contextInfo_,
            3,
            b"v@:@" + objc._C_NSInteger + b"^v",
        )
        self.assertArgHasType(
            NSPrintPanel.beginSheetWithPrintInfo_modalForWindow_delegate_didEndSelector_contextInfo_,
            4,
            b"^v",
        )

    @min_os_level("10.6")
    def testConstants10_6(self):
        self.assertEqual(NSPrintPanelShowsPrintSelection, 1 << 5)
        self.assertIsInstance(NSPrintAllPresetsJobStyleHint, unicode)
        self.assertIsInstance(NSPrintNoPresetsJobStyleHint, unicode)
# Allow running this test file directly with PyObjC's test runner.
if __name__ == "__main__":
    main()
|
dev | gen_versioninfo | #!/usr/bin/env python3
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Generate file_version_info.txt for Pyinstaller use with Windows builds."""
import os.path
import sys
# pylint: disable=import-error,no-member,useless-suppression
from PyInstaller.utils.win32 import versioninfo as vs
# pylint: enable=import-error,no-member,useless-suppression
# Make the repository root importable so `qutebrowser` and `scripts` resolve
# when this script is run from its scripts/dev/ location.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))

import qutebrowser
from scripts import utils
def main():
    """Write misc/file_version_info.txt from qutebrowser's version metadata."""
    utils.change_cwd()
    out_filename = "misc/file_version_info.txt"

    # Windows version resources want 4-tuples; pad with a trailing 0.
    version_tuple = qutebrowser.__version_info__ + (0,)
    version_string = qutebrowser.__version__

    trademark_text = ("qutebrowser is free software under the GNU General "
                      "Public License")

    # https://www.science.co.il/language/Locale-codes.php#definitions
    # https://msdn.microsoft.com/en-us/library/windows/desktop/dd317756.aspx
    en_us = 1033  # 0x0409
    utf_16 = 1200  # 0x04B0

    # 0x0409: MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US)
    # 0x04B0: codepage 1200 (UTF-16LE)
    string_table = vs.StringTable(
        "040904B0",
        [
            vs.StringStruct("Comments", qutebrowser.__doc__),
            vs.StringStruct("CompanyName", "qutebrowser.org"),
            vs.StringStruct("FileDescription", "qutebrowser"),
            vs.StringStruct("FileVersion", version_string),
            vs.StringStruct("InternalName", "qutebrowser"),
            vs.StringStruct("LegalCopyright", qutebrowser.__copyright__),
            vs.StringStruct("LegalTrademarks", trademark_text),
            vs.StringStruct("OriginalFilename", "qutebrowser.exe"),
            vs.StringStruct("ProductName", "qutebrowser"),
            vs.StringStruct("ProductVersion", version_string),
        ],
    )

    file_version_info = vs.VSVersionInfo(
        vs.FixedFileInfo(version_tuple, version_tuple),
        [
            vs.StringFileInfo([string_table]),
            vs.VarFileInfo([vs.VarStruct("Translation", [en_us, utf_16])]),
        ],
    )

    with open(out_filename, "w", encoding="utf-8") as f:
        f.write(str(file_version_info))
# Script entry point.
if __name__ == "__main__":
    main()
|
StartPage | OpenSettings | # ***************************************************************************
# * Copyright (c) 2020 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
# Open FreeCAD's preferences dialog directly on the "Start" page.
import FreeCADGui

FreeCADGui.showPreferences("Start")
|
migrations | 0009_matching_gui | from django.db import migrations
def data_migration(apps, schema_editor):
    """Insert a 'Pre-processing for DIP generation' link into the workflow.

    Creates the StandardTaskConfig/TaskConfig pair that runs the
    ``dipGenerationHelper`` client script, wraps it in a new
    MicroServiceChainLink in the "Prepare DIP" group, and rewires the
    existing chain so the new link runs before link
    61a8de9c-7b25-4f0f-b218-ad4dde261eed.
    """
    # Use historical model states so the migration remains valid after
    # later changes to the model classes.
    StandardTaskConfig = apps.get_model("main", "StandardTaskConfig")
    TaskConfig = apps.get_model("main", "TaskConfig")
    MicroServiceChainLink = apps.get_model("main", "MicroServiceChainLink")
    MicroServiceChainLinkExitCode = apps.get_model(
        "main", "MicroServiceChainLinkExitCode"
    )
    # Command definition: how the helper script is invoked.
    StandardTaskConfig.objects.create(
        pk="cbe200ab-a634-4902-a0e6-8ed1858538d4",
        requires_output_lock=False,
        execute="dipGenerationHelper",
        arguments='--sipUUID "%SIPUUID%" --sipPath "%SIPDirectory%"',
    )
    TaskConfig.objects.create(
        pk="5e0ac12e-6ce7-4d11-bd75-e14167210df4",
        tasktype_id="36b2e239-4a57-4aa5-8ebc-7a29139baca6",
        tasktypepkreference="cbe200ab-a634-4902-a0e6-8ed1858538d4",
        description="Pre-processing for DIP generation",
    )
    # The new chain link; the default (non-zero exit) path goes to link
    # 7d728c39-... with the "Failed" message.
    MicroServiceChainLink.objects.create(
        pk="5749c11f-ed08-4965-8d8e-1473b1016073",
        microservicegroup="Prepare DIP",
        defaultexitmessage="Failed",
        currenttask_id="5e0ac12e-6ce7-4d11-bd75-e14167210df4",
        defaultnextchainlink_id="7d728c39-395f-4892-8193-92f086c0546f",
    )
    # Success path (exit code 0) continues to the downstream link.
    MicroServiceChainLinkExitCode.objects.create(
        id="4447a11c-5c3b-4092-91fa-de613317cc82",
        microservicechainlink_id="5749c11f-ed08-4965-8d8e-1473b1016073",
        exitcode=0,
        nextmicroservicechainlink_id="61a8de9c-7b25-4f0f-b218-ad4dde261eed",
        exitmessage="Completed successfully",
    )
    # Point the predecessor link's exit codes at the newly inserted link.
    MicroServiceChainLinkExitCode.objects.filter(
        microservicechainlink_id="6ee25a55-7c08-4c9a-a114-c200a37146c4"
    ).update(nextmicroservicechainlink_id="5749c11f-ed08-4965-8d8e-1473b1016073")
class Migration(migrations.Migration):
    """Data-only migration adding the DIP-generation pre-processing link."""

    dependencies = [("main", "0008_fpcommandoutput")]
    operations = [migrations.RunPython(data_migration)]
|
OAuth2 | Models | # Copyright (c) 2021 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from cura.OAuth2.KeyringAttribute import KeyringAttribute
class BaseModel:
    """Minimal base model: keyword arguments become instance attributes."""

    def __init__(self, **kwargs: Any) -> None:
        # vars(self) is the instance __dict__, so this stores every keyword
        # argument directly as an attribute.
        vars(self).update(kwargs)
class OAuth2Settings(BaseModel):
    """OAuth OAuth2Settings data template.

    Class attributes are defaults; concrete values are supplied per instance
    through BaseModel's keyword-argument constructor.
    """

    # NOTE(review): attribute meanings below are inferred from names — confirm.
    CALLBACK_PORT = None  # type: Optional[int]
    OAUTH_SERVER_URL = None  # type: Optional[str]
    CLIENT_ID = None  # type: Optional[str]
    CLIENT_SCOPES = None  # type: Optional[str]
    CALLBACK_URL = None  # type: Optional[str]
    AUTH_DATA_PREFERENCE_KEY = ""  # type: str
    # Pages the user is sent to after a successful / failed authentication.
    AUTH_SUCCESS_REDIRECT = "https://ultimaker.com"  # type: str
    AUTH_FAILED_REDIRECT = "https://ultimaker.com"  # type: str
class UserProfile(BaseModel):
    """User profile data template.

    Fields default to None and are populated via BaseModel's kwargs.
    """

    user_id = None  # type: Optional[str]
    username = None  # type: Optional[str]
    profile_image_url = None  # type: Optional[str]
    organization_id = None  # type: Optional[str]
    subscriptions = None  # type: Optional[List[Dict[str, Any]]]
class AuthenticationResponse(BaseModel):
    """Authentication data template.

    Mirrors the token response, with a success flag and error message added.
    """

    success = True  # type: bool
    token_type = None  # type: Optional[str]
    expires_in = None  # type: Optional[str]
    scope = None  # type: Optional[str]
    err_message = None  # type: Optional[str]
    received_at = None  # type: Optional[str]
    # Secrets are routed through KeyringAttribute descriptors.
    access_token = KeyringAttribute()
    refresh_token = KeyringAttribute()

    def __init__(self, **kwargs: Any) -> None:
        # Assign the tokens through their descriptors first; everything else
        # becomes a plain attribute via BaseModel.
        self.access_token = kwargs.pop("access_token", None)
        self.refresh_token = kwargs.pop("refresh_token", None)
        super().__init__(**kwargs)

    def dump(self) -> Dict[str, Union[bool, Optional[str]]]:
        """
        Dumps the dictionary of Authentication attributes. KeyringAttributes are transformed to public attributes
        If the keyring was used, these will have a None value, otherwise they will have the secret value
        :return: Dictionary of Authentication attributes
        """
        state = deepcopy(vars(self))
        # The descriptors store under a leading-underscore slot; publish them
        # under their public names.
        for public_name in ("access_token", "refresh_token"):
            state[public_name] = state.pop("_" + public_name)
        return state
class ResponseStatus(BaseModel):
    """Response status template."""

    # HTTP status code and its reason phrase (e.g. 200 / "OK").
    code = 200  # type: int
    message = ""  # type: str
class ResponseData(BaseModel):
    """Response data template."""

    status = None  # type: ResponseStatus
    # Raw body bytes to send, if any.
    data_stream = None  # type: Optional[bytes]
    # Target URL when the response is a redirect.
    redirect_uri = None  # type: Optional[str]
    content_type = "text/html"  # type: str
# Possible HTTP responses.
# Bug fix: this description used to be a stray string literal *inside* the
# dict, where Python's implicit string concatenation fused it with the
# adjacent "OK" key (producing the key 'Possible HTTP responses.OK', so
# HTTP_STATUS["OK"] raised KeyError). It is now a comment.
HTTP_STATUS = {
    "OK": ResponseStatus(code=200, message="OK"),
    "NOT_FOUND": ResponseStatus(code=404, message="NOT FOUND"),
    "REDIRECT": ResponseStatus(code=302, message="REDIRECT"),
}  # type: Dict[str, ResponseStatus]
|
Draft | InitGui | # ***************************************************************************
# * Copyright (c) 2009 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Initialization of the Draft workbench (graphical interface)."""
import os
import FreeCAD
import FreeCADGui
__title__ = "FreeCAD Draft Workbench - Init file"
__author__ = "Yorik van Havre <yorik@uncreated.net>"
__url__ = "https://www.freecad.org"
class DraftWorkbench(FreeCADGui.Workbench):
    """The Draft Workbench definition.

    __init__ only sets the icon/menu metadata FreeCAD shows in the workbench
    selector; toolbars, menus and preference pages are built lazily in
    Initialize() the first time the workbench is loaded.
    """

    def __init__(self):
        def QT_TRANSLATE_NOOP(context, text):
            # Marker for Qt's lupdate tool; returns the text unchanged.
            return text

        __dirname__ = os.path.join(FreeCAD.getResourceDir(), "Mod", "Draft")
        _tooltip = "The Draft workbench is used for 2D drafting on a grid"
        # FreeCAD reads these class-level attributes for the workbench list.
        self.__class__.Icon = os.path.join(
            __dirname__, "Resources", "icons", "DraftWorkbench.svg"
        )
        self.__class__.MenuText = QT_TRANSLATE_NOOP("draft", "Draft")
        self.__class__.ToolTip = QT_TRANSLATE_NOOP("draft", _tooltip)

    def Initialize(self):
        """When the workbench is first loaded."""

        def QT_TRANSLATE_NOOP(context, text):
            return text

        # Run self-tests: Pivy must be importable and built against the same
        # Coin version as FreeCAD itself; otherwise the workbench is disabled.
        dependencies_OK = False
        try:
            from pivy import coin

            if FreeCADGui.getSoDBVersion() != coin.SoDB.getVersion():
                raise AssertionError(
                    "FreeCAD and Pivy use different versions "
                    "of Coin. "
                    "This will lead to unexpected behaviour."
                )
        except AssertionError:
            FreeCAD.Console.PrintWarning(
                "Error: FreeCAD and Pivy "
                "use different versions of Coin. "
                "This will lead to unexpected "
                "behaviour.\n"
            )
        except ImportError:
            FreeCAD.Console.PrintWarning(
                "Error: Pivy not found, " "Draft Workbench will be disabled.\n"
            )
        except Exception:
            FreeCAD.Console.PrintWarning(
                "Error: Unknown error " "while trying to load Pivy.\n"
            )
        else:
            dependencies_OK = True
        if not dependencies_OK:
            return

        # Import Draft tools, icons
        try:
            import Draft_rc
            import DraftGui
            import DraftTools

            FreeCADGui.addLanguagePath(":/translations")
            FreeCADGui.addIconPath(":/icons")
        except Exception as exc:
            # NOTE(review): the exception object itself is passed here, not
            # str(exc) — confirm PrintError accepts non-string arguments.
            FreeCAD.Console.PrintError(exc)
            FreeCAD.Console.PrintError(
                "Error: Initializing one or more "
                "of the Draft modules failed, "
                "Draft will not work as expected.\n"
            )

        # Set up command lists
        import draftutils.init_tools as it

        self.drawing_commands = it.get_draft_drawing_commands()
        self.annotation_commands = it.get_draft_annotation_commands()
        self.modification_commands = it.get_draft_modification_commands()
        self.utility_commands_menu = it.get_draft_utility_commands_menu()
        self.utility_commands_toolbar = it.get_draft_utility_commands_toolbar()
        self.context_commands = it.get_draft_context_commands()

        # Set up toolbars
        it.init_toolbar(
            self,
            QT_TRANSLATE_NOOP("Workbench", "Draft creation tools"),
            self.drawing_commands,
        )
        it.init_toolbar(
            self,
            QT_TRANSLATE_NOOP("Workbench", "Draft annotation tools"),
            self.annotation_commands,
        )
        it.init_toolbar(
            self,
            QT_TRANSLATE_NOOP("Workbench", "Draft modification tools"),
            self.modification_commands,
        )
        it.init_toolbar(
            self,
            QT_TRANSLATE_NOOP("Workbench", "Draft utility tools"),
            self.utility_commands_toolbar,
        )
        it.init_toolbar(
            self,
            QT_TRANSLATE_NOOP("Workbench", "Draft snap"),
            it.get_draft_snap_commands(),
        )

        # Set up menus
        it.init_menu(
            self, [QT_TRANSLATE_NOOP("Workbench", "&Drafting")], self.drawing_commands
        )
        it.init_menu(
            self,
            [QT_TRANSLATE_NOOP("Workbench", "&Annotation")],
            self.annotation_commands,
        )
        it.init_menu(
            self,
            [QT_TRANSLATE_NOOP("Workbench", "&Modification")],
            self.modification_commands,
        )
        it.init_menu(
            self,
            [QT_TRANSLATE_NOOP("Workbench", "&Utilities")],
            self.utility_commands_menu,
        )

        # Set up preference pages; the loadedPreferences flag guards against
        # registering them twice.
        if hasattr(FreeCADGui, "draftToolBar"):
            if not hasattr(FreeCADGui.draftToolBar, "loadedPreferences"):
                FreeCADGui.addPreferencePage(
                    ":/ui/preferences-draft.ui", QT_TRANSLATE_NOOP("QObject", "Draft")
                )
                FreeCADGui.addPreferencePage(
                    ":/ui/preferences-draftinterface.ui",
                    QT_TRANSLATE_NOOP("QObject", "Draft"),
                )
                FreeCADGui.addPreferencePage(
                    ":/ui/preferences-draftsnap.ui",
                    QT_TRANSLATE_NOOP("QObject", "Draft"),
                )
                FreeCADGui.addPreferencePage(
                    ":/ui/preferences-draftvisual.ui",
                    QT_TRANSLATE_NOOP("QObject", "Draft"),
                )
                FreeCADGui.addPreferencePage(
                    ":/ui/preferences-drafttexts.ui",
                    QT_TRANSLATE_NOOP("QObject", "Draft"),
                )
                FreeCADGui.draftToolBar.loadedPreferences = True
        # Ensure Deactivated() cleanup also runs when the main window closes.
        FreeCADGui.getMainWindow().mainWindowClosed.connect(self.Deactivated)
        FreeCAD.Console.PrintLog("Loading Draft workbench, done.\n")

    def Activated(self):
        """When entering the workbench."""
        if hasattr(FreeCADGui, "draftToolBar"):
            FreeCADGui.draftToolBar.Activated()
        if hasattr(FreeCADGui, "Snapper"):
            FreeCADGui.Snapper.show()
        import draftutils.init_draft_statusbar as dsb

        dsb.show_draft_statusbar()
        FreeCAD.Console.PrintLog("Draft workbench activated.\n")

    def Deactivated(self):
        """When quitting the workbench."""
        if hasattr(FreeCADGui, "draftToolBar"):
            FreeCADGui.draftToolBar.Deactivated()
        if hasattr(FreeCADGui, "Snapper"):
            FreeCADGui.Snapper.hide()
        import draftutils.init_draft_statusbar as dsb

        dsb.hide_draft_statusbar()
        FreeCAD.Console.PrintLog("Draft workbench deactivated.\n")

    def ContextMenu(self, recipient):
        """Define an optional custom context menu."""
        self.appendContextMenu("Utilities", self.context_commands)

    def GetClassName(self):
        """Type of workbench."""
        return "Gui::PythonWorkbench"
# Register the workbench with FreeCAD.
FreeCADGui.addWorkbench(DraftWorkbench)

# Preference pages for importing and exporting various file formats
# are independent of the loading of the workbench and can be loaded at startup
import Draft_rc
from PySide.QtCore import QT_TRANSLATE_NOOP

FreeCADGui.addPreferencePage(
    ":/ui/preferences-dxf.ui", QT_TRANSLATE_NOOP("QObject", "Import-Export")
)
FreeCADGui.addPreferencePage(
    ":/ui/preferences-dwg.ui", QT_TRANSLATE_NOOP("QObject", "Import-Export")
)
FreeCADGui.addPreferencePage(
    ":/ui/preferences-svg.ui", QT_TRANSLATE_NOOP("QObject", "Import-Export")
)
FreeCADGui.addPreferencePage(
    ":/ui/preferences-oca.ui", QT_TRANSLATE_NOOP("QObject", "Import-Export")
)

# Register the GUI test module with FreeCAD's test framework.
FreeCAD.__unit_test__ += ["TestDraftGui"]
|
blocks | qa_stream_mux | #!/usr/bin/env python
#
# Copyright 2004,2005,2007,2010,2012,2013,2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import os
import pmt
from gnuradio import blocks, gr, gr_unittest
class test_stream_mux(gr_unittest.TestCase):
    """QA for blocks.stream_mux, which interleaves fixed-size chunks taken
    round-robin from its input streams."""

    def setUp(self):
        os.environ["GR_CONF_CONTROLPORT_ON"] = "False"
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def help_stream_2ff(self, N, stream_sizes):
        """Mux N ones against N twos using the given chunk sizes."""
        src_ones = blocks.vector_source_f(N * [1], False)
        src_twos = blocks.vector_source_f(N * [2], False)
        mux = blocks.stream_mux(gr.sizeof_float, stream_sizes)
        sink = blocks.vector_sink_f()
        self.tb.connect(src_ones, (mux, 0))
        self.tb.connect(src_twos, (mux, 1))
        self.tb.connect(mux, sink)
        self.tb.run()
        return sink.data()

    def help_stream_ramp_2ff(self, N, stream_sizes):
        """Mux an ascending ramp 0..N-1 against the descending ramp."""
        ramp_up = list(range(N))
        ramp_down = ramp_up[::-1]
        src_up = blocks.vector_source_f(ramp_up, False)
        src_down = blocks.vector_source_f(ramp_down, False)
        mux = blocks.stream_mux(gr.sizeof_float, stream_sizes)
        sink = blocks.vector_sink_f()
        self.tb.connect(src_up, (mux, 0))
        self.tb.connect(src_down, (mux, 1))
        self.tb.connect(mux, sink)
        self.tb.run()
        return sink.data()

    def help_stream_tag_propagation(self, N, stream_sizes):
        """Mux three tagged constant streams; return (data, tags)."""
        # stream_mux scheme (3,2,4)
        mux = blocks.stream_mux(gr.sizeof_float, stream_sizes)
        sink = blocks.vector_sink_f()
        for port, size in enumerate(stream_sizes):
            # Stream k carries the constant k+1 and tags named "src<k+1>".
            source = blocks.vector_source_f(size * N * [port + 1])
            tagger = blocks.stream_to_tagged_stream(
                gr.sizeof_float, 1, size, "src%d" % (port + 1)
            )
            self.tb.connect(source, tagger)
            self.tb.connect(tagger, (mux, port))
        self.tb.connect(mux, sink)
        self.tb.run()
        return (sink.data(), sink.tags())

    def test_stream_2NN_ff(self):
        # Equal chunk sizes: strict 10/10 alternation, four rounds.
        result_data = self.help_stream_2ff(40, [10, 10])
        exp_data = 4 * (10 * [1.0] + 10 * [2.0])
        self.assertEqual(exp_data, result_data)

    def test_stream_ramp_2NN_ff(self):
        N = 40
        result_data = self.help_stream_ramp_2ff(N, [10, 10])
        # Alternating 10-sample chunks of the two ramps.
        up = [float(x) for x in range(N)]
        down = up[::-1]
        exp_data = []
        for start in range(0, N, 10):
            exp_data += up[start:start + 10] + down[start:start + 10]
        self.assertEqual(exp_data, result_data)

    def test_stream_2NM_ff(self):
        # Run the helper twice, as the original QA does; check the second run.
        self.help_stream_2ff(40, [7, 9])
        result_data = self.help_stream_2ff(40, [7, 9])
        # Four full 7/9 rounds, then a final 7 ones and the 4 leftover twos.
        exp_data = 4 * (7 * [1.0] + 9 * [2.0]) + 7 * [1.0] + 4 * [2.0]
        self.assertEqual(exp_data, result_data)

    def test_stream_2MN_ff(self):
        self.help_stream_2ff(37, [7, 9])
        result_data = self.help_stream_2ff(37, [7, 9])
        # Four full 7/9 rounds, a fifth 7 ones, then the single leftover two.
        exp_data = 4 * (7 * [1.0] + 9 * [2.0]) + 7 * [1.0] + [2.0]
        self.assertEqual(exp_data, result_data)

    def test_stream_2N0_ff(self):
        # Second chunk size of zero: output is only the first stream.
        self.help_stream_2ff(30, [7, 0])
        result_data = self.help_stream_2ff(30, [7, 0])
        self.assertEqual(30 * [1.0], result_data)

    def test_stream_20N_ff(self):
        # First chunk size of zero: output is only the second stream.
        self.help_stream_2ff(30, [0, 9])
        result_data = self.help_stream_2ff(30, [0, 9])
        self.assertEqual(30 * [2.0], result_data)

    def test_largeN_ff(self):
        # One chunk larger than a typical work() buffer still muxes intact.
        stream_sizes = [3, 8191]
        data_a = stream_sizes[0] * [1]
        data_b = stream_sizes[1] * [2]
        src_a = blocks.vector_source_f(data_a, repeat=False)
        src_b = blocks.vector_source_f(data_b, repeat=False)
        mux = blocks.stream_mux(gr.sizeof_float, stream_sizes)
        sink = blocks.vector_sink_f()
        self.tb.connect(src_a, (mux, 0))
        self.tb.connect(src_b, (mux, 1))
        self.tb.connect(mux, sink)
        self.tb.run()
        self.assertEqual(data_a + data_b, sink.data())

    def test_tag_propagation(self):
        N = 10  # Block length
        stream_sizes = [1, 2, 3]
        one_round = (stream_sizes[0] * [1] +
                     stream_sizes[1] * [2] +
                     stream_sizes[2] * [3])
        expected_result = N * one_round
        # check the data
        result, tags = self.help_stream_tag_propagation(N, stream_sizes)
        self.assertFloatTuplesAlmostEqual(expected_result, result, places=6)
        # check the tags: each stream's tag recurs once per round, at the
        # offset where that stream's chunk starts within the round.
        period = sum(stream_sizes)
        chunk_start = 0
        for stream, size in enumerate(stream_sizes, start=1):
            key = pmt.intern("src%d" % stream)
            stream_tags = [tag for tag in tags if pmt.eq(tag.key, key)]
            expected_offsets = [chunk_start + period * i for i in range(N)]
            for i, offset in enumerate(expected_offsets):
                self.assertEqual(offset, stream_tags[i].offset)
            chunk_start += size
# Run the suite via GNU Radio's unittest wrapper when executed directly.
if __name__ == "__main__":
    gr_unittest.run(test_stream_mux)
|
ops | summarize_test | """
Unit tests for summarization.
"""
__copyright__ = "Copyright (C) 2014-2016 Martin Blais"
__license__ = "GNU GPLv2"
import collections
import datetime
import re
import unittest
from datetime import date
from beancount import loader
from beancount.core import convert, data, flags, interpolate, inventory
from beancount.ops import summarize
from beancount.parser import cmptest, options, printer
from beancount.utils import misc_utils
class TestOpenClose(cmptest.TestCase):
    """Tests for summarize.open()/close()/clear() on a small ledger.

    The ledger built in setUp() has activity before, inside and after the
    2012-06-01 .. 2012-09-01 window marked by the BEGIN/END comments, so the
    three operations each have something to summarize, truncate or transfer.
    """

    @loader.load_doc()
    def setUp(self, entries, errors, options_map):
        # NOTE: @loader.load_doc() parses this docstring as beancount input;
        # its content is part of the test fixture — do not edit it casually.
        """
        option "account_previous_earnings" "Earnings:Previous"
        option "account_previous_balances" "Opening-Balances"
        option "account_previous_conversions" "Conversions:Previous"
        option "account_current_earnings" "Earnings:Current"
        option "account_current_conversions" "Conversions:Current"
        option "conversion_currency" "NOTHING"

        2012-01-01 open Income:Salary
        2012-01-01 open Expenses:Taxes
        2012-01-01 open Assets:US:Checking
        2012-01-01 open Assets:CA:Checking

        2012-03-01 * "Some income and expense to be summarized"
          Income:Salary        10000 USD
          Expenses:Taxes        3600 USD
          Assets:US:Checking  -13600 USD

        2012-03-02 * "Some conversion to be summarized"
          Assets:US:Checking  -5000 USD @ 1.2 CAD
          Assets:CA:Checking   6000 CAD

        ;; 2012-06-01 BEGIN --------------------------------

        2012-08-01 * "Some income and expense to show"
          Income:Salary        11000 USD
          Expenses:Taxes        3200 USD
          Assets:US:Checking  -14200 USD

        2012-08-02 * "Some other conversion to be summarized"
          Assets:US:Checking  -3000 USD @ 1.25 CAD
          Assets:CA:Checking   3750 CAD

        ;; 2012-09-01 END --------------------------------

        2012-11-01 * "Some income and expense to be truncated"
          Income:Salary        10000 USD
          Expenses:Taxes        3600 USD
          Assets:US:Checking  -13600 USD
        """
        self.assertFalse(errors)
        self.entries = entries
        self.options_map = options_map
        self.account_types = options.get_account_types(options_map)

    # Indirection points, overridden by TestOpenCloseWithOptions to exercise
    # the *_opt() variants with the same test bodies.
    def do_open(self, entries, date, *args):
        return summarize.open(entries, date, *args)

    def do_close(self, entries, date, *args):
        return summarize.close(entries, date, *args)

    def do_clear(self, entries, date, *args):
        return summarize.clear(entries, date, *args)

    def test_open(self):
        """open() replaces everything before the date with opening balances."""
        date = datetime.date(2012, 6, 1)
        opened_entries, index = self.do_open(
            self.entries,
            date,
            self.account_types,
            "NOTHING",
            "Equity:Earnings:Previous",
            "Equity:Opening-Balances",
            "Equity:Conversions:Previous",
        )
        self.assertEqualEntries(
            """
            2012-01-01 open Income:Salary
            2012-01-01 open Expenses:Taxes
            2012-01-01 open Assets:US:Checking
            2012-01-01 open Assets:CA:Checking

            2012-05-31 'S "Opening balance for 'Assets:CA:Checking' (Summarization)"
              Assets:CA:Checking        6,000 CAD
              Equity:Opening-Balances  -6,000 CAD

            2012-05-31 'S "Opening balance for 'Assets:US:Checking' (Summarization)"
              Assets:US:Checking       -18,600 USD
              Equity:Opening-Balances   18,600 USD

            2012-05-31 'S "Opening balance for 'Equity:Earnings:Previous' (Summarization)"
              Equity:Earnings:Previous   13,600 USD
              Equity:Opening-Balances   -13,600 USD

            2012-05-31 'S "Opening balance for 'Equity:Conversions:Previous' (Summarization)"
              Equity:Conversions:Previous   5,000 USD
              Equity:Opening-Balances      -5,000 USD
              Equity:Conversions:Previous  -6,000 CAD
              Equity:Opening-Balances       6,000 CAD

            ;; 2012-06-01 BEGIN --------------------------------

            2012-08-01 * "Some income and expense to show"
              Income:Salary        11,000 USD
              Expenses:Taxes        3,200 USD
              Assets:US:Checking  -14,200 USD

            2012-08-02 * "Some other conversion to be summarized"
              Assets:US:Checking  -3,000 USD @ 1.25 CAD ; -3,750 CAD
              Assets:CA:Checking   3,750 CAD ; 3,750 CAD

            2012-11-01 * "Some income and expense to be truncated"
              Income:Salary        10,000 USD
              Expenses:Taxes        3,600 USD
              Assets:US:Checking  -13,600 USD
            """,
            opened_entries,
        )
        # Check the index is correctly beginning after the list of summarizing entries.
        self.assertEqual(8, index)
        # Check that our original example list of entries does not balance.
        input_balance = interpolate.compute_entries_balance(self.entries)
        self.assertFalse(input_balance.is_empty())
        # Check that the summarized entries add up to precisely zero.
        summarized_entries = opened_entries[:index]
        balances = interpolate.compute_entries_balance(summarized_entries)
        self.assertTrue(balances.is_empty())
        # Check further conversions aren't accounted for (the close operation
        # takes care of this).
        opened_balance = interpolate.compute_entries_balance(opened_entries)
        self.assertFalse(opened_balance.is_empty())

    def test_close(self):
        """close() appends a conversions entry that zeroes residual currencies."""
        date = datetime.date(2012, 9, 1)
        closed_entries, index = self.do_close(
            self.entries, date, "NOTHING", "Equity:Conversions:Current"
        )
        self.assertEqualEntries(
            """
            2012-01-01 open Income:Salary
            2012-01-01 open Expenses:Taxes
            2012-01-01 open Assets:US:Checking
            2012-01-01 open Assets:CA:Checking

            2012-03-01 * "Some income and expense to be summarized"
              Income:Salary        10000 USD
              Expenses:Taxes        3600 USD
              Assets:US:Checking  -13600 USD

            2012-03-02 * "Some conversion to be summarized"
              Assets:US:Checking  -5000 USD @ 1.2 CAD
              Assets:CA:Checking   6000 CAD

            ;; 2012-06-01 BEGIN --------------------------------

            2012-08-01 * "Some income and expense to show"
              Income:Salary        11000 USD
              Expenses:Taxes        3200 USD
              Assets:US:Checking  -14200 USD

            2012-08-02 * "Some other conversion to be summarized"
              Assets:US:Checking  -3000 USD @ 1.25 CAD
              Assets:CA:Checking   3750 CAD

            ;; 2012-09-01 END --------------------------------

            2012-08-31 'C "Conversion for (-8000 USD, 9750 CAD)"
              Equity:Conversions:Current   8000 USD @ 0 NOTHING
              Equity:Conversions:Current  -9750 CAD @ 0 NOTHING
            """,
            closed_entries,
        )
        # Check the index is correctly beginning after the list of summarizing entries.
        self.assertEqual(8, index)
        # Check that our original example list of entries does not balance.
        input_balance = interpolate.compute_entries_balance(self.entries)
        self.assertFalse(input_balance.is_empty())
        # Check that the truncated entries does not balance.
        balances = interpolate.compute_entries_balance(closed_entries[:index])
        self.assertFalse(balances.is_empty())
        # Check that the closed entries add up to precisely zero.
        balances = interpolate.compute_entries_balance(closed_entries)
        self.assertTrue(balances.is_empty())

    def test_clear(self):
        """clear() transfers income/expense balances to the earnings account."""
        date = datetime.date(2013, 1, 1)
        clear_entries, index = self.do_clear(
            self.entries, date, self.account_types, "Equity:Earnings:Current"
        )
        self.assertEqualEntries(
            """
            2012-01-01 open Income:Salary
            2012-01-01 open Expenses:Taxes
            2012-01-01 open Assets:US:Checking
            2012-01-01 open Assets:CA:Checking

            2012-03-01 * "Some income and expense to be summarized"
              Income:Salary        10000 USD
              Expenses:Taxes        3600 USD
              Assets:US:Checking  -13600 USD

            2012-03-02 * "Some conversion to be summarized"
              Assets:US:Checking  -5000 USD @ 1.2 CAD
              Assets:CA:Checking   6000 CAD

            ;; 2012-06-01 BEGIN --------------------------------

            2012-08-01 * "Some income and expense to show"
              Income:Salary        11000 USD
              Expenses:Taxes        3200 USD
              Assets:US:Checking  -14200 USD

            2012-08-02 * "Some other conversion to be summarized"
              Assets:US:Checking  -3000 USD @ 1.25 CAD
              Assets:CA:Checking   3750 CAD

            ;; 2012-09-01 END --------------------------------

            2012-11-01 * "Some income and expense to be truncated"
              Income:Salary        10000 USD
              Expenses:Taxes        3600 USD
              Assets:US:Checking  -13600 USD

            2012-12-31 'T "Transfer balance for 'Expenses:Taxes' (Transfer balance)"
              Expenses:Taxes           -10400 USD
              Equity:Earnings:Current   10400 USD

            2012-12-31 'T "Transfer balance for 'Income:Salary' (Transfer balance)"
              Income:Salary            -31000 USD
              Equity:Earnings:Current   31000 USD
            """,
            clear_entries,
        )
        # Check the index is correctly beginning after the list of summarizing entries.
        self.assertEqual(9, index)
        # Check that the cleared entries do not necessarily up to precisely zero
        # without closing.
        balances = interpolate.compute_entries_balance(clear_entries)
        self.assertFalse(balances.is_empty())

    def test_open_close_clear(self):
        # Test out the full use case of a balance sheet for a particular year,
        # opening, closing and clearing.
        begin_date = datetime.date(2012, 6, 1)
        end_date = datetime.date(2012, 9, 1)
        clear_date = datetime.date(2013, 1, 1)
        opened_entries, index = self.do_open(
            self.entries,
            begin_date,
            self.account_types,
            "NOTHING",
            "Equity:Earnings:Previous",
            "Equity:Opening-Balances",
            "Equity:Conversions:Previous",
        )
        closed_entries, index = self.do_close(
            opened_entries, end_date, "NOTHING", "Equity:Conversions:Current"
        )
        clear_entries, index = self.do_clear(
            closed_entries, clear_date, self.account_types, "Equity:Earnings:Current"
        )
        self.assertEqualEntries(
            """
            2012-01-01 open Income:Salary
            2012-01-01 open Expenses:Taxes
            2012-01-01 open Assets:US:Checking
            2012-01-01 open Assets:CA:Checking

            2012-05-31 'S "Opening balance for 'Assets:CA:Checking' (Summarization)"
              Assets:CA:Checking        6,000 CAD
              Equity:Opening-Balances  -6,000 CAD

            2012-05-31 'S "Opening balance for 'Assets:US:Checking' (Summarization)"
              Assets:US:Checking       -18,600 USD
              Equity:Opening-Balances   18,600 USD

            2012-05-31 'S "Opening balance for 'Equity:Earnings:Previous' (Summarization)"
              Equity:Earnings:Previous   13,600 USD
              Equity:Opening-Balances   -13,600 USD

            2012-05-31 'S "Opening balance for 'Equity:Conversions:Previous' (Summarization)"
              Equity:Conversions:Previous   5,000 USD
              Equity:Opening-Balances      -5,000 USD
              Equity:Conversions:Previous  -6,000 CAD
              Equity:Opening-Balances       6,000 CAD

            ;; 2012-06-01 BEGIN --------------------------------

            2012-08-01 * "Some income and expense to show"
              Income:Salary        11,000 USD
              Expenses:Taxes        3,200 USD
              Assets:US:Checking  -14,200 USD

            2012-08-02 * "Some other conversion to be summarized"
              Assets:US:Checking  -3,000 USD @ 1.25 CAD ; -3,750 CAD
              Assets:CA:Checking   3,750 CAD ; 3,750 CAD

            ;; 2012-09-01 END --------------------------------

            2012-08-31 'C "Conversion for (-3000 USD, 3750 CAD)"
              Equity:Conversions:Current   3,000 USD @ 0 NOTHING
              Equity:Conversions:Current  -3,750 CAD @ 0 NOTHING

            2012-12-31 'T "Transfer balance for 'Income:Salary' (Transfer balance)"
              Income:Salary            -11,000 USD
              Equity:Earnings:Current   11,000 USD

            2012-12-31 'T "Transfer balance for 'Expenses:Taxes' (Transfer balance)"
              Expenses:Taxes           -3,200 USD
              Equity:Earnings:Current   3,200 USD
            """,
            clear_entries,
        )
class TestOpenCloseWithOptions(TestOpenClose):
    "Same test as the previous, but invoking all with options."

    # Each override ignores the extra positional arguments supplied by the
    # parent class's tests and derives them from self.options_map instead,
    # exercising the *_opt variants of the summarization API.

    def do_open(self, entries, date, *args):
        return summarize.open_opt(entries, date, self.options_map)

    def do_close(self, entries, date, *args):
        return summarize.close_opt(entries, date, self.options_map)

    def do_clear(self, entries, date, *args):
        return summarize.clear_opt(entries, date, self.options_map)
class TestClamp(cmptest.TestCase):
    # End-to-end check of summarize.clamp(): entries before the period are
    # summarized into opening balances, entries after it are truncated.
    @loader.load_doc()
    def test_clamp(self, entries, errors, options_map):
        """
        2012-01-01 open Income:Salary
        2012-01-01 open Expenses:Taxes
        2012-01-01 open Assets:US:Checking
        2012-01-01 open Assets:CA:Checking
        2012-03-01 * "Some income and expense to be summarized"
          Income:Salary 10000.00 USD
          Expenses:Taxes 3600.00 USD
          Assets:US:Checking -13600.00 USD
        2012-03-02 * "Some conversion to be summarized"
          Assets:US:Checking -5000.00 USD @ 1.2 CAD
          Assets:CA:Checking 6000.00 CAD
        ;; 2012-06-01 BEGIN --------------------------------
        2012-08-01 * "Some income and expense to show"
          Income:Salary 11000.00 USD
          Expenses:Taxes 3200.00 USD
          Assets:US:Checking -14200.00 USD
        2012-08-02 * "Some other conversion to be summarized"
          Assets:US:Checking -3000.00 USD @ 1.25 CAD
          Assets:CA:Checking 3750.00 CAD
        ;; 2012-09-01 END --------------------------------
        2012-11-01 * "Some income and expense to be truncated"
          Income:Salary 10000.00 USD
          Expenses:Taxes 3600.00 USD
          Assets:US:Checking -13600.00 USD
        """
        self.assertFalse(errors)
        # Clamp to the period [2012-06-01, 2012-09-01).
        begin_date = datetime.date(2012, 6, 1)
        end_date = datetime.date(2012, 9, 1)
        account_types = options.get_account_types(options_map)
        clamped_entries, index = summarize.clamp(
            entries,
            begin_date,
            end_date,
            account_types,
            "NOTHING",
            "Equity:Earnings",
            "Equity:Opening-Balances",
            "Equity:Conversions",
        )
        self.assertEqualEntries(
            """
            2012-01-01 open Income:Salary
            2012-01-01 open Expenses:Taxes
            2012-01-01 open Assets:US:Checking
            2012-01-01 open Assets:CA:Checking
            2012-05-31 'S "Opening balance for 'Assets:CA:Checking' (Summarization)"
              Assets:CA:Checking 6000.00 CAD
              Equity:Opening-Balances -6000.00 CAD
            2012-05-31 'S "Opening balance for 'Assets:US:Checking' (Summarization)"
              Assets:US:Checking -18600.00 USD
              Equity:Opening-Balances 18600.00 USD
            2012-05-31 'S "Opening balance for 'Equity:Earnings' (Summarization)"
              Equity:Earnings 13600.00 USD
              Equity:Opening-Balances -13600.00 USD
            ;; 2012-06-01 BEGIN --------------------------------
            2012-08-01 * "Some income and expense to show"
              Income:Salary 11000.00 USD
              Expenses:Taxes 3200.00 USD
              Assets:US:Checking -14200.00 USD
            2012-08-02 * "Some other conversion to be summarized"
              Assets:US:Checking -3000.00 USD @ 1.25 CAD
              Assets:CA:Checking 3750.00 CAD
            ;; 2012-09-01 END --------------------------------
            2012-08-31 'C "Conversion for (-3000.00 USD, 3750.00 CAD)"
              Equity:Conversions 3000.00 USD @ 0 NOTHING
              Equity:Conversions -3750.00 CAD @ 0 NOTHING
            """,
            clamped_entries,
        )
        self.assertEqual(7, index)
        # The input ledger does not balance to empty, but after clamping the
        # resulting ledger must sum to an empty balance.
        input_balance = interpolate.compute_entries_balance(entries)
        self.assertFalse(input_balance.is_empty())
        clamped_balance = interpolate.compute_entries_balance(clamped_entries)
        self.assertTrue(clamped_balance.is_empty())
class TestCap(cmptest.TestCase):
    # Checks summarize.cap(): transfer income/expense balances to equity and
    # insert conversion entries at the end of the ledger.
    @loader.load_doc()
    def test_cap(self, entries, errors, options_map):
        """
        plugin "beancount.plugins.auto_accounts"
        2014-03-01 * "Some income and expense"
          Income:Salary 10000.00 USD
          Expenses:Taxes 3500.00 USD
          Assets:US:Checking
        2014-02-01 * "Some conversion"
          Assets:US:Checking -5000.00 USD @ 1.2 CAD
          Assets:CA:Checking 6000.00 CAD
        """
        self.assertFalse(errors)
        account_types = options.get_account_types(options_map)
        capd_entries = summarize.cap(
            entries, account_types, "NOTHING", "Equity:Earnings", "Equity:Conversions"
        )
        # All original entries must be preserved verbatim in the output.
        self.assertIncludesEntries(entries, capd_entries)
        self.assertIncludesEntries(
            """
            2014-03-01 'T "Transfer balance for 'Expenses:Taxes' (Transfer balance)"
              Expenses:Taxes -3500.00 USD
              Equity:Earnings 3500.00 USD
            2014-03-01 'T "Transfer balance for 'Income:Salary' (Transfer balance)"
              Income:Salary -10000.00 USD
              Equity:Earnings 10000.00 USD
            2014-03-01 'C "Conversion for (-5000.00 USD, 6000.00 CAD)"
              Equity:Conversions 5000.00 USD @ 0 NOTHING
              Equity:Conversions -6000.00 CAD @ 0 NOTHING
            """,
            capd_entries,
        )
        self.assertEqual(9, len(capd_entries))
INPUT_PLUGINS = """
option "plugin_processing_mode" "raw"
"""
INPUT_OPEN = """
;; These should be preserved after summarization.
2010-01-01 open Assets:US:Chase:Checking
2010-01-01 open Assets:US:Investing:HOOL
2010-01-01 open Assets:CA:BMO:Checking
2010-01-01 open Liabilities:US:Chase:CreditCard
2010-01-01 open Income:US:Employer:Salary
2010-01-01 open Expenses:Taxes
2010-01-01 open Expenses:Restaurant
2010-01-01 open Expenses:Flights
2010-01-01 open Expenses:Internet
"""
INPUT_PRICES_REDUNDANT = """
;; These prices are redundant; only the last price will be preserved after
;; summarization.
2010-02-01 price USD 1.10 CAD
2010-03-01 price USD 1.11 CAD
2010-04-01 price USD 1.12 CAD
2010-05-01 price USD 1.13 CAD
2010-08-01 price USD 1.14 CAD
2010-10-01 price USD 1.15 CAD
"""
INPUT_PRICES_LAST = """
;; This is the last price before the period, will be preserved.
2010-12-01 price USD 1.16 CAD
"""
INPUT_BEFORE = """
;; An account that gets capped before the period, should not appear in the
;; output.
2010-01-01 open Assets:US:Temporary
2010-11-22 close Assets:US:Temporary
2010-11-16 *
Income:US:Employer:Salary -5000.00 USD
Assets:US:Chase:Checking 3000.00 USD
Expenses:Taxes 2000.00 USD
2010-11-20 * "First hit on credit card account"
Liabilities:US:Chase:CreditCard -67.20 USD
Expenses:Restaurant 67.20 USD
2010-11-26 * "Second hit on credit card account (same account)"
Liabilities:US:Chase:CreditCard -345.23 USD
Expenses:Flights 345.23 USD
2010-11-30 *
Assets:US:Chase:Checking -80.02 USD
Expenses:Internet 80.02 USD
2010-12-05 * "Unit held at cost"
Assets:US:Investing:HOOL 5 HOOL {510.00 USD}
Assets:US:Chase:Checking -2550 USD
2010-12-05 * "Conversion"
Assets:US:Chase:Checking -910.00 USD
Assets:CA:BMO:Checking 1000.00 CAD @ 0.91 USD
2010-12-16 *
Income:US:Employer:Salary -5000.00 USD
Assets:US:Chase:Checking 3000.00 USD
Expenses:Taxes 2000.00 USD
"""
INPUT_PERIOD = """
2011-02-01 price USD 1.17 CAD
2011-04-01 price USD 1.18 CAD
2011-01-16 *
Income:US:Employer:Salary -5000.00 USD
Assets:US:Chase:Checking 3000.00 USD
Expenses:Taxes 2000.00 USD
2011-01-20 * "Dinner at Cull & Pistol"
Liabilities:US:Chase:CreditCard -89.23 USD
Expenses:Restaurant 89.23 USD
2011-02-01 open Assets:Cash
2011-02-02 * "Cafe Mogador"
Expenses:Restaurant 37.92 USD
Assets:Cash -37.92 USD
2011-02-16 *
Income:US:Employer:Salary -5000.00 USD
Assets:US:Chase:Checking 3000.00 USD
Expenses:Taxes 2000.00 USD
"""
INPUT_PERIOD_REMOVED = """
2011-03-15 balance Assets:US:Chase:Checking 8459.98 USD
"""
# Join all the inputs.
INPUT = (
INPUT_PLUGINS
+ INPUT_OPEN
+ INPUT_PRICES_REDUNDANT
+ INPUT_PRICES_LAST
+ INPUT_BEFORE
+ INPUT_PERIOD
+ INPUT_PERIOD_REMOVED
)
class TestTransferBalances(cmptest.TestCase):
    # Destination account used by all transfer_balances() calls below.
    TRANSFER_ACCOUNT = "Equity:Transfer"

    def setUp(self):
        self.entries, errors, __ = loader.load_string(INPUT)
        printer.print_errors(errors)
        self.assertFalse(errors)

    def test_transfer_balances__empty(self):
        # With no input entries there is nothing to transfer.
        xfer_entries = summarize.transfer_balances(
            [],
            datetime.date(2011, 1, 1),
            lambda account: account.startswith("Assets:US:Chase"),
            self.TRANSFER_ACCOUNT,
        )
        self.assertEqual([], xfer_entries)

    def test_transfer_balances__middle_assets(self):
        # Transfer a simple-currency asset balance at a mid-ledger date.
        date = datetime.date(2011, 1, 1)
        xfer_entries = summarize.transfer_balances(
            self.entries,
            date,
            lambda account: account.startswith("Assets:US:Chase"),
            self.TRANSFER_ACCOUNT,
        )
        self.assertIncludesEntries(
            (
                INPUT_OPEN
                + INPUT_PRICES_REDUNDANT
                + INPUT_PRICES_LAST
                + INPUT_BEFORE
                + INPUT_PERIOD
            ),
            xfer_entries,
        )
        self.assertIncludesEntries(
            """
            2010-12-31 'T "Transfer balance for 'Assets:US:Chase:Checking' (Transfer balance)"
              Assets:US:Chase:Checking -2459.98 USD
              Equity:Transfer 2459.98 USD
            """,
            xfer_entries,
        )
        self.assertEqual(len(self.entries) + 1 - 1, len(xfer_entries))

    def test_transfer_balances__middle_at_cost(self):
        # Transfer a position held at cost; the lot spec must be carried over.
        date = datetime.date(2011, 1, 1)
        xfer_entries = summarize.transfer_balances(
            self.entries,
            date,
            lambda account: account.startswith("Assets:US:Investing"),
            self.TRANSFER_ACCOUNT,
        )
        self.assertIncludesEntries(self.entries, xfer_entries)
        self.assertIncludesEntries(
            """
            2010-12-31 'T "Transfer balance for 'Assets:US:Investing:HOOL' (Transfer balance)"
              Assets:US:Investing:HOOL -5 HOOL {510.00 USD, 2010-12-05} ; -2550.00 USD
              Equity:Transfer 2550.00 USD ; 2550.00 USD
            """,
            xfer_entries,
        )
        self.assertEqual(len(self.entries) + 1, len(xfer_entries))

    def test_transfer_balances__end_assets_implicit(self):
        # Transfer near the end of the ledger, date given explicitly.
        xfer_entries = summarize.transfer_balances(
            self.entries,
            datetime.date(2011, 3, 1),
            lambda account: account.startswith("Assets:US:Chase"),
            self.TRANSFER_ACCOUNT,
        )
        self.assertIncludesEntries(
            (
                INPUT_OPEN
                + INPUT_PRICES_REDUNDANT
                + INPUT_PRICES_LAST
                + INPUT_BEFORE
                + INPUT_PERIOD
            ),
            xfer_entries,
        )
        self.assertIncludesEntries(
            """
            2011-02-28 'T "Transfer balance for 'Assets:US:Chase:Checking' (Transfer balance)"
              Assets:US:Chase:Checking -8459.98 USD
              Equity:Transfer 8459.98 USD
            """,
            xfer_entries,
        )
        self.assertEqual(len(self.entries) + 1 - 1, len(xfer_entries))

    def test_transfer_balances__end_assets_explicit(self):
        # Passing None as the date transfers at the very end of the ledger.
        xfer_entries = summarize.transfer_balances(
            self.entries,
            None,
            lambda account: account.startswith("Assets:US:Chase"),
            self.TRANSFER_ACCOUNT,
        )
        self.assertIncludesEntries(self.entries, xfer_entries)
        self.assertIncludesEntries(
            """
            2011-04-01 'T "Transfer balance for 'Assets:US:Chase:Checking' (Transfer balance)"
              Assets:US:Chase:Checking -8459.98 USD
              Equity:Transfer 8459.98 USD
            """,
            xfer_entries,
        )
        self.assertEqual(len(self.entries) + 1, len(xfer_entries))

    def test_transfer_balances__middle_income(self):
        # Transfer all income/expense accounts: one transfer per account.
        date = datetime.date(2011, 1, 1)
        xfer_entries = summarize.transfer_balances(
            self.entries,
            date,
            lambda account: re.match("(Income|Expenses):", account),
            self.TRANSFER_ACCOUNT,
        )
        self.assertIncludesEntries(self.entries, xfer_entries)
        self.assertIncludesEntries(
            """
            2010-12-31 'T "Transfer balance for 'Expenses:Flights' (Transfer balance)"
              Expenses:Flights -345.23 USD
              Equity:Transfer 345.23 USD
            2010-12-31 'T "Transfer balance for 'Expenses:Internet' (Transfer balance)"
              Expenses:Internet -80.02 USD
              Equity:Transfer 80.02 USD
            2010-12-31 'T "Transfer balance for 'Expenses:Restaurant' (Transfer balance)"
              Expenses:Restaurant -67.20 USD
              Equity:Transfer 67.20 USD
            2010-12-31 'T "Transfer balance for 'Expenses:Taxes' (Transfer balance)"
              Expenses:Taxes -4000.00 USD
              Equity:Transfer 4000.00 USD
            2010-12-31 'T "Transfer balance for 'Income:US:Employer:Salary' (Transfer balance)"
              Income:US:Employer:Salary 10000.00 USD
              Equity:Transfer -10000.00 USD
            """,
            xfer_entries,
        )
        self.assertEqual(len(self.entries) + 5, len(xfer_entries))
class TestSummarize(cmptest.TestCase):
    # Account receiving the opening balances created by summarization.
    OPENING_ACCOUNT = "Equity:Opening-Balances"

    def test_summarize__complete(self):
        entries, errors, options_map = loader.load_string(INPUT)
        self.assertFalse(errors)
        summarize_date = datetime.date(2011, 1, 1)
        summarized_entries, index = summarize.summarize(
            entries, summarize_date, self.OPENING_ACCOUNT
        )
        # Make sure all the active open entries have been preserved.
        self.assertIncludesEntries(INPUT_OPEN, summarized_entries)
        self.assertExcludesEntries(INPUT_BEFORE, summarized_entries)
        self.assertExcludesEntries(INPUT_PRICES_REDUNDANT, summarized_entries)
        self.assertIncludesEntries(INPUT_PRICES_LAST, summarized_entries)
        self.assertIncludesEntries(INPUT_PERIOD, summarized_entries)
        # Isolate the transactions flagged as summarizing ('S) and check them.
        summarizing_entries = [
            entry
            for entry in summarized_entries
            if (isinstance(entry, data.Transaction) and entry.flag == flags.FLAG_SUMMARIZE)
        ]
        self.assertEqualEntries(
            """
            2010-12-31 'S "Opening balance for 'Assets:CA:BMO:Checking' (Summarization)"
              Assets:CA:BMO:Checking 1000.00 CAD
              Equity:Opening-Balances -1000.00 CAD
            2010-12-31 'S "Opening balance for 'Assets:US:Chase:Checking' (Summarization)"
              Assets:US:Chase:Checking 2459.98 USD
              Equity:Opening-Balances -2459.98 USD
            2010-12-31 'S "Opening balance for 'Assets:US:Investing:HOOL' (Summarization)"
              Assets:US:Investing:HOOL 5 HOOL {510.00 USD, 2010-12-05} ; 2550.00 USD
              Equity:Opening-Balances -2550.00 USD ; -2550.00 USD
            2010-12-31 'S "Opening balance for 'Expenses:Flights' (Summarization)"
              Expenses:Flights 345.23 USD
              Equity:Opening-Balances -345.23 USD
            2010-12-31 'S "Opening balance for 'Expenses:Internet' (Summarization)"
              Expenses:Internet 80.02 USD
              Equity:Opening-Balances -80.02 USD
            2010-12-31 'S "Opening balance for 'Expenses:Restaurant' (Summarization)"
              Expenses:Restaurant 67.20 USD
              Equity:Opening-Balances -67.20 USD
            2010-12-31 'S "Opening balance for 'Expenses:Taxes' (Summarization)"
              Expenses:Taxes 4000.00 USD
              Equity:Opening-Balances -4000.00 USD
            2010-12-31 'S "Opening balance for 'Income:US:Employer:Salary' (Summarization)"
              Income:US:Employer:Salary -10000.00 USD
              Equity:Opening-Balances 10000.00 USD
            2010-12-31 'S "Opening balance for 'Liabilities:US:Chase:CreditCard' (Summarization)"
              Liabilities:US:Chase:CreditCard -412.43 USD
              Equity:Opening-Balances 412.43 USD
            """,
            summarizing_entries,
        )
        # Check that all the transactions before the index are summarizing ones
        # and dated before the summarizing date.
        before_transactions = [
            entry
            for entry in summarized_entries[:index]
            if isinstance(entry, data.Transaction)
        ]
        self.assertTrue(
            all(entry.flag == flags.FLAG_SUMMARIZE for entry in before_transactions)
        )
        self.assertTrue(all(entry.date < summarize_date for entry in before_transactions))
        # Check that all the transactions after the index are not summarizing
        # ones and dated after the summarizing date.
        after_transactions = [
            entry
            for entry in summarized_entries[index:]
            if isinstance(entry, data.Transaction)
        ]
        self.assertFalse(
            any(entry.flag == flags.FLAG_SUMMARIZE for entry in after_transactions)
        )
        self.assertFalse(any(entry.date < summarize_date for entry in after_transactions))

    @loader.load_doc()
    def test_summarize__ordering_non_transactions(self, entries, _, __):
        """
        2016-01-15 price HOOL 123.45 USD
        2016-02-01 open Assets:Invest:Cash
        2016-02-01 open Assets:Invest:HOOL
        2016-02-16 *
          Assets:Invest:HOOL 10 HOOL {143.45 USD}
          Assets:Invest:Cash
        2016-04-01 *
          Assets:Invest:HOOL 2 HOOL {156.32 USD}
          Assets:Invest:Cash
        """
        summarize_date = datetime.date(2016, 3, 1)
        summarized_entries, index = summarize.summarize(
            entries, summarize_date, self.OPENING_ACCOUNT
        )
        # Summarization must keep the entry stream sorted by date.
        self.assertTrue(misc_utils.is_sorted(summarized_entries, lambda entry: entry.date))
class TestConversions(cmptest.TestCase):
    # Account receiving the conversion transactions.
    ACCOUNT = "Equity:Conversions"

    @loader.load_doc()
    def setUp(self, entries, _, __):
        """
        2012-01-01 open Income:US:Job
        2012-01-01 open Assets:US:Checking
        2012-01-01 open Assets:CA:Invest
        2012-01-01 open Assets:CA:Invest:NT
        2012-03-01 * "Earn some money"
          Income:US:Job -1000.00 USD
          Assets:US:Checking 1000.00 USD
        2012-03-02 * "Transfer to Investment"
          Assets:US:Checking -800.00 USD
          Assets:CA:Invest 800.00 CAD @ 1 USD
        2012-03-03 * "Buy some stock"
          Assets:CA:Invest -600.00 CAD
          Assets:CA:Invest:NT 60 NT {10 CAD}
        2012-05-01 * "Transfer some money back"
          Assets:CA:Invest -100.00 CAD @ 1 USD
          Assets:US:Checking 100.00 USD
        """
        self.entries = entries

    def test_conversions__empty(self):
        # Before any conversion occurs: no conversion entry is inserted.
        date = datetime.date(2012, 2, 1)
        conversion_entries = summarize.conversions(
            self.entries, self.ACCOUNT, "NOTHING", date
        )
        self.assertEqualEntries(self.entries, conversion_entries)
        converted_balance = interpolate.compute_entries_balance(
            conversion_entries, date=date
        )
        self.assertTrue(converted_balance.reduce(convert.get_cost).is_empty())

    def test_conversions__not_needed(self):
        # On the day of the first conversion (exclusive): still nothing to do.
        date = datetime.date(2012, 3, 2)
        conversion_entries = summarize.conversions(
            self.entries, self.ACCOUNT, "NOTHING", date
        )
        self.assertEqualEntries(self.entries, conversion_entries)
        converted_balance = interpolate.compute_entries_balance(
            conversion_entries, date=date
        )
        self.assertTrue(converted_balance.reduce(convert.get_cost).is_empty())

    def test_conversions__needed_middle(self):
        # After one conversion: a single conversion entry is inserted.
        date = datetime.date(2012, 3, 3)
        conversion_entries = summarize.conversions(
            self.entries, self.ACCOUNT, "NOTHING", date
        )
        self.assertIncludesEntries(self.entries, conversion_entries)
        self.assertIncludesEntries(
            """
            2012-03-02 'C "Conversion for (-800.00 USD, 800.00 CAD)"
              Equity:Conversions 800.00 USD @ 0 NOTHING
              Equity:Conversions -800.00 CAD @ 0 NOTHING
            """,
            conversion_entries,
        )
        converted_balance = interpolate.compute_entries_balance(
            conversion_entries, date=date
        )
        self.assertTrue(converted_balance.reduce(convert.get_cost).is_empty())

    def test_conversions__with_transactions_at_cost(self):
        # Positions held at cost do not participate in the conversion amounts.
        date = datetime.date(2012, 3, 10)
        conversion_entries = summarize.conversions(self.entries, self.ACCOUNT, "XFER", date)
        self.assertIncludesEntries(self.entries, conversion_entries)
        self.assertIncludesEntries(
            """
            2012-03-09 'C "Conversion for (-800.00 USD, 200.00 CAD, 60 NT {10 CAD, 2012-03-03})"
              Equity:Conversions 800.00 USD @ 0 XFER
              Equity:Conversions -800.00 CAD @ 0 XFER
            """,
            conversion_entries,
        )
        converted_balance = interpolate.compute_entries_balance(
            conversion_entries, date=date
        )
        self.assertTrue(converted_balance.reduce(convert.get_cost).is_empty())

    def test_conversions__multiple(self):
        # Multiple conversions are netted into a single conversion entry.
        date = datetime.date(2012, 5, 10)
        conversion_entries = summarize.conversions(
            self.entries, self.ACCOUNT, "NOTHING", date
        )
        self.assertIncludesEntries(self.entries, conversion_entries)
        self.assertIncludesEntries(
            """
            2012-05-09 'C "Conversion for (-700.00 USD, 100.00 CAD, 60 NT {10 CAD, 2012-03-03})"
              Equity:Conversions 700.00 USD @ 0 NOTHING
              Equity:Conversions -700.00 CAD @ 0 NOTHING
            """,
            conversion_entries,
        )
        converted_balance = interpolate.compute_entries_balance(conversion_entries)
        self.assertTrue(converted_balance.reduce(convert.get_cost).is_empty())

    def test_conversions__no_date(self):
        # Without a date the conversion is inserted at the last entry's date.
        conversion_entries = summarize.conversions(self.entries, self.ACCOUNT, "NOTHING")
        self.assertIncludesEntries(self.entries, conversion_entries)
        self.assertIncludesEntries(
            """
            2012-05-01 'C "Conversion for (-700.00 USD, 100.00 CAD, 60 NT {10 CAD, 2012-03-03})"
              Equity:Conversions 700.00 USD @ 0 NOTHING
              Equity:Conversions -700.00 CAD @ 0 NOTHING
            """,
            conversion_entries,
        )
        converted_balance = interpolate.compute_entries_balance(conversion_entries)
        self.assertTrue(converted_balance.reduce(convert.get_cost).is_empty())

    @loader.load_doc()
    def test_conversions__non_empty_but_empty_cost(self, entries, _, __):
        """
        2012-01-01 open Assets:Checking
        2012-01-01 open Assets:Invest
        2012-03-01 *
          Assets:Checking -800.00 USD
          Assets:Invest 40 HOOL {20.00 USD}
        """
        # A purchase at cost balances out at cost: no conversion entry needed.
        conversion_entries = summarize.conversions(entries, self.ACCOUNT, "XFER")
        self.assertEqualEntries(entries, conversion_entries)
        converted_balance = interpolate.compute_entries_balance(entries)
        self.assertTrue(converted_balance.reduce(convert.get_cost).is_empty())
class TestTruncate(cmptest.TestCase):
    # summarize.truncate() keeps entries strictly before the given date
    # (open directives are always preserved).
    @loader.load_doc()
    def setUp(self, entries, _, __):
        """
        2014-01-01 open Assets:US:Bank:Checking
        2014-01-01 open Equity:Opening-Balances
        2014-03-10 * "A"
          Assets:US:Bank:Checking 1 USD
          Equity:Opening-Balances -1 USD
        2014-03-11 * "B"
          Assets:US:Bank:Checking 1 USD
          Equity:Opening-Balances -1 USD
        2014-03-12 * "C"
          Assets:US:Bank:Checking 1 USD
          Equity:Opening-Balances -1 USD
        2014-03-13 * "D1"
          Assets:US:Bank:Checking 1 USD
          Equity:Opening-Balances -1 USD
        2014-03-13 * "D2"
          Assets:US:Bank:Checking 1 USD
          Equity:Opening-Balances -1 USD
        2014-03-14 * "E"
          Assets:US:Bank:Checking 1 USD
          Equity:Opening-Balances -1 USD
        """
        self.entries = entries

    def test_truncate__before(self):
        # Truncating before all transactions leaves only the open directives.
        truncated_entries = summarize.truncate(self.entries, datetime.date(2014, 2, 15))
        self.assertEqualEntries(
            """
            2014-01-01 open Assets:US:Bank:Checking
            2014-01-01 open Equity:Opening-Balances
            """,
            truncated_entries,
        )

    def test_truncate__normal1(self):
        # The truncation date itself is excluded.
        truncated_entries = summarize.truncate(self.entries, datetime.date(2014, 3, 13))
        self.assertEqualEntries(
            """
            2014-01-01 open Assets:US:Bank:Checking
            2014-01-01 open Equity:Opening-Balances
            2014-03-10 * "A"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            2014-03-11 * "B"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            2014-03-12 * "C"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            """,
            truncated_entries,
        )

    def test_truncate__normal2(self):
        # Both same-day transactions (D1, D2) are kept together.
        truncated_entries = summarize.truncate(self.entries, datetime.date(2014, 3, 14))
        self.assertEqualEntries(
            """
            2014-01-01 open Assets:US:Bank:Checking
            2014-01-01 open Equity:Opening-Balances
            2014-03-10 * "A"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            2014-03-11 * "B"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            2014-03-12 * "C"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            2014-03-13 * "D1"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            2014-03-13 * "D2"
              Assets:US:Bank:Checking 1 USD
              Equity:Opening-Balances -1 USD
            """,
            truncated_entries,
        )

    def test_truncate__after(self):
        # Truncating after all entries is a no-op.
        truncated_entries = summarize.truncate(self.entries, datetime.date(2014, 3, 15))
        self.assertEqual(self.entries, truncated_entries)
class TestEntriesFromBalance(cmptest.TestCase):
    # Account used as the balancing leg of the synthesized transactions.
    SOURCE_ACCOUNT = "Equity:Opening-Balances"
    META = data.new_metadata("<test>", 0)

    def test_create_entries_from_balances__empty(self):
        # An account with an empty inventory must not produce an entry.
        balances = collections.defaultdict(inventory.Inventory)
        _ = balances["Assets:US:Bank:Empty"]
        entries = summarize.create_entries_from_balances(
            balances,
            datetime.date.today(),
            self.SOURCE_ACCOUNT,
            True,
            self.META,
            "!",
            "narration",
        )
        self.assertEqual([], entries)

    def setUp(self):
        self.balances = collections.defaultdict(inventory.Inventory)
        self.balances["Assets:US:Investment"] = inventory.from_string(
            "10 HOOL {500.00 USD, 2014-01-01}"
        )
        self.balances["Assets:US:Bank:Checking"] = inventory.from_string("1823.23 USD")

    def test_create_entries_from_balances__simple(self):
        # direction=True: the account receives its balance from SOURCE_ACCOUNT.
        entries = summarize.create_entries_from_balances(
            self.balances,
            datetime.date(2014, 1, 1),
            self.SOURCE_ACCOUNT,
            True,
            self.META,
            "!",
            "Narration for {account} at {date}",
        )
        self.assertEqualEntries(
            """
            2014-01-01 ! "Narration for Assets:US:Bank:Checking at 2014-01-01"
              Assets:US:Bank:Checking 1823.23 USD
              Equity:Opening-Balances -1823.23 USD
            2014-01-01 ! "Narration for Assets:US:Investment at 2014-01-01"
              Assets:US:Investment 10 HOOL {500.00 USD}
              Equity:Opening-Balances -5000.00 USD
            """,
            entries,
        )

    def test_create_entries_from_balances__reverse(self):
        # direction=False: the postings are inverted.
        entries = summarize.create_entries_from_balances(
            self.balances,
            datetime.date(2014, 1, 1),
            self.SOURCE_ACCOUNT,
            False,
            self.META,
            "*",
            "Narration for {account} at {date}",
        )
        self.assertEqualEntries(
            """
            2014-01-01 * "Narration for Assets:US:Bank:Checking at 2014-01-01"
              Assets:US:Bank:Checking -1823.23 USD
              Equity:Opening-Balances 1823.23 USD
            2014-01-01 * "Narration for Assets:US:Investment at 2014-01-01"
              Assets:US:Investment -10 HOOL {500.00 USD}
              Equity:Opening-Balances 5000.00 USD
            """,
            entries,
        )
class TestBalanceByAccount(cmptest.TestCase):
    # summarize.balance_by_account() returns the accumulated balances up to an
    # optional end date, plus the index of the first entry not accumulated.
    @loader.load_doc()
    def setUp(self, entries, _, __):
        """
        2001-01-01 open Assets:AccountA
        2001-01-01 open Assets:AccountB
        2001-01-01 open Equity:Opening-Balances
        2014-02-01 *
          Assets:AccountA 10 USD
          Equity:Opening-Balances
        2014-03-01 *
          Assets:AccountA 1 USD
          Assets:AccountB 12 USD
          Equity:Opening-Balances
        """
        self.entries = entries

    def test_balance_by_account__no_end_date(self):
        # Test with no end date.
        balances, index = summarize.balance_by_account(self.entries)
        self.assertEqual(len(self.entries), index)
        self.assertEqual(
            {
                "Assets:AccountA": inventory.from_string("11 USD"),
                "Equity:Opening-Balances": inventory.from_string("-23 USD"),
                "Assets:AccountB": inventory.from_string("12 USD"),
            },
            balances,
        )

    def test_balance_by_account__first_date(self):
        # Test on the first date (should be empty).
        balances, index = summarize.balance_by_account(
            self.entries, datetime.date(2014, 2, 1)
        )
        self.assertEqual(3, index)
        self.assertEqual({}, balances)

    def test_balance_by_account__middle(self):
        # Test in the middle.
        balances, index = summarize.balance_by_account(
            self.entries, datetime.date(2014, 2, 10)
        )
        self.assertEqual(4, index)
        self.assertEqual(
            {
                "Assets:AccountA": inventory.from_string("10 USD"),
                "Equity:Opening-Balances": inventory.from_string("-10 USD"),
            },
            balances,
        )
class TestOpenAtDate(cmptest.TestCase):
    # summarize.get_open_entries() returns the Open directives for accounts
    # still open strictly before the given date.
    @loader.load_doc()
    def setUp(self, entries, _, __):
        """
        2011-01-01 open Assets:AccountA
        2011-02-01 open Assets:AccountB
        2011-03-01 open Assets:AccountC
        2011-03-15 close Assets:AccountA
        2011-04-01 open Assets:AccountD
        2011-05-01 open Assets:AccountE
        2011-06-01 open Assets:AccountF
        2011-07-01 open Assets:AccountG
        2011-08-01 open Assets:AccountH
        2011-09-01 open Assets:AccountI
        2011-10-01 open Assets:AccountJ
        2011-11-01 open Assets:AccountK
        2011-12-01 open Assets:AccountL
        2012-07-01 close Assets:AccountG
        2012-07-01 close Assets:AccountH
        2012-07-01 close Assets:AccountI
        2012-07-01 close Assets:AccountJ
        2012-07-01 close Assets:AccountK
        2012-07-01 close Assets:AccountL
        """
        self.assertTrue(entries)
        self.entries = entries

    def test_get_open_entries__before(self):
        # Before any directive: nothing is open.
        self.assertEqualEntries(
            """
            """,
            summarize.get_open_entries(self.entries, date(2010, 12, 1)),
        )

    def test_get_open_entries__first_entry_open(self):
        # On the day of the first entry is open.
        self.assertEqualEntries(
            """
            """,
            summarize.get_open_entries(self.entries, date(2011, 1, 1)),
        )

    def test_get_open_entries__after_first_entry_open(self):
        # On the day after the first entry is open.
        self.assertEqualEntries(
            """
            2011-01-01 open Assets:AccountA
            """,
            summarize.get_open_entries(self.entries, date(2011, 1, 2)),
        )

    def test_get_open_entries__first_close(self):
        # On the day of the first close.
        self.assertEqualEntries(
            """
            2011-01-01 open Assets:AccountA
            2011-02-01 open Assets:AccountB
            2011-03-01 open Assets:AccountC
            """,
            summarize.get_open_entries(self.entries, date(2011, 3, 15)),
        )

    def test_get_open_entries__after_first_close(self):
        # On the day after the first close.
        self.assertEqualEntries(
            """
            2011-02-01 open Assets:AccountB
            2011-03-01 open Assets:AccountC
            """,
            summarize.get_open_entries(self.entries, date(2011, 3, 16)),
        )

    def test_get_open_entries__after_new_opens(self):
        # Other days after new opens.
        self.assertEqualEntries(
            """
            2011-02-01 open Assets:AccountB
            2011-03-01 open Assets:AccountC
            2011-04-01 open Assets:AccountD
            2011-05-01 open Assets:AccountE
            """,
            summarize.get_open_entries(self.entries, date(2011, 5, 3)),
        )

    def test_get_open_entries__after_all_opens(self):
        # After all opens.
        self.assertEqualEntries(
            """
            2011-02-01 open Assets:AccountB
            2011-03-01 open Assets:AccountC
            2011-04-01 open Assets:AccountD
            2011-05-01 open Assets:AccountE
            2011-06-01 open Assets:AccountF
            2011-07-01 open Assets:AccountG
            2011-08-01 open Assets:AccountH
            2011-09-01 open Assets:AccountI
            2011-10-01 open Assets:AccountJ
            2011-11-01 open Assets:AccountK
            2011-12-01 open Assets:AccountL
            """,
            summarize.get_open_entries(self.entries, date(2012, 1, 1)),
        )

    def test_get_open_entries__after_all_entries(self):
        # After all entries.
        self.assertEqualEntries(
            """
            2011-02-01 open Assets:AccountB
            2011-03-01 open Assets:AccountC
            2011-04-01 open Assets:AccountD
            2011-05-01 open Assets:AccountE
            2011-06-01 open Assets:AccountF
            """,
            summarize.get_open_entries(self.entries, date(2013, 1, 1)),
        )

    @loader.load_doc(expect_errors=True)
    def test_get_open_entries__duplicate_open(self, entries, errors, _):
        """
        2011-01-01 open Assets:AccountA
        2011-02-01 open Assets:AccountA
        """
        # The first open wins; the duplicate is ignored.
        self.assertEqualEntries(
            """
            2011-01-01 open Assets:AccountA
            """,
            summarize.get_open_entries(entries, date(2013, 1, 1)),
        )

    @loader.load_doc(expect_errors=True)
    def test_get_open_entries__closed_twice(self, entries, errors, _):
        """
        2011-01-01 open Assets:AccountA
        2011-02-01 close Assets:AccountA
        2011-02-02 close Assets:AccountA
        """
        self.assertEqualEntries(
            """
            """,
            summarize.get_open_entries(entries, date(2013, 1, 1)),
        )

    @loader.load_doc(expect_errors=True)
    def test_get_open_entries__closed_without_open(self, entries, errors, _):
        """
        2011-02-02 close Assets:AccountA
        """
        # A close without a matching open contributes nothing.
        self.assertEqualEntries(
            """
            """,
            summarize.get_open_entries(entries, date(2013, 1, 1)),
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
#
# Copyright (C) 2007 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
import os
import deluge.common
import deluge.log
from deluge.config import Config
log = logging.getLogger(__name__)
class _ConfigManager:
    """Process-wide registry of `Config` objects, keyed by config filename.

    Ensures all callers requesting the same config file share a single
    `Config` instance, and tracks the directory the config files live in.
    """

    def __init__(self):
        log.debug("ConfigManager started..")
        # Maps config filename -> Config instance, shared across callers.
        self.config_files = {}
        self.__config_directory = None

    @property
    def config_directory(self):
        # Resolved lazily: falls back to the platform default on first access.
        if self.__config_directory is None:
            self.__config_directory = deluge.common.get_default_config_dir()
        return self.__config_directory

    def __del__(self):
        # Drop references to the Config objects at teardown.
        del self.config_files

    def set_config_dir(self, directory):
        """
        Sets the config directory.

        :param directory: str, the directory where the config info should be
        :returns bool: True if successfully changed directory, False if not
        """
        if not directory:
            return False

        # Ensure absolute dirpath
        directory = os.path.abspath(directory)

        log.info("Setting config directory to: %s", directory)
        if not os.path.isdir(directory):
            if os.path.exists(directory):
                # The path exists but is not a directory.
                log.error("Config directory needs to be a directory!")
                return False
            # Try to create the config folder if it doesn't exist.
            # exist_ok=True closes the TOCTOU race where another process
            # creates the directory between the isdir() check and this call.
            try:
                os.makedirs(directory, exist_ok=True)
            except OSError as ex:
                log.error("Unable to make config directory: %s", ex)
                return False

        self.__config_directory = directory

        # Reset the config_files so we don't get config from old config folder
        # XXX: Probably should have it go through the config_files dict and try
        # to reload based on the new config directory
        self.save()
        self.config_files = {}
        deluge.log.tweak_logging_levels()

        return True

    def get_config_dir(self):
        """Return the current config directory (str)."""
        return self.config_directory

    def close(self, config):
        """Closes a config file."""
        # EAFP: ignore unknown filenames rather than pre-checking membership.
        try:
            del self.config_files[config]
        except KeyError:
            pass

    def save(self):
        """Saves all the configs to disk."""
        for value in self.config_files.values():
            value.save()
        # We need to return True to keep the timer active
        return True

    def get_config(self, config_file, defaults=None, file_version=1):
        """Get a reference to the Config object for this filename.

        :param config_file: str, the config filename
        :param defaults: dict of default values, used only on first creation
        :param file_version: int, version passed through to Config
        :returns: the shared Config instance for this filename
        """
        log.debug("Getting config: %s", config_file)
        # Create the config object if not already created
        if config_file not in self.config_files:
            self.config_files[config_file] = Config(
                config_file,
                defaults,
                config_dir=self.config_directory,
                file_version=file_version,
            )
        return self.config_files[config_file]
# Singleton functions
# Single shared instance used by the module-level API functions below.
_configmanager = _ConfigManager()
def ConfigManager(config, defaults=None, file_version=1):  # NOQA: N802
    """Return the shared Config object for *config*, creating it on first use."""
    return _configmanager.get_config(
        config, defaults=defaults, file_version=file_version
    )
def set_config_dir(directory):
    """Set the config directory on the shared manager, else just uses default."""
    decoded_dir = deluge.common.decode_bytes(directory)
    return _configmanager.set_config_dir(decoded_dir)
def get_config_dir(filename=None):
    """Return the config directory, joined with *filename* when one is given."""
    base_dir = _configmanager.get_config_dir()
    if filename is None:
        return base_dir
    return os.path.join(base_dir, filename)
def close(config):
    """Close (unregister) the shared Config object for *config*."""
    return _configmanager.close(config)
from __future__ import annotations
import abc
from typing import TYPE_CHECKING, Iterable
if TYPE_CHECKING:
from gaphor.core.modeling import Element
from gaphor.diagram.diagramtoolbox import (
DiagramType,
ElementCreateInfo,
ToolboxDefinition,
)
class Service(metaclass=abc.ABCMeta):
    """Base interface for all services in Gaphor.

    Concrete services must implement :meth:`shutdown` to release whatever
    resources they hold.
    """

    @abc.abstractmethod
    def shutdown(self) -> None:
        """Shutdown the services, free resources."""
class ActionProvider(metaclass=abc.ABCMeta):
    """An action provider is a special service that provides actions via
    ``@action`` decorators on its methods (see gaphor/action.py)."""

    @abc.abstractmethod
    def __init__(self):
        # Declared abstract so ActionProvider itself cannot be instantiated;
        # concrete providers must define their own constructor.
        pass
class ModelingLanguage(metaclass=abc.ABCMeta):
    """A model provider is a special service that provides an entrypoint to a
    model implementation, such as UML, SysML, RAAML."""

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """Human-readable name of the modeling language."""

    @property
    @abc.abstractmethod
    def toolbox_definition(self) -> ToolboxDefinition:
        """Get structure for the toolbox."""

    @property
    @abc.abstractmethod
    def diagram_types(self) -> Iterable[DiagramType]:
        """Iterate diagram types."""

    @property
    @abc.abstractmethod
    def element_types(self) -> Iterable[ElementCreateInfo]:
        """Iterate element types."""

    @abc.abstractmethod
    def lookup_element(self, name: str) -> type[Element] | None:
        """Look up a model element type by (class) name.

        Returns None when the name is not part of this modeling language.
        """
|
midifile | output | # Python midifile package -- parse, load and play MIDI files.
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Writes MIDI events to a MIDI output.
"""
import contextlib
from . import event
class Output:
    """Abstract base class for a MIDI output.

    Inherit to implement the actual writing to MIDI ports.
    The midiplayer.Player calls midi_event and all_notes_off.
    """
    def midi_event(self, midi):
        """Handles a list or dict of MIDI events from a Song (midisong.py).

        A dict is taken to map track numbers to event lists; the lists are
        flattened in ascending track order before sending.
        """
        if isinstance(midi, dict):
            # dict mapping track to events?
            midi = sum(map(midi.get, sorted(midi)), [])
        self.send_events(midi)

    def reset(self):
        """Restores the MIDI output to an initial state.

        Sends reset_controllers messages, then sets the volume to 90 and
        the program to 0 on all channels.
        """
        self.reset_controllers()
        self.set_main_volume(90)
        self.set_program_change(0)

    def set_main_volume(self, volume, channel=-1):
        """Sets the main volume of a channel.

        If the channel is -1 (the default), sets it on all channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ControllerEvent(c, event.MIDI_CTL_MSB_MAIN_VOLUME, volume))

    def set_program_change(self, program, channel=-1):
        """Sends a program change to a channel.

        If the channel is -1 (the default), sends it to all channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ProgramChangeEvent(c, program))

    def reset_controllers(self, channel=-1):
        """Sends a reset_controllers message to a channel.

        If the channel is -1 (the default), sends the message to all channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ControllerEvent(c, event.MIDI_CTL_RESET_CONTROLLERS, 0))

    def all_sounds_off(self, channel=-1):
        """Sends all_notes_off and all_sounds_off messages to a channel.

        If the channel is -1 (the default), sends the messages to all
        channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ControllerEvent(c, event.MIDI_CTL_ALL_NOTES_OFF, 0))
                send(event.ControllerEvent(c, event.MIDI_CTL_ALL_SOUNDS_OFF, 0))

    def send_events(self, events):
        """Writes the list of events to the output port.

        Each event is one of the event types in event.py
        Implement to do the actual writing.
        """
        pass

    @contextlib.contextmanager
    def sender(self):
        """Returns a context manager to call for each event to send.

        When the context manager exits, the events are sent using the
        send_events() method.
        """
        # Renamed from `l` (ambiguous single-letter name, PEP 8 / E741).
        pending = []
        yield pending.append
        if pending:
            self.send_events(pending)
class PortMidiOutput(Output):
    """Writes events to a PortMIDI Output instance.

    The PortMIDI Output instance should be in the output attribute.
    """

    output = None

    def send_events(self, events):
        """Writes the list of events to the PortMIDI output port."""
        messages = []
        for ev in events:
            converted = self.convert_event(ev)
            if converted:
                messages.append([converted, 0])
        # Flush in slices of at most 1024 messages per write call.
        while len(messages) > 1024:
            self.output.write(messages[:1024])
            messages = messages[1024:]
        if messages:
            self.output.write(messages)

    def convert_event(self, e):
        """Returns a list of integers representing a MIDI message from event."""
        dispatch = {
            event.NoteEvent: self.convert_note_event,
            event.PitchBendEvent: self.convert_pitchbend_event,
            event.ProgramChangeEvent: self.convert_programchange_event,
            event.ControllerEvent: self.convert_controller_event,
        }
        converter = dispatch.get(type(e))
        if converter is not None:
            return converter(e)

    def convert_note_event(self, e):
        # Status byte is (message type * 16) | channel.
        return [e.type * 16 + e.channel, e.note, e.value]

    def convert_programchange_event(self, e):
        # Program change: status 0xC0 | channel, then the program number.
        return [0xC0 + e.channel, e.number]

    def convert_controller_event(self, e):
        # Control change: status 0xB0 | channel, controller, value.
        return [0xB0 + e.channel, e.number, e.value]

    def convert_pitchbend_event(self, e):
        # Pitch bend: 14-bit value split into 7-bit LSB and MSB.
        return [0xE0 + e.channel, e.value & 0x7F, e.value >> 7]
|
admin | teams | from CTFd.admin import admin
from CTFd.models import Challenges, Teams, Tracking
from CTFd.utils.decorators import admins_only
from flask import render_template, request, url_for
from sqlalchemy.sql import not_
@admin.route("/admin/teams")
@admins_only
def teams_listing():
    """Render the paginated admin team list, optionally filtered by field."""
    q = request.args.get("q")
    field = request.args.get("field")
    page = abs(request.args.get("page", 1, type=int))

    filters = []
    # Only filter when the requested field is a real exposed column.
    if q and Teams.__mapper__.has_property(field):
        filters.append(getattr(Teams, field).like("%{}%".format(q)))

    query = Teams.query.filter(*filters).order_by(Teams.id.asc())
    teams = query.paginate(page=page, per_page=50)

    # Preserve the query string (minus the page number) in pager links.
    args = dict(request.args)
    args.pop("page", 1)

    return render_template(
        "admin/teams/teams.html",
        teams=teams,
        prev_page=url_for(request.endpoint, page=teams.prev_num, **args),
        next_page=url_for(request.endpoint, page=teams.next_num, **args),
        q=q,
        field=field,
    )
@admin.route("/admin/teams/new")
@admins_only
def teams_new():
    """Render the admin form for creating a new team."""
    return render_template("admin/teams/new.html")
@admin.route("/admin/teams/<int:team_id>")
@admins_only
def teams_detail(team_id):
    """Render the admin detail page for one team: members, activity, stats."""
    team = Teams.query.filter_by(id=team_id).first_or_404()

    # Members and their ids (the ids drive the tracking lookup below).
    members = team.members
    member_ids = [member.id for member in members]

    # Aggregate activity and scoring information for the whole team.
    solves = team.get_solves(admin=True)
    fails = team.get_fails(admin=True)
    awards = team.get_awards(admin=True)
    score = team.get_score(admin=True)
    place = team.get_place(admin=True)

    # Challenges nobody on the team has solved yet.
    # TODO: How do you mark a missing challenge for a team?
    solved_ids = [solve.challenge_id for solve in solves]
    missing = Challenges.query.filter(not_(Challenges.id.in_(solved_ids))).all()

    # IP addresses seen for any member, newest first.
    tracking = Tracking.query.filter(Tracking.user_id.in_(member_ids))
    addrs = tracking.order_by(Tracking.date.desc()).all()

    return render_template(
        "admin/teams/team.html",
        team=team,
        members=members,
        score=score,
        place=place,
        solves=solves,
        fails=fails,
        missing=missing,
        awards=awards,
        addrs=addrs,
    )
|
beetsplug | parentwork | # This file is part of beets.
# Copyright 2017, Dorian Soergel.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Gets parent work, its disambiguation and id, composer, composer sort name
and work composition date
"""
import musicbrainzngs
from beets import ui
from beets.plugins import BeetsPlugin
def direct_parent_id(mb_workid, work_date=None):
    """Given a MusicBrainz work id, find the id of one of the works it is
    part of, and the first composition date encountered along the way.

    Returns a (parent_id, work_date) tuple; parent_id is None when the
    work is not part of any other work.
    """
    work_info = musicbrainzngs.get_work_by_id(
        mb_workid, includes=["work-rels", "artist-rels"]
    )
    # Keep only the first composition date found while walking the chain:
    # once work_date is set by a caller, composer "end" dates are ignored.
    if "artist-relation-list" in work_info["work"] and work_date is None:
        for artist in work_info["work"]["artist-relation-list"]:
            if artist["type"] == "composer":
                # `in artist` instead of `in artist.keys()` (idiomatic).
                if "end" in artist:
                    work_date = artist["end"]
    if "work-relation-list" in work_info["work"]:
        for direct_parent in work_info["work"]["work-relation-list"]:
            # A backward "parts" relation points at the containing work.
            if (
                direct_parent["type"] == "parts"
                and direct_parent.get("direction") == "backward"
            ):
                direct_id = direct_parent["work"]["id"]
                return direct_id, work_date
    return None, work_date
def work_parent_id(mb_workid):
    """Find the topmost parent work id and composition date of a work.

    Follows "parts" relations upward until a work with no parent is found.
    """
    work_date = None
    # Loop always terminates via the return below; the original trailing
    # `return` after the loop was unreachable and has been removed.
    while True:
        new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)
        if not new_mb_workid:
            return mb_workid, work_date
        mb_workid = new_mb_workid
def find_parentwork_info(mb_workid):
    """Fetch the MusicBrainz info dict (including artist relations) for the
    topmost parent work of *mb_workid*, plus its composition date.
    """
    parent_id, work_date = work_parent_id(mb_workid)
    parent_info = musicbrainzngs.get_work_by_id(
        parent_id, includes=["artist-rels"]
    )
    return parent_info, work_date
class ParentWorkPlugin(BeetsPlugin):
    """Beets plugin that tags items with data about the topmost parent work
    of their MusicBrainz work: title, id, disambiguation, composer(s) and
    composition date.
    """

    def __init__(self):
        super().__init__()
        # auto: run during import; force: re-fetch even when already tagged.
        self.config.add(
            {
                "auto": False,
                "force": False,
            }
        )
        if self.config["auto"]:
            self.import_stages = [self.imported]

    def commands(self):
        """Expose the `beet parentwork` CLI subcommand."""
        def func(lib, opts, args):
            # CLI flags override config; write files only if beets says so.
            self.config.set_args(opts)
            force_parent = self.config["force"].get(bool)
            write = ui.should_write()
            for item in lib.items(ui.decargs(args)):
                changed = self.find_work(item, force_parent)
                if changed:
                    item.store()
                    if write:
                        item.try_write()
        command = ui.Subcommand(
            "parentwork", help="fetch parent works, composers and dates"
        )
        command.parser.add_option(
            "-f",
            "--force",
            dest="force",
            action="store_true",
            default=None,
            help="re-fetch when parent work is already present",
        )
        command.func = func
        return [command]

    def imported(self, session, task):
        """Import hook for fetching parent works automatically."""
        force_parent = self.config["force"].get(bool)
        for item in task.imported_items():
            self.find_work(item, force_parent)
            item.store()

    def get_info(self, item, work_info):
        """Given the parent work info dict, fetch parent_composer,
        parent_composer_sort, parentwork, parentwork_disambig, mb_workid and
        composer_ids.
        """
        parent_composer = []
        parent_composer_sort = []
        parentwork_info = {}

        composer_exists = False
        if "artist-relation-list" in work_info["work"]:
            for artist in work_info["work"]["artist-relation-list"]:
                if artist["type"] == "composer":
                    composer_exists = True
                    parent_composer.append(artist["artist"]["name"])
                    parent_composer_sort.append(artist["artist"]["sort-name"])
                    # A composer's "end" relation date is taken as the
                    # composition date of the work.
                    if "end" in artist.keys():
                        parentwork_info["parentwork_date"] = artist["end"]

            parentwork_info["parent_composer"] = ", ".join(parent_composer)
            parentwork_info["parent_composer_sort"] = ", ".join(
                parent_composer_sort
            )

        if not composer_exists:
            self._log.debug(
                "no composer for {}; add one at " "https://musicbrainz.org/work/{}",
                item,
                work_info["work"]["id"],
            )

        parentwork_info["parentwork"] = work_info["work"]["title"]
        parentwork_info["mb_parentworkid"] = work_info["work"]["id"]

        if "disambiguation" in work_info["work"]:
            parentwork_info["parentwork_disambig"] = work_info["work"][
                "disambiguation"
            ]
        else:
            parentwork_info["parentwork_disambig"] = None

        return parentwork_info

    def find_work(self, item, force):
        """Finds the parent work of a recording and populates the tags
        accordingly.

        The parent work is found recursively, by finding the direct parent
        repeatedly until there are no more links in the chain. We return the
        final, topmost work in the chain.

        Namely, the tags parentwork, parentwork_disambig, mb_parentworkid,
        parent_composer, parent_composer_sort and work_date are populated.
        """
        if not item.mb_workid:
            self._log.info(
                "No work for {}, \
add one at https://musicbrainz.org/recording/{}",
                item,
                item.mb_trackid,
            )
            return

        hasparent = hasattr(item, "parentwork")
        # Re-fetch when the underlying work id changed since the last run.
        work_changed = True
        if hasattr(item, "parentwork_workid_current"):
            work_changed = item.parentwork_workid_current != item.mb_workid
        if force or not hasparent or work_changed:
            try:
                work_info, work_date = find_parentwork_info(item.mb_workid)
            except musicbrainzngs.musicbrainz.WebServiceError as e:
                self._log.debug("error fetching work: {}", e)
                return
            parent_info = self.get_info(item, work_info)
            parent_info["parentwork_workid_current"] = item.mb_workid
            if "parent_composer" in parent_info:
                self._log.debug(
                    "Work fetched: {} - {}",
                    parent_info["parentwork"],
                    parent_info["parent_composer"],
                )
            else:
                self._log.debug(
                    "Work fetched: {} - no parent composer", parent_info["parentwork"]
                )
        elif hasparent:
            self._log.debug("{}: Work present, skipping", item)
            return

        # apply all non-null values to the item
        for key, value in parent_info.items():
            if value:
                item[key] = value
        if work_date:
            item["work_date"] = work_date
        return ui.show_model_changes(
            item,
            fields=[
                "parentwork",
                "parentwork_disambig",
                "mb_parentworkid",
                "parent_composer",
                "parent_composer_sort",
                "work_date",
                "parentwork_workid_current",
                "parentwork_date",
            ],
        )
|
dialogs | colordlg | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from copy import deepcopy
import wal
from sk1 import _, config
from sk1.pwidgets.colorctrls import (
CMYK_PALETTE,
GRAY_PALETTE,
RGB_PALETTE,
CmykMixer,
ColorColorRefPanel,
GrayMixer,
MiniPalette,
RgbMixer,
)
from uc2 import uc2const
# Map color-space identifiers to the mixer widget class that edits them.
MIXERS = {
    uc2const.COLOR_CMYK: CmykMixer,
    uc2const.COLOR_RGB: RgbMixer,
    uc2const.COLOR_GRAY: GrayMixer,
}
# Map color-space identifiers to the preset mini-palette shown in the dialog.
PALETTES = {
    uc2const.COLOR_CMYK: CMYK_PALETTE,
    uc2const.COLOR_RGB: RGB_PALETTE,
    uc2const.COLOR_GRAY: GRAY_PALETTE,
}
class ChangeColorDialog(wal.OkCancelDialog):
    """Modal dialog for editing a single color with a color-space-specific
    mixer, an original/new reference panel and a preset mini-palette.
    """

    cms = None  # color management system used for conversions
    orig_color = None  # color passed in by the caller (never mutated)
    new_color = None  # working copy being edited
    mixer = None
    refpanel = None
    palette = None

    def __init__(self, parent, title, cms, color):
        self.cms = cms
        self.orig_color = color
        # Edit a deep copy so Cancel leaves the caller's color untouched.
        self.new_color = deepcopy(self.orig_color)
        size = config.change_color_dlg_size
        wal.OkCancelDialog.__init__(
            self,
            parent,
            title,
            style=wal.VERTICAL,
            resizable=True,
            size=size,
            action_button=wal.BUTTON_APPLY,
        )
        self.set_minsize(config.change_color_dlg_minsize)

    def build(self):
        """Lay out mixer, reference panel and palette widgets."""
        self.pack(wal.HPanel(self), fill=True, expand=True)
        # Pick the mixer matching the color space of the edited color.
        cs = self.orig_color[0]
        self.mixer = MIXERS[cs](self, self.cms, onchange=self.mixer_changed)
        self.pack(self.mixer)
        self.pack(wal.HPanel(self), fill=True, expand=True)
        self.pack(wal.HLine(self), fill=True, padding=5)
        hpanel = wal.HPanel(self)
        self.refpanel = ColorColorRefPanel(
            hpanel,
            self.cms,
            self.orig_color,
            self.new_color,
            on_orig=self.refpanel_click,
        )
        hpanel.pack(self.refpanel)
        hpanel.pack(wal.HPanel(hpanel), fill=True, expand=True)
        self.palette = MiniPalette(
            hpanel, self.cms, PALETTES[cs], onclick=self.palette_click
        )
        hpanel.pack(self.palette)
        self.pack(hpanel, fill=True)
        self.update_data()

    def get_result(self):
        """Return the color currently selected in the mixer."""
        return self.mixer.get_color()

    def show(self):
        """Run the dialog modally; return the chosen color or None."""
        ret = None
        if self.show_modal() == wal.BUTTON_OK:
            ret = self.get_result()
        w, h = self.get_size()
        if wal.is_unity_16_04():
            # Compensate for Unity 16.04 window decoration height before
            # persisting the size.
            h = max(h - 28, config.change_color_dlg_minsize[1])
        # Remember the dialog size for the next invocation.
        config.change_color_dlg_size = (w, h)
        self.destroy()
        return ret

    def mixer_changed(self):
        # Mixer edited: adopt its color as the working color.
        self.new_color = self.mixer.get_color()
        self.update_data()

    def refpanel_click(self):
        # Clicking the original swatch reverts the working color.
        self.new_color = deepcopy(self.orig_color)
        self.update_data()

    def palette_click(self, color):
        # Palette pick replaces the working color.
        self.new_color = color
        self.update_data()

    def update_data(self):
        """Sync the mixer and reference panel with the working color."""
        self.mixer.set_color(self.new_color)
        self.refpanel.update(self.orig_color, self.new_color)
def change_color_dlg(parent, cms, color, title=_("Change color")):
    """Open a modal color-change dialog; return the chosen color or None."""
    dialog = ChangeColorDialog(parent, title, cms, color)
    return dialog.show()
|
lib | jsontemplates | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
import calendar
import time
from collections import defaultdict
import pytz
from mako.template import Template
from pylons import app_globals as g
from pylons import response
from pylons import tmpl_context as c
from pylons.i18n import _
from r2.config import feature
from r2.config.extensions import get_api_subtype
from r2.lib import hooks
from r2.lib.filters import _force_unicode, safemarkdown, spaceCompress
from r2.models import (
Account,
Comment,
Link,
Report,
Subreddit,
SubredditUserRelations,
Trophy,
)
from r2.models.token import OAuth2Scope, extra_oauth2_scope
from r2.models.wiki import ImagesByWikiPage
from utils import iters, to36, tup
from wrapped import CacheStub, StringTemplate, Templated, Wrapped
def make_typename(typ):
    """Return the API type name ("t" + base-36 type id) for a Thing class."""
    type_id36 = to36(typ._type_id)
    return "t%s" % type_id36
def make_fullname(typ, _id):
    """Return the fullname ("<typename>_<id36>") for a thing of type typ."""
    return "_".join((make_typename(typ), to36(_id)))
class ObjectTemplate(StringTemplate):
    """A StringTemplate whose payload is a plain Python object (dict/list)
    rather than a string; used to assemble JSON responses.
    """

    def __init__(self, d):
        # d: the (possibly nested) object to be finalized later.
        self.d = d

    def update(self, kw):
        """Return a new ObjectTemplate with CacheStub placeholders replaced
        by their computed values from kw, and strings force-unicoded.
        """
        def _update(obj):
            if isinstance(obj, (str, unicode)):
                return _force_unicode(obj)
            elif isinstance(obj, dict):
                return dict((k, _update(v)) for k, v in obj.iteritems())
            elif isinstance(obj, (list, tuple)):
                # Python 2: map() returns a list here.
                return map(_update, obj)
            elif isinstance(obj, CacheStub) and kw.has_key(obj.name):
                return kw[obj.name]
            else:
                return obj
        res = _update(self.d)
        return ObjectTemplate(res)

    def finalize(self, kw={}):
        """Return the stored object with substitutions from kw applied.

        NOTE: the mutable default kw is safe here because kw is only read,
        never mutated.
        """
        return self.update(kw).d
class JsonTemplate(Template):
    """Base class for JSON renderers, mirroring mako's Template interface."""

    def __init__(self):
        # Deliberately skip mako Template.__init__: no markup is compiled.
        pass

    def render(self, thing=None, *a, **kw):
        """Default rendering: an empty JSON object."""
        return ObjectTemplate({})
class TakedownJsonTemplate(JsonTemplate):
    """Renders a takedown notice as its bare explanation text."""

    def render(self, thing=None, *a, **kw):
        # thing is expected to carry an `explanation` attribute.
        return thing.explanation
class ThingTemplate(object):
    """Classmethod-based JSON renderer for Wrapped Thing objects."""

    @classmethod
    def render(cls, thing):
        """
        Return a JSON representation of a Wrapped Thing object.
        The Thing object should be Wrapped and been run through add_props just
        like is required for regular HTML rendering. The return value is an
        ObjectTemplate wrapped dictionary.
        """
        api_subtype = get_api_subtype()
        # the argument is named `thing` due to specifics of wrapped
        item = thing
        if api_subtype:
            # special handling for rendering a nested template as a different
            # style (usually html)
            data = cls.get_rendered(item, render_style=api_subtype)
        else:
            data = cls.get_json(item)
        d = {
            "kind": cls.get_kind(item),
            "data": data,
        }
        return ObjectTemplate(d)

    @classmethod
    def get_kind(cls, item):
        """Return the API type name (e.g. "t3") for the wrapped thing."""
        thing = item.lookups[0]
        return make_typename(thing.__class__)

    @classmethod
    def get_json(cls, item):
        """Return the base data dict: creation times, id36 and fullname."""
        data = {
            "created": time.mktime(item._date.timetuple()),
            # created_utc: epoch seconds of the UTC wall-clock time; the
            # time.timezone term compensates for mktime assuming localtime.
            "created_utc": time.mktime(item._date.astimezone(pytz.UTC).timetuple())
            - time.timezone,
            "id": item._id36,
            "name": item._fullname,
        }
        return data

    @classmethod
    def get_rendered(cls, item, render_style):
        """Return the partially-rendered representation (id + markup)."""
        data = {
            "id": item._fullname,
            "content": item.render(style=render_style),
        }
        return data
class ThingJsonTemplate(JsonTemplate):
    """Instance-based JSON renderer for Things; subclasses customize the
    output by extending _data_attrs_ and overriding thing_attr.
    """

    # Maps JSON key -> thing attribute name (or pseudo-attribute handled
    # by thing_attr below).
    _data_attrs_ = dict(
        created="created",
        created_utc="created_utc",
        id="_id36",
        name="_fullname",
    )

    @classmethod
    def data_attrs(cls, **kw):
        """Return a copy of _data_attrs_ extended with kw (for subclasses)."""
        d = cls._data_attrs_.copy()
        d.update(kw)
        return d

    def kind(self, wrapped):
        """
        Returns a string literal which identifies the type of this
        thing. For subclasses of Thing, it will be 't's + kind_id.
        """
        _thing = wrapped.lookups[0] if isinstance(wrapped, Wrapped) else wrapped
        return make_typename(_thing.__class__)

    def rendered_data(self, thing):
        """
        Called only when get_api_type is non-None (i.e., a JSON
        request has been made with partial rendering of the object to
        be returned)

        Canonical Thing data representation for JS, which is currently
        a dictionary of three elements (translated into a JS Object
        when sent out). The elements are:
         * id : Thing _fullname of thing.
         * content : rendered representation of the thing by
           calling render on it using the style of get_api_subtype().
        """
        res = dict(id=thing._fullname, content=thing.render(style=get_api_subtype()))
        return res

    def raw_data(self, thing):
        """
        Complement to rendered_data. Called when a dictionary of
        thing data attributes is to be sent across the wire.
        """
        attrs = dict(self._data_attrs_)
        # Optional attrs are only included when present on the thing.
        if hasattr(self, "_optional_data_attrs"):
            for attr, attrv in self._optional_data_attrs.iteritems():
                if hasattr(thing, attr):
                    attrs[attr] = attrv
        return dict((k, self.thing_attr(thing, v)) for k, v in attrs.iteritems())

    def thing_attr(self, thing, attr):
        """
        For the benefit of subclasses, to lookup attributes which may
        require more work than a simple getattr (for example, 'author'
        which has to be gotten from the author_id attribute on most
        things).
        """
        if attr == "author":
            # Deleted authors are masked.
            if thing.author._deleted:
                return "[deleted]"
            return thing.author.name
        if attr == "created":
            return time.mktime(thing._date.timetuple())
        elif attr == "created_utc":
            # Epoch seconds of UTC wall-clock time; the time.timezone term
            # compensates for mktime assuming localtime.
            return (
                time.mktime(thing._date.astimezone(pytz.UTC).timetuple())
                - time.timezone
            )
        elif attr == "child":
            child = getattr(thing, "child", None)
            if child:
                return child.render()
            else:
                return ""
        if attr == "distinguished":
            # "no" is normalized to None for the API.
            distinguished = getattr(thing, attr, "no")
            if distinguished == "no":
                return None
            return distinguished
        return getattr(thing, attr, None)

    def data(self, thing):
        # Partial-render requests get markup; everything else gets raw attrs.
        if get_api_subtype():
            return self.rendered_data(thing)
        else:
            return self.raw_data(thing)

    def render(self, thing=None, action=None, *a, **kw):
        """Return the {"kind": ..., "data": ...} ObjectTemplate envelope."""
        return ObjectTemplate(dict(kind=self.kind(thing), data=self.data(thing)))
class SubredditJsonTemplate(ThingJsonTemplate):
    """JSON template for subreddits; all attributes outside _public_attrs
    are nulled when the viewer cannot view the subreddit.
    """

    _data_attrs_ = ThingJsonTemplate.data_attrs(
        accounts_active="accounts_active_count",
        banner_img="banner_img",
        banner_size="banner_size",
        collapse_deleted_comments="collapse_deleted_comments",
        comment_score_hide_mins="comment_score_hide_mins",
        community_rules="community_rules",
        description="description",
        description_html="description_html",
        display_name="name",
        header_img="header",
        header_size="header_size",
        header_title="header_title",
        icon_img="icon_img",
        icon_size="icon_size",
        # key_color="key_color",
        lang="lang",
        over18="over_18",
        public_description="public_description",
        public_description_html="public_description_html",
        public_traffic="public_traffic",
        # related_subreddits="related_subreddits",
        hide_ads="hide_ads",
        quarantine="quarantine",
        show_media="show_media",
        show_media_preview="show_media_preview",
        submission_type="link_type",
        submit_link_label="submit_link_label",
        submit_text_label="submit_text_label",
        submit_text="submit_text",
        submit_text_html="submit_text_html",
        subreddit_type="type",
        subscribers="_ups",
        suggested_comment_sort="suggested_comment_sort",
        title="title",
        url="path",
        user_is_banned="is_banned",
        user_is_muted="is_muted",
        user_is_contributor="is_contributor",
        user_is_moderator="is_moderator",
        user_is_subscriber="is_subscriber",
        user_sr_theme_enabled="user_sr_style_enabled",
        wiki_enabled="wiki_enabled",
    )

    # subreddit *attributes* (right side of the equals)
    # that are accessible even if the user can't view the subreddit
    _public_attrs = {
        "_id36",
        # subreddit ID with prefix
        "_fullname",
        # Creation date
        "created",
        "created_utc",
        # Canonically-cased subreddit name
        "name",
        # Canonical subreddit URL, relative to reddit.com
        "path",
        # Text shown on the access denied page
        "public_description",
        "public_description_html",
        # Title shown in search
        "title",
        # Type of subreddit, so people know that it's private
        "type",
    }

    def raw_data(self, thing):
        """Extend the standard attrs with feature-flagged and mod fields."""
        data = ThingJsonTemplate.raw_data(self, thing)
        # remove this when feature is enabled and use _data_attrs instead
        if feature.is_enabled("mobile_settings"):
            data["key_color"] = self.thing_attr(thing, "key_color")
        if feature.is_enabled("related_subreddits"):
            data["related_subreddits"] = self.thing_attr(thing, "related_subreddits")
        permissions = getattr(thing, "mod_permissions", None)
        if permissions:
            # Collapse the {perm: bool} map to a list of granted permissions.
            permissions = [perm for perm, has in permissions.iteritems() if has]
            data["mod_permissions"] = permissions
        return data

    def thing_attr(self, thing, attr):
        # Hide everything non-public from viewers without access.
        if attr not in self._public_attrs and not thing.can_view(c.user):
            return None
        # Subscriber counts may be intentionally hidden.
        if attr == "_ups" and (thing.hide_subscribers or thing.hide_num_users_info):
            return 0
        elif attr == "description_html":
            return safemarkdown(thing.description)
        elif attr == "public_description_html":
            return safemarkdown(thing.public_description)
        # user_is_* relations are only meaningful for logged-in users.
        elif attr == "is_moderator":
            if c.user_is_loggedin:
                return thing.moderator
            return None
        elif attr == "is_contributor":
            if c.user_is_loggedin:
                return thing.contributor
            return None
        elif attr == "is_subscriber":
            if c.user_is_loggedin:
                return thing.subscriber
            return None
        elif attr == "is_banned":
            if c.user_is_loggedin:
                return thing.banned
            return None
        elif attr == "is_muted":
            if c.user_is_loggedin:
                return thing.muted
            return None
        elif attr == "submit_text_html":
            return safemarkdown(thing.submit_text)
        elif attr == "user_sr_style_enabled":
            if c.user_is_loggedin:
                return c.user.use_subreddit_style(thing)
            else:
                # Logged-out users always get the subreddit style.
                return True
        elif attr == "wiki_enabled":
            is_admin_or_mod = c.user_is_loggedin and (
                c.user_is_admin or thing.is_moderator_with_perms(c.user, "wiki")
            )
            return thing.wikimode == "anyone" or (
                thing.wikimode == "modonly" and is_admin_or_mod
            )
        else:
            return ThingJsonTemplate.thing_attr(self, thing, attr)
class LabeledMultiDescriptionJsonTemplate(ThingJsonTemplate):
    """JSON template for a multireddit's description blob."""

    _data_attrs_ = dict(
        body_html="description_html",
        body_md="description_md",
    )

    def kind(self, wrapped):
        return "LabeledMultiDescription"

    def thing_attr(self, thing, attr):
        if attr != "description_html":
            return ThingJsonTemplate.thing_attr(self, thing, attr)
        # if safemarkdown is passed a falsy string it returns None :/
        return safemarkdown(thing.description_md) or ""
class LabeledMultiJsonTemplate(LabeledMultiDescriptionJsonTemplate):
    """JSON template for a labeled multireddit (custom feed)."""

    _data_attrs_ = ThingJsonTemplate.data_attrs(
        can_edit="can_edit",
        copied_from="copied_from",
        description_html="description_html",
        description_md="description_md",
        display_name="display_name",
        key_color="key_color",
        icon_name="icon_id",
        icon_url="icon_url",
        name="name",
        path="path",
        subreddits="srs",
        visibility="visibility",
        weighting_scheme="weighting_scheme",
    )
    # Multis are addressed by path; the bare id is not exposed.
    del _data_attrs_["id"]

    def __init__(self, expand_srs=False):
        # expand_srs: when True, embed trimmed subreddit data dicts in "srs".
        super(LabeledMultiJsonTemplate, self).__init__()
        self.expand_srs = expand_srs

    def kind(self, wrapped):
        return "LabeledMulti"

    @classmethod
    def sr_props(cls, thing, srs, expand=False):
        """Return the multi's per-subreddit property dicts, optionally
        expanded with viewer-trimmed subreddit data."""
        sr_props = dict(thing.sr_props)
        if expand:
            sr_dicts = get_trimmed_sr_dicts(srs, c.user)
            for sr in srs:
                sr_props[sr._id]["data"] = sr_dicts[sr._id]
        return [dict(sr_props[sr._id], name=sr.name) for sr in srs]

    def thing_attr(self, thing, attr):
        if attr == "srs":
            return self.sr_props(thing, thing.srs, expand=self.expand_srs)
        elif attr == "can_edit":
            return c.user_is_loggedin and thing.can_edit(c.user)
        elif attr == "copied_from":
            # The source multi is only revealed to users who can edit this one.
            if thing.can_edit(c.user):
                return thing.copied_from
            else:
                return None
        elif attr == "display_name":
            return thing.display_name or thing.name
        else:
            super_ = super(LabeledMultiJsonTemplate, self)
            return super_.thing_attr(thing, attr)
def get_trimmed_sr_dicts(srs, user):
    """Return {sr._id: data dict} containing a trimmed, viewer-safe subset
    of subreddit attributes; fields that require view access are None for
    subreddits the user cannot view.
    """
    if c.user_is_loggedin:
        sr_user_relations = Subreddit.get_sr_user_relations(user, srs)
    else:
        # backwards compatibility: for loggedout users don't return boolean,
        # instead return None for all relations.
        NO_SR_USER_RELATIONS = SubredditUserRelations(
            subscriber=None,
            moderator=None,
            contributor=None,
            banned=None,
            muted=None,
        )
        sr_user_relations = defaultdict(lambda: NO_SR_USER_RELATIONS)
    ret = {}
    for sr in srs:
        relations = sr_user_relations[sr._id]
        can_view = sr.can_view(user)
        subscribers = sr._ups if not sr.hide_subscribers else 0
        data = dict(
            name=sr._fullname,
            display_name=sr.name,
            url=sr.path,
            banner_img=sr.banner_img if can_view else None,
            banner_size=sr.banner_size if can_view else None,
            header_img=sr.header if can_view else None,
            header_size=sr.header_size if can_view else None,
            icon_img=sr.icon_img if can_view else None,
            icon_size=sr.icon_size if can_view else None,
            key_color=sr.key_color if can_view else None,
            subscribers=subscribers if can_view else None,
            user_is_banned=relations.banned if can_view else None,
            user_is_muted=relations.muted if can_view else None,
            user_is_contributor=relations.contributor if can_view else None,
            user_is_moderator=relations.moderator if can_view else None,
            user_is_subscriber=relations.subscriber if can_view else None,
        )
        # NOTE(review): redundant -- key_color is already set to the same
        # value unconditionally above; presumably kept for the feature flag.
        if feature.is_enabled("mobile_settings"):
            data["key_color"] = sr.key_color if can_view else None
        ret[sr._id] = data
    return ret
class IdentityJsonTemplate(ThingJsonTemplate):
    """JSON template for a user's identity; private fields are only added
    when a logged-in user requests their own account.
    """

    _data_attrs_ = ThingJsonTemplate.data_attrs(
        comment_karma="comment_karma",
        has_verified_email="email_verified",
        is_gold="gold",
        is_mod="is_mod",
        link_karma="link_karma",
        name="name",
        hide_from_robots="pref_hide_from_robots",
    )
    # Extra attrs exposed only to the account owner.
    _private_data_attrs = dict(
        inbox_count="inbox_count",
        over_18="pref_over_18",
        gold_creddits="gold_creddits",
        gold_expiration="gold_expiration",
        is_suspended="in_timeout",
        suspension_expiration_utc="timeout_expiration_utc",
        features="features",
    )
    # Attrs still returned for non-viewable (permanently suspended) users.
    _public_attrs = {
        "name",
        "is_suspended",
    }

    def raw_data(self, thing):
        """Build the identity dict, adding private attrs for the account
        owner and masking everything but _public_attrs for users who are
        permanently suspended."""
        viewable = True
        attrs = self._data_attrs_.copy()
        if c.user_is_loggedin and thing._id == c.user._id:
            attrs.update(self._private_data_attrs)
        # Add a public indication when a user is permanently in timeout.
        elif thing.in_timeout and thing.timeout_expiration is None:
            attrs.update({"is_suspended": "in_timeout"})
            viewable = False
        if thing.pref_hide_from_robots:
            response.headers["X-Robots-Tag"] = "noindex, nofollow"
        data = {
            k: self.thing_attr(thing, v)
            for k, v in attrs.iteritems()
            if viewable or k in self._public_attrs
        }
        try:
            self.add_message_data(data, thing)
        except OAuth2Scope.InsufficientScopeError:
            # No access to privatemessages, but the rest of
            # the identity information is sufficient.
            pass

        # Add as private data attributes states about this user. This is used
        # for feature flagging by user state on first-party API clients.
        if c.user_is_loggedin and thing._id == c.user._id:
            data["is_employee"] = thing.employee
            data["in_beta"] = thing.pref_beta
        return data

    @extra_oauth2_scope("privatemessages")
    def add_message_data(self, data, thing):
        """Add mail-state flags; requires the privatemessages OAuth scope."""
        if c.user_is_loggedin and thing._id == c.user._id:
            data["has_mail"] = self.thing_attr(thing, "has_mail")
            data["has_mod_mail"] = self.thing_attr(thing, "has_mod_mail")

    def thing_attr(self, thing, attr):
        # Imported here, presumably to avoid a circular import at module
        # load time.
        from r2.lib.template_helpers import display_comment_karma, display_link_karma
        if attr == "is_mod":
            t = thing.lookups[0] if isinstance(thing, Wrapped) else thing
            return t.is_moderator_somewhere
        elif attr == "has_mail":
            return bool(c.have_messages)
        elif attr == "has_mod_mail":
            return bool(c.have_mod_messages)
        elif attr == "comment_karma":
            return display_comment_karma(thing.comment_karma)
        elif attr == "link_karma":
            return display_link_karma(thing.link_karma)
        elif attr == "gold_expiration":
            if not thing.gold:
                return None
            return calendar.timegm(thing.gold_expiration.utctimetuple())
        elif attr == "timeout_expiration_utc":
            expiration_date = thing.timeout_expiration
            if not expiration_date:
                return None
            return calendar.timegm(expiration_date.utctimetuple())
        elif attr == "features":
            return feature.all_enabled(c.user)
        return ThingJsonTemplate.thing_attr(self, thing, attr)
class AccountJsonTemplate(IdentityJsonTemplate):
    """Identity template plus viewer-relative fields (friendship, modhash)."""

    _data_attrs_ = IdentityJsonTemplate.data_attrs(is_friend="is_friend")
    _private_data_attrs = dict(
        modhash="modhash", **IdentityJsonTemplate._private_data_attrs
    )

    def thing_attr(self, thing, attr):
        if attr == "is_friend":
            # Only meaningful when a viewer is logged in.
            return c.user_is_loggedin and thing._id in c.user.friends
        if attr == "modhash":
            return c.modhash
        return IdentityJsonTemplate.thing_attr(self, thing, attr)
class PrefsJsonTemplate(ThingJsonTemplate):
    """Expose account preferences, optionally restricted to a field subset."""

    # Public names drop the "pref_" prefix used on the Account model.
    _data_attrs_ = dict((k[len("pref_"):], k) for k in Account._preference_attrs)

    def __init__(self, fields=None):
        if fields is None:
            return
        # Narrow the class-level map to the requested fields only;
        # an unknown field name raises KeyError.
        selected = {}
        for field in fields:
            if field not in self._data_attrs_:
                raise KeyError(field)
            selected[field] = self._data_attrs_[field]
        self._data_attrs_ = selected

    def thing_attr(self, thing, attr):
        if attr == "pref_clickgadget":
            return bool(thing.pref_clickgadget)
        return ThingJsonTemplate.thing_attr(self, thing, attr)
def get_mod_attributes(item):
    """Moderation fields: report counts and ban/approve attribution.

    Real values are exposed only to a logged-in viewer who can moderate
    the item; everyone else receives explicit nulls.
    """
    if not (c.user_is_loggedin and item.can_ban):
        return {
            "num_reports": None,
            "report_reasons": None,
            "approved_by": None,
            "banned_by": None,
        }

    data = {
        "num_reports": item.reported,
        "report_reasons": Report.get_reasons(item),
    }
    ban_info = getattr(item, "ban_info", {})
    if item._spam:
        data["approved_by"] = None
        # Fall back to True when no specific banner was recorded.
        if ban_info.get("moderator_banned"):
            data["banned_by"] = ban_info.get("banner")
        else:
            data["banned_by"] = True
    else:
        data["approved_by"] = ban_info.get("unbanner")
        data["banned_by"] = None
    return data
def get_author_attributes(item):
    """Author name plus subreddit-specific flair; deleted authors are masked."""
    author = item.author
    if author._deleted:
        return {
            "author": "[deleted]",
            "author_flair_text": None,
            "author_flair_css_class": None,
        }

    sr_id = item.subreddit._id
    flair_text = None
    flair_css = None
    if author.flair_enabled_in_sr(sr_id):
        # Flair is stored on the account under per-subreddit attribute names.
        flair_text = getattr(author, "flair_%s_text" % sr_id, None)
        flair_css = getattr(author, "flair_%s_css_class" % sr_id, None)
    return {
        "author": author.name,
        "author_flair_text": flair_text,
        "author_flair_css_class": flair_css,
    }
def get_distinguished_attributes(item):
    """Map the "no" sentinel to None for the distinguished field."""
    level = getattr(item, "distinguished", "no")
    return {"distinguished": None if level == "no" else level}
def get_edited_attributes(item):
    """Edit marker: a plain boolean, or the edit time as an epoch value."""
    edited = item.editted
    if isinstance(edited, bool):
        return {"edited": edited}
    # Non-bool means a datetime: convert via UTC to epoch seconds.
    utc_timetuple = edited.astimezone(pytz.UTC).timetuple()
    return {"edited": time.mktime(utc_timetuple) - time.timezone}
def get_report_reason_attributes(item):
    """Report lists; suppressed while the viewer is in timeout."""
    if c.user_is_loggedin and c.user.in_timeout:
        return {"user_reports": [], "mod_reports": []}
    return {
        "user_reports": item.user_reports,
        "mod_reports": item.mod_reports,
    }
def get_removal_reason_attributes(item):
    """Expose "legal" as the removal reason for admin takedowns."""
    taken_down = getattr(item, "admin_takedown", None)
    return {"removal_reason": "legal" if taken_down else None}
def get_media_embed_attributes(item):
    """Build the media_embed / secure_media_embed payloads for a link.

    Returns empty dicts when the corresponding media object is missing,
    is a bare string, or yields no embed. The plain and secure variants
    previously duplicated the same logic; it now lives in one helper.
    """
    from r2.lib.media import get_media_embed

    def embed_payload(media_object):
        # Media objects may be bare URLs (strings); only structured
        # objects can be turned into an embed.
        if not media_object or isinstance(media_object, basestring):
            return {}
        media_embed = get_media_embed(media_object)
        if not media_embed:
            return {}
        return {
            "scrolling": media_embed.scrolling,
            "width": media_embed.width,
            "height": media_embed.height,
            "content": media_embed.content,
        }

    return {
        "media_embed": embed_payload(item.media_object),
        "secure_media_embed": embed_payload(item.secure_media_object),
    }
def get_selftext_attributes(item):
    """Selftext fields, masked with "[removed]" for expunged items."""
    if item.expunged:
        return {
            "selftext": "[removed]",
            "selftext_html": safemarkdown(_("[removed]")),
        }
    return {
        "selftext": item.selftext,
        "selftext_html": safemarkdown(item.selftext),
    }
def generate_image_links(preview_object, file_type=None, censor_nsfw=False):
    """Build the preview payload: the source image plus downscaled renditions.

    Renditions are generated only at the standard widths that do not
    exceed the source width; aspect ratios above PREVIEW_MAX_RATIO are
    capped (the resizer crops to that ratio).
    """
    PREVIEW_RESOLUTIONS = (108, 216, 320, 640, 960, 1080)
    PREVIEW_MAX_RATIO = 2

    source_width = preview_object["width"]
    source_height = preview_object["height"]
    # Cap the height/width ratio used for the rendition heights.
    ratio = min(float(source_height) / source_width, float(PREVIEW_MAX_RATIO))

    resize = g.image_resizing_provider.resize_image
    resolutions = []
    for width in PREVIEW_RESOLUTIONS:
        if width > source_width:
            continue  # never upscale past the source
        resolutions.append(
            {
                "url": resize(
                    preview_object,
                    width,
                    file_type=file_type,
                    censor_nsfw=censor_nsfw,
                    max_ratio=PREVIEW_MAX_RATIO,
                ),
                "width": width,
                "height": int(width * ratio),
            }
        )

    source_url = resize(preview_object, file_type=file_type, censor_nsfw=censor_nsfw)
    return {
        "source": {
            "url": source_url,
            "width": source_width,
            "height": source_height,
        },
        "resolutions": resolutions,
    }
class LinkJsonTemplate(ThingTemplate):
    """JSON serializer for link (post) things."""

    @classmethod
    def get_json(cls, item):
        """Build the full API dict for a link.

        Combines the base thing fields with mod, author, flair, media,
        report, and (on permalink pages only) preview-image data.
        """
        data = ThingTemplate.get_json(item)
        data.update(get_mod_attributes(item))
        data.update(get_author_attributes(item))
        data.update(get_distinguished_attributes(item))
        data.update(get_edited_attributes(item))
        data.update(get_media_embed_attributes(item))
        data.update(get_report_reason_attributes(item))
        data.update(get_removal_reason_attributes(item))
        data.update(get_selftext_attributes(item))
        data.update(
            {
                "archived": not item.votable,
                "visited": item.visited,
                "clicked": False,
                "contest_mode": item.contest_mode,
                "domain": item.domain,
                # "downs" is always 0; "ups" mirrors the score below.
                "downs": 0,
                "gilded": item.gildings,
                "hidden": item.hidden,
                "hide_score": item.hide_score,
                "is_self": item.is_self,
                "likes": item.likes,
                "link_flair_css_class": item.flair_css_class,
                "link_flair_text": item.flair_text,
                "locked": item.locked,
                "media": item.media_object,
                "secure_media": item.secure_media_object,
                "num_comments": item.num_comments,
                "over_18": item.over_18,
                "quarantine": item.quarantine,
                "permalink": item.permalink,
                "saved": item.saved,
                "score": item.score,
                "stickied": item.stickied,
                "subreddit": item.subreddit.name,
                "subreddit_id": item.subreddit._fullname,
                "suggested_sort": item.sort_if_suggested(sr=item.subreddit),
                "thumbnail": item.thumbnail,
                "title": item.title,
                "ups": item.score,
                "url": item.url,
            }
        )
        # Optional fields only present on some wrapped items.
        if hasattr(item, "action_type"):
            data["action_type"] = item.action_type
        if hasattr(item, "sr_detail"):
            data["sr_detail"] = item.sr_detail
        if hasattr(item, "show_media"):
            data["show_media"] = item.show_media
        # The vote ratio and preview images are only served on the
        # comments/permalink page.
        if c.permalink_page:
            data["upvote_ratio"] = item.upvote_ratio
            preview_object = item.preview_image
            if preview_object:
                preview_is_gif = preview_object.get("url", "").endswith(".gif")
                data["preview"] = {}
                data["post_hint"] = item.post_hint
                # For gifs, the default preview should be a static image, with the
                # full gif as a variant
                if preview_is_gif:
                    images = generate_image_links(preview_object, file_type="jpg")
                else:
                    images = generate_image_links(preview_object)
                images["id"] = preview_object["uid"]
                images["variants"] = {}
                if item.nsfw:
                    # Censored rendition for NSFW posts.
                    images["variants"]["nsfw"] = generate_image_links(
                        preview_object, censor_nsfw=True, file_type="png"
                    )
                if preview_is_gif:
                    images["variants"]["gif"] = generate_image_links(preview_object)
                    images["variants"]["mp4"] = generate_image_links(
                        preview_object, file_type="mp4"
                    )
                data["preview"]["images"] = [images]
        return data

    @classmethod
    def get_rendered(cls, item, render_style):
        """Rendered representation plus the owning subreddit's fullname."""
        data = ThingTemplate.get_rendered(item, render_style)
        data.update(
            {
                "sr": item.subreddit._fullname,
            }
        )
        return data
class PromotedLinkJsonTemplate(LinkJsonTemplate):
    """Link template for promoted (ad) links; hides subreddit attribution."""

    @classmethod
    def get_json(cls, item):
        data = LinkJsonTemplate.get_json(item)
        ad_fields = {
            "promoted": item.promoted,
            "imp_pixel": getattr(item, "imp_pixel", None),
            "href_url": item.href_url,
            "adserver_imp_pixel": getattr(item, "adserver_imp_pixel", None),
            "adserver_click_url": getattr(item, "adserver_click_url", None),
            "mobile_ad_url": item.mobile_ad_url,
            "disable_comments": item.disable_comments,
            "third_party_tracking": item.third_party_tracking,
            "third_party_tracking_2": item.third_party_tracking_2,
        }
        data.update(ad_fields)
        # Promoted links are not attributed to a subreddit.
        del data["subreddit"]
        del data["subreddit_id"]
        return data
class CommentJsonTemplate(ThingTemplate):
    """JSON serializer for comment things."""

    @classmethod
    def get_parent_id(cls, item):
        """Return the parent's fullname: the parent comment when there is
        one, otherwise the comment's link."""
        from r2.models import Comment, Link
        if getattr(item, "parent_id", None):
            return make_fullname(Comment, item.parent_id)
        else:
            return make_fullname(Link, item.link_id)

    @classmethod
    def get_link_name(cls, item):
        """Return the fullname of the link this comment belongs to."""
        from r2.models import Link
        return make_fullname(Link, item.link_id)

    @classmethod
    def render_child(cls, item):
        """Render the reply subtree, or return "" when there is none."""
        child = getattr(item, "child", None)
        if child:
            return child.render()
        else:
            return ""

    @classmethod
    def get_json(cls, item):
        """Build the full API dict for a comment."""
        from r2.models import Link
        data = ThingTemplate.get_json(item)
        data.update(get_mod_attributes(item))
        data.update(get_author_attributes(item))
        data.update(get_distinguished_attributes(item))
        data.update(get_edited_attributes(item))
        data.update(get_report_reason_attributes(item))
        data.update(get_removal_reason_attributes(item))
        data.update(
            {
                "archived": not item.votable,
                "body": item.body,
                "body_html": spaceCompress(safemarkdown(item.body)),
                "controversiality": 1 if item.is_controversial else 0,
                # "downs" is always 0; "ups" mirrors the score.
                "downs": 0,
                "gilded": item.gildings,
                "likes": item.likes,
                "link_id": cls.get_link_name(item),
                "saved": item.saved,
                "score": item.score,
                "score_hidden": item.score_hidden,
                "subreddit": item.subreddit.name,
                "subreddit_id": item.subreddit._fullname,
                "ups": item.score,
                "replies": cls.render_child(item),
                "parent_id": cls.get_parent_id(item),
            }
        )
        if feature.is_enabled("sticky_comments"):
            data["stickied"] = item.link.sticky_comment_id == item._id
        if hasattr(item, "action_type"):
            data["action_type"] = item.action_type
        # Profile pages show extra context about the comment's link.
        if c.profilepage:
            data["quarantine"] = item.subreddit.quarantine
            data["over_18"] = item.link.is_nsfw
            data["link_title"] = item.link.title
            data["link_author"] = item.link_author.name
            if item.link.is_self:
                link_url = item.link.make_permalink(item.subreddit, force_domain=True)
            else:
                link_url = item.link.url
            data["link_url"] = link_url
        return data

    @classmethod
    def get_rendered(cls, item, render_style):
        """Rendered representation with threading fields."""
        data = ThingTemplate.get_rendered(item, render_style)
        data.update(
            {
                "replies": cls.render_child(item),
                "contentText": item.body,
                "contentHTML": spaceCompress(safemarkdown(item.body)),
                "link": cls.get_link_name(item),
                "parent": cls.get_parent_id(item),
            }
        )
        return data
class MoreCommentJsonTemplate(ThingTemplate):
    """Serializer for collapsed "load more comments" stubs."""

    @classmethod
    def get_kind(cls, item):
        return "more"

    @classmethod
    def get_json(cls, item):
        return {
            "children": [to36(comment_id) for comment_id in item.children],
            "count": item.count,
            "id": item._id36,
            "name": item._fullname,
            "parent_id": CommentJsonTemplate.get_parent_id(item),
        }

    @classmethod
    def get_rendered(cls, item, render_style):
        data = ThingTemplate.get_rendered(item, render_style)
        # Stubs have no content of their own, only threading pointers.
        data.update(
            {
                "replies": "",
                "contentText": "",
                "contentHTML": "",
                "link": CommentJsonTemplate.get_link_name(item),
                "parent": CommentJsonTemplate.get_parent_id(item),
            }
        )
        return data
class MessageJsonTemplate(ThingJsonTemplate):
    """JSON serializer for private messages (including comment replies)."""

    _data_attrs_ = ThingJsonTemplate.data_attrs(
        author="author",
        body="body",
        body_html="body_html",
        context="context",
        created="created",
        dest="dest",
        distinguished="distinguished",
        first_message="first_message",
        first_message_name="first_message_name",
        new="new",
        parent_id="parent_id",
        replies="child",
        subject="subject",
        subreddit="subreddit",
        was_comment="was_comment",
    )

    def thing_attr(self, thing, attr):
        from r2.models import Comment, Link, Message
        if attr == "was_comment":
            return thing.was_comment
        elif attr == "context":
            # Comment replies link back to the comment with some context.
            return "" if not thing.was_comment else thing.permalink + "?context=3"
        elif attr == "dest":
            # Either a user name or "#subreddit" for modmail.
            if thing.to_id:
                return thing.to.name
            else:
                return "#" + thing.subreddit.name
        elif attr == "subreddit":
            if thing.sr_id:
                return thing.subreddit.name
            return None
        elif attr == "body_html":
            return safemarkdown(thing.body)
        elif attr == "author" and getattr(thing, "hide_author", False):
            return None
        elif attr == "parent_id":
            # Comment replies parent to a comment or their link; plain
            # messages parent to the previous message. With no parent at
            # all this falls through to the generic lookup below.
            if thing.was_comment:
                if getattr(thing, "parent_id", None):
                    return make_fullname(Comment, thing.parent_id)
                else:
                    return make_fullname(Link, thing.link_id)
            elif getattr(thing, "parent_id", None):
                return make_fullname(Message, thing.parent_id)
        elif attr == "first_message_name":
            if getattr(thing, "first_message", None):
                return make_fullname(Message, thing.first_message)
        return ThingJsonTemplate.thing_attr(self, thing, attr)

    def raw_data(self, thing):
        d = ThingJsonTemplate.raw_data(self, thing)
        # Comment replies additionally expose the link title and vote state.
        if thing.was_comment:
            d["link_title"] = thing.link_title
            d["likes"] = thing.likes
        return d

    def rendered_data(self, wrapped):
        from r2.models import Message
        parent_id = wrapped.parent_id
        if parent_id:
            parent_id = make_fullname(Message, parent_id)
        d = ThingJsonTemplate.rendered_data(self, wrapped)
        d["parent"] = parent_id
        d["contentText"] = self.thing_attr(wrapped, "body")
        d["contentHTML"] = self.thing_attr(wrapped, "body_html")
        return d
class RedditJsonTemplate(JsonTemplate):
    """Render a page's content pane, or an empty object when absent."""

    def render(self, thing=None, *a, **kw):
        rendered = thing.content().render() if thing else {}
        return ObjectTemplate(rendered)
class PanestackJsonTemplate(JsonTemplate):
    """Render a stack of panes, unwrapping a single-element result."""

    def render(self, thing=None, *a, **kw):
        panes = thing.stack if thing else []
        rendered = [pane.render() for pane in panes if pane]
        rendered = [r for r in rendered if r]
        if not rendered:
            return {}
        return ObjectTemplate(rendered[0] if len(rendered) == 1 else rendered)
class NullJsonTemplate(JsonTemplate):
    """Template that renders nothing and resolves every def to itself."""

    def render(self, thing=None, *a, **kw):
        # Always the empty string, regardless of input.
        return ""

    def get_def(self, name):
        return self
class ListingJsonTemplate(ThingJsonTemplate):
    """Wrap rendered child things in a paginated Listing envelope."""

    _data_attrs_ = dict(
        after="after",
        before="before",
        children="things",
        modhash="modhash",
    )

    def thing_attr(self, thing, attr):
        if attr == "modhash":
            return c.modhash
        if attr == "things":
            rendered = []
            for child in thing.things:
                # Reset the childlisting flag before rendering each child.
                child.childlisting = False
                rendered.append(child.render())
            return rendered
        return ThingJsonTemplate.thing_attr(self, thing, attr)

    def rendered_data(self, thing):
        return self.thing_attr(thing, "things")

    def kind(self, wrapped):
        return "Listing"
class SearchListingJsonTemplate(ListingJsonTemplate):
    """Listing that also reports subreddit facet counts for searches."""

    def raw_data(self, thing):
        data = ThingJsonTemplate.raw_data(self, thing)
        facets = {}
        if thing.subreddit_facets:
            facets["subreddits"] = [
                {"name": sr.name, "url": sr.path, "count": count}
                for sr, count in thing.subreddit_facets
            ]
        data["facets"] = facets
        return data
class UserListingJsonTemplate(ListingJsonTemplate):
    """Listing that degrades to a bare UserList when not paginated."""

    def raw_data(self, thing):
        if thing.nextprev:
            return ListingJsonTemplate.raw_data(self, thing)
        return {"children": self.rendered_data(thing)}

    def kind(self, wrapped):
        return "Listing" if wrapped.nextprev else "UserList"
class UserListJsonTemplate(ThingJsonTemplate):
    """Envelope for a simple list of rendered user rows."""

    _data_attrs_ = dict(
        children="users",
    )

    def thing_attr(self, thing, attr):
        if attr == "users":
            return [row.render() for row in thing.user_rows]
        return ThingJsonTemplate.thing_attr(self, thing, attr)

    def rendered_data(self, thing):
        return self.thing_attr(thing, "users")

    def kind(self, wrapped):
        return "UserList"
class UserTableItemJsonTemplate(ThingJsonTemplate):
    """Row template for user tables; attributes come from the row's user."""

    _data_attrs_ = dict(
        id="_fullname",
        name="name",
    )

    def thing_attr(self, thing, attr):
        # Delegate to the wrapped user object, not the row itself.
        return ThingJsonTemplate.thing_attr(self, thing.user, attr)

    def render(self, thing, *a, **kw):
        return ObjectTemplate(self.data(thing))
class RelTableItemJsonTemplate(UserTableItemJsonTemplate):
    """User-table row backed by a relation object (thing.rel).

    Attribute names may be dotted ("rel.note") to address the relation
    instead of the user.
    """

    _data_attrs_ = UserTableItemJsonTemplate.data_attrs(
        date="date",
    )

    def thing_attr(self, thing, attr):
        # Split a possibly-dotted name: "rel.note" -> ("rel", ".", "note").
        rel_attr, splitter, attr = attr.partition(".")
        if attr == "note":
            # return empty string instead of None for missing note
            return ThingJsonTemplate.thing_attr(self, thing.rel, attr) or ""
        elif attr:
            # Any other dotted name resolves on the relation object.
            return ThingJsonTemplate.thing_attr(self, thing.rel, attr)
        elif rel_attr == "date":
            # make date UTC
            date = self.thing_attr(thing, "rel._date")
            date = time.mktime(date.astimezone(pytz.UTC).timetuple())
            return date - time.timezone
        else:
            # Undotted names fall through to the user-based lookup.
            return UserTableItemJsonTemplate.thing_attr(self, thing, rel_attr)
class FriendTableItemJsonTemplate(RelTableItemJsonTemplate):
    """Friend-table row; gold members also see their private friend note."""

    def inject_data(self, thing, d):
        # Friend notes are a gold-only feature.
        if c.user.gold and thing.type == "friend":
            d["note"] = self.thing_attr(thing, "rel.note")
        return d

    def rendered_data(self, thing):
        base = RelTableItemJsonTemplate.rendered_data(self, thing)
        return self.inject_data(thing, base)

    def raw_data(self, thing):
        base = RelTableItemJsonTemplate.raw_data(self, thing)
        return self.inject_data(thing, base)
class BannedTableItemJsonTemplate(RelTableItemJsonTemplate):
    """Banned-user row; always exposes the ban note."""

    _data_attrs_ = RelTableItemJsonTemplate.data_attrs(note="rel.note")
class MutedTableItemJsonTemplate(RelTableItemJsonTemplate):
    """Muted-user row; identical to the base relation row."""
class InvitedModTableItemJsonTemplate(RelTableItemJsonTemplate):
    """Row for pending moderator invites, including granted permissions."""

    _data_attrs_ = RelTableItemJsonTemplate.data_attrs(
        mod_permissions="permissions",
    )

    def thing_attr(self, thing, attr):
        if attr == "permissions":
            # Keep only the permissions that are actually granted.
            return [name for name, granted in thing.permissions.items() if granted]
        return RelTableItemJsonTemplate.thing_attr(self, thing, attr)
class OrganicListingJsonTemplate(ListingJsonTemplate):
    """Listing variant for the organic (spotlight) box."""

    def kind(self, wrapped):
        return "OrganicListing"
class TrafficJsonTemplate(JsonTemplate):
    """Serialize traffic statistics per hour/day/month interval."""

    INTERVALS = ("hour", "day", "month")

    def render(self, thing, *a, **kw):
        result = {}
        for interval in self.INTERVALS:
            # Column definitions are only needed for charting; pass none.
            rows = thing.get_data_for_interval(interval, [])
            # Flatten each row into (unix_timestamp, *values).
            result[interval] = [
                (calendar.timegm(date.timetuple()),) + values
                for date, values in rows
            ]
        return ObjectTemplate(result)
class WikiJsonTemplate(JsonTemplate):
    """Render a wiki page's content pane.

    Falls back to the page listing when the thing has no content(), and
    yields an empty object for a missing thing.
    """

    def render(self, thing, *a, **kw):
        # Bug fix: the original evaluated thing.content() before testing
        # `thing`, so the `if thing` guard was dead code and a falsy
        # thing crashed with AttributeError instead of rendering {}.
        if not thing:
            return ObjectTemplate({})
        try:
            content = thing.content()
        except AttributeError:
            content = thing.listing
        return ObjectTemplate(content.render())
class WikiPageListingJsonTemplate(ThingJsonTemplate):
    """List the names of a subreddit's wiki pages."""

    def kind(self, thing):
        return "wikipagelisting"

    def data(self, thing):
        return [page.name for page in thing.linear_pages]
class WikiViewJsonTemplate(ThingJsonTemplate):
    """Serialize a wiki page view (content plus last-revision metadata)."""

    def kind(self, thing):
        return "wikipage"

    def data(self, thing):
        revision_date = None
        if thing.edit_date:
            revision_date = time.mktime(thing.edit_date.timetuple())
        revision_by = None
        # Deleted editors are not attributed.
        if thing.edit_by and not thing.edit_by._deleted:
            revision_by = Wrapped(thing.edit_by).render()
        return dict(
            content_md=thing.page_content_md,
            content_html=thing.page_content,
            revision_by=revision_by,
            revision_date=revision_date,
            may_revise=thing.may_revise,
        )
class WikiSettingsJsonTemplate(ThingJsonTemplate):
    """Serialize per-page wiki settings: permission level and editors."""

    def kind(self, thing):
        return "wikipagesettings"

    def data(self, thing):
        rendered_editors = [Wrapped(editor).render() for editor in thing.mayedit]
        return dict(
            permlevel=thing.permlevel,
            listed=thing.listed,
            editors=rendered_editors,
        )
class WikiRevisionJsonTemplate(ThingJsonTemplate):
    """Serialize a single wiki revision entry."""

    def render(self, thing, *a, **kw):
        timestamp = time.mktime(thing.date.timetuple()) if thing.date else None
        wrapped_author = None
        # Deleted authors are not attributed.
        author = thing.get_author()
        if author and not author._deleted:
            wrapped_author = Wrapped(author).render()
        return ObjectTemplate(
            dict(
                author=wrapped_author,
                id=str(thing._id),
                timestamp=timestamp,
                reason=thing._get("reason"),
                page=thing.page,
            )
        )
class FlairListJsonTemplate(JsonTemplate):
    """Serialize a page of user flair plus prev/next pagination cursors."""

    def render(self, thing, *a, **kw):
        result = {}
        user_rows = []
        for row in thing.flair:
            if hasattr(row, "user"):
                user_rows.append(
                    dict(
                        user=row.user.name,
                        flair_text=row.flair_text,
                        flair_css_class=row.flair_css_class,
                    )
                )
            elif row.previous:
                # Row is a pagination cursor pointing backwards.
                result["prev"] = row.after
            else:
                result["next"] = row.after
        result["users"] = user_rows
        return ObjectTemplate(result)
class FlairCsvJsonTemplate(JsonTemplate):
    """Serialize the per-line results of a bulk flair CSV update."""

    def render(self, thing, *a, **kw):
        rows = [line.__dict__ for line in thing.results_by_line]
        return ObjectTemplate(rows)
class FlairSelectorJsonTemplate(JsonTemplate):
    """Serialize the flair-selector dialog."""

    def _template_dict(self, flair):
        # Project the flair template onto its public fields.
        fields = (
            "flair_template_id",
            "flair_position",
            "flair_text",
            "flair_css_class",
            "flair_text_editable",
        )
        return {name: getattr(flair, name) for name in fields}

    def render(self, thing, *a, **kw):
        """Return {"current": ..., "choices": [...]}.

        Each choice carries the flair template id, position, text, css
        class, and editability; "current" describes the viewer's present
        flair selection (text, css class, position, matching template id).
        """
        choices = [self._template_dict(choice) for choice in thing.choices]
        current = {
            "flair_text": thing.text,
            "flair_css_class": thing.css_class,
            "flair_position": thing.position,
            "flair_template_id": thing.matching_template,
        }
        return ObjectTemplate({"current": current, "choices": choices})
class StylesheetTemplate(ThingJsonTemplate):
    """Serialize a subreddit stylesheet plus its uploaded images."""

    _data_attrs_ = dict(
        images="_images",
        stylesheet="stylesheet_contents",
        subreddit_id="_fullname",
    )

    def kind(self, wrapped):
        return "stylesheet"

    def images(self):
        sr_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
        return [
            # %%name%% is the placeholder form usable inside stylesheets.
            {"name": name, "link": "url(%%%%%s%%%%)" % name, "url": url}
            for name, url in sr_images.iteritems()
        ]

    def thing_attr(self, thing, attr):
        if attr == "_images":
            return self.images()
        if attr == "_fullname":
            return c.site._fullname
        return ThingJsonTemplate.thing_attr(self, thing, attr)
class SubredditSettingsTemplate(ThingJsonTemplate):
    """Serializer for a subreddit's moderator settings.

    Attribute names prefixed with "site." are resolved against the
    wrapped thing's .site (the subreddit) rather than the thing itself.
    """

    _data_attrs_ = dict(
        allow_images="site.allow_images",
        collapse_deleted_comments="site.collapse_deleted_comments",
        comment_score_hide_mins="site.comment_score_hide_mins",
        content_options="site.link_type",
        default_set="site.allow_top",
        description="site.description",
        domain="site.domain",
        exclude_banned_modqueue="site.exclude_banned_modqueue",
        header_hover_text="site.header_title",
        # key_color='site.key_color',
        language="site.lang",
        over_18="site.over_18",
        public_description="site.public_description",
        public_traffic="site.public_traffic",
        # related_subreddits='site.related_subreddits',
        hide_ads="site.hide_ads",
        show_media="site.show_media",
        show_media_preview="site.show_media_preview",
        submit_link_label="site.submit_link_label",
        submit_text_label="site.submit_text_label",
        submit_text="site.submit_text",
        subreddit_id="site._fullname",
        subreddit_type="site.type",
        suggested_comment_sort="site.suggested_comment_sort",
        title="site.title",
        wiki_edit_age="site.wiki_edit_age",
        wiki_edit_karma="site.wiki_edit_karma",
        wikimode="site.wikimode",
        spam_links="site.spam_links",
        spam_selfposts="site.spam_selfposts",
        spam_comments="site.spam_comments",
    )

    def kind(self, wrapped):
        return "subreddit_settings"

    def thing_attr(self, thing, attr):
        # "site.<name>" delegates to the subreddit object.
        if attr.startswith("site.") and thing.site:
            return getattr(thing.site, attr[5:])
        if attr == "related_subreddits" and thing.site:
            # string used for form input
            return "\n".join(thing.site.related_subreddits)
        return ThingJsonTemplate.thing_attr(self, thing, attr)

    def raw_data(self, thing):
        data = ThingJsonTemplate.raw_data(self, thing)
        # remove this when feature is enabled and use _data_attrs instead
        if feature.is_enabled("mobile_settings"):
            data["key_color"] = self.thing_attr(thing, "key_color")
        if feature.is_enabled("related_subreddits"):
            data["related_subreddits"] = self.thing_attr(thing, "related_subreddits")
        return data
class UploadedImageJsonTemplate(JsonTemplate):
    """Serialize the outcome of a stylesheet-image upload."""

    def render(self, thing, *a, **kw):
        # Keep only the error keys whose values are set.
        failed = [key for key, value in thing.errors if value]
        return ObjectTemplate({"errors": failed, "img_src": thing.img_src})
class ModActionTemplate(ThingJsonTemplate):
    """JSON serializer for entries in a subreddit's moderation log."""

    _data_attrs_ = dict(
        action="action",
        created_utc="date",
        description="description",
        details="details",
        id="_fullname",
        mod="moderator",
        mod_id36="mod_id36",
        sr_id36="sr_id36",
        subreddit="subreddit",
        target_author="target_author",
        target_fullname="target_fullname",
        target_permalink="target_permalink",
        target_title="target_title",
        target_body="target_body",
    )

    def thing_attr(self, thing, attr):
        if attr == "date":
            # Epoch seconds derived from the action's UTC time.
            return (
                time.mktime(thing.date.astimezone(pytz.UTC).timetuple()) - time.timezone
            )
        elif attr == "target_author":
            # Deleted accounts are masked; actions without a target
            # author serialize as "".
            if thing.target_author and thing.target_author._deleted:
                return "[deleted]"
            elif thing.target_author:
                return thing.target_author.name
            return ""
        elif attr == "target_permalink":
            # Not every target type can build a permalink.
            try:
                return thing.target.make_permalink_slow()
            except AttributeError:
                return None
        elif attr == "moderator":
            return thing.moderator.name
        elif attr == "subreddit":
            return thing.subreddit.name
        elif attr == "target_title" and isinstance(thing.target, Link):
            return thing.target.title
        elif attr == "target_body" and isinstance(thing.target, Comment):
            return thing.target.body
        elif (
            attr == "target_body"
            and isinstance(thing.target, Link)
            and getattr(thing.target, "selftext", None)
        ):
            # Self-posts expose their selftext as the target body.
            return thing.target.selftext
        return ThingJsonTemplate.thing_attr(self, thing, attr)

    def kind(self, wrapped):
        return "modaction"
class PolicyViewJsonTemplate(ThingJsonTemplate):
    """Serialize a rendered policy page (body, revisions, table of contents)."""

    _data_attrs_ = dict(
        body_html="body_html",
        display_rev="display_rev",
        revs="revs",
        toc_html="toc_html",
    )

    def kind(self, wrapped):
        return "Policy"
class KarmaListJsonTemplate(ThingJsonTemplate):
    """Serialize per-subreddit link/comment karma totals."""

    def data(self, karmas):
        from r2.lib.template_helpers import display_comment_karma, display_link_karma

        return [
            {
                "sr": sr,
                "link_karma": display_link_karma(link_karma),
                "comment_karma": display_comment_karma(comment_karma),
            }
            for sr, (link_karma, comment_karma) in karmas.iteritems()
        ]

    def kind(self, wrapped):
        return "KarmaList"
def get_usertrophies(user):
    """Render the user's visible trophies as a finalized TrophyList."""
    visible = [
        trophy
        for trophy in Trophy.by_account(user)
        if trophy._thing2.awardtype != "invisible"
    ]
    return TrophyListJsonTemplate().render(visible).finalize()
class TrophyJsonTemplate(ThingJsonTemplate):
    """Serializer for a trophy relation.

    Most attributes live on the awarded trophy object (thing._thing2);
    dotted names like "award._id36" address it explicitly.
    """

    _data_attrs_ = dict(
        award_id="award._id36",
        description="description",
        name="award.title",
        id="_id36",
        icon_40="icon_40",
        icon_70="icon_70",
        url="trophy_url",
    )

    def thing_attr(self, thing, attr):
        # imgurl is a %-format template taking the icon size in pixels.
        if attr == "icon_40":
            return "https:" + thing._thing2.imgurl % 40
        elif attr == "icon_70":
            return "https:" + thing._thing2.imgurl % 70
        # Split a possibly-dotted name: "award.title" -> ("award", ".", "title").
        rel_attr, splitter, attr = attr.partition(".")
        if attr:
            return ThingJsonTemplate.thing_attr(self, thing._thing2, attr)
        else:
            return ThingJsonTemplate.thing_attr(self, thing, rel_attr)

    def kind(self, thing):
        return ThingJsonTemplate.kind(self, thing._thing2)
class TrophyListJsonTemplate(ThingJsonTemplate):
    """Envelope for a list of rendered trophies."""

    def data(self, trophies):
        return dict(trophies=[Wrapped(trophy).render() for trophy in trophies])

    def kind(self, wrapped):
        return "TrophyList"
class RulesJsonTemplate(JsonTemplate):
    """Serialize site-wide and subreddit rules.

    Note: mutates the rule dicts in place, adding rendered descriptions
    and defaulting a missing rule kind to "all".
    """

    def render(self, thing, *a, **kw):
        payload = {"site_rules": thing.site_rules, "rules": thing.rules}
        for rule in payload["rules"]:
            description = rule.get("description")
            if description:
                rule["description_html"] = safemarkdown(description)
            if not rule.get("kind"):
                rule["kind"] = "all"
        return ObjectTemplate(payload)
|
logic | converters | # encoding: utf-8
import json
from typing import Any
import ckan.lib.navl.dictization_functions as df
import ckan.logic.validators as validators
import ckan.model as model
from ckan.common import _, aslist
from ckan.types import (
Context,
DataValidator,
FlattenDataDict,
FlattenErrorDict,
FlattenKey,
)
def convert_to_extras(
    key: FlattenKey, data: FlattenDataDict, errors: FlattenErrorDict, context: Context
) -> Any:
    """Convert given field into an extra field."""
    # Find the next free index in the flattened "extras" entries.
    used = [flat[1] for flat in data if len(flat) > 1 and flat[0] == "extras"]
    next_index = max(used) + 1 if used else 0
    data[("extras", next_index, "key")] = key[-1]
    data[("extras", next_index, "value")] = data[key]
def convert_from_extras(
    key: FlattenKey, data: FlattenDataDict, errors: FlattenErrorDict, context: Context
) -> Any:
    """Restore field using object's extras."""
    # Locate the flattened extras entry whose "key" matches this field.
    match_index = None
    for flat_key, flat_value in data.items():
        if (
            flat_key[0] == "extras"
            and flat_key[-1] == "key"
            and flat_value == key[-1]
        ):
            match_index = flat_key[1]
            break
    if match_index is None:
        return
    # Promote the stored value to the real field ...
    data[key] = data[("extras", match_index, "value")]
    # ... and drop every flattened piece of the matched extras entry.
    stale = [k for k in data if k[0] == "extras" and k[1] == match_index]
    for flat_key in stale:
        del data[flat_key]
def extras_unicode_convert(extras: FlattenDataDict, context: Context):
    """Convert every value of the dictionary into string."""
    for name in extras:
        extras[name] = str(extras[name])
    return extras
def free_tags_only(
    key: FlattenKey, data: FlattenDataDict, errors: FlattenErrorDict, context: Context
) -> Any:
    """Ensure that none of the tags belong to a vocabulary."""
    tag_index = key[1]
    # Tags without a vocabulary id are free tags and are kept.
    if not data.get(("tags", tag_index, "vocabulary_id")):
        return
    # Drop every flattened entry belonging to this vocabulary tag.
    doomed = [flat for flat in data if flat[0] == "tags" and flat[1] == tag_index]
    for flat in doomed:
        del data[flat]
def convert_to_tags(vocab: Any) -> DataValidator:
    """Convert list of tag names into a list of tag dictionaries"""

    def func(
        key: FlattenKey,
        data: FlattenDataDict,
        errors: FlattenErrorDict,
        context: Context,
    ):
        tag_names = data.get(key)
        if not tag_names:
            return
        if isinstance(tag_names, str):
            tag_names = [tag_names]

        # Append after any tags already present in the flattened data.
        offset = 0
        for flat in data:
            if flat[0] == "tags":
                offset = max(offset, flat[1] + 1)

        vocabulary = model.Vocabulary.get(vocab)
        if not vocabulary:
            raise df.Invalid(_('Tag vocabulary "%s" does not exist') % vocab)
        context["vocabulary"] = vocabulary

        # Validate every tag before writing any of them.
        for tag in tag_names:
            validators.tag_in_vocabulary_validator(tag, context)

        for position, tag in enumerate(tag_names, start=offset):
            data[("tags", position, "name")] = tag
            data[("tags", position, "vocabulary_id")] = vocabulary.id

    return func
def convert_from_tags(vocab: Any) -> DataValidator:
    """Collect the names of tags belonging to the given vocabulary."""

    def func(
        key: FlattenKey,
        data: FlattenDataDict,
        errors: FlattenErrorDict,
        context: Context,
    ):
        vocabulary = model.Vocabulary.get(vocab)
        if not vocabulary:
            raise df.Invalid(_('Tag vocabulary "%s" does not exist') % vocab)
        # Prefer a tag's display_name, falling back to its name.
        names = [
            tag.get("display_name", tag["name"])
            for flat, tag in data.items()
            if flat[0] == "tags" and tag.get("vocabulary_id") == vocabulary.id
        ]
        data[key] = names

    return func
def convert_user_name_or_id_to_id(user_name_or_id: Any, context: Context) -> Any:
"""Return the user id for the given user name or id.
The point of this function is to convert user names to ids. If you have
something that may be a user name or a user id you can pass it into this
function and get the user id out either way.
Also validates that a user with the given name or id exists.
:returns: the id of the user with the given user name or id
:rtype: string
:raises: ckan.lib.navl.dictization_functions.Invalid if no user can be
found with the given id or user name
"""
session = context["session"]
result = session.query(model.User).filter_by(id=user_name_or_id).first()
if not result:
result = session.query(model.User).filter_by(name=user_name_or_id).first()
if not result:
raise df.Invalid("%s: %s" % (_("Not found"), _("User")))
return result.id
def convert_package_name_or_id_to_id(package_name_or_id: Any, context: Context) -> Any:
"""Return the package id for the given package name or id.
The point of this function is to convert package names to ids. If you have
something that may be a package name or id you can pass it into this
function and get the id out either way.
Also validates that a package with the given name or id exists.
:returns: the id of the package with the given name or id
:rtype: string
:raises: ckan.lib.navl.dictization_functions.Invalid if there is no
package with the given name or id
"""
session = context["session"]
result = session.query(model.Package).filter_by(id=package_name_or_id).first()
if not result:
result = session.query(model.Package).filter_by(name=package_name_or_id).first()
if not result:
raise df.Invalid("%s: %s" % (_("Not found"), _("Dataset")))
return result.id
def convert_group_name_or_id_to_id(group_name_or_id: Any, context: Context) -> Any:
    """Return the group id for the given group name or id.

    The point of this function is to convert group names to ids. If you have
    something that may be a group name or id you can pass it into this
    function and get the id out either way.

    Also validates that a group with the given name or id exists.

    :returns: the id of the group with the given name or id
    :rtype: string
    :raises: ckan.lib.navl.dictization_functions.Invalid if there is no
        group with the given name or id
    """
    session = context["session"]
    # Try the value as an id first, then fall back to treating it as a name.
    group = (
        session.query(model.Group).filter_by(id=group_name_or_id).first()
        or session.query(model.Group).filter_by(name=group_name_or_id).first()
    )
    if not group:
        raise df.Invalid("%s: %s" % (_("Not found"), _("Group")))
    return group.id
def convert_to_json_if_string(value: Any, context: Context) -> Any:
    """Parse string value as a JSON object."""
    # Non-strings pass through untouched.
    if not isinstance(value, str):
        return value
    try:
        return json.loads(value)
    except ValueError:
        raise df.Invalid(_("Could not parse as valid JSON"))
def as_list(value: Any):
    """Convert whitespace separated string into a list of strings."""
    # Thin wrapper over the ``aslist`` helper imported elsewhere in this
    # module; kept so it can be referenced by name as a validator.
    return aslist(value)
def convert_to_list_if_string(value: Any) -> Any:
    """Transform string into one-item list"""
    # Any non-string value is returned unchanged.
    return [value] if isinstance(value, str) else value
def json_or_string(value: Any) -> Any:
    """
    parse string values as json, return string if that fails
    """
    if not isinstance(value, str):
        return value
    try:
        return json.loads(value)
    except ValueError:
        # Not valid JSON: hand the raw string back.
        return value
def json_list_or_string(value: Any) -> Any:
    """
    parse string values as json or comma-separated lists, return
    string as a one-element list if that fails
    """
    if not isinstance(value, str):
        return value
    try:
        return json.loads(value)
    except ValueError:
        # Not valid JSON: treat the string as a comma-separated list.
        return value.split(",")
def remove_whitespace(value: Any, context: Context) -> Any:
    """Trim whitespaces from the value."""
    # Non-strings are passed through unchanged.
    return value.strip() if isinstance(value, str) else value
|
aliceVision | FeatureExtraction | __version__ = "1.3"
from meshroom.core import desc
class FeatureExtraction(desc.AVCommandLineNode):
    # Declarative Meshroom node description wrapping the
    # aliceVision_featureExtraction executable. The attributes below are
    # consumed by the `desc` framework, which turns them into the node UI
    # and assembles the final command line from them.
    commandLine = "aliceVision_featureExtraction {allParams}"
    # The node's computational size follows the number of views in "input".
    size = desc.DynamicNodeSize("input")
    # Views are processed in chunks of 40 per parallel task.
    parallelization = desc.Parallelization(blockSize=40)
    commandLineRange = "--rangeStart {rangeStart} --rangeSize {rangeBlockSize}"
    category = "Sparse Reconstruction"
    documentation = """
This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition.
Hence, a feature in the scene should have similar feature descriptions in all images.
This node implements multiple methods:
* **SIFT**
The most standard method. This is the default and recommended value for all use cases.
* **AKAZE**
AKAZE can be interesting solution to extract features in challenging condition. It could be able to match wider angle than SIFT but has drawbacks.
It may extract to many features, the repartition is not always good.
It is known to be good on challenging surfaces such as skin.
* **CCTAG**
CCTag is a marker type with 3 or 4 crowns. You can put markers in the scene during the shooting session to automatically re-orient and re-scale the scene to a known size.
It is robust to motion-blur, depth-of-field, occlusion. Be careful to have enough white margin around your CCTags.
## Online
[https://alicevision.org/#photogrammetry/natural_feature_extraction](https://alicevision.org/#photogrammetry/natural_feature_extraction)
"""
    # Input parameters, in the order they appear in the node UI.
    # uid=[0] means the attribute participates in the node's cache key;
    # uid=[] means changing it does not invalidate computed results.
    inputs = [
        desc.File(
            name="input",
            label="SfMData",
            description="Input SfMData file.",
            value="",
            uid=[0],
        ),
        desc.File(
            name="masksFolder",
            label="Masks Folder",
            description="Use masks to filter features. Filename should be the same or the image UID.",
            value="",
            uid=[0],
        ),
        desc.ChoiceParam(
            name="maskExtension",
            label="Mask Extension",
            description="File extension for masks.",
            value="png",
            values=["png", "exr", "jpg"],
            exclusive=True,
            uid=[0],
        ),
        desc.BoolParam(
            name="maskInvert",
            label="Invert Masks",
            description="Invert mask values.",
            value=False,
            uid=[0],
        ),
        # Feature describers; multiple may be selected (exclusive=False).
        desc.ChoiceParam(
            name="describerTypes",
            label="Describer Types",
            description="Describer types used to describe an image.",
            value=["dspsift"],
            values=[
                "sift",
                "sift_float",
                "sift_upright",
                "dspsift",
                "akaze",
                "akaze_liop",
                "akaze_mldb",
                "cctag3",
                "cctag4",
                "sift_ocv",
                "akaze_ocv",
                "tag16h5",
            ],
            exclusive=False,
            uid=[0],
            joinChar=",",
        ),
        desc.ChoiceParam(
            name="describerPreset",
            label="Describer Density",
            description="Control the ImageDescriber density (low, medium, normal, high, ultra).\n"
            "Warning: Use ULTRA only on small datasets.",
            value="normal",
            values=["low", "medium", "normal", "high", "ultra", "custom"],
            exclusive=True,
            uid=[0],
            # When "custom" is selected this param is excluded from
            # {allParams}; maxNbFeatures is used instead.
            group=lambda node: "allParams"
            if node.describerPreset.value != "custom"
            else None,
        ),
        desc.IntParam(
            name="maxNbFeatures",
            label="Max Nb Features",
            description="Maximum number of features extracted (0 means default value based on Describer Density).",
            value=0,
            range=(0, 100000, 1000),
            uid=[0],
            advanced=True,
            enabled=lambda node: (node.describerPreset.value == "custom"),
        ),
        desc.ChoiceParam(
            name="describerQuality",
            label="Describer Quality",
            description="Control the ImageDescriber quality (low, medium, normal, high, ultra).",
            value="normal",
            values=["low", "medium", "normal", "high", "ultra"],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name="contrastFiltering",
            label="Contrast Filtering",
            description="Contrast filtering method to ignore features with too low contrast that can be considered as noise:\n"
            " - Static: Fixed threshold.\n"
            " - AdaptiveToMedianVariance: Based on image content analysis.\n"
            " - NoFiltering: Disable contrast filtering.\n"
            " - GridSortOctaves: Grid Sort but per octaves (and only per scale at the end).\n"
            " - GridSort: Grid sort per octaves and at the end (scale * peakValue).\n"
            " - GridSortScaleSteps: Grid sort per octaves and at the end (scale and then peakValue).\n"
            " - NonExtremaFiltering: Filter non-extrema peakValues.\n",
            value="GridSort",
            values=[
                "Static",
                "AdaptiveToMedianVariance",
                "NoFiltering",
                "GridSortOctaves",
                "GridSort",
                "GridSortScaleSteps",
                "GridSortOctaveSteps",
                "NonExtremaFiltering",
            ],
            exclusive=True,
            advanced=True,
            uid=[0],
        ),
        desc.FloatParam(
            name="relativePeakThreshold",
            label="Relative Peak Threshold",
            description="Peak threshold relative to median of gradients.",
            value=0.01,
            range=(0.01, 1.0, 0.001),
            advanced=True,
            uid=[0],
            # Only meaningful for the adaptive contrast-filtering mode.
            enabled=lambda node: (
                node.contrastFiltering.value == "AdaptiveToMedianVariance"
            ),
        ),
        desc.BoolParam(
            name="gridFiltering",
            label="Grid Filtering",
            description="Enable grid filtering. Highly recommended to ensure usable number of features.",
            value=True,
            advanced=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name="workingColorSpace",
            label="Working Color Space",
            description="Allows you to choose the color space in which the data are processed.",
            value="sRGB",
            values=["sRGB", "Linear", "ACES2065-1", "ACEScg", "no_conversion"],
            exclusive=True,
            uid=[0],
        ),
        desc.BoolParam(
            name="forceCpuExtraction",
            label="Force CPU Extraction",
            description="Use only CPU feature extraction.",
            value=True,
            uid=[],
            advanced=True,
        ),
        desc.IntParam(
            name="maxThreads",
            label="Max Nb Threads",
            description="Maximum number of threads to run simultaneously (0 for automatic mode).",
            value=0,
            range=(0, 24, 1),
            uid=[],
            advanced=True,
        ),
        desc.ChoiceParam(
            name="verboseLevel",
            label="Verbose Level",
            description="Verbosity level (fatal, error, warning, info, debug, trace).",
            value="info",
            values=["fatal", "error", "warning", "info", "debug", "trace"],
            exclusive=True,
            uid=[],
        ),
    ]
    # Output attributes: the features/descriptors folder inside the node's
    # internal cache folder.
    outputs = [
        desc.File(
            name="output",
            label="Features Folder",
            description="Output path for the features and descriptors files (*.feat, *.desc).",
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
|
vocoder | cvsd | #!/usr/bin/env python
#
# Copyright 2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import blocks, filter, gr
from . import vocoder_python
class cvsd_encode_fb(gr.hier_block2):
    """Hierarchical wrapper around the raw CVSD encoder.

    Scales the incoming float stream (+-1) up to shorts (by 32000, slightly
    below the short maximum), interpolates it with a low-pass filter, and
    feeds the result to the CVSD vocoder. Any incoming sampling rate works,
    though higher sampling and interpolation rates give better sound quality.
    """

    def __init__(self, resample=8, bw=0.5):
        """
        When using the CVSD vocoder, appropriate sampling rates are from 8k to 64k with resampling rates
        from 1 to 8. A rate of 8k with a resampling rate of 8 provides a good quality signal.
        """
        gr.hier_block2.__init__(
            self,
            "cvsd_encode",
            gr.io_signature(1, 1, gr.sizeof_float),  # input: float samples
            gr.io_signature(1, 1, gr.sizeof_char),   # output: encoded bytes
        )

        scale_factor = 32000.0
        self.interp = resample

        # Processing chain: scale -> low-pass interpolate -> float-to-short
        # -> CVSD encode.
        scaler = blocks.multiply_const_ff(scale_factor)
        lpf_taps = filter.firdes.low_pass(self.interp, self.interp, bw, 2 * bw)
        interpolator = filter.interp_fir_filter_fff(self.interp, lpf_taps)
        to_short = blocks.float_to_short()
        encoder = vocoder_python.cvsd_encode_sb()

        self.connect(self, scaler, interpolator, to_short, encoder, self)
class cvsd_decode_bf(gr.hier_block2):
    """Hierarchical wrapper around the raw CVSD decoder.

    Decodes an incoming CVSD-encoded byte stream to shorts, converts them to
    floats, low-pass decimates, and scales back towards the +-1 range
    (dividing by 32000, slightly below the short maximum, to avoid clipping).
    Any sampling rate works; higher rates give better sound quality.
    """

    def __init__(self, resample=8, bw=0.5):
        """
        When using the CVSD vocoder, appropriate sampling rates are from 8k to 64k with resampling rates
        from 1 to 8. A rate of 8k with a resampling rate of 8 provides a good quality signal.
        """
        gr.hier_block2.__init__(
            self,
            "cvsd_decode",
            gr.io_signature(1, 1, gr.sizeof_char),   # input: encoded bytes
            gr.io_signature(1, 1, gr.sizeof_float),  # output: float samples
        )

        scale_factor = 32000.0
        self.decim = resample

        # Processing chain: CVSD decode -> short-to-float -> low-pass
        # decimate -> rescale.
        decoder = vocoder_python.cvsd_decode_bs()
        to_float = blocks.short_to_float()
        lpf_taps = filter.firdes.low_pass(1, 1, bw, 2 * bw)
        decimator = filter.fir_filter_fff(self.decim, lpf_taps)
        rescale = blocks.multiply_const_ff(1.0 / scale_factor)

        self.connect(self, decoder, to_float, decimator, rescale, self)
|
lib | munge | # encoding: utf-8
# Note these functions are similar to, but separate from name/title mungers
# found in the ckanext importer. That one needs to be stable to prevent
# packages changing name on reimport, but these ones can be changed and
# improved.
import os.path
import re
from typing import Union
from ckan import model
from ckan.lib.io import decode_path
# Maximum length of a filename's extension (including the '.')
MAX_FILENAME_EXTENSION_LENGTH = 21
# Maximum total length of a filename (including extension)
MAX_FILENAME_TOTAL_LENGTH = 100
# Minimum total length of a filename (including extension)
MIN_FILENAME_TOTAL_LENGTH = 3
def munge_name(name: str) -> str:
    """Munges the package name field in case it is not to spec."""
    # Replace accented/latin-1 characters with ascii lookalikes.
    name = substitute_ascii_equivalents(name)
    # Separator characters become dashes; anything else not allowed in a
    # package name is dropped, and the result is lowercased.
    name = re.sub("[ .:/]", "-", name)
    name = re.sub("[^a-zA-Z0-9-_]", "", name).lower()
    # Pad/truncate to the allowed length range.
    return _munge_to_length(
        name, model.PACKAGE_NAME_MIN_LENGTH, model.PACKAGE_NAME_MAX_LENGTH
    )
def munge_title_to_name(name: str) -> str:
    """Munge a package title into a package name."""
    name = substitute_ascii_equivalents(name)
    # Separators become dashes; disallowed characters are dropped.
    name = re.sub("[ .:/]", "-", name)
    name = re.sub("[^a-zA-Z0-9-_]", "", name).lower()
    # Collapse runs of dashes and strip them from both ends.
    name = re.sub("-+", "-", name).strip("-")
    # Keep a little headroom below the hard maximum, in case a few '_'
    # characters are needed later to de-clash names.
    max_length = model.PACKAGE_NAME_MAX_LENGTH - 5
    if len(name) > max_length:
        # If the name ends in something year-like, preserve it when
        # truncating.
        year_match = re.match(r".*?[_-]((?:\d{2,4}[-/])?\d{2,4})$", name)
        if year_match:
            year = year_match.group(1)
            name = "%s-%s" % (name[: (max_length - len(year) - 1)], year)
        else:
            name = name[:max_length]
    return _munge_to_length(
        name, model.PACKAGE_NAME_MIN_LENGTH, model.PACKAGE_NAME_MAX_LENGTH
    )
def substitute_ascii_equivalents(text_unicode: str) -> str:
    # Method taken from: http://code.activestate.com/recipes/251871/
    """
    This takes a UNICODE string and replaces Latin-1 characters with something
    equivalent in 7-bit ASCII. It returns a plain ASCII string. This function
    makes a best effort to convert Latin-1 characters into ASCII equivalents.
    It does not just strip out the Latin-1 characters. All characters in the
    standard 7-bit ASCII range are preserved. In the 8th bit range all the
    Latin-1 accented letters are converted to unaccented equivalents. Most
    symbol characters are converted to something meaningful. Anything not
    converted is deleted.
    """
    char_mapping = {
        0xC0: "A",
        0xC1: "A",
        0xC2: "A",
        0xC3: "A",
        0xC4: "A",
        0xC5: "A",
        0xC6: "Ae",
        0xC7: "C",
        0xC8: "E",
        0xC9: "E",
        0xCA: "E",
        0xCB: "E",
        0xCC: "I",
        0xCD: "I",
        0xCE: "I",
        0xCF: "I",
        0xD0: "Th",
        0xD1: "N",
        0xD2: "O",
        0xD3: "O",
        0xD4: "O",
        0xD5: "O",
        0xD6: "O",
        0xD8: "O",
        0xD9: "U",
        0xDA: "U",
        0xDB: "U",
        0xDC: "U",
        0xDD: "Y",
        0xDE: "th",
        0xDF: "ss",
        0xE0: "a",
        0xE1: "a",
        0xE2: "a",
        0xE3: "a",
        0xE4: "a",
        0xE5: "a",
        0xE6: "ae",
        0xE7: "c",
        0xE8: "e",
        0xE9: "e",
        0xEA: "e",
        0xEB: "e",
        0xEC: "i",
        0xED: "i",
        0xEE: "i",
        0xEF: "i",
        0xF0: "th",
        0xF1: "n",
        0xF2: "o",
        0xF3: "o",
        0xF4: "o",
        0xF5: "o",
        0xF6: "o",
        0xF8: "o",
        0xF9: "u",
        0xFA: "u",
        0xFB: "u",
        0xFC: "u",
        0xFD: "y",
        0xFE: "th",
        0xFF: "y",
        # 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
        # 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
        # 0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
        # 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
        # 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4:"'",
        # 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
        # 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
        # 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
        # 0xd7: '*', 0xf7: '/'
    }
    # Build the result with a single join instead of the previous quadratic
    # ``+=`` string concatenation loop. Mapped characters are replaced,
    # unmapped characters below 0x80 are kept, everything else is deleted.
    return "".join(
        char_mapping.get(ord(char), char if ord(char) < 0x80 else "")
        for char in text_unicode
    )
def munge_tag(tag: str) -> str:
    """Clean a tag: ascii-fy, lowercase, dash-join words, clamp the length."""
    tag = substitute_ascii_equivalents(tag)
    tag = tag.lower().strip()
    # Keep only letters, digits, dashes and spaces; spaces become dashes.
    tag = re.sub(r"[^a-zA-Z0-9\- ]", "", tag).replace(" ", "-")
    return _munge_to_length(tag, model.MIN_TAG_LENGTH, model.MAX_TAG_LENGTH)
def munge_filename_legacy(filename: str) -> str:
    """Tidies a filename. NB: deprecated

    Unfortunately it mangles any path or filename extension, so is deprecated.
    It needs to remain unchanged for use by group_dictize() and
    Upload.update_data_dict() because if this routine changes then group images
    uploaded previous to the change may not be viewable.
    """
    filename = substitute_ascii_equivalents(filename).strip()
    # Keep letters, digits, dots, dashes and spaces; spaces become dashes.
    filename = re.sub(r"[^a-zA-Z0-9.\- ]", "", filename).replace(" ", "-")
    # Historic hard-coded length limits (do not change — see docstring).
    return _munge_to_length(filename, 3, 100)
def munge_filename(filename: Union[str, bytes]) -> str:
    """Tidies a filename

    Keeps the filename extension (e.g. .csv).
    Strips off any path on the front.
    Returns a Unicode string.
    """
    if not isinstance(filename, str):
        filename = decode_path(filename)

    # Drop any directory component, then normalise the remaining name.
    filename = os.path.split(filename)[1]
    filename = substitute_ascii_equivalents(filename.lower().strip())
    filename = re.sub("[^a-zA-Z0-9_. -]", "", filename).replace(" ", "-")
    filename = re.sub("-+", "-", filename)

    # Clamp the extension first, then fit the stem into whatever remains of
    # the total length budget.
    stem, ext = os.path.splitext(filename)
    ext = ext[:MAX_FILENAME_EXTENSION_LENGTH]
    stem = _munge_to_length(
        stem,
        max(1, MIN_FILENAME_TOTAL_LENGTH - len(ext)),
        MAX_FILENAME_TOTAL_LENGTH - len(ext),
    )
    return stem + ext
def _munge_to_length(string: str, min_length: int, max_length: int) -> str:
    """Pad/truncates a string"""
    if len(string) < min_length:
        # Pad with underscores up to the minimum length.
        string = string.ljust(min_length, "_")
    # Truncation is a no-op for strings already within the limit.
    return string[:max_length]
|
context | gifContext | import tempfile
import Quartz
from .imageContext import ImageContext, getSaveImageOptions
from .tools.gifTools import generateGif
class GIFContext(ImageContext):
    """Image context that writes GIF files, animated when multi-page."""

    fileExtensions = ["gif"]

    saveImageOptions = getSaveImageOptions(
        [
            "imageGIFDitherTransparency",
            "imageGIFRGBColorTable",
            "imageColorSyncProfileData",
        ]
    ) + [
        ("imageGIFLoop", "Boolean that indicates whether the animated gif should loop")
    ]

    # Default per-frame delay, in 1/100 s (gifsicle's TIME unit).
    _delay = 10

    def __init__(self):
        super(GIFContext, self).__init__()
        # One delay entry per page, in 1/100 s.
        self._delayData = []

    def _frameDuration(self, seconds):
        # gifsicle -h: Set frame delay to TIME (in 1/100sec).
        self._delayData[-1] = int(seconds * 100)

    def _newPage(self, width, height):
        super(GIFContext, self)._newPage(width, height)
        self._delayData.append(self._delay)

    def _writeDataToFile(self, data, path, options):
        """Write the rendered pages to *path*, assembling an animated GIF
        via generateGif() when there is more than one page."""
        pdfDocument = Quartz.PDFDocument.alloc().initWithData_(data)
        pageCount = pdfDocument.pageCount()
        shouldBeAnimated = pageCount > 1

        tempPath = path
        if shouldBeAnimated:
            options["multipage"] = True
            # mkstemp returns an *open* OS-level file descriptor along with
            # the path; close it right away so the descriptor is not leaked
            # (the previous code discarded the fd without closing it).
            fd, tempPath = tempfile.mkstemp(suffix=".gif")
            os.close(fd)
            self._inputPaths = []

        super()._writeDataToFile(data, tempPath, options)

        if shouldBeAnimated:
            generateGif(
                self._inputPaths,
                path,
                self._delayData,
                options.get("imageGIFLoop", True),
            )
            del self._inputPaths

    def _storeImageData(self, imageData, imagePath):
        super()._storeImageData(imageData, imagePath)
        self._inputPaths.append(imagePath)
|
data | viewer_slice | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
# --------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
# --------------------------------------------------------------------------
import collections
import itertools
import os
import sys
import tempfile
import invesalius.constants as const
import invesalius.data.converters as converters
import invesalius.data.cursor_actors as ca
import invesalius.data.measures as measures
import invesalius.data.slice_ as sl
import invesalius.data.slice_data as sd
import invesalius.data.styles as styles
import invesalius.data.vtk_utils as vtku
import invesalius.project as project
import invesalius.session as ses
import invesalius.utils as utils
import numpy as np
import wx
from invesalius.gui.widgets.canvas_renderer import CanvasRendererCTX
from invesalius.gui.widgets.inv_spinctrl import InvFloatSpinCtrl, InvSpinCtrl
from invesalius.pubsub import pub as Publisher
from vtkmodules.vtkFiltersGeneral import vtkCursor3D
from vtkmodules.vtkFiltersHybrid import vtkRenderLargeImage
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleImage
from vtkmodules.vtkIOExport import vtkPOVExporter
from vtkmodules.vtkIOImage import (
vtkBMPWriter,
vtkJPEGWriter,
vtkPNGWriter,
vtkPostScriptWriter,
vtkTIFFWriter,
)
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkCoordinate,
vtkImageActor,
vtkPolyDataMapper,
vtkProperty,
vtkRenderer,
vtkWindowToImageFilter,
vtkWorldPointPicker,
)
from vtkmodules.wx.wxVTKRenderWindowInteractor import wxVTKRenderWindowInteractor
if sys.platform == "win32":
try:
import win32api
_has_win32api = True
except ImportError:
_has_win32api = False
else:
_has_win32api = False
ID_TO_TOOL_ITEM = {}
STR_WL = "WL: %d WW: %d"
ORIENTATIONS = {
"AXIAL": const.AXIAL,
"CORONAL": const.CORONAL,
"SAGITAL": const.SAGITAL,
}
class ContourMIPConfig(wx.Panel):
    # Settings panel for the contour/MIP projection modes of one slice
    # viewer orientation: number of compounded slices, contour sharpness
    # ("border") and traversal order. Changes are broadcast via pubsub so
    # the matching viewer picks them up.
    def __init__(self, prnt, orientation):
        wx.Panel.__init__(self, prnt)
        # Spin control: how many slices are compounded into the projection.
        self.mip_size_spin = InvSpinCtrl(
            self, -1, value=const.PROJECTION_MIP_SIZE, min_value=1, max_value=240
        )
        self.mip_size_spin.SetToolTip(
            wx.ToolTip(_("Number of slices used to compound the visualization."))
        )
        self.mip_size_spin.CalcSizeFromTextSize("MMM")
        # Spin control: contour sharpness for the contour-MIP/MIDA modes.
        self.border_spin = InvFloatSpinCtrl(
            self,
            -1,
            min_value=0,
            max_value=10,
            increment=0.1,
            value=const.PROJECTION_BORDER_SIZE,
            digits=1,
        )
        self.border_spin.SetToolTip(
            wx.ToolTip(
                _(
                    "Controls the sharpness of the"
                    " contour. The greater the"
                    " value, the sharper the"
                    " contour."
                )
            )
        )
        self.border_spin.CalcSizeFromTextSize()
        # w, h = self.border_spin.GetTextExtent('M')
        # self.border_spin.SetMinSize((5 * w + 10, -1))
        # self.border_spin.SetMaxSize((5 * w + 10, -1))
        # Checkbox: traverse slices in descending instead of ascending order.
        self.inverted = wx.CheckBox(self, -1, _("Inverted order"))
        self.inverted.SetToolTip(
            wx.ToolTip(
                _(
                    "If checked, the slices are"
                    " traversed in descending"
                    " order to compound the"
                    " visualization instead of"
                    " ascending order."
                )
            )
        )
        txt_mip_size = wx.StaticText(
            self, -1, _("Number of slices"), style=wx.ALIGN_CENTER_HORIZONTAL
        )
        self.txt_mip_border = wx.StaticText(self, -1, _("Sharpness"))
        # Lay the widgets out in one horizontal row.
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(txt_mip_size, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 2)
        sizer.Add(self.mip_size_spin, 0)
        # Spacer; older wxPython versions need the size as a tuple.
        try:
            sizer.Add(10, 0)
        except TypeError:
            sizer.Add((10, 0))
        sizer.Add(self.txt_mip_border, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 2)
        sizer.Add(self.border_spin, 0, wx.EXPAND)
        try:
            sizer.Add(10, 0)
        except TypeError:
            sizer.Add((10, 0))
        sizer.Add(self.inverted, 0, wx.EXPAND)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.Update()
        self.SetAutoLayout(1)
        self.orientation = orientation
        self.canvas = None
        # Forward widget changes via pubsub messages.
        self.mip_size_spin.Bind(wx.EVT_SPINCTRL, self.OnSetMIPSize)
        self.border_spin.Bind(wx.EVT_SPINCTRL, self.OnSetMIPBorder)
        self.inverted.Bind(wx.EVT_CHECKBOX, self.OnCheckInverted)
        Publisher.subscribe(self._set_projection_type, "Set projection type")

    def OnSetMIPSize(self, number_slices):
        # Handler for the slice-count spin control (argument is the event).
        val = self.mip_size_spin.GetValue()
        Publisher.sendMessage("Set MIP size %s" % self.orientation, number_slices=val)

    def OnSetMIPBorder(self, evt):
        val = self.border_spin.GetValue()
        Publisher.sendMessage("Set MIP border %s" % self.orientation, border_size=val)

    def OnCheckInverted(self, evt):
        val = self.inverted.GetValue()
        Publisher.sendMessage("Set MIP Invert %s" % self.orientation, invert=val)

    def _set_projection_type(self, projection_id):
        # Enable only the controls relevant to the selected projection mode.
        if projection_id in (const.PROJECTION_MIDA, const.PROJECTION_CONTOUR_MIDA):
            self.inverted.Enable()
        else:
            self.inverted.Disable()
        if projection_id in (
            const.PROJECTION_CONTOUR_MIP,
            const.PROJECTION_CONTOUR_MIDA,
        ):
            self.border_spin.Enable()
            self.txt_mip_border.Enable()
        else:
            self.border_spin.Disable()
            self.txt_mip_border.Disable()
class Viewer(wx.Panel):
    def __init__(self, prnt, orientation="AXIAL"):
        """Slice viewer panel for one orientation (AXIAL, CORONAL or SAGITAL)."""
        wx.Panel.__init__(self, prnt, size=wx.Size(320, 300))
        # colour = [255*c for c in const.ORIENTATION_COLOUR[orientation]]
        # self.SetBackgroundColour(colour)
        # Interactor additional style
        self._number_slices = const.PROJECTION_MIP_SIZE
        self._mip_inverted = False
        self.style = None
        self.last_position_mouse_move = ()
        self.state = const.STATE_DEFAULT
        self.overwrite_mask = False
        # All renderers and image actors in this viewer
        self.slice_data_list = []
        self.slice_data = None
        self.slice_actor = None
        self.interpolation_slice_status = True
        self.canvas = None
        # Drawables grouped by the slice index they belong to.
        self.draw_by_slice_number = collections.defaultdict(list)
        # The layout from slice_data, the first is number of cols, the second
        # is the number of rows
        self.layout = (1, 1)
        self.orientation_texts = []
        self.measures = measures.MeasureData()
        self.actors_by_slice_number = collections.defaultdict(list)
        self.renderers_by_slice_number = {}
        self.orientation = orientation
        self.slice_number = 0
        self.scroll_enabled = True
        self.__init_gui()
        # Brush/cursor defaults; the cursor object itself is created lazily.
        self._brush_cursor_op = const.DEFAULT_BRUSH_OP
        self._brush_cursor_size = const.BRUSH_SIZE
        self._brush_cursor_colour = const.BRUSH_COLOUR
        self._brush_cursor_type = const.DEFAULT_BRUSH_OP
        self.cursor = None
        self.wl_text = None
        self.on_wl = False
        self.on_text = False
        # VTK pipeline and actors
        self.__config_interactor()
        self.cross_actor = vtkActor()
        self.__bind_events()
        self.__bind_events_wx()
        self._flush_buffer = False
    def __init_gui(self):
        # Build the VTK render-window interactor and the slice scrollbar,
        # laid out side by side; the MIP controls panel starts hidden.
        self.interactor = wxVTKRenderWindowInteractor(self, -1, size=self.GetSize())
        self.interactor.SetRenderWhenDisabled(True)
        scroll = wx.ScrollBar(self, -1, style=wx.SB_VERTICAL)
        self.scroll = scroll
        self.mip_ctrls = ContourMIPConfig(self, self.orientation)
        self.mip_ctrls.Hide()
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.interactor, 1, wx.EXPAND)
        sizer.Add(scroll, 0, wx.EXPAND | wx.GROW)
        background_sizer = wx.BoxSizer(wx.VERTICAL)
        background_sizer.Add(sizer, 1, wx.EXPAND)
        # background_sizer.Add(self.mip_ctrls, 0, wx.EXPAND|wx.GROW|wx.ALL, 2)
        self.SetSizer(background_sizer)
        background_sizer.Fit(self)
        self.Layout()
        self.Update()
        self.SetAutoLayout(1)
        # Picker used to translate screen clicks into world coordinates.
        self.pick = vtkWorldPointPicker()
        self.interactor.SetPicker(self.pick)
def OnContextMenu(self, evt):
if self.last_position_mouse_move == self.interactor.GetLastEventPosition():
self.menu.caller = self
self.PopupMenu(self.menu)
evt.Skip()
    def SetPopupMenu(self, menu):
        # Store the context menu later shown by OnContextMenu.
        self.menu = menu
def SetLayout(self, layout):
self.layout = layout
if (layout == (1, 1)) and self.on_text:
self.ShowTextActors()
else:
self.HideTextActors(change_status=False)
slice_ = sl.Slice()
self.LoadRenderers(slice_.GetOutput())
self.__configure_renderers()
self.__configure_scroll()
def HideTextActors(self, change_status=True):
try:
self.canvas.draw_list.remove(self.wl_text)
except (ValueError, AttributeError):
pass
[self.canvas.draw_list.remove(t) for t in self.orientation_texts]
self.UpdateCanvas()
if change_status:
self.on_text = False
def ShowTextActors(self):
if self.on_wl and self.wl_text:
self.canvas.draw_list.append(self.wl_text)
[self.canvas.draw_list.append(t) for t in self.orientation_texts]
self.UpdateCanvas()
self.on_text = True
    def __set_layout(self, layout):
        # Thin private wrapper around SetLayout (presumably a pubsub
        # handler registered in __bind_events — confirm there).
        self.SetLayout(layout)
def __config_interactor(self):
style = vtkInteractorStyleImage()
interactor = self.interactor
interactor.SetInteractorStyle(style)
def SetInteractorStyle(self, state):
cleanup = getattr(self.style, "CleanUp", None)
if cleanup:
self.style.CleanUp()
del self.style
style = styles.Styles.get_style(state)(self)
setup = getattr(style, "SetUp", None)
if setup:
style.SetUp()
self.style = style
self.interactor.SetInteractorStyle(style)
self.interactor.Render()
self.state = state
def UpdateWindowLevelValue(self, window, level):
self.acum_achange_window, self.acum_achange_level = (window, level)
self.SetWLText(window, level)
slc = sl.Slice()
slc._update_wwwl_widget_nodes(window, level)
Publisher.sendMessage("Update all slice")
Publisher.sendMessage("Update clut imagedata widget")
def UpdateWindowLevelText(self, window, level):
self.acum_achange_window, self.acum_achange_level = window, level
self.SetWLText(window, level)
self.interactor.Render()
def OnClutChange(self, evt):
Publisher.sendMessage(
"Change colour table from background image from widget",
nodes=evt.GetNodes(),
)
slc = sl.Slice()
Publisher.sendMessage(
"Update window level value", window=slc.window_width, level=slc.window_level
)
def SetWLText(self, window_width, window_level):
value = STR_WL % (window_level, window_width)
if self.wl_text:
self.wl_text.SetValue(value)
self.canvas.modified = True
# self.interactor.Render()
    def EnableText(self):
        """Create the WL and orientation overlay texts (no-op if created)."""
        if not (self.wl_text):
            proj = project.Project()
            colour = const.ORIENTATION_COLOUR[self.orientation]
            # Window & Level text
            self.wl_text = vtku.TextZero()
            self.wl_text.SetPosition(const.TEXT_POS_LEFT_UP)
            self.wl_text.SetSymbolicSize(wx.FONTSIZE_LARGE)
            # NOTE(review): SetWLText's signature is (window_width,
            # window_level) but this call passes (proj.level, proj.window) —
            # confirm whether the argument order is intentional.
            self.SetWLText(proj.level, proj.window)
            # Orientation text: one label per screen edge, ordered
            # left, right, top, bottom.
            if self.orientation == "AXIAL":
                values = [_("R"), _("L"), _("A"), _("P")]
            elif self.orientation == "SAGITAL":
                values = [_("P"), _("A"), _("T"), _("B")]
            else:
                values = [_("R"), _("L"), _("T"), _("B")]
            left_text = self.left_text = vtku.TextZero()
            left_text.ShadowOff()
            left_text.SetColour(colour)
            left_text.SetPosition(const.TEXT_POS_VCENTRE_LEFT)
            left_text.SetVerticalJustificationToCentered()
            left_text.SetValue(values[0])
            left_text.SetSymbolicSize(wx.FONTSIZE_LARGE)
            right_text = self.right_text = vtku.TextZero()
            right_text.ShadowOff()
            right_text.SetColour(colour)
            right_text.SetPosition(const.TEXT_POS_VCENTRE_RIGHT_ZERO)
            right_text.SetVerticalJustificationToCentered()
            right_text.SetJustificationToRight()
            right_text.SetValue(values[1])
            right_text.SetSymbolicSize(wx.FONTSIZE_LARGE)
            up_text = self.up_text = vtku.TextZero()
            up_text.ShadowOff()
            up_text.SetColour(colour)
            up_text.SetPosition(const.TEXT_POS_HCENTRE_UP)
            up_text.SetJustificationToCentered()
            up_text.SetValue(values[2])
            up_text.SetSymbolicSize(wx.FONTSIZE_LARGE)
            down_text = self.down_text = vtku.TextZero()
            down_text.ShadowOff()
            down_text.SetColour(colour)
            down_text.SetPosition(const.TEXT_POS_HCENTRE_DOWN_ZERO)
            down_text.SetJustificationToCentered()
            down_text.SetVerticalJustificationToBottom()
            down_text.SetValue(values[3])
            down_text.SetSymbolicSize(wx.FONTSIZE_LARGE)
            self.orientation_texts = [left_text, right_text, up_text, down_text]
def RenderTextDirection(self, directions):
# Values are on ccw order, starting from the top:
self.up_text.SetValue(directions[0])
self.left_text.SetValue(directions[1])
self.down_text.SetValue(directions[2])
self.right_text.SetValue(directions[3])
self.interactor.Render()
def ResetTextDirection(self, cam):
# Values are on ccw order, starting from the top:
if self.orientation == "AXIAL":
values = [_("A"), _("R"), _("P"), _("L")]
elif self.orientation == "CORONAL":
values = [_("T"), _("R"), _("B"), _("L")]
else: # 'SAGITAL':
values = [_("T"), _("P"), _("B"), _("A")]
self.RenderTextDirection(values)
self.interactor.Render()
    def UpdateTextDirection(self, cam):
        """Rotate the direction labels to follow the camera roll angle.

        Labels are given ccw starting from the top. The roll ranges below are
        hand-tuned; note small gaps (e.g. croll in (88, 89]) fall through with
        no update — presumably intentional, confirm before changing.
        """
        croll = cam.GetRoll()
        if self.orientation == "AXIAL":
            if croll >= -2 and croll <= 1:
                self.RenderTextDirection([_("A"), _("R"), _("P"), _("L")])
            elif croll > 1 and croll <= 44:
                self.RenderTextDirection([_("AL"), _("RA"), _("PR"), _("LP")])
            elif croll > 44 and croll <= 88:
                self.RenderTextDirection([_("LA"), _("AR"), _("RP"), _("PL")])
            elif croll > 89 and croll <= 91:
                self.RenderTextDirection([_("L"), _("A"), _("R"), _("P")])
            elif croll > 91 and croll <= 135:
                self.RenderTextDirection([_("LP"), _("AL"), _("RA"), _("PR")])
            elif croll > 135 and croll <= 177:
                self.RenderTextDirection([_("PL"), _("LA"), _("AR"), _("RP")])
            elif (croll >= -180 and croll <= -178) or (croll < 180 and croll > 177):
                self.RenderTextDirection([_("P"), _("L"), _("A"), _("R")])
            elif croll >= -177 and croll <= -133:
                self.RenderTextDirection([_("PR"), _("LP"), _("AL"), _("RA")])
            elif croll >= -132 and croll <= -101:
                self.RenderTextDirection([_("RP"), _("PL"), _("LA"), _("AR")])
            elif croll >= -101 and croll <= -87:
                self.RenderTextDirection([_("R"), _("P"), _("L"), _("A")])
            elif croll >= -86 and croll <= -42:
                self.RenderTextDirection([_("RA"), _("PR"), _("LP"), _("AL")])
            elif croll >= -41 and croll <= -2:
                self.RenderTextDirection([_("AR"), _("RP"), _("PL"), _("LA")])
        elif self.orientation == "CORONAL":
            if croll >= -2 and croll <= 1:
                self.RenderTextDirection([_("T"), _("R"), _("B"), _("L")])
            elif croll > 1 and croll <= 44:
                self.RenderTextDirection([_("TL"), _("RT"), _("BR"), _("LB")])
            elif croll > 44 and croll <= 88:
                self.RenderTextDirection([_("LT"), _("TR"), _("RB"), _("BL")])
            elif croll > 89 and croll <= 91:
                self.RenderTextDirection([_("L"), _("T"), _("R"), _("B")])
            elif croll > 91 and croll <= 135:
                self.RenderTextDirection([_("LB"), _("TL"), _("RT"), _("BR")])
            elif croll > 135 and croll <= 177:
                self.RenderTextDirection([_("BL"), _("LT"), _("TR"), _("RB")])
            elif (croll >= -180 and croll <= -178) or (croll < 180 and croll > 177):
                self.RenderTextDirection([_("B"), _("L"), _("T"), _("R")])
            elif croll >= -177 and croll <= -133:
                self.RenderTextDirection([_("BR"), _("LB"), _("TL"), _("RT")])
            elif croll >= -132 and croll <= -101:
                self.RenderTextDirection([_("RB"), _("BL"), _("LT"), _("TR")])
            elif croll >= -101 and croll <= -87:
                self.RenderTextDirection([_("R"), _("B"), _("L"), _("T")])
            elif croll >= -86 and croll <= -42:
                self.RenderTextDirection([_("RT"), _("BR"), _("LB"), _("TL")])
            elif croll >= -41 and croll <= -2:
                self.RenderTextDirection([_("TR"), _("RB"), _("BL"), _("LT")])
        elif self.orientation == "SAGITAL":
            # SAGITAL starts from a -90 degree default roll, hence the
            # different range layout.
            if croll >= -101 and croll <= -87:
                self.RenderTextDirection([_("T"), _("P"), _("B"), _("A")])
            elif croll >= -86 and croll <= -42:
                self.RenderTextDirection([_("TA"), _("PT"), _("BP"), _("AB")])
            elif croll >= -41 and croll <= -2:
                self.RenderTextDirection([_("AT"), _("TP"), _("PB"), _("BA")])
            elif croll >= -2 and croll <= 1:
                self.RenderTextDirection([_("A"), _("T"), _("P"), _("B")])
            elif croll > 1 and croll <= 44:
                self.RenderTextDirection([_("AB"), _("TA"), _("PT"), _("BP")])
            elif croll > 44 and croll <= 88:
                self.RenderTextDirection([_("BA"), _("AT"), _("TP"), _("PB")])
            elif croll > 89 and croll <= 91:
                self.RenderTextDirection([_("B"), _("A"), _("T"), _("P")])
            elif croll > 91 and croll <= 135:
                self.RenderTextDirection([_("BP"), _("AB"), _("TA"), _("PT")])
            elif croll > 135 and croll <= 177:
                self.RenderTextDirection([_("PB"), _("BA"), _("AT"), _("TP")])
            elif (croll >= -180 and croll <= -178) or (croll < 180 and croll > 177):
                self.RenderTextDirection([_("P"), _("B"), _("A"), _("T")])
            elif croll >= -177 and croll <= -133:
                self.RenderTextDirection([_("PT"), _("BP"), _("AB"), _("TA")])
            elif croll >= -132 and croll <= -101:
                self.RenderTextDirection([_("TP"), _("PB"), _("BA"), _("AT")])
def Reposition(self, slice_data):
    """
    Reset the camera of this slice panel so the whole slice is visible.

    Based on code of method Zoom in the
    vtkInteractorStyleRubberBandZoom, the of
    vtk 5.4.3
    """
    ren = slice_data.renderer
    size = ren.GetSize()  # NOTE(review): fetched but unused — possibly leftover from the original vtk code
    ren.ResetCamera()
    ren.GetActiveCamera().Zoom(1.0)  # zoom factor 1.0 keeps the reset framing
    self.interactor.Render()
def ChangeBrushColour(self, colour):
    """Store the given brush-cursor colour and, when a cursor exists,
    apply it to the cursor of every slice panel."""
    self._brush_cursor_colour = colour
    if not self.cursor:
        return
    for panel in self.slice_data_list:
        panel.cursor.SetColour(colour)
def SetBrushColour(self, colour):
    """Set the brush-cursor colour from an 8-bit RGB sequence.

    Components of `colour` are in 0-255 and are normalized to the
    0.0-1.0 range VTK expects before being stored and applied.
    """
    # Idiom fix: the original comprehension reused the name `colour` for
    # the loop variable, shadowing the parameter.
    colour_vtk = [component / 255.0 for component in colour]
    self._brush_cursor_colour = colour_vtk
    if self.slice_data.cursor:
        self.slice_data.cursor.SetColour(colour_vtk)
def UpdateSlicesPosition(self, position):
    """Scroll the other two orthogonal viewers so they intersect at the
    given world position (wx, wy, wz)."""
    # Get point from base change
    px, py = self.get_slice_pixel_coord_by_world_pos(*position)
    coord = self.calcultate_scroll_position(px, py)
    # Debugging coordinates. For a 1.0 spacing axis the coord and position is the same,
    # but for a spacing dimension =! 1, the coord and position are different
    # print("\nPosition: {}".format(position))
    # print("Scroll position: {}".format(coord))
    # print("Slice actor bounds: {}".format(self.slice_data.actor.GetBounds()))
    # print("Scroll from int of position: {}\n".format([round(s) for s in position]))
    # this call did not affect the working code
    # self.cross.SetFocalPoint(coord)
    # update the image slices in all three orientations
    self.ScrollSlice(coord)
def SetCrossFocalPoint(self, position):
    """
    Set the cross focal point for this slice panel.

    Also invoked through pubsub, which may deliver a list of 6
    coordinates; only the first three (wx, wy, wz) are forwarded to
    the cross actor.
    """
    focal = position[:3]
    self.cross.SetFocalPoint(focal)
def ScrollSlice(self, coord):
    """Ask the two complementary viewers to scroll to the slice indices
    of `coord`, given as (sagital, coronal, axial)."""
    # For each viewer orientation: the two panels to update and which
    # component of coord each one needs.
    targets = {
        "AXIAL": (("SAGITAL", 0), ("CORONAL", 1)),
        "SAGITAL": (("AXIAL", 2), ("CORONAL", 1)),
        "CORONAL": (("AXIAL", 2), ("SAGITAL", 0)),
    }
    for target, axis in targets.get(self.orientation, ()):
        wx.CallAfter(
            Publisher.sendMessage,
            ("Set scroll position", target),
            index=coord[axis],
        )
def get_slice_data(self, render):
    """Return the slice_data shown by this viewer.

    Historically each renderer owned its own slice_data; this viewer now
    keeps exactly one, so `render` is ignored.
    """
    return self.slice_data
def calcultate_scroll_position(self, x, y):
    """Map an in-plane pixel coordinate (x, y) to the per-orientation
    scroll positions (sagital, coronal, axial); the axis orthogonal to
    this viewer keeps the current slice number.

    NOTE: the misspelled name is preserved for caller compatibility.
    """
    number = self.slice_data.number
    if self.orientation == "AXIAL":
        return x, y, number
    elif self.orientation == "CORONAL":
        return x, number, y
    elif self.orientation == "SAGITAL":
        return number, x, y
def calculate_matrix_position(self, coord):
    """Convert a world coordinate into the (mx, my) integer pixel
    position inside the currently displayed slice image."""
    x, y, z = coord
    xi, xf, yi, yf, zi, zf = self.slice_data.actor.GetBounds()
    spacing = self.slice_.spacing
    # Select the two in-plane world axes for this viewer's orientation.
    if self.orientation == "AXIAL":
        mx, my = (x - xi) / spacing[0], (y - yi) / spacing[1]
    elif self.orientation == "CORONAL":
        mx, my = (x - xi) / spacing[0], (z - zi) / spacing[2]
    elif self.orientation == "SAGITAL":
        mx, my = (y - yi) / spacing[1], (z - zi) / spacing[2]
    return int(round(mx, 0)), int(round(my, 0))
def get_vtk_mouse_position(self):
    """
    Get Mouse position inside a wxVTKRenderWindowInteractor. Return a
    tuple with X and Y position.
    Please use this instead of using iren.GetEventPosition because it's
    not returning the correct values on Mac with HighDPI display, maybe
    the same is happening with Windows and Linux, we need to test.
    """
    # Global screen position -> interactor-local client position.
    mposx, mposy = wx.GetMousePosition()
    cposx, cposy = self.interactor.ScreenToClient((mposx, mposy))
    # VTK's origin is bottom-left; wx's is top-left, so flip Y.
    mx, my = cposx, self.interactor.GetSize()[1] - cposy
    if sys.platform == "darwin":
        # It's needed to multiply by scale factor in HighDPI because of
        # https://docs.wxpython.org/wx.glcanvas.GLCanvas.html
        # For now we are doing this only on Mac but it may be needed on
        # Windows and Linux too.
        scale = self.interactor.GetContentScaleFactor()
        mx *= scale
        my *= scale
    return int(mx), int(my)
def get_coordinate_cursor(self, mx, my, picker=None):
    """
    Given the mx, my screen position returns the x, y, z position in world
    coordinates.

    Parameters
        mx (int): x position.
        my (int): y position
        picker: the picker used to calculate the voxel coordinate.

    Returns:
        world coordinate (x, y, z)
    """
    if picker is None:
        picker = self.pick
    slice_data = self.slice_data
    renderer = slice_data.renderer
    picker.Pick(mx, my, 0, renderer)
    x, y, z = picker.GetPickPosition()
    # When the slice is flat along one axis (zero-thickness bounds),
    # clamp that component to the slice plane so the point lies on it.
    bounds = self.slice_data.actor.GetBounds()
    if bounds[0] == bounds[1]:
        x = bounds[0]
    elif bounds[2] == bounds[3]:
        y = bounds[2]
    elif bounds[4] == bounds[5]:
        z = bounds[4]
    return x, y, z
def get_coordinate_cursor_edition(self, slice_data=None, picker=None):
    """Return the picked position expressed in matrix (voxel) units.

    The picked world position is shifted to the actor origin and scaled
    by matrix-dimension / world-extent on each axis; a degenerate (flat)
    axis falls back to the current slice number.
    """
    if slice_data is None:
        slice_data = self.slice_data
    if picker is None:
        picker = self.pick
    slice_number = slice_data.number
    picked = picker.GetPickPosition()
    bounds = slice_data.actor.GetBounds()
    dimensions = self.slice_.matrix.shape
    # Matrix shape is (z, y, x) while world coordinates are (x, y, z).
    scaled = []
    for value, lo, hi, dim in (
        (picked[0], bounds[0], bounds[1], dimensions[2]),
        (picked[1], bounds[2], bounds[3], dimensions[1]),
        (picked[2], bounds[4], bounds[5], dimensions[0]),
    ):
        try:
            scaled.append(float(value - lo) * dim / (hi - lo))
        except ZeroDivisionError:
            scaled.append(slice_number)
    x, y, z = scaled
    return x, y, z
def get_voxel_coord_by_screen_pos(self, mx, my, picker=None):
    """
    Given the (mx, my) screen position returns the voxel coordinate
    of the volume at that (mx, my) position.

    Parameters:
        mx (int): x position.
        my (int): y position
        picker: the picker used to calculate the voxel coordinate.

    Returns:
        voxel_coordinate (x, y, z): voxel coordinate inside the matrix. Can
        be used to access the voxel value inside the matrix.
    """
    if picker is None:
        picker = self.pick
    # Screen -> world -> voxel.
    wx, wy, wz = self.get_coordinate_cursor(mx, my, picker)
    x, y, z = self.get_voxel_coord_by_world_pos(wx, wy, wz)
    return (x, y, z)
def get_voxel_coord_by_world_pos(self, wx, wy, wz):
    """
    Given the (wx, wy, wz) world position returns the voxel coordinate
    of the volume at that position.

    Parameters:
        wx (float): x position.
        wy (float): y position
        wz (float): z position

    Returns:
        voxel_coordinate (x, y, z): voxel coordinate inside the matrix. Can
        be used to access the voxel value inside the matrix.
    """
    # World -> in-slice pixel -> (sagital, coronal, axial) scroll triple.
    px, py = self.get_slice_pixel_coord_by_world_pos(wx, wy, wz)
    x, y, z = self.calcultate_scroll_position(px, py)
    return (int(x), int(y), int(z))
def get_slice_pixel_coord_by_screen_pos(self, mx, my, picker=None):
    """
    Given the (mx, my) screen position returns the pixel coordinate
    of the slice at that (mx, my) position.

    Parameters:
        mx (int): x position.
        my (int): y position
        picker: the picker used to calculate the pixel coordinate.

    Returns:
        pixel_coordinate (x, y): pixel coordinate inside the slice image.
    """
    if picker is None:
        picker = self.pick
    # Screen -> world -> in-slice pixel.
    wx, wy, wz = self.get_coordinate_cursor(mx, my, picker)
    x, y = self.get_slice_pixel_coord_by_world_pos(wx, wy, wz)
    return int(x), int(y)
def get_slice_pixel_coord_by_world_pos(self, wx, wy, wz):
    """
    Given the (wx, wy, wz) world position returns the pixel coordinate
    of the slice at that position.

    Parameters:
        wx (float): x world position.
        wy (float): y world position.
        wz (float): z world position.

    Returns:
        pixel_coordinate (x, y): pixel coordinate inside the slice image.
    """
    coord = wx, wy, wz
    px, py = self.calculate_matrix_position(coord)
    return px, py
def get_coord_inside_volume(self, mx, my, picker=None):
    """Return the world coordinate at screen position (mx, my), snapped
    to the nearest point of the slice actor's input image when the pick
    lands inside it.

    Parameters:
        mx (int): x screen position.
        my (int): y screen position.
        picker: picker used to compute the world coordinate.
    """
    if picker is None:
        picker = self.pick
    # BUG FIX: get_coordinate_cursor requires the screen position; the
    # old call passed only the picker, which was consumed as `mx`.
    coord = self.get_coordinate_cursor(mx, my, picker)
    image = self.slice_data.actor.GetInput()
    position = image.FindPoint(coord)
    if position != -1:
        coord = image.GetPoint(position)
    return coord
def __bind_events(self):
    """Subscribe all pubsub handlers for this viewer; orientation-specific
    topics are suffixed with this viewer's orientation."""
    Publisher.subscribe(self.LoadImagedata, "Load slice to viewer")
    Publisher.subscribe(self.SetBrushColour, "Change mask colour")
    Publisher.subscribe(self.UpdateRender, "Update slice viewer")
    Publisher.subscribe(
        self.UpdateRender, "Update slice viewer %s" % self.orientation
    )
    Publisher.subscribe(self.UpdateCanvas, "Redraw canvas")
    Publisher.subscribe(self.UpdateCanvas, "Redraw canvas %s" % self.orientation)
    Publisher.subscribe(
        self.ChangeSliceNumber, ("Set scroll position", self.orientation)
    )
    # Publisher.subscribe(self.__update_cross_position,
    # 'Update cross position')
    # Publisher.subscribe(self.__update_cross_position,
    # 'Update cross position %s' % self.orientation)
    Publisher.subscribe(self.SetCrossFocalPoint, "Set cross focal point")
    Publisher.subscribe(self.UpdateSlicesPosition, "Update slices position")
    ###
    # Publisher.subscribe(self.ChangeBrushColour,
    # 'Add mask')
    Publisher.subscribe(self.UpdateWindowLevelValue, "Update window level value")
    Publisher.subscribe(self.UpdateWindowLevelText, "Update window level text")
    Publisher.subscribe(self.__set_layout, "Set slice viewer layout")
    Publisher.subscribe(self.OnSetInteractorStyle, "Set slice interaction style")
    Publisher.subscribe(self.OnCloseProject, "Close project data")
    #####
    Publisher.subscribe(self.OnShowText, "Show text actors on viewers")
    Publisher.subscribe(self.OnHideText, "Hide text actors on viewers")
    Publisher.subscribe(self.OnExportPicture, "Export picture to file")
    Publisher.subscribe(self.SetDefaultCursor, "Set interactor default cursor")
    Publisher.subscribe(self.SetSizeNSCursor, "Set interactor resize NS cursor")
    Publisher.subscribe(self.SetSizeWECursor, "Set interactor resize WE cursor")
    Publisher.subscribe(self.SetSizeNWSECursor, "Set interactor resize NSWE cursor")
    Publisher.subscribe(
        self.AddActors, "Add actors " + str(ORIENTATIONS[self.orientation])
    )
    Publisher.subscribe(
        self.RemoveActors, "Remove actors " + str(ORIENTATIONS[self.orientation])
    )
    Publisher.subscribe(self.OnSwapVolumeAxes, "Swap volume axes")
    Publisher.subscribe(self.ReloadActualSlice, "Reload actual slice")
    Publisher.subscribe(
        self.ReloadActualSlice, "Reload actual slice %s" % self.orientation
    )
    Publisher.subscribe(self.OnUpdateScroll, "Update scroll")
    # MIP (maximum/... intensity projection) controls
    Publisher.subscribe(self.OnSetMIPSize, "Set MIP size %s" % self.orientation)
    Publisher.subscribe(self.OnSetMIPBorder, "Set MIP border %s" % self.orientation)
    Publisher.subscribe(self.OnSetMIPInvert, "Set MIP Invert %s" % self.orientation)
    Publisher.subscribe(self.OnShowMIPInterface, "Show MIP interface")
    Publisher.subscribe(self.OnSetOverwriteMask, "Set overwrite mask")
    Publisher.subscribe(self.RefreshViewer, "Refresh viewer")
    Publisher.subscribe(self.SetInterpolatedSlices, "Set interpolated slices")
    Publisher.subscribe(self.UpdateInterpolatedSlice, "Update Slice Interpolation")
    Publisher.subscribe(self.GetCrossPos, "Set Update cross pos")
    Publisher.subscribe(self.UpdateCross, "Update cross pos")
def RefreshViewer(self):
    """Pubsub handler: repaint this viewer widget."""
    self.Refresh()
def SetDefaultCursor(self):
    """Pubsub handler: restore the default mouse cursor on the interactor."""
    self.interactor.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
def SetSizeNSCursor(self):
    """Pubsub handler: show the vertical (north-south) resize cursor."""
    self.interactor.SetCursor(wx.Cursor(wx.CURSOR_SIZENS))
def SetSizeWECursor(self):
    """Pubsub handler: show the horizontal (west-east) resize cursor."""
    self.interactor.SetCursor(wx.Cursor(wx.CURSOR_SIZEWE))
def SetSizeNWSECursor(self):
    """Pubsub handler: show the diagonal resize cursor.

    CURSOR_SIZENWSE is only reliable on Linux; other platforms use the
    generic sizing cursor.
    """
    if sys.platform.startswith("linux"):
        self.interactor.SetCursor(wx.Cursor(wx.CURSOR_SIZENWSE))
    else:
        self.interactor.SetCursor(wx.Cursor(wx.CURSOR_SIZING))
def SetFocus(self):
    """Take keyboard focus and broadcast which orientation panel now has it."""
    Publisher.sendMessage(
        "Set viewer orientation focus", orientation=self.orientation
    )
    super().SetFocus()
def OnExportPicture(self, orientation, filename, filetype):
    """Export this viewer's rendering to `filename` when the requested
    orientation matches the one this viewer displays.

    On Windows (win32api available) the file is pre-created and its
    short (8.3) path is used to avoid path-encoding issues.
    """
    orientation_map = {  # renamed: the old name shadowed builtin `dict`
        "AXIAL": const.AXIAL,
        "CORONAL": const.CORONAL,
        "SAGITAL": const.SAGITAL,
    }
    if orientation != orientation_map[self.orientation]:
        return  # request is for another viewer
    Publisher.sendMessage("Begin busy cursor")
    if _has_win32api:
        utils.touch(filename)
        filename = win32api.GetShortPathName(filename)
    self._export_picture(orientation, filename, filetype)
    Publisher.sendMessage("End busy cursor")
def _export_picture(self, id, filename, filetype):
    """Write the current slice rendering to `filename` in `filetype`
    format; only acts when `id` matches this viewer's orientation.

    POV exports go through vtkPOVExporter; raster formats are produced
    with vtkRenderLargeImage plus the matching vtk image writer.
    """
    view_prop_list = []
    orientation_map = {  # renamed: the old name shadowed builtin `dict`
        "AXIAL": const.AXIAL,
        "CORONAL": const.CORONAL,
        "SAGITAL": const.SAGITAL,
    }
    if id == orientation_map[self.orientation]:
        if filetype == const.FILETYPE_POV:
            renwin = self.interactor.GetRenderWindow()
            image = vtkWindowToImageFilter()
            image.SetInput(renwin)
            writer = vtkPOVExporter()
            writer.SetFilePrefix(filename.split(".")[0])
            writer.SetRenderWindow(renwin)
            writer.Write()
        else:
            ren = self.slice_data.renderer
            # Use tiling to generate a large rendering.
            image = vtkRenderLargeImage()
            image.SetInput(ren)
            image.SetMagnification(1)
            image.Update()
            image = image.GetOutput()
            # Choose the writer matching the requested file type.
            if filetype == const.FILETYPE_BMP:
                writer = vtkBMPWriter()
            elif filetype == const.FILETYPE_JPG:
                writer = vtkJPEGWriter()
            elif filetype == const.FILETYPE_PNG:
                writer = vtkPNGWriter()
            elif filetype == const.FILETYPE_PS:
                writer = vtkPostScriptWriter()
            elif filetype == const.FILETYPE_TIF:
                writer = vtkTIFFWriter()
                # BUG FIX: the old code used filename.strip(".tif"),
                # which strips any of the characters '.', 't', 'i', 'f'
                # from both ends (mangling names like "tiff_scan");
                # append the extension only when it is missing.
                if not filename.lower().endswith(".tif"):
                    filename = "%s.tif" % filename
            writer.SetInputData(image)
            writer.SetFileName(filename.encode(const.FS_ENCODE))
            writer.Write()
        if not os.path.exists(filename):
            wx.MessageBox(
                _("InVesalius was not able to export this picture"),
                _("Export picture error"),
            )
        for actor in view_prop_list:
            self.slice_data.renderer.AddViewProp(actor)
        Publisher.sendMessage("End busy cursor")
def OnShowText(self):
    """Pubsub handler: show this viewer's text actors."""
    self.ShowTextActors()
def OnHideText(self):
    """Pubsub handler: hide this viewer's text actors."""
    self.HideTextActors()
def OnCloseProject(self):
    """Pubsub handler: release per-project state."""
    self.CloseProject()
def CloseProject(self):
    """Release per-project viewer state so a new project can be loaded."""
    # Idiom fix: the old `for slice_data in ...: del slice_data` loop
    # only unbound the loop variable (a no-op); clearing the list is
    # sufficient.  Likewise `del self.slice_data` right before
    # reassignment was redundant.
    self.slice_data_list = []
    self.layout = (1, 1)
    self.slice_data = None
    if self.canvas:
        self.canvas.draw_list = []
        self.canvas.remove_from_renderer()
        self.canvas = None
    self.orientation_texts = []
    self.slice_number = 0
    self.cursor = None
    self.wl_text = None
    # Fresh picker for the next project.
    self.pick = vtkWorldPointPicker()
def OnSetInteractorStyle(self, style):
    """Pubsub handler: switch interaction style; non-editing styles also
    restore the default mouse cursor."""
    self.SetInteractorStyle(style)
    if style not in [const.SLICE_STATE_EDITOR, const.SLICE_STATE_WATERSHED]:
        Publisher.sendMessage("Set interactor default cursor")
def __bind_events_wx(self):
    """Bind wx events: scrollbar movement/release, key presses and the
    right-click context menu."""
    self.scroll.Bind(wx.EVT_SCROLL, self.OnScrollBar)
    self.scroll.Bind(wx.EVT_SCROLL_THUMBTRACK, self.OnScrollBarRelease)
    # self.scroll.Bind(wx.EVT_SCROLL_ENDSCROLL, self.OnScrollBarRelease)
    self.interactor.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
    self.interactor.Bind(wx.EVT_RIGHT_UP, self.OnContextMenu)
def LoadImagedata(self, mask_dict):
    """Pubsub handler: load project data into this viewer."""
    self.SetInput(mask_dict)
def LoadRenderers(self, imagedata):
    """Grow or shrink self.slice_data_list so there is exactly one slice
    window per cell of the current layout."""
    number_renderers = self.layout[0] * self.layout[1]
    diff = number_renderers - len(self.slice_data_list)
    if diff > 0:
        # Need more panels: create them.
        for i in range(diff):
            slice_data = self.create_slice_window(imagedata)
            self.slice_data_list.append(slice_data)
    elif diff < 0:
        # Too many panels: detach the extra renderers and drop them.
        to_remove = self.slice_data_list[number_renderers::]
        for slice_data in to_remove:
            self.interactor.GetRenderWindow().RemoveRenderer(slice_data.renderer)
        self.slice_data_list = self.slice_data_list[:number_renderers]
def __configure_renderers(self):
    """Lay out one renderer viewport per layout cell and position each
    panel's text actor and cursor."""
    proportion_x = 1.0 / self.layout[0]
    proportion_y = 1.0 / self.layout[1]
    # The (0,0) in VTK is in bottom left. So the creation from renderers
    # must be in inverted order, from the top left to bottom right.
    w, h = self.interactor.GetRenderWindow().GetSize()
    w *= proportion_x
    h *= proportion_y
    n = 0
    for j in range(self.layout[1] - 1, -1, -1):
        for i in range(self.layout[0]):
            # Normalized viewport rectangle for cell (i, j).
            slice_xi = i * proportion_x
            slice_xf = (i + 1) * proportion_x
            slice_yi = j * proportion_y
            slice_yf = (j + 1) * proportion_y
            position = (slice_xi, slice_yi, slice_xf, slice_yf)
            slice_data = self.slice_data_list[n]
            slice_data.renderer.SetViewport(position)
            # Text actor position
            x, y = const.TEXT_POS_LEFT_DOWN
            slice_data.text.SetPosition((x + slice_xi, y + slice_yi))
            slice_data.SetCursor(self.__create_cursor())
            # slice_data.SetSize((w, h))
            self.__update_camera(slice_data)
            # Border flags for panels on the edge of the grid.
            style = 0
            if j == 0:
                style = style | sd.BORDER_DOWN
            if j == self.layout[1] - 1:
                style = style | sd.BORDER_UP
            if i == 0:
                style = style | sd.BORDER_LEFT
            if i == self.layout[0] - 1:
                style = style | sd.BORDER_RIGHT
            # slice_data.SetBorderStyle(style)
            n += 1
def __create_cursor(self):
    """Create a circular brush cursor configured for this viewer's
    orientation and spacing; it starts hidden."""
    cursor = ca.CursorCircle()
    cursor.SetOrientation(self.orientation)
    # self.__update_cursor_position([i for i in actor_bound[1::2]])
    cursor.SetColour(self._brush_cursor_colour)
    cursor.SetSpacing(self.slice_.spacing)
    cursor.Show(0)
    self.cursor_ = cursor
    return cursor
def SetInput(self, mask_dict):
    """Initialize the viewer for a newly loaded project: scrollbar range,
    slice window, cross lines, drawing canvas and camera."""
    self.slice_ = sl.Slice()
    max_slice_number = sl.Slice().GetNumberOfSlices(self.orientation)
    self.scroll.SetScrollbar(wx.SB_VERTICAL, 1, max_slice_number, max_slice_number)
    self.slice_data = self.create_slice_window()
    self.slice_data.SetCursor(self.__create_cursor())
    self.cam = self.slice_data.renderer.GetActiveCamera()
    self.__build_cross_lines()
    # Canvas overlay used for measures and hand-drawn annotations.
    self.canvas = CanvasRendererCTX(
        self,
        self.slice_data.renderer,
        self.slice_data.canvas_renderer,
        self.orientation,
    )
    self.canvas.draw_list.append(self.slice_data)
    # Set the slice number to the last slice to ensure the camera is far
    # enough to show all slices.
    self.set_slice_number(max_slice_number - 1)
    self.__update_camera()
    self.slice_data.renderer.ResetCamera()
    self.interactor.GetRenderWindow().AddRenderer(self.slice_data.renderer)
    self.interactor.Render()
    self.EnableText()
    self.wl_text.Hide()
    ## Insert cursor
    self.SetInteractorStyle(const.STATE_DEFAULT)
def __build_cross_lines(self):
    """Build the red, non-pickable 3D cross actor shown on the overlay
    renderer; it starts invisible."""
    renderer = self.slice_data.overlay_renderer
    cross = vtkCursor3D()
    cross.AllOff()
    cross.AxesOn()
    self.cross = cross
    c = vtkCoordinate()
    c.SetCoordinateSystemToWorld()
    cross_mapper = vtkPolyDataMapper()
    cross_mapper.SetInputConnection(cross.GetOutputPort())
    # cross_mapper.SetTransformCoordinate(c)
    p = vtkProperty()
    p.SetColor(1, 0, 0)  # red cross
    cross_actor = vtkActor()
    cross_actor.SetMapper(cross_mapper)
    cross_actor.SetProperty(p)
    cross_actor.VisibilityOff()
    # Only the slices are pickable
    cross_actor.PickableOff()
    self.cross_actor = cross_actor
    renderer.AddActor(cross_actor)
# def __update_cross_position(self, arg, position):
# # self.cross.SetFocalPoint(position[:3])
# self.UpdateSlicesPosition(None, position)
def set_cross_visibility(self, visibility):
    """Show or hide the cross actor."""
    self.cross_actor.SetVisibility(visibility)
def _set_editor_cursor_visibility(self, visibility):
for slice_data in self.slice_data_list:
slice_data.cursor.actor.SetVisibility(visibility)
def SetOrientation(self, orientation):
    """Change this viewer's orientation and update each panel's camera."""
    self.orientation = orientation
    for slice_data in self.slice_data_list:
        # NOTE(review): __update_camera is defined without a slice_data
        # parameter later in this class — confirm which overload is meant.
        self.__update_camera(slice_data)
def create_slice_window(self):
    """Create the three-layer render stack (slice, canvas, overlay) that
    shares one camera, plus the image actor, and return the SliceData
    bundling them."""
    renderer = vtkRenderer()
    renderer.SetLayer(0)  # bottom layer: the slice image itself
    cam = renderer.GetActiveCamera()
    canvas_renderer = vtkRenderer()
    canvas_renderer.SetLayer(1)  # middle layer: measures/annotations
    canvas_renderer.SetActiveCamera(cam)
    canvas_renderer.SetInteractive(0)
    canvas_renderer.PreserveDepthBufferOn()
    overlay_renderer = vtkRenderer()
    overlay_renderer.SetLayer(2)  # top layer: cross lines
    overlay_renderer.SetActiveCamera(cam)
    overlay_renderer.SetInteractive(0)
    self.interactor.GetRenderWindow().SetNumberOfLayers(3)
    self.interactor.GetRenderWindow().AddRenderer(overlay_renderer)
    self.interactor.GetRenderWindow().AddRenderer(canvas_renderer)
    self.interactor.GetRenderWindow().AddRenderer(renderer)
    actor = vtkImageActor()
    self.slice_actor = actor
    # TODO: Create a option to let the user set if he wants to interpolate
    # the slice images.
    # NOTE(review): config True maps to InterpolateOff (inverted naming)
    # — kept as-is, matches UpdateInterpolatedSlice.
    session = ses.Session()
    if session.GetConfig("slice_interpolation"):
        actor.InterpolateOff()
    else:
        actor.InterpolateOn()
    slice_data = sd.SliceData()
    slice_data.SetOrientation(self.orientation)
    slice_data.renderer = renderer
    slice_data.canvas_renderer = canvas_renderer
    slice_data.overlay_renderer = overlay_renderer
    slice_data.actor = actor
    # slice_data.SetBorderStyle(sd.BORDER_ALL)
    renderer.AddActor(actor)
    # renderer.AddActor(slice_data.text.actor)
    # renderer.AddViewProp(slice_data.box_actor)
    return slice_data
def UpdateInterpolatedSlice(self):
    """Re-read the 'slice_interpolation' session setting, apply it to the
    slice actor and re-render.

    NOTE: config value True maps to InterpolateOff() (and vice versa),
    matching create_slice_window.
    """
    if self.slice_actor is not None:  # idiom fix: was `!= None`
        session = ses.Session()
        if session.GetConfig("slice_interpolation"):
            self.slice_actor.InterpolateOff()
        else:
            self.slice_actor.InterpolateOn()
        self.interactor.Render()
def SetInterpolatedSlices(self, flag):
    """Enable or disable interpolation on the current slice actor and
    re-render; the flag is also stored for later use."""
    self.interpolation_slice_status = flag
    # Idiom fix: `is not None` instead of `!= None`, plain truthiness
    # instead of `== True`.
    if self.slice_actor is not None:
        if self.interpolation_slice_status:
            self.slice_actor.InterpolateOn()
        else:
            self.slice_actor.InterpolateOff()
        self.interactor.Render()
def __update_camera(self):
    """Point the camera along the axis of this viewer's orientation,
    using the project's original orientation tables, with parallel
    projection.

    NOTE(review): other methods in this class call
    self.__update_camera(slice_data) with an argument — confirm whether
    another overload exists elsewhere in the file.
    """
    orientation = self.orientation
    proj = project.Project()
    orig_orien = proj.original_orientation
    self.cam.SetFocalPoint(0, 0, 0)
    self.cam.SetViewUp(const.SLICE_POSITION[orig_orien][0][self.orientation])
    self.cam.SetPosition(const.SLICE_POSITION[orig_orien][1][self.orientation])
    # self.cam.ComputeViewPlaneNormal()
    # self.cam.OrthogonalizeViewUp()
    self.cam.ParallelProjectionOn()
def __update_display_extent(self, image):
    """Match the actor's display extent to the new slice image and fix
    the clipping range."""
    self.slice_data.actor.SetDisplayExtent(image.GetExtent())
    self.slice_data.renderer.ResetCameraClippingRange()
def UpdateRender(self):
    """Pubsub handler: re-render this viewer."""
    self.interactor.Render()
def UpdateCanvas(self, evt=None):
    """Rebuild the canvas draw list, mark the canvas dirty and re-render.
    No-op when no canvas exists (e.g. after CloseProject)."""
    if self.canvas is None:
        return
    self._update_draw_list()
    self.canvas.modified = True
    self.interactor.Render()
def _update_draw_list(self):
    """Rebuild the canvas draw list for the current slice: keep
    non-measure items, then add the visible measures and per-slice
    drawings for the current slice number."""
    cp_draw_list = self.canvas.draw_list[:]
    self.canvas.draw_list = []
    # Removing all measures
    for i in cp_draw_list:
        if not isinstance(
            i,
            (
                measures.AngularMeasure,
                measures.LinearMeasure,
                measures.CircleDensityMeasure,
                measures.PolygonDensityMeasure,
            ),
        ):
            self.canvas.draw_list.append(i)
    # Then add all needed measures
    for m, mr in self.measures.get(self.orientation, self.slice_data.number):
        if m.visible:
            self.canvas.draw_list.append(mr)
    n = self.slice_data.number
    self.canvas.draw_list.extend(self.draw_by_slice_number[n])
def __configure_scroll(self):
    """Configure the scrollbar range from the actor's slice count,
    divided among the panels of the current layout."""
    actor = self.slice_data_list[0].actor
    number_of_slices = self.layout[0] * self.layout[1]
    # BUG FIX: under Python 3 `/` yields a float; the scrollbar needs an
    # integer count, so use floor division (rounding up on remainder).
    max_slice_number = actor.GetSliceNumberMax() // number_of_slices
    if actor.GetSliceNumberMax() % number_of_slices:
        max_slice_number += 1
    self.scroll.SetScrollbar(wx.SB_VERTICAL, 1, max_slice_number, max_slice_number)
    self.set_scroll_position(0)
@property
def number_slices(self):
    """Number of slices composed into one displayed image (MIP size)."""
    return self._number_slices

@number_slices.setter
def number_slices(self, val):
    # Changing the MIP size invalidates the cached composed slice.
    if val != self._number_slices:
        self._number_slices = val
        buffer_ = self.slice_.buffer_slices[self.orientation]
        buffer_.discard_buffer()
def set_scroll_position(self, position):
    """Move the scrollbar thumb to `position` and apply the change as if
    the user had scrolled."""
    self.scroll.SetThumbPosition(position)
    self.OnScrollBar()
def UpdateSlice3D(self, pos):
    """Notify the 3D slice plane to follow this viewer's scroll position.

    `pos` is kept for interface compatibility, but the authoritative
    value is re-read from the scrollbar (matching the original code).
    """
    # Removed unused local: original_orientation was fetched from the
    # project but never used.
    pos = self.scroll.GetThumbPosition()
    Publisher.sendMessage(
        "Change slice from slice plane", orientation=self.orientation, index=pos
    )
def OnScrollBar(self, evt=None, update3D=True):
    """Scrollbar handler: show the slice at the thumb position, optionally
    sync the 3D plane, and flush the edit buffer on user scrolls."""
    pos = self.scroll.GetThumbPosition()
    self.set_slice_number(pos)
    if update3D:
        self.UpdateSlice3D(pos)
    # This Render needs to come before the self.style.OnScrollBar, otherwise the GetFocalPoint will sometimes
    # provide the non-updated coordinate and the cross focal point will lag one pixel behind the actual
    # scroll position
    self.interactor.Render()
    try:
        self.style.OnScrollBar()
    except AttributeError:
        # Current interactor style has no scroll hook.
        pass
    if evt:
        if self._flush_buffer:
            self.slice_.apply_slice_buffer_to_mask(self.orientation)
        evt.Skip()
def OnScrollBarRelease(self, evt):
    """Thumb released: nothing to do beyond letting the event propagate.

    (Removed an unused read of the thumb position.)
    """
    evt.Skip()
def OnKeyDown(self, evt=None, obj=None):
    """Keyboard handler: arrow keys scroll slices, numpad +/- resize the
    MIP window, numpad digits switch the projection type."""
    pos = self.scroll.GetThumbPosition()
    skip = True
    # NOTE(review): `min`/`max` shadow the builtins within this method.
    min = 0
    max = self.slice_.GetMaxSliceNumber(self.orientation)
    # Numpad digit -> projection type.
    projections = {
        wx.WXK_NUMPAD0: const.PROJECTION_NORMAL,
        wx.WXK_NUMPAD1: const.PROJECTION_MaxIP,
        wx.WXK_NUMPAD2: const.PROJECTION_MinIP,
        wx.WXK_NUMPAD3: const.PROJECTION_MeanIP,
        wx.WXK_NUMPAD4: const.PROJECTION_MIDA,
        wx.WXK_NUMPAD5: const.PROJECTION_CONTOUR_MIP,
        wx.WXK_NUMPAD6: const.PROJECTION_CONTOUR_MIDA,
    }
    # Commit pending mask edits before changing the displayed slice.
    if self._flush_buffer:
        self.slice_.apply_slice_buffer_to_mask(self.orientation)
    if evt.GetKeyCode() == wx.WXK_UP and pos > min:
        self.OnScrollForward()
        self.OnScrollBar()
        skip = False
    elif evt.GetKeyCode() == wx.WXK_DOWN and pos < max:
        self.OnScrollBackward()
        self.OnScrollBar()
        skip = False
    elif evt.GetKeyCode() == wx.WXK_NUMPAD_ADD:
        # Grow MIP size; the spin control clamps, so only act on change.
        actual_value = self.mip_ctrls.mip_size_spin.GetValue()
        self.mip_ctrls.mip_size_spin.SetValue(actual_value + 1)
        if self.mip_ctrls.mip_size_spin.GetValue() != actual_value:
            self.number_slices = self.mip_ctrls.mip_size_spin.GetValue()
            self.ReloadActualSlice()
        skip = False
    elif evt.GetKeyCode() == wx.WXK_NUMPAD_SUBTRACT:
        # Shrink MIP size; same clamping logic as above.
        actual_value = self.mip_ctrls.mip_size_spin.GetValue()
        self.mip_ctrls.mip_size_spin.SetValue(actual_value - 1)
        if self.mip_ctrls.mip_size_spin.GetValue() != actual_value:
            self.number_slices = self.mip_ctrls.mip_size_spin.GetValue()
            self.ReloadActualSlice()
        skip = False
    elif evt.GetKeyCode() in projections:
        self.slice_.SetTypeProjection(projections[evt.GetKeyCode()])
        Publisher.sendMessage(
            "Set projection type", projection_id=projections[evt.GetKeyCode()]
        )
        Publisher.sendMessage("Reload actual slice")
        skip = False
    self.UpdateSlice3D(pos)
    self.interactor.Render()
    if evt and skip:
        evt.Skip()
def OnScrollForward(self, evt=None, obj=None):
    """Scroll one slice backward in index (toward slice 0), flushing any
    pending mask edits first."""
    if not self.scroll_enabled:
        return
    pos = self.scroll.GetThumbPosition()
    min = 0  # NOTE(review): shadows builtin `min`
    if pos > min:
        if self._flush_buffer:
            self.slice_.apply_slice_buffer_to_mask(self.orientation)
        pos = pos - 1
        self.scroll.SetThumbPosition(pos)
        self.OnScrollBar()
def OnScrollBackward(self, evt=None, obj=None):
    """Scroll one slice forward in index (toward the last slice),
    flushing any pending mask edits first."""
    if not self.scroll_enabled:
        return
    pos = self.scroll.GetThumbPosition()
    max = self.slice_.GetMaxSliceNumber(self.orientation)  # NOTE(review): shadows builtin `max`
    if pos < max:
        if self._flush_buffer:
            self.slice_.apply_slice_buffer_to_mask(self.orientation)
        pos = pos + 1
        self.scroll.SetThumbPosition(pos)
        self.OnScrollBar()
def OnSetMIPSize(self, number_slices):
    """Pubsub handler: change the MIP window size and redraw the slice."""
    self.number_slices = number_slices
    self.ReloadActualSlice()
def OnSetMIPBorder(self, border_size):
    """Pubsub handler: change the MIP border size, drop the cached
    composed slice and redraw."""
    self.slice_.n_border = border_size
    buffer_ = self.slice_.buffer_slices[self.orientation]
    buffer_.discard_buffer()
    self.ReloadActualSlice()
def OnSetMIPInvert(self, invert):
    """Pubsub handler: toggle MIP inversion, drop the cached composed
    slice and redraw."""
    self._mip_inverted = invert
    buffer_ = self.slice_.buffer_slices[self.orientation]
    buffer_.discard_buffer()
    self.ReloadActualSlice()
def OnShowMIPInterface(self, flag):
    """Pubsub handler: attach or detach the MIP control panel below the
    viewer."""
    if flag:
        if not self.mip_ctrls.Shown:
            self.mip_ctrls.Show()
            self.GetSizer().Add(self.mip_ctrls, 0, wx.EXPAND | wx.GROW | wx.ALL, 2)
            self.Layout()
    else:
        self.mip_ctrls.Hide()
        self.GetSizer().Detach(self.mip_ctrls)
        self.Layout()
def OnSetOverwriteMask(self, flag):
    """Pubsub handler: remember whether edits overwrite the mask."""
    self.overwrite_mask = flag
def set_slice_number(self, index):
    """Display slice `index`: fetch the (possibly MIP-composed) image,
    swap per-slice actors, update the slice-number label, display extent,
    cross bounds and the canvas draw list."""
    max_slice_number = sl.Slice().GetNumberOfSlices(self.orientation)
    # Clamp to the valid slice range.
    index = max(index, 0)
    index = min(index, max_slice_number - 1)
    inverted = self.mip_ctrls.inverted.GetValue()
    border_size = self.mip_ctrls.border_spin.GetValue()
    try:
        image = self.slice_.GetSlices(
            self.orientation, index, self.number_slices, inverted, border_size
        )
    except IndexError:
        # Slice not available (e.g. during project teardown).
        return
    self.slice_data.actor.SetInputData(image)
    # Swap the per-slice overlay actors: out with the old slice's, in
    # with the new one's.
    for actor in self.actors_by_slice_number[self.slice_data.number]:
        self.slice_data.renderer.RemoveActor(actor)
    for actor in self.actors_by_slice_number[index]:
        self.slice_data.renderer.AddActor(actor)
    # for (m, mr) in self.measures.get(self.orientation, self.slice_data.number):
    # try:
    # self.canvas.draw_list.remove(mr)
    # except ValueError:
    # pass
    # for (m, mr) in self.measures.get(self.orientation, index):
    # if m.visible:
    # self.canvas.draw_list.append(mr)
    if self.slice_._type_projection == const.PROJECTION_NORMAL:
        self.slice_data.SetNumber(index)
    else:
        # Projections span a range of slices; label it start..end.
        max_slices = self.slice_.GetMaxSliceNumber(self.orientation)
        end = min(max_slices, index + self.number_slices - 1)
        self.slice_data.SetNumber(index, end)
    self.__update_display_extent(image)
    self.cross.SetModelBounds(self.slice_data.actor.GetBounds())
    self._update_draw_list()
def ChangeSliceNumber(self, index):
    """Pubsub handler: move the scrollbar to `index` and show that slice.
    The thumb position is re-read so wx clamping is respected."""
    self.scroll.SetThumbPosition(int(index))
    pos = self.scroll.GetThumbPosition()
    self.set_slice_number(pos)
def ReloadActualSlice(self):
    """Pubsub handler: redraw the currently displayed slice."""
    pos = self.scroll.GetThumbPosition()
    self.set_slice_number(pos)
    self.interactor.Render()
def OnUpdateScroll(self):
    """Pubsub handler: refresh the scrollbar range from the current
    number of slices."""
    max_slice_number = sl.Slice().GetNumberOfSlices(self.orientation)
    self.scroll.SetScrollbar(wx.SB_VERTICAL, 1, max_slice_number, max_slice_number)
def OnSwapVolumeAxes(self, axes):
    """After a volume axis swap, permute the cursor spacing to match the
    new data orientation, then reset the camera."""
    axis0, axis1 = axes
    cursor = self.slice_data.cursor
    spacing = cursor.spacing
    # Swapped axis pair -> new (x, y, z) spacing order.
    permutation = {
        (2, 1): (spacing[1], spacing[0], spacing[2]),
        (2, 0): (spacing[2], spacing[1], spacing[0]),
        (1, 0): (spacing[0], spacing[2], spacing[1]),
    }
    if (axis0, axis1) in permutation:
        cursor.SetSpacing(permutation[(axis0, axis1)])
    self.slice_data.renderer.ResetCamera()
def GetCrossPos(self):
    """Pubsub handler: broadcast the cross focal point together with the
    current slice spacing."""
    spacing = self.slice_data.actor.GetInput().GetSpacing()
    Publisher.sendMessage(
        "Cross focal point", coord=self.cross.GetFocalPoint(), spacing=spacing
    )
def UpdateCross(self, coord):
    """Pubsub handler: move the cross to `coord`, broadcast the
    co-registered point (with zeroed orientation angles) and redraw."""
    self.cross.SetFocalPoint(coord)
    Publisher.sendMessage(
        "Co-registered points",
        arg=None,
        position=(coord[0], coord[1], coord[2], 0.0, 0.0, 0.0),
    )
    self.OnScrollBar()
    self.interactor.Render()
def AddActors(self, actors, slice_number):
    """Register `actors` for `slice_number`, adding them to the renderer
    immediately when that slice is the one currently shown."""
    current = self.scroll.GetThumbPosition()
    if current == slice_number:
        renderer = self.slice_data.renderer
        for actor in actors:
            renderer.AddActor(actor)
    self.actors_by_slice_number[slice_number].extend(actors)
def RemoveActors(self, actors, slice_number):
    """Remove a list of actors registered for `slice_number`.

    A dedicated per-slice renderer is used when one exists; otherwise
    the actors come off the main slice renderer.
    """
    try:
        renderer = self.renderers_by_slice_number[slice_number]
    except KeyError:
        for actor in actors:
            self.actors_by_slice_number[slice_number].remove(actor)
            self.slice_data.renderer.RemoveActor(actor)
    else:
        for actor in actors:
            # Remove from the renderer first, then from the registry.
            renderer.RemoveActor(actor)
            self.actors_by_slice_number[slice_number].remove(actor)
def get_actual_mask(self):
    """Return the currently selected mask, or None when no mask exists
    or none is visible."""
    return self.slice_.current_mask
def get_slice(self):
    """Return the Slice object backing this viewer."""
    return self.slice_
def discard_slice_cache(self, all_orientations=False, vtk_cache=True):
    """Drop cached slice images, either for every orientation or only for
    this viewer's; optionally also drop the cached VTK images."""
    if all_orientations:
        orientations = list(self.slice_.buffer_slices)
    else:
        orientations = [self.orientation]
    for orientation in orientations:
        buffer_ = self.slice_.buffer_slices[orientation]
        buffer_.discard_image()
        if vtk_cache:
            buffer_.discard_vtk_image()
def discard_mask_cache(self, all_orientations=False, vtk_cache=True):
    """Drop cached mask images, either for every orientation or only for
    this viewer's; optionally also drop the cached VTK masks."""
    if all_orientations:
        orientations = list(self.slice_.buffer_slices)
    else:
        orientations = [self.orientation]
    for orientation in orientations:
        buffer_ = self.slice_.buffer_slices[orientation]
        buffer_.discard_mask()
        if vtk_cache:
            buffer_.discard_vtk_mask()
|
friture | dock | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
from inspect import signature
from friture.controlbar import ControlBar
from friture.widgetdict import getWidgetById, widgetIds
from PyQt5 import QtWidgets
class Dock(QtWidgets.QWidget):
    """A dock hosting one audio-analysis widget plus its control bar.

    The control bar lets the user switch the hosted widget type, open its
    settings, or close the dock. The hosted widget is rebuilt on type change
    and wired to the shared audio buffer.
    """

    def __init__(self, parent, name, qml_engine, widgetId=None):
        super().__init__(parent)

        self.dockmanager = parent.dockmanager
        self.audiobuffer = parent.audiobuffer

        self.setObjectName(name)

        self.control_bar = ControlBar(self)
        self.control_bar.combobox_select.activated.connect(self.indexChanged)
        self.control_bar.settings_button.clicked.connect(self.settings_slot)
        self.control_bar.close_button.clicked.connect(self.closeClicked)

        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.addWidget(self.control_bar)
        self.layout.setContentsMargins(0, 0, 0, 0)

        self.widgetId = None
        self.audiowidget = None
        self.qml_engine = qml_engine

        # default to the first registered widget type
        if widgetId is None:
            widgetId = widgetIds()[0]
        self.widget_select(widgetId)

    # note that by default the closeEvent is accepted, no need to do it explicitely
    def closeEvent(self, event):
        """Notify the dock manager when this dock is closed."""
        self.dockmanager.close_dock(self)

    def closeClicked(self, checked):
        """Slot: close button pressed in the control bar."""
        self.close()

    # slot
    def indexChanged(self, index):
        """Slot: a widget type was chosen by combo-box index."""
        # Valid indices are 0..len-1; fall back to the first entry on an
        # out-of-range index. (The previous guard used '>' — an off-by-one —
        # and reset 'index' to a widget *id* rather than a list index.)
        if index >= len(widgetIds()):
            index = 0
        self.widget_select(widgetIds()[index])

    # slot
    def widget_select(self, widgetId):
        """Replace the hosted widget with one of the given type.

        No-op when the requested type is already active. Unknown ids fall
        back to the first registered widget type.
        """
        if self.widgetId == widgetId:
            return

        if self.audiowidget is not None:
            self.audiowidget.close()
            self.audiowidget.deleteLater()

        if widgetId not in widgetIds():
            widgetId = widgetIds()[0]

        self.widgetId = widgetId
        constructor = getWidgetById(widgetId)["Class"]
        # some widget constructors also take the qml engine
        if len(signature(constructor).parameters) == 2:
            self.audiowidget = constructor(self, self.qml_engine)
        else:
            self.audiowidget = constructor(self)
        self.audiowidget.set_buffer(self.audiobuffer)
        self.audiobuffer.new_data_available.connect(self.audiowidget.handle_new_data)
        self.layout.addWidget(self.audiowidget)

        # keep the combo box in sync with the actual selection
        index = widgetIds().index(widgetId)
        self.control_bar.combobox_select.setCurrentIndex(index)

    def canvasUpdate(self):
        """Redraw the hosted widget, if any."""
        if self.audiowidget is not None:
            self.audiowidget.canvasUpdate()

    def pause(self):
        """Pause the hosted widget, if any."""
        if self.audiowidget is not None:
            self.audiowidget.pause()

    def restart(self):
        """Resume the hosted widget, if any."""
        if self.audiowidget is not None:
            self.audiowidget.restart()

    # slot
    def settings_slot(self, checked):
        """Slot: settings button pressed; forward to the hosted widget."""
        self.audiowidget.settings_called(checked)

    # method
    def saveState(self, settings):
        """Persist the widget type and the hosted widget's own state."""
        settings.setValue("type", self.widgetId)
        self.audiowidget.saveState(settings)

    # method
    def restoreState(self, settings):
        """Restore the hosted widget's state from settings."""
        self.audiowidget.restoreState(settings)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.