section stringlengths 2 30 | filename stringlengths 1 82 | text stringlengths 783 28M |
|---|---|---|
stream | filtered | from threading import Event
from streamlink.buffers import Buffer
from streamlink.stream.stream import StreamIO
class FilteredStream(StreamIO):
    """StreamIO mixin that lets read() block while content is being filtered.

    While "paused" (filtering), readers wait on an internal event instead of
    propagating the underlying OSError.
    """

    buffer: Buffer

    def __init__(self, *args, **kwargs):
        # The event is SET while not filtering, i.e. reads may proceed.
        self._event_filter = Event()
        self._event_filter.set()
        super().__init__(*args, **kwargs)

    def read(self, *args, **kwargs) -> bytes:
        while True:
            try:
                return super().read(*args, **kwargs)
            except OSError:
                # Block for as long as the stream is paused by filtering.
                self._event_filter.wait()
                if self.buffer.closed:
                    return b""
                if self.buffer.length > 0:
                    # Data arrived in the meantime: retry the read.
                    continue
                # Not filtering and nothing buffered: propagate the error.
                raise

    def close(self) -> None:
        super().close()
        # Wake up any reader still blocked in read().
        self._event_filter.set()

    def is_paused(self) -> bool:
        return not self._event_filter.is_set()

    def pause(self) -> None:
        self._event_filter.clear()

    def resume(self) -> None:
        self._event_filter.set()

    def filter_wait(self, timeout=None):
        return self._event_filter.wait(timeout)
|
draftgeoutils | arcs | # ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * Copyright (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides various functions to work with arcs."""
## @package arcs
# \ingroup draftgeoutils
# \brief Provides various functions to work with arcs.
import math
import DraftVecUtils
import FreeCAD as App
import lazy_loader.lazy_loader as lz
from draftgeoutils.edges import findMidpoint
from draftgeoutils.general import geomType
# Delay import of module until first use because it is heavy
Part = lz.LazyLoader("Part", globals(), "Part")
## \addtogroup draftgeoutils
# @{
def isClockwise(edge, ref=None):
    """Return True if a circle-based edge has a clockwise direction."""
    if geomType(edge) != "Circle":
        return True
    first_param = edge.ParameterRange[0]
    tangent_start = edge.Curve.tangent(first_param)[0]
    if DraftVecUtils.isNull(tangent_start):
        return True
    # A second tangent at a nearby parameter, which has little chance of
    # being aligned with the first one.
    tangent_near = edge.Curve.tangent(first_param + 0.01)[0]
    axis = edge.Curve.Axis
    reference = ref if ref else App.Vector(0, 0, 1)
    # Flip the axis when it points "the wrong way" from the reference.
    if axis.getAngle(reference) > math.pi / 2:
        axis = axis.negative()
    if DraftVecUtils.angle(tangent_start, tangent_near, axis) < 0:
        return False
    return axis.z >= 0
def isWideAngle(edge):
    """Return True if the given edge is an arc with angle > 180 degrees."""
    if geomType(edge) != "Circle":
        return False
    # An arc is "wide" when it is longer than half the full circumference.
    radius = edge.Curve.Radius
    full_circumference = 2 * radius * math.pi
    return edge.Length > full_circumference / 2
def arcFrom2Pts(firstPt, lastPt, center, axis=None):
    """Build an arc with center and 2 points, can be oriented with axis.

    Returns a Part.Edge circular arc through both points, or None when the
    points are not equidistant from the center.
    """
    radius1 = firstPt.sub(center).Length
    radius2 = lastPt.sub(center).Length
    # (PREC = 4 = same as Part Module), Is it possible?
    if round(radius1 - radius2, 4) != 0:
        return None
    # Arc midpoint: bisector direction of the two point vectors, scaled to
    # the radius and offset back to the center.
    # NOTE(review): presumably fails when firstPt and lastPt are diametrically
    # opposite (the bisector is the null vector, normalize() undefined) — verify.
    thirdPt = App.Vector(firstPt.sub(center).add(lastPt).sub(center))
    thirdPt.normalize()
    thirdPt.scale(radius1, radius1, radius1)
    thirdPt = thirdPt.add(center)
    newArc = Part.Edge(Part.Arc(firstPt, thirdPt, lastPt))
    # If the resulting arc's axis opposes the requested one, rebuild the arc
    # through the antipodal midpoint to flip its orientation.
    if axis and newArc.Curve.Axis.dot(axis) < 0:
        thirdPt = thirdPt.sub(center)
        thirdPt.scale(-1, -1, -1)
        thirdPt = thirdPt.add(center)
        newArc = Part.Edge(Part.Arc(firstPt, thirdPt, lastPt))
    return newArc
def arcFromSpline(edge):
    """Turn given edge into a circular arc from three points.

    Takes its first point, midpoint and endpoint. It works best with bspline
    segments such as those from imported svg files. Use this only
    if you are sure your edge is really an arc.

    It returns None if there is a problem, including passing straight edges.
    """
    if geomType(edge) == "Line":
        print("This edge is straight, cannot build an arc on it")
        return None
    if len(edge.Vertexes) > 1:
        # 2-point arc: third point is sampled halfway along the edge length
        p1 = edge.Vertexes[0].Point
        p2 = edge.Vertexes[-1].Point
        ml = edge.Length / 2
        p3 = edge.valueAt(ml)
        try:
            return Part.Arc(p1, p3, p2).toShape()
        except Part.OCCError:
            print("Couldn't make an arc out of this edge")
            return None
    else:
        # circle: single-vertex (closed) edge; the segment from the start
        # point to the edge midpoint is taken as the diameter
        p1 = edge.Vertexes[0].Point
        p2 = findMidpoint(edge)
        ray = p2.sub(p1)
        ray.scale(0.5, 0.5, 0.5)
        center = p1.add(ray)
        radius = ray.Length
        try:
            return Part.makeCircle(radius, center)
        except Part.OCCError:
            print("couldn't make a circle out of this edge")
            return None
## @}
|
gui | project_properties | # --------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
# --------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
# --------------------------------------------------------------------
import invesalius.project as prj
import wx
from invesalius import constants as const
from invesalius.gui import utils
from invesalius.pubsub import pub as Publisher
ORIENTATION_LABEL = {
const.AXIAL: _("Axial"),
const.CORONAL: _("Coronal"),
const.SAGITAL: _("Sagital"),
}
class ProjectProperties(wx.Dialog):
    """Dialog showing the current project's name (editable) plus read-only
    modality, orientation and voxel spacing fields."""

    def __init__(self, parent):
        super().__init__(
            id=-1,
            name="",
            parent=parent,
            style=wx.DEFAULT_FRAME_STYLE,
            title=_("Project Properties"),
        )
        self.Center(wx.BOTH)
        self._init_gui()

    def _init_gui(self):
        """Build all widgets from the singleton Project state."""
        project = prj.Project()
        # Only the name is editable; the caller reads it back from name_txt.
        self.name_txt = wx.TextCtrl(self, -1, value=project.name)
        self.name_txt.SetMinSize((utils.calc_width_needed(self.name_txt, 30), -1))
        modality_txt = wx.TextCtrl(
            self, -1, value=project.modality, style=wx.TE_READONLY
        )
        # Unknown orientation codes fall back to a generic label.
        try:
            orientation = ORIENTATION_LABEL[project.original_orientation]
        except KeyError:
            orientation = _("Other")
        orientation_txt = wx.TextCtrl(self, -1, value=orientation, style=wx.TE_READONLY)
        # Voxel spacing: one read-only field per axis, 5 significant digits.
        sx, sy, sz = project.spacing
        spacing_txt_x = wx.TextCtrl(self, -1, value=f"{sx:.5}", style=wx.TE_READONLY)
        spacing_txt_y = wx.TextCtrl(self, -1, value=f"{sy:.5}", style=wx.TE_READONLY)
        spacing_txt_z = wx.TextCtrl(self, -1, value=f"{sz:.5}", style=wx.TE_READONLY)
        # Each row: static label + value control(s).
        name_sizer = wx.BoxSizer(wx.HORIZONTAL)
        name_sizer.Add(
            wx.StaticText(self, -1, _("Name")), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5
        )
        name_sizer.Add(self.name_txt, 1, wx.EXPAND | wx.ALL, 5)
        modality_sizer = wx.BoxSizer(wx.HORIZONTAL)
        modality_sizer.Add(
            wx.StaticText(self, -1, _("Modality")),
            0,
            wx.ALIGN_CENTER_VERTICAL | wx.ALL,
            5,
        )
        modality_sizer.Add(modality_txt, 1, wx.EXPAND | wx.ALL, 5)
        orientation_sizer = wx.BoxSizer(wx.HORIZONTAL)
        orientation_sizer.Add(
            wx.StaticText(self, -1, _("Orientation")),
            0,
            wx.ALIGN_CENTER_VERTICAL | wx.ALL,
            5,
        )
        orientation_sizer.Add(orientation_txt, 1, wx.EXPAND | wx.ALL, 5)
        spacing_sizer = wx.BoxSizer(wx.HORIZONTAL)
        spacing_sizer.Add(
            wx.StaticText(self, -1, _("Spacing")),
            0,
            wx.ALIGN_CENTER_VERTICAL | wx.ALL,
            5,
        )
        spacing_sizer.Add(spacing_txt_x, 1, wx.EXPAND | wx.ALL, 5)
        spacing_sizer.Add(spacing_txt_y, 1, wx.EXPAND | wx.ALL, 5)
        spacing_sizer.Add(spacing_txt_z, 1, wx.EXPAND | wx.ALL, 5)
        # Standard OK/Cancel button row.
        btn_sizer = wx.StdDialogButtonSizer()
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_sizer.AddButton(btn_ok)
        btn_sizer.AddButton(btn_cancel)
        btn_sizer.Realize()
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(name_sizer, 1, wx.EXPAND)
        main_sizer.Add(modality_sizer, 1, wx.EXPAND)
        main_sizer.Add(orientation_sizer, 1, wx.EXPAND)
        main_sizer.Add(spacing_sizer, 1, wx.EXPAND)
        main_sizer.Add(btn_sizer, 1, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        self.Layout()
|
cargo | changeMetas | import math
import eos.db
import gui.mainFrame
import wx
from gui import globalEvents as GE
from gui.fitCommands.calc.cargo.add import CalcAddCargoCommand
from gui.fitCommands.calc.cargo.remove import CalcRemoveCargoCommand
from gui.fitCommands.helpers import CargoInfo, InternalCommandHistory
from service.fit import Fit
class GuiChangeCargoMetasCommand(wx.Command):
    """Undoable GUI command replacing several cargo stacks with the same
    amount of a single other item (meta-variant swap)."""

    def __init__(self, fitID, itemIDs, newItemID):
        wx.Command.__init__(self, True, "Change Cargo Metas")
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        # itemIDs: cargo items to be replaced; newItemID: the replacement
        self.itemIDs = itemIDs
        self.newItemID = newItemID

    def Do(self):
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        results = []
        for itemID in self.itemIDs:
            # replacing an item with itself would be a no-op
            if itemID == self.newItemID:
                continue
            cargo = next((c for c in fit.cargo if c.itemID == itemID), None)
            if cargo is None:
                continue
            # capture the stack size before the removal command runs
            amount = cargo.amount
            # math.inf removes the entire stack regardless of its size
            cmdRemove = CalcRemoveCargoCommand(
                fitID=self.fitID, cargoInfo=CargoInfo(itemID=itemID, amount=math.inf)
            )
            cmdAdd = CalcAddCargoCommand(
                fitID=self.fitID,
                cargoInfo=CargoInfo(itemID=self.newItemID, amount=amount),
            )
            results.append(self.internalHistory.submitBatch(cmdRemove, cmdAdd))
        # succeed if at least one replacement went through
        success = any(results)
        eos.db.commit()
        wx.PostEvent(
            gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,))
        )
        return success

    def Undo(self):
        success = self.internalHistory.undoAll()
        eos.db.commit()
        wx.PostEvent(
            gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,))
        )
        return success
|
event | query_event_list | from datetime import timedelta
from typing import Dict, List, Optional, Tuple, Union
from zoneinfo import ZoneInfo
from dateutil.parser import isoparse
from django.utils.timezone import now
from posthog.api.utils import get_pk_or_uuid
from posthog.clickhouse.client.connection import Workload
from posthog.hogql.constants import DEFAULT_RETURNED_ROWS
from posthog.hogql.context import HogQLContext
from posthog.models import Action, Filter, Person, Team
from posthog.models.action.util import format_action_filter
from posthog.models.event.sql import (
SELECT_EVENT_BY_TEAM_AND_CONDITIONS_FILTERS_SQL,
SELECT_EVENT_BY_TEAM_AND_CONDITIONS_SQL,
)
from posthog.models.property.util import parse_prop_grouped_clauses
from posthog.queries.insight import insight_query_with_columns
from posthog.utils import relative_date_parse
def determine_event_conditions(
    conditions: Dict[str, Union[None, str, List[str]]], tzinfo: ZoneInfo
) -> Tuple[str, Dict]:
    """Translate request filter values into SQL "AND ..." snippets + params.

    Non-string condition values are ignored.  Returns the concatenated
    clause string and the matching parameter dict.
    """
    clauses: List[str] = []
    params: Dict[str, Union[str, List[str]]] = {}

    def parse_timestamp(value: str) -> str:
        # ISO-8601 first; fall back to relative expressions like "-7d".
        try:
            parsed = isoparse(value)
        except ValueError:
            parsed = relative_date_parse(value, tzinfo)
        return parsed.strftime("%Y-%m-%d %H:%M:%S.%f")

    for key, value in conditions.items():
        if not isinstance(value, str):
            continue
        if key == "after":
            clauses.append("AND timestamp > %(after)s ")
            params["after"] = parse_timestamp(value)
        elif key == "before":
            clauses.append("AND timestamp < %(before)s ")
            params["before"] = parse_timestamp(value)
        elif key == "person_id":
            clauses.append("""AND distinct_id IN (%(distinct_ids)s) """)
            person = get_pk_or_uuid(Person.objects.all(), value).first()
            distinct_ids = person.distinct_ids if person is not None else []
            params["distinct_ids"] = [str(d) for d in distinct_ids]
        elif key == "distinct_id":
            clauses.append("AND distinct_id = %(distinct_id)s ")
            params["distinct_id"] = value
        elif key == "event":
            clauses.append("AND event = %(event)s ")
            params["event"] = value
    return "".join(clauses), params
def query_events_list(
    filter: Filter,
    team: Team,
    request_get_query_dict: Dict,
    order_by: List[str],
    action_id: Optional[str],
    unbounded_date_from: bool = False,
    limit: int = DEFAULT_RETURNED_ROWS,
    offset: int = 0,
) -> List:
    """Fetch a page of raw events for a team from ClickHouse.

    Combines timestamp bounds, request query conditions, property filters and
    an optional action filter.  Returns limit+1 rows so the caller can detect
    whether a next page exists.
    """
    # Note: This code is inefficient and problematic, see https://github.com/PostHog/posthog/issues/13485 for details.
    # To isolate its impact from rest of the queries its queries are run on different nodes as part of "offline" workloads.
    hogql_context = HogQLContext(
        within_non_hogql_query=True, team_id=team.pk, enable_select_queries=True
    )
    # Fetch one extra row beyond the requested page size (pagination probe).
    limit += 1
    limit_sql = "LIMIT %(limit)s"
    if offset > 0:
        limit_sql += " OFFSET %(offset)s"
    workload = Workload.OFFLINE if unbounded_date_from else Workload.ONLINE
    # Default window: last 24h up to a small clock-skew allowance in the
    # future; explicit request parameters override these defaults.
    conditions, condition_params = determine_event_conditions(
        {
            "after": None
            if unbounded_date_from
            else (now() - timedelta(days=1)).isoformat(),
            "before": (now() + timedelta(seconds=5)).isoformat(),
            **request_get_query_dict,
        },
        tzinfo=team.timezone_info,
    )
    prop_filters, prop_filter_params = parse_prop_grouped_clauses(
        team_id=team.pk,
        property_group=filter.property_groups,
        has_person_id_joined=False,
        hogql_context=hogql_context,
    )
    if action_id:
        # Missing or stepless actions can never match any events.
        try:
            action = Action.objects.get(pk=action_id, team_id=team.pk)
        except Action.DoesNotExist:
            return []
        if action.steps.count() == 0:
            return []
        action_query, params = format_action_filter(
            team_id=team.pk, action=action, hogql_context=hogql_context
        )
        prop_filters += " AND {}".format(action_query)
        prop_filter_params = {**prop_filter_params, **params}
    # Only an exact "-timestamp" ordering yields DESC; everything else ASC.
    order = "DESC" if len(order_by) == 1 and order_by[0] == "-timestamp" else "ASC"
    if prop_filters != "":
        return insight_query_with_columns(
            SELECT_EVENT_BY_TEAM_AND_CONDITIONS_FILTERS_SQL.format(
                conditions=conditions,
                limit=limit_sql,
                filters=prop_filters,
                order=order,
            ),
            {
                "team_id": team.pk,
                "limit": limit,
                "offset": offset,
                **condition_params,
                **prop_filter_params,
                **hogql_context.values,
            },
            query_type="events_list",
            workload=workload,
            team_id=team.pk,
        )
    else:
        return insight_query_with_columns(
            SELECT_EVENT_BY_TEAM_AND_CONDITIONS_SQL.format(
                conditions=conditions, limit=limit_sql, order=order
            ),
            {
                "team_id": team.pk,
                "limit": limit,
                "offset": offset,
                **condition_params,
                **hogql_context.values,
            },
            query_type="events_list",
            workload=workload,
            team_id=team.pk,
        )
|
language-names | generate | #!/usr/bin/env python
import codecs
import collections
import re
"""
This script can generate a dictionary of language names.
This dictionary looks as follows:
language_names = {
"C": {
"nl": "Dutch",
"de": "German",
"en": "English",
},
"nl": {
"nl": "Nederlands",
"de": "Duits",
"en": "Engels",
},
}
Etcetera.
It can be created from:
- the 'all_languages' file that is part of KDE (currently the only option).
This generate.py script writes the dictionary to a file named
data.py.
This script does not need to be installed in order to use the language_names package.
"""
# Here you should name the language names to be extracted.
# If empty, all are used. "C" must be named.
# lang_names = []
# "C" is the untranslated (default/English) name set, as shown in the
# module docstring's example dictionary.
lang_names = [
    "C",
    "en",
    "de",
    "fr",
    "es",
    "nl",
    "pl",
    "pt_BR",
    "cs",
    "ru",
    "hu",
    "gl",
    "it",
    "tr",
    "uk",
    "ja",
    "zh_CN",
    "zh_HK",
    "zh_TW",
]
def generate_kde(fileName="/usr/share/locale/all_languages"):
    """Extract language names from KDE's 'all_languages' file.

    Returns a dict mapping language code -> {language code -> name}, where
    "C" holds each language's untranslated name.  All strings are unicode.
    """
    section_re = re.compile(r"\[([^]]+)\]")
    name_re = re.compile(r"Name(?:\[([^]]+)\])?\s*=(.*)$")
    langs = collections.defaultdict(dict)
    current = None
    with codecs.open(fileName, "r", "utf-8") as langfile:
        for raw_line in langfile:
            stripped = raw_line.strip()
            header = section_re.match(stripped)
            if header:
                current = header.group(1)
                continue
            # Skip everything before the first section and the dummy
            # "x-test" locale.
            if not current or current == "x-test":
                continue
            entry = name_re.match(stripped)
            if entry:
                code = entry.group(1) or "C"
                langs[code][current] = entry.group(2)
    # correct KDE mistake
    langs["cs"]["gl"] = "Galicijský"
    langs["zh_HK"]["gl"] = "加利西亞語"
    langs["zh_HK"]["zh_HK"] = "繁體中文(香港)"
    return dict(langs)
def makestring(text):
    """Return *text* as a double-quoted Python source literal, with
    backslashes and double quotes escaped (expecting unicode_literals)."""
    escaped = re.sub(r'([\\"])', r"\\\1", text)
    return '"{}"'.format(escaped)
def write_dict(langs):
    """Writes the dictionary file to the 'data.py' file.

    Only languages listed in the module-level lang_names (when non-empty)
    are emitted; keys and names are written sorted and quoted via makestring.
    """
    # Restrict to configured languages that actually exist in langs.
    keys = sorted(filter(lambda k: k in langs, lang_names) if lang_names else langs)
    with codecs.open("data.py", "w", "utf-8") as output:
        output.write("# -*- coding: utf-8;\n\n")
        output.write("# Do not edit, this file is generated. See generate.py.\n")
        output.write("\n\n")
        output.write("language_names = {\n")
        for key in keys:
            output.write(f"{makestring(key)}: {{\n")
            for lang in sorted(langs[key]):
                output.write(f" {makestring(lang)}:{makestring(langs[key][lang])},\n")
            output.write("},\n")
        output.write("}\n\n# End of data.py\n")
if __name__ == "__main__":
    langs = generate_kde()
    # The KDE file has no generic "zh"; alias it to simplified Chinese.
    langs["zh"] = langs["zh_CN"]
    write_dict(langs)
|
extractor | kommunetv | # coding: utf-8
from __future__ import unicode_literals
from ..utils import update_url
from .common import InfoExtractor
class KommunetvIE(InfoExtractor):
    """Extractor for archived streams on *.kommunetv.no municipality sites."""

    _VALID_URL = r"https://(\w+).kommunetv.no/archive/(?P<id>\w+)"
    _TEST = {
        "url": "https://oslo.kommunetv.no/archive/921",
        "md5": "5f102be308ee759be1e12b63d5da4bbc",
        "info_dict": {"id": "921", "title": "Bystyremøte", "ext": "mp4"},
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Bug fix: the API URL used to hardcode the 'oslo' subdomain, so any
        # other municipality matched by _VALID_URL failed.  Use the host of
        # the requested page instead (index 2 of 'https:', '', host, ...).
        host = url.split("/")[2]
        headers = {"Accept": "application/json"}
        data = self._download_json(
            "https://%s/api/streams?streamType=1&id=%s" % (host, video_id),
            video_id,
            headers=headers,
        )
        title = data["stream"]["title"]
        file = data["playlist"][0]["playlist"][0]["file"]
        url = update_url(file, query=None, fragment=None)
        formats = self._extract_m3u8_formats(
            url,
            video_id,
            ext="mp4",
            entry_protocol="m3u8_native",
            m3u8_id="hls",
            fatal=False,
        )
        self._sort_formats(formats)
        return {"id": video_id, "formats": formats, "title": title}
|
bup | index | import errno
import os
import stat
import struct
from contextlib import ExitStack
from bup import metadata, xstat
from bup._helpers import UINT_MAX, bytescmp
from bup.compat import pending_raise
from bup.helpers import (
add_error,
atomically_replaced_file,
log,
merge_iter,
mmap_readwrite,
progress,
qprogress,
resolve_parent,
slashappend,
)
# Sentinel hashes: EMPTY_SHA marks "no hash recorded yet", FAKE_SHA marks
# placeholder entries.
EMPTY_SHA = b"\0" * 20
FAKE_SHA = b"\x01" * 20
# File magic plus index format version.
INDEX_HDR = b"BUPI\0\0\0\7"
# Time values are handled as integer nanoseconds since the epoch in
# memory, but are written as xstat/metadata timespecs. This behavior
# matches the existing metadata/xstat/.bupm code.
# Record times (mtime, ctime, atime) as xstat/metadata timespecs, and
# store all of the times in the index so they won't interfere with the
# forthcoming metadata cache.
# struct format of one fixed-size index record (big-endian).
INDEX_SIG = (
    "!"
    "Q"  # dev
    "Q"  # ino
    "Q"  # nlink
    "qQ"  # ctime_s, ctime_ns
    "qQ"  # mtime_s, mtime_ns
    "qQ"  # atime_s, atime_ns
    "Q"  # size
    "I"  # mode
    "I"  # gitmode
    "20s"  # sha
    "H"  # flags
    "Q"  # children_ofs
    "I"  # children_n
    "Q"  # meta_ofs
)
ENTLEN = struct.calcsize(INDEX_SIG)
# Trailing footer: total entry count.
FOOTER_SIG = "!Q"
FOOTLEN = struct.calcsize(FOOTER_SIG)
IX_EXISTS = 0x8000  # file exists on filesystem
IX_HASHVALID = 0x4000  # the stored sha1 matches the filesystem
IX_SHAMISSING = 0x2000  # the stored sha1 object doesn't seem to exist
class Error(Exception):
    """Generic bup index error (e.g. out-of-order entries in Writer._add)."""
    pass
class MetaStoreReader:
    """Read-only, offset-addressed access to a bupindex.meta file."""

    def __init__(self, filename):
        self._file = None
        self._file = open(filename, "rb")

    def close(self):
        # Clear the handle first so repeated close() calls are no-ops.
        handle = self._file
        self._file = None
        if handle:
            handle.close()

    def __del__(self):
        assert not self._file

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        with pending_raise(value, rethrow=True):
            self.close()

    def metadata_at(self, ofs):
        """Return the Metadata record stored at file offset ofs."""
        self._file.seek(ofs)
        return metadata.Metadata.read(self._file)
class MetaStoreWriter:
    """Append-only writer for bupindex.meta with deduplication.

    Identical metadata records are stored once; store() returns the
    existing offset for repeats.
    """
    # For now, we just append to the file, and try to handle any
    # truncation or corruption somewhat sensibly.

    def __init__(self, filename):
        self._closed = False
        # Map metadata hashes to bupindex.meta offsets.
        self._offsets = {}
        self._filename = filename
        self._file = None
        # FIXME: see how slow this is; does it matter?
        # Pre-scan the existing file so previously stored records can be
        # reused by offset instead of appended again.
        m_file = open(filename, "ab+")
        try:
            m_file.seek(0)
            try:
                m_off = m_file.tell()
                m = metadata.Metadata.read(m_file)
                while m:
                    m_encoded = m.encode()
                    self._offsets[m_encoded] = m_off
                    m_off = m_file.tell()
                    m = metadata.Metadata.read(m_file)
            except EOFError:
                # normal end of the pre-scan
                pass
            except:
                log("index metadata in %r appears to be corrupt\n" % filename)
                raise
        finally:
            m_file.close()
        self._file = open(filename, "ab")

    def close(self):
        self._closed = True
        if self._file:
            self._file.close()
            self._file = None

    def __del__(self):
        # Detect leaked writers that were never closed.
        assert self._closed

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        with pending_raise(value, rethrow=False):
            self.close()

    def store(self, metadata):
        """Store one metadata record; return its offset (deduplicated)."""
        meta_encoded = metadata.encode(include_path=False)
        ofs = self._offsets.get(meta_encoded)
        if ofs:
            return ofs
        ofs = self._file.tell()
        self._file.write(meta_encoded)
        self._offsets[meta_encoded] = ofs
        return ofs
class Level:
    """One directory level accumulated while writing the index tree."""

    def __init__(self, ename, parent):
        self.parent = parent
        self.ename = ename
        self.list = []
        self.count = 0

    def write(self, f):
        """Flush queued entries to f; return (start_offset, entry_count)."""
        start_ofs = f.tell()
        n = len(self.list)
        if self.list:
            # log('popping %r with %d entries\n'
            #    % (''.join(self.ename), n))
            for entry in self.list:
                entry.write(f)
            if self.parent:
                # Propagate this subtree's size (own list + children) upward.
                self.parent.count += n + self.count
        return (start_ofs, n)
def _golevel(level, f, ename, newentry, metastore, tmax):
    """Move the current Level pointer to the directory path ename.

    Closes (writes out) any levels that are not a prefix of ename, creating
    synthetic directory entries for them, then opens new levels down to
    ename and appends newentry (or a blank entry) there.  Returns the new
    current level.
    """
    # close nodes back up the tree
    assert level
    default_meta_ofs = metastore.store(metadata.Metadata())
    while ename[: len(level.ename)] != level.ename:
        # ename is not inside this level: emit a directory entry for it
        n = BlankNewEntry(level.ename[-1], default_meta_ofs, tmax)
        n.flags |= IX_EXISTS
        (n.children_ofs, n.children_n) = level.write(f)
        level.parent.list.append(n)
        level = level.parent
    # create nodes down the tree
    while len(level.ename) < len(ename):
        level = Level(ename[: len(level.ename) + 1], level)
    # are we in precisely the right place?
    assert ename == level.ename
    n = newentry or BlankNewEntry(
        ename and level.ename[-1] or None, default_meta_ofs, tmax
    )
    (n.children_ofs, n.children_n) = level.write(f)
    if level.parent:
        level.parent.list.append(n)
        level = level.parent
    return level
class Entry:
    """One file/directory record of the bup index.

    Stat fields (dev, ino, times, size, ...) are attached by subclasses.
    Times are kept as integer nanoseconds in memory and clamped to tmax
    (see _fixup).  Comparison orders entries by *reversed* name (_cmp),
    matching the on-disk index layout.
    """

    def __init__(self, basename, name, meta_ofs, tmax):
        # basename: last path component; name: full path (both bytes).
        assert basename is None or isinstance(basename, bytes)
        assert name is None or isinstance(name, bytes)
        self.basename = basename
        self.name = name
        # offset of this entry's metadata record in bupindex.meta
        self.meta_ofs = meta_ofs
        # upper bound for recorded timestamps (None disables clamping)
        self.tmax = tmax
        self.children_ofs = 0
        self.children_n = 0

    def __repr__(self):
        return "(%r,0x%04x,%d,%d,%d,%d,%d,%d,%s/%s,0x%04x,%d,0x%08x/%d)" % (
            self.name,
            self.dev,
            self.ino,
            self.nlink,
            self.ctime,
            self.mtime,
            self.atime,
            self.size,
            self.mode,
            self.gitmode,
            self.flags,
            self.meta_ofs,
            self.children_ofs,
            self.children_n,
        )

    def packed(self):
        """Serialize this entry into one fixed-size INDEX_SIG record."""
        try:
            # on-disk times are (seconds, nanoseconds) timespecs
            ctime = xstat.nsecs_to_timespec(self.ctime)
            mtime = xstat.nsecs_to_timespec(self.mtime)
            atime = xstat.nsecs_to_timespec(self.atime)
            return struct.pack(
                INDEX_SIG,
                self.dev,
                self.ino,
                self.nlink,
                ctime[0],
                ctime[1],
                mtime[0],
                mtime[1],
                atime[0],
                atime[1],
                self.size,
                self.mode,
                self.gitmode,
                self.sha,
                self.flags,
                self.children_ofs,
                self.children_n,
                self.meta_ofs,
            )
        except (DeprecationWarning, struct.error) as e:
            log("pack error: %s (%r)\n" % (e, self))
            raise

    def stale(self, st, check_device=True):
        """Return True when the filesystem stat st no longer matches."""
        if self.size != st.st_size:
            return True
        if self.mtime != st.st_mtime:
            return True
        if self.sha == EMPTY_SHA:
            # never hashed: always needs (re)indexing
            return True
        if not self.gitmode:
            return True
        if self.ctime != st.st_ctime:
            return True
        if self.ino != st.st_ino:
            return True
        if self.nlink != st.st_nlink:
            return True
        if not (self.flags & IX_EXISTS):
            return True
        if check_device and (self.dev != st.st_dev):
            return True
        return False

    def update_from_stat(self, st, meta_ofs):
        # Should only be called when the entry is stale(), and
        # invalidate() should almost certainly be called afterward.
        self.dev = st.st_dev
        self.ino = st.st_ino
        self.nlink = st.st_nlink
        self.ctime = st.st_ctime
        self.mtime = st.st_mtime
        self.atime = st.st_atime
        self.size = st.st_size
        self.mode = st.st_mode
        self.flags |= IX_EXISTS
        self.meta_ofs = meta_ofs
        self._fixup()

    def _fixup(self):
        # Clamp the times we later compare against stat() results.
        self.mtime = self._fixup_time(self.mtime)
        self.ctime = self._fixup_time(self.ctime)

    def _fixup_time(self, t):
        # Clamp t to tmax so "future" timestamps can't mark entries valid.
        if self.tmax != None and t > self.tmax:
            return self.tmax
        else:
            return t

    def is_valid(self):
        """True when the entry both exists and has a trusted hash."""
        f = IX_HASHVALID | IX_EXISTS
        return (self.flags & f) == f

    def invalidate(self):
        self.flags &= ~IX_HASHVALID

    def validate(self, gitmode, sha):
        """Record a freshly computed (gitmode, sha) and mark it trusted."""
        assert sha
        assert gitmode
        assert gitmode + 0 == gitmode
        self.gitmode = gitmode
        self.sha = sha
        self.flags |= IX_HASHVALID | IX_EXISTS

    def exists(self):
        return not self.is_deleted()

    def sha_missing(self):
        return (self.flags & IX_SHAMISSING) or not (self.flags & IX_HASHVALID)

    def is_deleted(self):
        return (self.flags & IX_EXISTS) == 0

    def set_deleted(self):
        if self.flags & IX_EXISTS:
            self.flags &= ~(IX_EXISTS | IX_HASHVALID)

    def is_real(self):
        return not self.is_fake()

    def is_fake(self):
        # fake entries are the synthetic ones with no recorded ctime
        return not self.ctime

    def _cmp(self, other):
        # Note reversed name ordering
        bc = bytescmp(other.name, self.name)
        if bc != 0:
            return bc
        vc = self.is_valid() - other.is_valid()
        if vc != 0:
            return vc
        fc = self.is_fake() - other.is_fake()
        if fc != 0:
            return fc
        return 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __ne__(self, other):
        return self._cmp(other) != 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def write(self, f):
        # on-disk layout: NUL-terminated basename + fixed-size record
        f.write(self.basename + b"\0" + self.packed())
class NewEntry(Entry):
    """Entry built from fresh filesystem data (not yet read from an index)."""

    def __init__(
        self,
        basename,
        name,
        tmax,
        dev,
        ino,
        nlink,
        ctime,
        mtime,
        atime,
        size,
        mode,
        gitmode,
        sha,
        flags,
        meta_ofs,
        children_ofs,
        children_n,
    ):
        Entry.__init__(self, basename, name, meta_ofs, tmax)
        self.dev = dev
        self.ino = ino
        self.nlink = nlink
        self.ctime = ctime
        self.mtime = mtime
        self.atime = atime
        self.size = size
        self.mode = mode
        self.gitmode = gitmode
        self.sha = sha
        self.flags = flags
        self.children_ofs = children_ofs
        self.children_n = children_n
        # clamp the recorded times to tmax (see Entry._fixup)
        self._fixup()
class BlankNewEntry(NewEntry):
    """A NewEntry with zeroed stat fields and the empty-sha placeholder."""

    def __init__(self, basename, meta_ofs, tmax):
        # dev, ino, nlink, ctime, mtime, atime, size, mode, gitmode
        stat_zeros = (0,) * 9
        NewEntry.__init__(
            self,
            basename,
            basename,
            tmax,
            *stat_zeros,
            EMPTY_SHA,
            0,  # flags
            meta_ofs,
            0,  # children_ofs
            0,  # children_n
        )
class ExistingEntry(Entry):
    """Entry backed by an mmap'ed index file; edits repack in place."""

    def __init__(self, parent, basename, name, m, ofs):
        # m: the index mmap/buffer; ofs: offset of this entry's packed record
        Entry.__init__(self, basename, name, None, None)
        self.parent = parent
        self._m = m
        self._ofs = ofs
        (
            self.dev,
            self.ino,
            self.nlink,
            self.ctime,
            ctime_ns,
            self.mtime,
            mtime_ns,
            self.atime,
            atime_ns,
            self.size,
            self.mode,
            self.gitmode,
            self.sha,
            self.flags,
            self.children_ofs,
            self.children_n,
            self.meta_ofs,
        ) = struct.unpack(INDEX_SIG, m[ofs : ofs + ENTLEN])
        # convert on-disk (seconds, ns) timespecs back to integer nanoseconds
        self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns))
        self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns))
        self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns))

    # effectively, we don't bother messing with IX_SHAMISSING if
    # not IX_HASHVALID, since it's redundant, and repacking is more
    # expensive than not repacking.
    # This is implemented by having sha_missing() check IX_HASHVALID too.
    def set_sha_missing(self, val):
        val = val and 1 or 0
        oldval = self.sha_missing() and 1 or 0
        if val != oldval:
            flag = val and IX_SHAMISSING or 0
            newflags = (self.flags & (~IX_SHAMISSING)) | flag
            self.flags = newflags
            self.repack()

    def unset_sha_missing(self, flag):
        # NOTE(review): the 'flag' parameter is unused — looks vestigial.
        if self.flags & IX_SHAMISSING:
            self.flags &= ~IX_SHAMISSING
            self.repack()

    def repack(self):
        """Write the (possibly modified) record back into the mmap."""
        self._m[self._ofs : self._ofs + ENTLEN] = self.packed()
        if self.parent and not self.is_valid():
            # invalidation bubbles up to the containing directory
            self.parent.invalidate()
            self.parent.repack()

    def iter(self, name=None, wantrecurse=None):
        """Yield child entries (deepest first), optionally filtered by name."""
        dname = name
        if dname and not dname.endswith(b"/"):
            dname += b"/"
        ofs = self.children_ofs
        assert ofs <= len(self._m)
        assert self.children_n <= UINT_MAX  # i.e. python struct 'I'
        for i in range(self.children_n):
            # each child record: NUL-terminated basename + packed entry
            eon = self._m.find(b"\0", ofs)
            assert eon >= 0
            assert eon >= ofs
            assert eon > ofs
            basename = self._m[ofs : ofs + (eon - ofs)]
            child = ExistingEntry(
                self, basename, self.name + basename, self._m, eon + 1
            )
            if (
                not dname
                or child.name.startswith(dname)
                or child.name.endswith(b"/")
                and dname.startswith(child.name)
            ):
                if not wantrecurse or wantrecurse(child):
                    for e in child.iter(name=name, wantrecurse=wantrecurse):
                        yield e
            if not name or child.name == name or child.name.startswith(dname):
                yield child
            ofs = eon + 1 + ENTLEN

    def __iter__(self):
        return self.iter()
class Reader:
    """Reader for an on-disk bup index file (mmap'ed read/write)."""

    def __init__(self, filename):
        self.closed = False
        self.filename = filename
        self.m = b""
        self.writable = False
        self.count = 0
        f = None
        try:
            f = open(filename, "rb+")
        except IOError as e:
            # a missing index simply reads as empty
            if e.errno == errno.ENOENT:
                pass
            else:
                raise
        if f:
            b = f.read(len(INDEX_HDR))
            if b != INDEX_HDR:
                log(
                    "warning: %s: header: expected %r, got %r\n"
                    % (filename, INDEX_HDR, b)
                )
            else:
                st = os.fstat(f.fileno())
                if st.st_size:
                    self.m = mmap_readwrite(f)
                    self.writable = True
                    # total entry count lives in the trailing footer
                    self.count = struct.unpack(
                        FOOTER_SIG, self.m[st.st_size - FOOTLEN : st.st_size]
                    )[0]

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        with pending_raise(value, rethrow=False):
            self.close()

    def __len__(self):
        return int(self.count)

    def forward_iter(self):
        """Yield entries in raw on-disk order (no tree recursion)."""
        ofs = len(INDEX_HDR)
        while ofs + ENTLEN <= len(self.m) - FOOTLEN:
            eon = self.m.find(b"\0", ofs)
            assert eon >= 0
            assert eon >= ofs
            assert eon > ofs
            basename = self.m[ofs : ofs + (eon - ofs)]
            yield ExistingEntry(None, basename, basename, self.m, eon + 1)
            ofs = eon + 1 + ENTLEN

    def iter(self, name=None, wantrecurse=None):
        """Recursively yield entries starting from the root record."""
        if len(self.m) > len(INDEX_HDR) + ENTLEN:
            dname = name
            if dname and not dname.endswith(b"/"):
                dname += b"/"
            # the root entry is the last record, just before the footer
            root = ExistingEntry(
                None, b"/", b"/", self.m, len(self.m) - FOOTLEN - ENTLEN
            )
            for sub in root.iter(name=name, wantrecurse=wantrecurse):
                yield sub
            if not dname or dname == root.name:
                yield root

    def __iter__(self):
        return self.iter()

    def find(self, name):
        """Return the entry whose full name equals name, or None."""
        return next(
            (e for e in self.iter(name, wantrecurse=lambda x: True) if e.name == name),
            None,
        )

    def exists(self):
        return self.m

    def save(self):
        if self.writable and self.m:
            self.m.flush()

    def close(self):
        self.closed = True
        self.save()
        if self.writable and self.m:
            self.m.close()
            self.m = None
            self.writable = False

    def __del__(self):
        assert self.closed

    def filter(self, prefixes, wantrecurse=None):
        """Yield (rewritten_name, entry) for entries under the given prefixes."""
        for rp, path in reduce_paths(prefixes):
            any_entries = False
            for e in self.iter(rp, wantrecurse=wantrecurse):
                any_entries = True
                assert e.name.startswith(rp)
                name = path + e.name[len(rp) :]
                yield (name, e)
            if not any_entries:
                # Always return at least the top for each prefix.
                # Otherwise something like "save x/y" will produce
                # nothing if x is up to date.
                pe = self.find(rp)
                if not pe:
                    raise Exception("cannot find %r" % rp)
                name = path + pe.name[len(rp) :]
                yield (name, pe)
# FIXME: this function isn't very generic, because it splits the filename
# in an odd way and depends on a terminating '/' to indicate directories.
def pathsplit(p):
    """Split a path into a list of elements of the file system hierarchy."""
    parts = p.split(b"/")
    # every component but the last keeps its trailing slash
    elements = [part + b"/" for part in parts[:-1]]
    last = parts[-1]
    if last:
        elements.append(last)
    # a trailing '/' in p yields an empty final part, which is dropped
    return elements
class Writer:
    """Writes a new index file atomically; entries must be added in
    reverse name order (see _add)."""

    def __init__(self, filename, metastore, tmax):
        self.closed = False
        # rootlevel/level track the directory currently being written
        self.rootlevel = self.level = Level([], None)
        self.pending_index = None
        self.f = None
        self.count = 0
        # last name added, to enforce ordering
        self.lastfile = None
        self.filename = None
        self.filename = filename = resolve_parent(filename)
        self.metastore = metastore
        self.tmax = tmax
        (dir, name) = os.path.split(filename)
        # The temp file only replaces the real index on successful close().
        with ExitStack() as self.cleanup:
            self.pending_index = atomically_replaced_file(
                self.filename, mode="wb", buffering=65536
            )
            self.f = self.cleanup.enter_context(self.pending_index)
            self.cleanup.enter_context(self.f)
            self.f.write(INDEX_HDR)
            # keep the cleanup stack alive past this with-block
            self.cleanup = self.cleanup.pop_all()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        with pending_raise(value, rethrow=False):
            self.abort()

    def abort(self):
        self.close(abort=True)

    def flush(self):
        """Close all open levels and write the footer (entry count)."""
        if self.level:
            self.level = _golevel(
                self.level, self.f, [], None, self.metastore, self.tmax
            )
            self.count = self.rootlevel.count
            if self.count:
                # +1 accounts for the root entry itself
                self.count += 1
            self.f.write(struct.pack(FOOTER_SIG, self.count))
            self.f.flush()
        assert self.level == None

    def close(self, abort=False):
        self.closed = True
        with self.cleanup:
            if abort:
                self.pending_index.cancel()
            else:
                self.flush()

    def __del__(self):
        assert self.closed

    def _add(self, ename, entry):
        # entries must arrive in strictly decreasing name order
        if self.lastfile and self.lastfile <= ename:
            raise Error(
                "%r must come before %r" % ("".join(ename), "".join(self.lastfile))
            )
        self.lastfile = ename
        self.level = _golevel(
            self.level, self.f, ename, entry, self.metastore, self.tmax
        )

    def add(self, name, st, meta_ofs, hashgen=None):
        """Add one path with its stat result; hashgen() may supply
        (gitmode, sha) for a pre-hashed file."""
        endswith = name.endswith(b"/")
        ename = pathsplit(name)
        basename = ename[-1]
        # log('add: %r %r\n' % (basename, name))
        flags = IX_EXISTS
        sha = None
        if hashgen:
            (gitmode, sha) = hashgen(name)
            flags |= IX_HASHVALID
        else:
            (gitmode, sha) = (0, EMPTY_SHA)
        if st:
            isdir = stat.S_ISDIR(st.st_mode)
            assert isdir == endswith
            e = NewEntry(
                basename,
                name,
                self.tmax,
                st.st_dev,
                st.st_ino,
                st.st_nlink,
                st.st_ctime,
                st.st_mtime,
                st.st_atime,
                st.st_size,
                st.st_mode,
                gitmode,
                sha,
                flags,
                meta_ofs,
                0,
                0,
            )
        else:
            # no stat data: only directories may be added as blanks
            assert endswith
            meta_ofs = self.metastore.store(metadata.Metadata())
            e = BlankNewEntry(basename, meta_ofs, self.tmax)
            e.gitmode = gitmode
            e.sha = sha
            e.flags = flags
        self._add(ename, e)

    def add_ixentry(self, e):
        """Re-add an entry taken from an existing index."""
        e.children_ofs = e.children_n = 0
        self._add(pathsplit(e.name), e)

    def new_reader(self):
        """Flush and return a Reader over the (still pending) file."""
        self.flush()
        return Reader(self.f.name)
def _slashappend_or_add_error(p, caller):
    """Return *p*, with a single trailing slash appended when it names a
    directory.  On OSError (e.g. the path does not exist), record the
    problem via add_error() and return None instead."""
    try:
        status = os.lstat(p)
    except OSError as exc:
        add_error("%s: %s" % (caller, exc))
        return None
    if stat.S_ISDIR(status.st_mode):
        return slashappend(p)
    return p
def unique_resolved_paths(paths):
    "Return a collection of unique resolved paths."
    resolved = set()
    for p in paths:
        rp = _slashappend_or_add_error(resolve_parent(p), "unique_resolved_paths")
        if rp is not None:
            resolved.add(rp)
    return frozenset(resolved)
def reduce_paths(paths):
    """Return reverse-sorted (resolved_path, original_path) pairs, dropping
    any path that is already covered by a parent directory in the list."""
    xpaths = []
    for p in paths:
        rp = _slashappend_or_add_error(resolve_parent(p), "reduce_paths")
        if rp:
            xpaths.append((rp, slashappend(p) if rp.endswith(b"/") else p))
    xpaths.sort()
    paths = []
    prev = None
    for rp, p in xpaths:
        if prev and (prev == rp or (prev.endswith(b"/") and rp.startswith(prev))):
            continue  # already superseded by previous path
        paths.append((rp, p))
        prev = rp
    paths.sort(reverse=True)
    return paths
def merge(*iters):
    """Merge pre-sorted entry iterators into one stream, with progress output."""

    def pfunc(count, total):
        # Transient progress line (overwritten in place via '\r').
        qprogress("bup: merging indexes (%d/%d)\r" % (count, total))

    def pfinal(count, total):
        progress("bup: merging indexes (%d/%d), done.\n" % (count, total))

    return merge_iter(iters, 1024, pfunc, pfinal, key="name")
# --- file: app/mocks.py ---
# SPDX-License-Identifier: LGPL-2.1-or-later
# ***************************************************************************
# * *
# * Copyright (c) 2022-2023 FreeCAD Project Association *
# * *
# * This file is part of FreeCAD. *
# * *
# * FreeCAD is free software: you can redistribute it and/or modify it *
# * under the terms of the GNU Lesser General Public License as *
# * published by the Free Software Foundation, either version 2.1 of the *
# * License, or (at your option) any later version. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with FreeCAD. If not, see *
# * <https://www.gnu.org/licenses/>. *
# * *
# ***************************************************************************
"""Mock objects for use when testing the addon manager non-GUI code."""
# pylint: disable=too-few-public-methods,too-many-instance-attributes,missing-function-docstring
import os
import xml.etree.ElementTree as ElemTree
from typing import List, Union
class GitFailed(RuntimeError):
    """Raised by MockGitManager to simulate a failed git operation."""

    pass
class MockConsole:
    """Spy for the FreeCAD.Console -- does NOT print anything out, just logs it."""

    def __init__(self):
        self.log = []
        self.messages = []
        self.warnings = []
        self.errors = []

    def PrintLog(self, data: str):
        self.log.append(data)

    def PrintMessage(self, data: str):
        self.messages.append(data)

    def PrintWarning(self, data: str):
        self.warnings.append(data)

    def PrintError(self, data: str):
        self.errors.append(data)

    def missing_newlines(self) -> int:
        """Count logged entries that do not end with a newline -- a unit-test
        convenience, since console entries normally should end with one."""
        total = 0
        for bucket in (self.log, self.messages, self.warnings, self.errors):
            total += self._count_missing_newlines(bucket)
        return total

    @staticmethod
    def _count_missing_newlines(some_list) -> int:
        # Number of entries whose final character is not a newline.
        return sum(1 for entry in some_list if entry[-1] != "\n")
class MockAddon:
    """Minimal Addon class"""

    # pylint: disable=too-many-instance-attributes

    def __init__(
        self,
        name: str = None,
        url: str = None,
        status: object = None,
        branch: str = "main",
    ):
        # Default URL points at the bundled test fixture archive.
        test_dir = os.path.join(os.path.dirname(__file__), "..", "data")
        self.name = name if name else "MockAddon"
        self.display_name = name if name else "Mock Addon"
        self.url = url if url else os.path.join(test_dir, "test_simple_repo.zip")
        self.branch = branch
        self.status = status
        self.macro = None
        self.update_status = None
        self.metadata = None
        self.icon_file = None
        self.last_updated = None
        self.requires = set()
        self.python_requires = set()
        self.python_optional = set()
        self.on_git = False
        self.on_wiki = True

    def set_status(self, status):
        self.update_status = status

    @staticmethod
    def get_best_icon_relative_path():
        return ""
class MockMacro:
    """Minimal Macro class"""

    def __init__(self, name="MockMacro"):
        self.name = name
        self.filename = self.name + ".FCMacro"
        self.icon = ""  # If set, should just be fake filename, doesn't have to exist
        self.xpm = ""
        self.code = ""
        self.raw_code_url = ""
        self.other_files = []  # If set, should be fake names, don't have to exist
        self.details_filled_from_file = False
        self.details_filled_from_code = False
        self.parsed_wiki_page = False
        self.on_git = False
        self.on_wiki = True

    def install(self, location: os.PathLike):
        """Installer function for the mock macro object: creates a file with the src_filename
        attribute, and optionally an icon, xpm, and other_files. The data contained in these files
        is not usable and serves only as a placeholder for the existence of the files.
        """
        main_path = os.path.join(location, self.filename)
        with open(main_path, "w", encoding="utf-8") as f:
            f.write("Test file for macro installation unit tests")
        if self.icon:
            with open(os.path.join(location, self.icon), "wb") as f:
                f.write(b"Fake icon data - nothing to see here\n")
        if self.xpm:
            xpm_path = os.path.join(location, "MockMacro_icon.xpm")
            with open(xpm_path, "w", encoding="utf-8") as f:
                f.write(self.xpm)
        for name in self.other_files:
            if "/" in name:
                # Create intermediate directories for nested fake files.
                parent = os.path.dirname(os.path.join(location, name))
                os.makedirs(parent, exist_ok=True)
            with open(os.path.join(location, name), "w", encoding="utf-8") as f:
                f.write("# Fake macro data for unit testing\n")
        return True, []

    def fill_details_from_file(self, _):
        """Tracks that this function was called, but otherwise does nothing"""
        self.details_filled_from_file = True

    def fill_details_from_code(self, _):
        # Instrumentation only: record the call.
        self.details_filled_from_code = True

    def parse_wiki_page(self, _):
        # Instrumentation only: record the call.
        self.parsed_wiki_page = True
class SignalCatcher:
    """Record that a signal fired, and with which arguments.

    Usage:
        catcher = SignalCatcher()
        my_signal.connect(catcher.catch_signal)
        do_things_that_emit_the_signal()
        self.assertTrue(catcher.caught)
    """

    def __init__(self):
        self.caught = False
        self.killed = False
        self.args = None

    def catch_signal(self, *args):
        self.args = args
        self.caught = True

    def die(self):
        self.killed = True
class AddonSignalCatcher:
    """Signal catcher specifically designed for catching emitted addons."""

    def __init__(self):
        # Every addon passed to catch_signal, in emission order.
        self.addons = []

    def catch_signal(self, addon):
        self.addons.append(addon)
class CallCatcher:
    """Generic call monitor -- use to override functions that are not themselves under
    test so that you can detect when the function has been called, and how many times.
    """

    def __init__(self):
        self.called = False
        self.call_count = 0
        self.args = None

    def catch_call(self, *args):
        # Count the call and remember the most recent argument tuple.
        self.call_count += 1
        self.args = args
        self.called = True
class MockGitManager:
    """A mock git manager that needs no git installation.  It takes no real
    action: every method logs its own name for instrumentation, and each one
    can be made to raise GitFailed via should_fail/fail_once.  The various
    *_response members configure the values the getters return."""

    def __init__(self):
        self.called_methods = []
        self.update_available_response = False
        self.current_tag_response = "main"
        self.current_branch_response = "main"
        self.get_remote_response = "No remote set"
        self.get_branches_response = ["main"]
        self.get_last_committers_response = {
            "John Doe": {"email": "jdoe@freecad.org", "count": 1}
        }
        self.get_last_authors_response = {
            "Jane Doe": {"email": "jdoe@freecad.org", "count": 1}
        }
        self.should_fail = False
        self.fail_once = False  # Switch back to success after the simulated failure

    def _check_for_failure(self):
        if self.should_fail:
            if self.fail_once:
                self.should_fail = False
            raise GitFailed("Unit test forced failure")

    def _record(self, method_name: str):
        # Log the call, then honor any requested simulated failure.
        self.called_methods.append(method_name)
        self._check_for_failure()

    def clone(self, _remote, _local_path, _args: List[str] = None):
        self._record("clone")

    def async_clone(
        self, _remote, _local_path, _progress_monitor, _args: List[str] = None
    ):
        self._record("async_clone")

    def checkout(self, _local_path, _spec, _args: List[str] = None):
        self._record("checkout")

    def update(self, _local_path):
        self._record("update")

    def status(self, _local_path) -> str:
        self._record("status")
        return "Up-to-date"

    def reset(self, _local_path, _args: List[str] = None):
        self._record("reset")

    def async_fetch_and_update(self, _local_path, _progress_monitor, _args=None):
        self._record("async_fetch_and_update")

    def update_available(self, _local_path) -> bool:
        self._record("update_available")
        return self.update_available_response

    def current_tag(self, _local_path) -> str:
        self._record("current_tag")
        return self.current_tag_response

    def current_branch(self, _local_path) -> str:
        self._record("current_branch")
        return self.current_branch_response

    def repair(self, _remote, _local_path):
        self._record("repair")

    def get_remote(self, _local_path) -> str:
        self._record("get_remote")
        return self.get_remote_response

    def get_branches(self, _local_path) -> List[str]:
        self._record("get_branches")
        return self.get_branches_response

    def get_last_committers(self, _local_path, _n=10):
        self._record("get_last_committers")
        return self.get_last_committers_response

    def get_last_authors(self, _local_path, _n=10):
        self._record("get_last_authors")
        return self.get_last_authors_response
class MockSignal:
    """A purely synchronous, instrumented stand-in for a Qt signal, intended
    only for unit testing.  emit() is semi-functional but does not use queued
    slots, so it cannot be used across threads."""

    def __init__(self, *args):
        self.expected_types = args
        self.connections = []
        self.disconnections = []
        self.emitted = False

    def connect(self, func):
        self.connections.append(func)

    def disconnect(self, func):
        if func in self.connections:
            self.connections.remove(func)
        # NOTE(review): every disconnect request is recorded, even for
        # slots that were never connected -- confirm this is intended.
        self.disconnections.append(func)

    def emit(self, *args):
        self.emitted = True
        # Each slot receives the argument tuple as a single parameter.
        for slot in self.connections:
            slot(args)
class MockNetworkManager:
    """Instrumented mock for the NetworkManager.  Performs no network access,
    is fully synchronous, and needs no running event loop.  Submitted
    requests never complete."""

    def __init__(self):
        self.urls = []
        self.aborted = []
        self.data = MockByteArray()
        self.called_methods = []
        self.completed = MockSignal(int, int, MockByteArray)
        self.progress_made = MockSignal(int, int, int)
        self.progress_complete = MockSignal(int, int, os.PathLike)

    def _enqueue(self, url: str, method_name: str) -> int:
        # Record the request and hand back its queue index.
        self.urls.append(url)
        self.called_methods.append(method_name)
        return len(self.urls) - 1

    def submit_unmonitored_get(self, url: str) -> int:
        return self._enqueue(url, "submit_unmonitored_get")

    def submit_monitored_get(self, url: str) -> int:
        return self._enqueue(url, "submit_monitored_get")

    def blocking_get(self, url: str):
        self._enqueue(url, "blocking_get")
        return self.data

    def abort_all(self):
        self.called_methods.append("abort_all")
        self.aborted.extend(self.urls)

    def abort(self, index: int):
        self.called_methods.append("abort")
        self.aborted.append(self.urls[index])
class MockByteArray:
    """Mock for QByteArray. Only provides the data() access member."""

    def __init__(self, data_to_wrap=b"data"):
        self.wrapped = data_to_wrap

    def data(self) -> bytes:
        return self.wrapped
class MockThread:
    """Mock for QThread for use when threading is not being used, but
    interruption needs to be tested.  Set interrupt_after_n_calls to the
    call number at which isInterruptionRequested() starts returning True."""

    def __init__(self):
        self.interrupt_after_n_calls = 0
        self.interrupt_check_counter = 0

    def isInterruptionRequested(self):
        self.interrupt_check_counter += 1
        limit = self.interrupt_after_n_calls
        # With limit == 0 interruption is never requested.
        return bool(limit) and self.interrupt_check_counter >= limit
class MockPref:
    """Mock preference store with read/write instrumentation.

    prefs holds the key/value data; pref_get_counter and pref_set_counter
    record how many times each key was read and written, respectively.
    """

    def __init__(self):
        self.prefs = {}
        self.pref_set_counter = {}
        self.pref_get_counter = {}

    def set_prefs(self, pref_dict: dict) -> None:
        """Replace the backing preference data wholesale (test setup)."""
        self.prefs = pref_dict

    def GetInt(self, key: str, default: int) -> int:
        return self.Get(key, default)

    def GetString(self, key: str, default: str) -> str:
        return self.Get(key, default)

    def GetBool(self, key: str, default: bool) -> bool:
        return self.Get(key, default)

    def Get(self, key: str, default):
        """Return the stored value for *key*; raise ValueError if absent.

        *default* is accepted for signature parity with the real API but is
        intentionally unused: a read of an unexpected key should fail loudly
        in tests.
        """
        # BUG FIX: this used to consult pref_set_counter when updating the
        # read count, which (a) reset the count to 1 on every read of a key
        # that was never written, and (b) raised KeyError on the first read
        # of a key that HAD been written via Set().
        if key not in self.pref_get_counter:
            self.pref_get_counter[key] = 1
        else:
            self.pref_get_counter[key] += 1
        if key in self.prefs:
            return self.prefs[key]
        raise ValueError(f"Expected key not in mock preferences: {key}")

    def SetInt(self, key: str, value: int) -> None:
        return self.Set(key, value)

    def SetString(self, key: str, value: str) -> None:
        return self.Set(key, value)

    def SetBool(self, key: str, value: bool) -> None:
        return self.Set(key, value)

    def Set(self, key: str, value):
        """Store *value* under *key* and bump its write counter."""
        if key not in self.pref_set_counter:
            self.pref_set_counter[key] = 1
        else:
            self.pref_set_counter[key] += 1
        self.prefs[key] = value
class MockExists:
    """Instrumented stand-in for a file-existence check."""

    def __init__(self, files: List[str] = None):
        """Returns True for all files in files, and False for all others"""
        self.files = files
        self.files_checked = []  # every path ever queried, in order

    def exists(self, check_file: str):
        self.files_checked.append(check_file)
        if not self.files:
            return False
        # A suffix match lets callers pass full paths while the fixture
        # lists only file names.
        return any(check_file.endswith(name) for name in self.files)
# --- file: extractor/dbtv.py ---
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class DBTVIE(InfoExtractor):
    """Extractor for dagbladet.no videos; delegates to YouTube or JW Platform."""

    _VALID_URL = r"https?://(?:www\.)?dagbladet\.no/video/(?:(?:embed|(?P<display_id>[^/]+))/)?(?P<id>[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8})"
    _TESTS = [
        {
            "url": "https://www.dagbladet.no/video/PynxJnNWChE/",
            "md5": "b8f850ba1860adbda668d367f9b77699",
            "info_dict": {
                "id": "PynxJnNWChE",
                "ext": "mp4",
                "title": "Skulle teste ut fornøyelsespark, men kollegaen var bare opptatt av bikinikroppen",
                "description": "md5:49cc8370e7d66e8a2ef15c3b4631fd3f",
                "thumbnail": r"re:https?://.*\.jpg",
                "upload_date": "20160916",
                "duration": 69,
                "uploader_id": "UCk5pvsyZJoYJBd7_oFPTlRQ",
                "uploader": "Dagbladet",
            },
            "add_ie": ["Youtube"],
        },
        {
            "url": "https://www.dagbladet.no/video/embed/xlGmyIeN9Jo/?autoplay=false",
            "only_matching": True,
        },
        {
            "url": "https://www.dagbladet.no/video/truer-iran-bor-passe-dere/PalfB2Cw",
            "only_matching": True,
        },
    ]

    @staticmethod
    def _extract_urls(webpage):
        # Collect the src of every dagbladet.no embed iframe on the page.
        urls = []
        for _, url in re.findall(
            r'<iframe[^>]+src=(["\'])((?:https?:)?//(?:www\.)?dagbladet\.no/video/embed/(?:[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8}).*?)\1',
            webpage,
        ):
            urls.append(url)
        return urls

    def _real_extract(self, url):
        display_id, video_id = re.match(self._VALID_URL, url).groups()
        # YouTube IDs are exactly 11 characters; 8-character IDs belong
        # to JW Platform.
        if len(video_id) == 11:
            target_url = video_id
            ie_key = "Youtube"
        else:
            target_url = "jwplatform:" + video_id
            ie_key = "JWPlatform"
        return {
            "_type": "url_transparent",
            "id": video_id,
            "display_id": display_id,
            "url": target_url,
            "ie_key": ie_key,
        }
# --- file: blocks/qa_pack_k_bits.py ---
#!/usr/bin/env python
#
# Copyright 2006,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
import pmt
from gnuradio import blocks, gr, gr_unittest
class test_pack(gr_unittest.TestCase):
    """QA tests for blocks.pack_k_bits_bb (bit packing)."""

    def setUp(self):
        # Fixed seed keeps the randomized test (test_003) reproducible.
        random.seed(0)
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        # k=1 packing is the identity transform.
        src_data = [1, 0, 1, 1, 0, 1, 1, 0]
        expected_results = [1, 0, 1, 1, 0, 1, 1, 0]
        src = blocks.vector_source_b(src_data, False)
        op = blocks.pack_k_bits_bb(1)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        self.assertEqual(expected_results, dst.data())

    def test_002(self):
        # k=2 packs bit pairs MSB-first: (1,0)->2, (1,1)->3, (0,0)->0, (0,1)->1.
        src_data = [1, 0, 1, 1, 0, 0, 0, 1]
        expected_results = [2, 3, 0, 1]
        src = blocks.vector_source_b(src_data, False)
        op = blocks.pack_k_bits_bb(2)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        self.assertEqual(expected_results, dst.data())

    def test_003(self):
        # unpack followed by pack must round-trip arbitrary 2-bit symbols.
        src_data = expected_results = [random.randint(0, 3) for x in range(10)]
        src = blocks.vector_source_b(src_data)
        pack = blocks.pack_k_bits_bb(2)
        unpack = blocks.unpack_k_bits_bb(2)
        snk = blocks.vector_sink_b()
        self.tb.connect(src, unpack, pack, snk)
        self.tb.run()
        self.assertEqual(list(expected_results), list(snk.data()))

    def test_004(self):
        # Test tags propagation
        # Tags on the incoming bits
        src_data = [1, 0, 1, 1, 0, 0, 0, 1]
        # src_tag_offsets = [1, 2, 3, 5, 6]
        src_tag_offsets = [1, 2, 3, 5, 6, 7]
        # Ground Truth: input bit offsets map to output byte offsets
        # (offset // k for k=2).
        expected_data = [2, 3, 0, 1]
        expected_tag_offsets = [0, 1, 1, 2, 3, 3]
        test_tags = list()
        tag_indexs = range(len(src_tag_offsets))
        for src_tag in tag_indexs:
            test_tags.append(
                gr.tag_utils.python_to_tag(
                    (
                        src_tag_offsets[src_tag],
                        pmt.intern("tag_byte"),
                        pmt.from_long(src_tag),
                        None,
                    )
                )
            )
        src = blocks.vector_source_b(src_data, False, 1, test_tags)
        op = blocks.pack_k_bits_bb(2)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        # Check the data
        self.assertEqual(expected_data, dst.data())
        # Check the tag values
        self.assertEqual(list(tag_indexs), [pmt.to_python(x.value) for x in dst.tags()])
        # Check the tag offsets
        self.assertEqual(expected_tag_offsets, [x.offset for x in dst.tags()])
if __name__ == "__main__":
    # Run this QA suite through GNU Radio's unittest wrapper.
    gr_unittest.run(test_pack)
# --- file: reclineview/plugin.py ---
# encoding: utf-8
from __future__ import annotations
import os
from logging import getLogger
from typing import Any, Callable, Container
import ckan.plugins as p
import ckan.plugins.toolkit as toolkit
import yaml
from ckan.common import CKANConfig, config, json
from ckan.plugins.toolkit import _
from ckan.types import Context, Validator
log = getLogger(__name__)
ignore_empty = p.toolkit.get_validator("ignore_empty")
natural_number_validator = p.toolkit.get_validator("natural_number_validator")
Invalid = p.toolkit.Invalid
def get_mapview_config() -> dict[str, Any]:
    """
    Extract and return the map view configuration of the reclineview
    extension: every config option under the spatial common-map namespace,
    with the namespace prefix stripped from the keys.
    """
    namespace = "ckanext.spatial.common_map."
    settings = {}
    for key, value in config.items():
        if key.startswith(namespace):
            settings[key.replace(namespace, "")] = value
    return settings
def get_dataproxy_url() -> str:
    """
    Return the value of the ckan.recline.dataproxy_url config option.
    """
    option_name = "ckan.recline.dataproxy_url"
    return config.get(option_name)
def in_list(list_possible_values: Callable[[], Container[Any]]) -> Validator:
    """
    Build a validator that checks the input value is one of the given
    possible values.

    :param list_possible_values: function that returns list of possible values
        for validated field
    :type possible_values: function
    """

    def validate(value: Any):
        allowed = list_possible_values()
        if value not in allowed:
            raise Invalid('"{0}" is not a valid parameter'.format(value))

    return validate
def datastore_fields(resource: dict[str, Any], valid_field_types: Container[str]):
    """
    Return all datastore fields of *resource* whose type is listed in
    *valid_field_types*, as {"value": id, "text": id} option dicts.

    :param resource: resource dict
    :type resource: dict
    :param valid_field_types: field types to include in returned list
    :type valid_field_types: list of strings
    """
    payload = {"resource_id": resource["id"], "limit": 0}
    search = toolkit.get_action("datastore_search")
    fields = search({}, payload)["fields"]
    options = []
    for field in fields:
        if field["type"] in valid_field_types:
            options.append({"value": field["id"], "text": field["id"]})
    return options
def _load_declaration(declaration: Any):
    """Load the shared config_declaration.yaml into *declaration*."""
    config_path = os.path.join(os.path.dirname(__file__), "config_declaration.yaml")
    with open(config_path) as src:
        data = yaml.safe_load(src)
    try:
        declaration.load_dict(data)
    except ValueError:
        # Several recline plugins share this config declaration; loading it
        # a second time raises ValueError, which is safe to ignore.
        pass
class ReclineViewBase(p.SingletonPlugin):
    """
    Base class for the Recline view extensions.
    """

    p.implements(p.IConfigurer, inherit=True)
    p.implements(p.IResourceView, inherit=True)
    p.implements(p.ITemplateHelpers, inherit=True)

    def update_config(self, config: CKANConfig):
        """
        Set up the resource library, public directory and
        template directory for the view
        """
        toolkit.add_template_directory(config, "templates")
        toolkit.add_resource("assets", "ckanext-reclineview")
        # BUG FIX: the two string fragments were concatenated without a
        # separating space, producing "...deprecated andwill be removed...".
        log.warning(
            "The Recline-based views are deprecated and "
            "will be removed in future versions"
        )

    def can_view(self, data_dict: dict[str, Any]):
        # Viewable when the resource is in the datastore (or is a
        # datastore-only resource, signalled via its URL).
        resource = data_dict["resource"]
        return resource.get(
            "datastore_active"
        ) or "_datastore_only_resource" in resource.get("url", "")

    def setup_template_variables(self, context: Context, data_dict: dict[str, Any]):
        # JSON-encode the resource and view dicts for the client-side app.
        return {
            "resource_json": json.dumps(data_dict["resource"]),
            "resource_view_json": json.dumps(data_dict["resource_view"]),
        }

    def view_template(self, context: Context, data_dict: dict[str, Any]):
        return "recline_view.html"

    def get_helpers(self) -> dict[str, Callable[..., Any]]:
        return {
            "get_map_config": get_mapview_config,
            "get_dataproxy_url": get_dataproxy_url,
        }
class ReclineView(ReclineViewBase):
    """
    This extension views resources using a Recline MultiView.
    """

    p.implements(p.IConfigDeclaration)

    def declare_config_options(self, declaration: Any, key: Any):
        _load_declaration(declaration)

    def info(self) -> dict[str, Any]:
        return {
            "name": "recline_view",
            "title": _("Data Explorer"),
            "filterable": True,
            "icon": "table",
            "requires_datastore": False,
            "default_title": p.toolkit._("Data Explorer"),
        }

    def can_view(self, data_dict: dict[str, Any]):
        # Datastore-backed resources are always viewable.
        resource = data_dict["resource"]
        in_datastore = resource.get("datastore_active") or (
            "_datastore_only_resource" in resource.get("url", "")
        )
        if in_datastore:
            return True
        # Otherwise fall back to a tabular file-format check.
        resource_format = resource.get("format", None)
        if not resource_format:
            return False
        return resource_format.lower() in ["csv", "xls", "xlsx", "ods", "tsv"]
class ReclineGridView(ReclineViewBase):
    """
    This extension views resources using a Recline grid.
    """

    p.implements(p.IConfigDeclaration)

    def declare_config_options(self, declaration: Any, key: Any):
        _load_declaration(declaration)

    def info(self) -> dict[str, Any]:
        # View metadata consumed by CKAN's IResourceView machinery.
        return {
            "name": "recline_grid_view",
            "title": _("Grid"),
            "filterable": True,
            "icon": "table",
            "requires_datastore": True,
            "default_title": p.toolkit._("Table"),
        }
class ReclineGraphView(ReclineViewBase):
    """
    This extension views resources using a Recline graph.
    """

    p.implements(p.IConfigDeclaration)

    def declare_config_options(self, declaration: Any, key: Any):
        _load_declaration(declaration)

    # Choices offered in the graph-type dropdown of the view form.
    graph_types = [
        {"value": "lines-and-points", "text": "Lines and points"},
        {"value": "lines", "text": "Lines"},
        {"value": "points", "text": "Points"},
        {"value": "bars", "text": "Bars"},
        {"value": "columns", "text": "Columns"},
    ]
    # NOTE(review): mutable class attribute, rebound per-instance in
    # setup_template_variables() -- confirm no cross-request sharing is
    # relied upon (SingletonPlugin instances are shared).
    datastore_fields = []
    datastore_field_types = ["numeric", "int4", "timestamp"]

    def list_graph_types(self):
        return [t["value"] for t in self.graph_types]

    def list_datastore_fields(self):
        return [t["value"] for t in self.datastore_fields]

    def info(self) -> dict[str, Any]:
        # in_list validator here is passed functions because this
        # method does not know what the possible values of the
        # datastore fields are (requires a datastore search)
        schema = {
            "offset": [ignore_empty, natural_number_validator],
            "limit": [ignore_empty, natural_number_validator],
            "graph_type": [ignore_empty, in_list(self.list_graph_types)],
            "group": [ignore_empty, in_list(self.list_datastore_fields)],
            "series": [ignore_empty, in_list(self.list_datastore_fields)],
        }
        return {
            "name": "recline_graph_view",
            "title": _("Graph"),
            "filterable": True,
            "icon": "chart-bar",
            "requires_datastore": True,
            "schema": schema,
            "default_title": p.toolkit._("Graph"),
        }

    def setup_template_variables(self, context: Context, data_dict: dict[str, Any]):
        # Resolve the graphable fields for this resource, then extend the
        # base template variables with them.
        self.datastore_fields = datastore_fields(
            data_dict["resource"], self.datastore_field_types
        )
        vars: dict[str, Any] = ReclineViewBase.setup_template_variables(
            self, context, data_dict
        )
        vars.update(
            {"graph_types": self.graph_types, "graph_fields": self.datastore_fields}
        )
        return vars

    def form_template(self, context: Context, data_dict: dict[str, Any]):
        return "recline_graph_form.html"
class ReclineMapView(ReclineViewBase):
    """
    This extension views resources using a Recline map.
    """

    # Choices for how geometry is expressed in the data.
    map_field_types = [
        {"value": "lat_long", "text": "Latitude / Longitude fields"},
        {"value": "geojson", "text": "GeoJSON"},
    ]
    # NOTE(review): mutable class attribute, rebound per-instance in
    # setup_template_variables() -- confirm no cross-request sharing is
    # relied upon (SingletonPlugin instances are shared).
    datastore_fields = []
    datastore_field_latlon_types = ["numeric", "text"]
    datastore_field_geojson_types = ["text"]

    def list_map_field_types(self):
        return [t["value"] for t in self.map_field_types]

    def list_datastore_fields(self):
        return [t["value"] for t in self.datastore_fields]

    def info(self) -> dict[str, Any]:
        # in_list validator here is passed functions because this
        # method does not know what the possible values of the
        # datastore fields are (requires a datastore search)
        schema = {
            "offset": [ignore_empty, natural_number_validator],
            "limit": [ignore_empty, natural_number_validator],
            "map_field_type": [ignore_empty, in_list(self.list_map_field_types)],
            "latitude_field": [ignore_empty, in_list(self.list_datastore_fields)],
            "longitude_field": [ignore_empty, in_list(self.list_datastore_fields)],
            "geojson_field": [ignore_empty, in_list(self.list_datastore_fields)],
            "auto_zoom": [ignore_empty],
            "cluster_markers": [ignore_empty],
        }
        return {
            "name": "recline_map_view",
            "title": _("Map"),
            "schema": schema,
            "filterable": True,
            "icon": "map-marker",
            "default_title": p.toolkit._("Map"),
        }

    def setup_template_variables(self, context: Context, data_dict: dict[str, Any]):
        # Candidate fields for lat/long and GeoJSON geometry sources.
        map_latlon_fields = datastore_fields(
            data_dict["resource"], self.datastore_field_latlon_types
        )
        map_geojson_fields = datastore_fields(
            data_dict["resource"], self.datastore_field_geojson_types
        )
        self.datastore_fields = map_latlon_fields + map_geojson_fields
        vars: dict[str, Any] = ReclineViewBase.setup_template_variables(
            self, context, data_dict
        )
        vars.update(
            {
                "map_field_types": self.map_field_types,
                "map_latlon_fields": map_latlon_fields,
                "map_geojson_fields": map_geojson_fields,
            }
        )
        return vars

    def form_template(self, context: Context, data_dict: dict[str, Any]):
        return "recline_map_form.html"
# --- file: aliceVision/SfMTransfer.py ---
__version__ = "2.1"
import os.path
from meshroom.core import desc
class SfMTransfer(desc.AVCommandLineNode):
    """Meshroom node wrapping the aliceVision_sfmTransfer command line."""

    commandLine = "aliceVision_sfmTransfer {allParams}"
    size = desc.DynamicNodeSize("input")

    category = "Utils"
    # BUG FIX: user-facing documentation read "form one SfM scene".
    documentation = """
This node allows to transfer poses and/or intrinsics from one SfM scene onto another one.
"""

    inputs = [
        desc.File(
            name="input",
            label="Input",
            description="SfMData file.",
            value="",
            uid=[0],
        ),
        desc.File(
            name="reference",
            label="Reference",
            description="Path to the scene used as the reference to retrieve resolved poses and intrinsics.",
            value="",
            uid=[0],
        ),
        desc.ChoiceParam(
            name="method",
            label="Matching Method",
            description="Matching Method:\n"
            " - from_viewid: Align cameras with same view ID.\n"
            " - from_filepath: Align cameras with a filepath matching, using 'fileMatchingPattern'.\n"
            " - from_metadata: Align cameras with matching metadata, using 'metadataMatchingList'.\n"
            " - from_intrinsicid: Copy intrinsics parameters.\n",
            value="from_viewid",
            values=[
                "from_viewid",
                "from_filepath",
                "from_metadata",
                "from_intrinsicid",
            ],
            exclusive=True,
            uid=[0],
        ),
        desc.StringParam(
            name="fileMatchingPattern",
            label="File Matching Pattern",
            description="Matching regular expression for the 'from_cameras_filepath' method.\n"
            "You should capture specific parts of the filepath with parentheses to define matching elements.\n"
            "Some examples of patterns:\n"
            " - Match the filename without extension (default value): "
            r'".*\/(.*?)\.\w{3}"' + "\n"
            ' - Match the filename suffix after "_": '
            r'".*\/.*(_.*?\.\w{3})"' + "\n"
            ' - Match the filename prefix before "_": '
            r'".*\/(.*?)_.*\.\w{3}"',
            value=r".*\/(.*?)\.\w{3}",
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="metadataMatching",
                label="Metadata",
                description="Metadata that should match to create correspondences.",
                value="",
                uid=[0],
            ),
            name="metadataMatchingList",
            label="Metadata Matching List",
            description="List of metadata that should match to create the correspondences.\n"
            "If the list is empty, the default value will be used:\n"
            "['Make', 'Model', 'Exif:BodySerialNumber', 'Exif:LensSerialNumber'].",
        ),
        desc.BoolParam(
            name="transferPoses",
            label="Poses",
            description="Transfer poses.",
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name="transferIntrinsics",
            label="Intrinsics",
            description="Transfer cameras intrinsics.",
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name="transferLandmarks",
            label="Landmarks",
            description="Transfer landmarks.",
            value=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name="verboseLevel",
            label="Verbose Level",
            description="Verbosity level (fatal, error, warning, info, debug, trace).",
            value="info",
            values=["fatal", "error", "warning", "info", "debug", "trace"],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="output",
            label="SfMData",
            description="Path to the output SfM point cloud file (in SfMData format).",
            # Reuse the input file's base name; fall back to "sfmData".
            value=lambda attr: desc.Node.internalFolder
            + (
                os.path.splitext(os.path.basename(attr.node.input.value))[0]
                or "sfmData"
            )
            + ".abc",
            uid=[],
        ),
        desc.File(
            name="outputViewsAndPoses",
            label="Poses",
            description="Path to the output SfMData file with cameras (views and poses).",
            value=desc.Node.internalFolder + "cameras.sfm",
            uid=[],
        ),
    ]
# --- file: filter/qa_fft_filter.py ---
#!/usr/bin/env python
#
# Copyright 2004,2005,2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
import sys
from gnuradio import blocks, filter, gr, gr_unittest
def make_random_complex_tuple(L):
    """Return a list of L complex numbers with re/im parts uniform in [-1, 1)."""
    values = []
    for _ in range(L):
        re_part = 2 * random.random() - 1
        im_part = 2 * random.random() - 1
        values.append(complex(re_part, im_part))
    return values
def make_random_float_tuple(L):
    """Return a list of L floats drawn uniformly from [-1, 1)."""
    values = []
    for _ in range(L):
        values.append(2 * random.random() - 1)
    return values
def reference_filter_ccc(dec, taps, input):
    """
    Compute the reference result using the conventional (non-FFT)
    complex-in/complex-out FIR filter with decimation *dec*.
    """
    tb = gr.top_block()
    # src = blocks.vector_source_c(((0,) * (len(taps) - 1)) + input)
    src = blocks.vector_source_c(input)
    op = filter.fir_filter_ccc(dec, taps)
    dst = blocks.vector_sink_c()
    tb.connect(src, op, dst)
    tb.run()
    return dst.data()
def reference_filter_fff(dec, taps, input):
    """Compute the expected output using a conventional (time-domain) FIR
    filter with float input, float taps, decimating by *dec*."""
    sink = blocks.vector_sink_f()
    flowgraph = gr.top_block()
    flowgraph.connect(
        blocks.vector_source_f(input),
        filter.fir_filter_fff(dec, taps),
        sink,
    )
    flowgraph.run()
    return sink.data()
def reference_filter_ccf(dec, taps, input):
    """Compute the expected output using a conventional (time-domain) FIR
    filter with complex input, float taps, decimating by *dec*."""
    sink = blocks.vector_sink_c()
    flowgraph = gr.top_block()
    flowgraph.connect(
        blocks.vector_source_c(input),
        filter.fir_filter_ccf(dec, taps),
        sink,
    )
    flowgraph.run()
    return sink.data()
def print_complex(x):
    """Write the elements of *x* to stdout as "(re, imj), " pairs, ending
    the line with a newline."""
    pieces = [
        "(%6.3f,%6.3fj), " % (complex(v).real, complex(v).imag) for v in x
    ]
    sys.stdout.write("".join(pieces) + "\n")
class test_fft_filter(gr_unittest.TestCase):
    """QA for the FFT-based FIR filters (fft_filter_ccc/_ccf/_fff).

    The simple tests check trivial taps against hand-computed outputs;
    the randomized tests compare against the equivalent time-domain FIR
    filter (see the reference_filter_* helpers), with and without
    decimation and with multi-threaded FFTs.
    """

    def setUp(self):
        # Deterministic random vectors so failures are reproducible.
        random.seed(0)

    def tearDown(self):
        pass

    def assert_fft_ok2(self, expected_result, result_data):
        """Loose-tolerance comparison for complex results."""
        expected_result = expected_result[: len(result_data)]
        self.assertComplexTuplesAlmostEqual2(
            expected_result, result_data, abs_eps=1e-9, rel_eps=4e-4
        )

    def assert_fft_float_ok2(
        self, expected_result, result_data, abs_eps=1e-9, rel_eps=4e-4
    ):
        """Loose-tolerance comparison for float results."""
        expected_result = expected_result[: len(result_data)]
        self.assertFloatTuplesAlmostEqual2(
            expected_result, result_data, abs_eps, rel_eps
        )

    def test_ccc_001(self):
        """Identity taps pass the signal through unchanged (ccc)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (1,)
        expected_result = tuple([complex(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_c(src_data)
        op = filter.fft_filter_ccc(1, taps)
        dst = blocks.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)

    def test_ccc_002(self):
        """Scaling taps with nthreads=2 (ccc)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (2,)
        nthreads = 2
        expected_result = tuple([2 * complex(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_c(src_data)
        op = filter.fft_filter_ccc(1, taps, nthreads)
        dst = blocks.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)

    def test_ccc_003(self):
        """Scaling taps, single thread (ccc)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (2,)
        expected_result = tuple([2 * complex(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_c(src_data)
        op = filter.fft_filter_ccc(1, taps)
        dst = blocks.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)

    def test_ccc_004(self):
        """Random complex taps/data vs. time-domain reference (no decimation)."""
        random.seed(0)
        for i in range(25):
            src_len = 4 * 1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 1000))
            taps = make_random_complex_tuple(ntaps)

            expected_result = reference_filter_ccc(1, taps, src_data)

            src = blocks.vector_source_c(src_data)
            op = filter.fft_filter_ccc(1, taps)
            dst = blocks.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()
            del tb

            self.assert_fft_ok2(expected_result, result_data)

    def test_ccc_005(self):
        """Random complex taps/data vs. reference, decimation 1..25."""
        random.seed(0)
        for i in range(25):
            dec = i + 1
            src_len = 4 * 1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_complex_tuple(ntaps)

            expected_result = reference_filter_ccc(dec, taps, src_data)

            src = blocks.vector_source_c(src_data)
            op = filter.fft_filter_ccc(dec, taps)
            dst = blocks.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            del tb
            result_data = dst.data()

            self.assert_fft_ok2(expected_result, result_data)

    def test_ccc_006(self):
        """Decimation with nthreads=2 (ccc)."""
        random.seed(0)
        nthreads = 2
        for i in range(25):
            dec = i + 1
            src_len = 4 * 1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_complex_tuple(ntaps)

            expected_result = reference_filter_ccc(dec, taps, src_data)

            src = blocks.vector_source_c(src_data)
            op = filter.fft_filter_ccc(dec, taps, nthreads)
            dst = blocks.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            del tb
            result_data = dst.data()

            self.assert_fft_ok2(expected_result, result_data)

    # ----------------------------------------------------------------
    # test _ccf version
    # ----------------------------------------------------------------

    def test_ccf_001(self):
        """Identity taps pass the signal through unchanged (ccf)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (1,)
        expected_result = tuple([complex(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_c(src_data)
        op = filter.fft_filter_ccf(1, taps)
        dst = blocks.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)

    def test_ccf_002(self):
        """Scaling taps with nthreads=2 (ccf)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (2,)
        nthreads = 2
        expected_result = tuple([2 * complex(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_c(src_data)
        op = filter.fft_filter_ccf(1, taps, nthreads)
        dst = blocks.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)

    def test_ccf_003(self):
        """Scaling taps, single thread (ccf)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (2,)
        expected_result = tuple([2 * complex(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_c(src_data)
        op = filter.fft_filter_ccf(1, taps)
        dst = blocks.vector_sink_c()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)

    def test_ccf_004(self):
        """Random float taps vs. time-domain reference (no decimation, ccf)."""
        random.seed(0)
        for i in range(25):
            src_len = 4 * 1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 1000))
            taps = make_random_float_tuple(ntaps)

            expected_result = reference_filter_ccf(1, taps, src_data)

            src = blocks.vector_source_c(src_data)
            op = filter.fft_filter_ccf(1, taps)
            dst = blocks.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()
            del tb

            self.assert_fft_ok2(expected_result, result_data)

    def test_ccf_005(self):
        """Random float taps vs. reference, decimation 1..25 (ccf)."""
        random.seed(0)
        for i in range(25):
            dec = i + 1
            src_len = 4 * 1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_float_tuple(ntaps)

            expected_result = reference_filter_ccf(dec, taps, src_data)

            src = blocks.vector_source_c(src_data)
            op = filter.fft_filter_ccf(dec, taps)
            dst = blocks.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            del tb
            result_data = dst.data()

            self.assert_fft_ok2(expected_result, result_data)

    def test_ccf_006(self):
        """Decimation with nthreads=2 (ccf)."""
        random.seed(0)
        nthreads = 2
        for i in range(25):
            dec = i + 1
            src_len = 4 * 1024
            src_data = make_random_complex_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_float_tuple(ntaps)

            expected_result = reference_filter_ccf(dec, taps, src_data)

            src = blocks.vector_source_c(src_data)
            # Bug fix: this previously instantiated fft_filter_ccc with
            # float taps, so the threaded ccf path was never exercised.
            op = filter.fft_filter_ccf(dec, taps, nthreads)
            dst = blocks.vector_sink_c()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            del tb
            result_data = dst.data()

            self.assert_fft_ok2(expected_result, result_data)

    # ----------------------------------------------------------------
    # test _fff version
    # ----------------------------------------------------------------

    def test_fff_001(self):
        """Identity taps pass the signal through unchanged (fff)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (1,)
        expected_result = tuple([float(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_f(src_data)
        op = filter.fft_filter_fff(1, taps)
        dst = blocks.vector_sink_f()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)

    def test_fff_002(self):
        """Scaling taps, single thread (fff)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (2,)
        expected_result = tuple([2 * float(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_f(src_data)
        op = filter.fft_filter_fff(1, taps)
        dst = blocks.vector_sink_f()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)

    def test_fff_003(self):
        """Test 002 with nthreads=2 (fff)."""
        tb = gr.top_block()
        src_data = (0, 1, 2, 3, 4, 5, 6, 7)
        taps = (2,)
        nthreads = 2
        expected_result = tuple([2 * float(x) for x in (0, 1, 2, 3, 4, 5, 6, 7)])
        src = blocks.vector_source_f(src_data)
        op = filter.fft_filter_fff(1, taps, nthreads)
        dst = blocks.vector_sink_f()
        tb.connect(src, op, dst)
        tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)

    def xtest_fff_004(self):
        """Disabled (x-prefixed): random float taps vs. reference."""
        random.seed(0)
        for i in range(25):
            sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            src_len = 4096
            src_data = make_random_float_tuple(src_len)
            ntaps = int(random.uniform(2, 1000))
            taps = make_random_float_tuple(ntaps)

            expected_result = reference_filter_fff(1, taps, src_data)

            src = blocks.vector_source_f(src_data)
            op = filter.fft_filter_fff(1, taps)
            dst = blocks.vector_sink_f()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()

            try:
                self.assert_fft_float_ok2(expected_result, result_data, abs_eps=1.0)
            except AssertionError:
                # Dump both sequences for offline diffing. Use context
                # managers so the files are flushed and closed (the
                # original leaked both handles).
                with open("expected", "w") as f:
                    for x in expected_result:
                        f.write(repr(x) + "\n")
                with open("actual", "w") as f:
                    for x in result_data:
                        f.write(repr(x) + "\n")
                raise

    def xtest_fff_005(self):
        """Disabled (x-prefixed): random float taps, looser tolerance."""
        random.seed(0)
        for i in range(25):
            sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            src_len = 4 * 1024
            src_data = make_random_float_tuple(src_len)
            ntaps = int(random.uniform(2, 1000))
            taps = make_random_float_tuple(ntaps)

            expected_result = reference_filter_fff(1, taps, src_data)

            src = blocks.vector_source_f(src_data)
            op = filter.fft_filter_fff(1, taps)
            dst = blocks.vector_sink_f()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()

            self.assert_fft_float_ok2(expected_result, result_data, abs_eps=2.0)

    def xtest_fff_006(self):
        """Disabled (x-prefixed): decimating float filter vs. reference."""
        random.seed(0)
        for i in range(25):
            sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            dec = i + 1
            src_len = 4 * 1024
            src_data = make_random_float_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_float_tuple(ntaps)

            expected_result = reference_filter_fff(dec, taps, src_data)

            src = blocks.vector_source_f(src_data)
            op = filter.fft_filter_fff(dec, taps)
            dst = blocks.vector_sink_f()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()

            self.assert_fft_float_ok2(expected_result, result_data)

    def xtest_fff_007(self):
        """Disabled (x-prefixed): decimation with nthreads=2 (fff)."""
        random.seed(0)
        nthreads = 2
        for i in range(25):
            sys.stderr.write("\n>>> Loop = %d\n" % (i,))
            dec = i + 1
            src_len = 4 * 1024
            src_data = make_random_float_tuple(src_len)
            ntaps = int(random.uniform(2, 100))
            taps = make_random_float_tuple(ntaps)

            expected_result = reference_filter_fff(dec, taps, src_data)

            src = blocks.vector_source_f(src_data)
            op = filter.fft_filter_fff(dec, taps, nthreads)
            dst = blocks.vector_sink_f()
            tb = gr.top_block()
            tb.connect(src, op, dst)
            tb.run()
            result_data = dst.data()

            self.assert_fft_float_ok2(expected_result, result_data)

    def test_fff_get0(self):
        """taps() returns the taps the float filter was built with."""
        random.seed(0)
        for i in range(25):
            ntaps = int(random.uniform(2, 100))
            taps = list(make_random_float_tuple(ntaps))

            op = filter.fft_filter_fff(1, taps)
            result_data = op.taps()

            self.assertFloatTuplesAlmostEqual(taps, result_data, 4)

    def test_ccc_get0(self):
        """taps() returns the taps the complex filter was built with."""
        random.seed(0)
        for i in range(25):
            ntaps = int(random.uniform(2, 100))
            taps = make_random_complex_tuple(ntaps)

            op = filter.fft_filter_ccc(1, taps)
            result_data = op.taps()

            self.assertComplexTuplesAlmostEqual(taps, result_data, 4)
# Allow running this QA suite directly as a script.
if __name__ == "__main__":
    gr_unittest.run(test_fft_filter)
|
bpd | gstplayer | # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A wrapper for the GStreamer Python bindings that exposes a simple
music player.
"""
import _thread
import copy
import os
import sys
import time
import urllib
import gi
from beets import ui
gi.require_version("Gst", "1.0")
from gi.repository import GLib, Gst # noqa: E402
Gst.init(None)
class QueryError(Exception):
    """Raised when a GStreamer position/duration query fails."""
class GstPlayer:
    """A music player abstracting GStreamer's Playbin element.

    Create a player object, then call run() to start a thread with a
    runloop. Then call play_file to play music. Use player.playing
    to check whether music is currently playing.

    A basic play queue is also implemented (just a Python list,
    player.queue, whose last element is next to play). To use it,
    just call enqueue() and then play(). When a track finishes and
    another is available on the queue, it is played automatically.

    NOTE(review): no ``queue``/``enqueue`` members are defined on this
    class as shown here -- confirm that part of the docstring against
    callers.
    """

    def __init__(self, finished_callback=None):
        """Initialize a player.

        If a finished_callback is provided, it is called every time a
        track started with play_file finishes.

        Once the player has been created, call run() to begin the main
        runloop in a separate thread.

        Raises ui.UserError when the required GStreamer elements
        (playbin, fakesink) cannot be created.
        """
        # Set up the Gstreamer player. From the pygst tutorial:
        # https://pygstdocs.berlios.de/pygst-tutorial/playbin.html (gone)
        # https://brettviren.github.io/pygst-tutorial-org/pygst-tutorial.html
        ####
        # Updated to GStreamer 1.0 with:
        # https://wiki.ubuntu.com/Novacut/GStreamer1.0
        self.player = Gst.ElementFactory.make("playbin", "player")

        if self.player is None:
            raise ui.UserError("Could not create playbin")

        # Route any video stream into a fakesink so only audio plays.
        fakesink = Gst.ElementFactory.make("fakesink", "fakesink")

        if fakesink is None:
            raise ui.UserError("Could not create fakesink")

        self.player.set_property("video-sink", fakesink)
        # Watch the bus so EOS and error messages reach _handle_message.
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._handle_message)

        # Set up our own stuff.
        self.playing = False  # whether a track is currently playing
        self.finished_callback = finished_callback
        self.cached_time = None  # last known (position, length), in seconds
        self._volume = 1.0

    def _get_state(self):
        """Returns the current state flag of the playbin."""
        # gst's get_state function returns a 3-tuple; we just want the
        # status flag in position 1.
        return self.player.get_state(Gst.CLOCK_TIME_NONE)[1]

    def _handle_message(self, bus, message):
        """Callback for status updates from GStreamer."""
        if message.type == Gst.MessageType.EOS:
            # file finished playing
            self.player.set_state(Gst.State.NULL)
            self.playing = False
            self.cached_time = None
            if self.finished_callback:
                self.finished_callback()
        elif message.type == Gst.MessageType.ERROR:
            # error
            self.player.set_state(Gst.State.NULL)
            err, debug = message.parse_error()
            print(f"Error: {err}")
            self.playing = False

    def _set_volume(self, volume):
        """Set the volume level to a value in the range [0, 1.5]."""
        # And the volume for the playbin.
        self._volume = volume
        self.player.set_property("volume", volume)

    def _get_volume(self):
        """Get the volume as a float in the range [0, 1.5]."""
        return self._volume

    volume = property(_get_volume, _set_volume)

    def play_file(self, path):
        """Immediately begin playing the audio file at the given
        path.

        The path may be str or bytes; it is percent-encoded into a
        file:// URI before being handed to the playbin.
        """
        self.player.set_state(Gst.State.NULL)
        if isinstance(path, str):
            path = path.encode("utf-8")
        uri = "file://" + urllib.parse.quote(path)
        self.player.set_property("uri", uri)
        self.player.set_state(Gst.State.PLAYING)
        self.playing = True

    def play(self):
        """If paused, resume playback."""
        if self._get_state() == Gst.State.PAUSED:
            self.player.set_state(Gst.State.PLAYING)
            self.playing = True

    def pause(self):
        """Pause playback."""
        self.player.set_state(Gst.State.PAUSED)

    def stop(self):
        """Halt playback."""
        self.player.set_state(Gst.State.NULL)
        self.playing = False
        self.cached_time = None

    def run(self):
        """Start a new thread for the player.

        Call this function before trying to play any music with
        play_file() or play().
        """

        # If we don't use the MainLoop, messages are never sent.

        def start():
            loop = GLib.MainLoop()
            loop.run()

        _thread.start_new_thread(start, ())

    def time(self):
        """Returns a tuple containing (position, length) where both
        values are integers in seconds. If no stream is available,
        returns (0, 0).
        """
        fmt = Gst.Format(Gst.Format.TIME)
        try:
            # query_* return (success, value); convert ns -> seconds.
            posq = self.player.query_position(fmt)
            if not posq[0]:
                raise QueryError("query_position failed")
            pos = posq[1] / (10**9)

            lengthq = self.player.query_duration(fmt)
            if not lengthq[0]:
                raise QueryError("query_duration failed")
            length = lengthq[1] / (10**9)

            self.cached_time = (pos, length)
            return (pos, length)

        except QueryError:
            # Stream not ready. For small gaps of time, for instance
            # after seeking, the time values are unavailable. For this
            # reason, we cache recent.
            if self.playing and self.cached_time:
                return self.cached_time
            else:
                return (0, 0)

    def seek(self, position):
        """Seeks to position (in seconds)."""
        cur_pos, cur_len = self.time()
        if position > cur_len:
            # Seeking past the end of the track just stops playback.
            self.stop()
            return

        fmt = Gst.Format(Gst.Format.TIME)
        ns = position * 10**9  # convert to nanoseconds
        self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns)

        # save new cached time
        self.cached_time = (position, cur_len)

    def block(self):
        """Block until playing finishes."""
        while self.playing:
            time.sleep(1)

    def get_decoders(self):
        # Delegate to the module-level helper of the same name.
        return get_decoders()
def get_decoders():
    """Get supported audio decoders from GStreamer.

    Returns a dict mapping decoder element names to the associated media types
    and file extensions, i.e. ``{name: (set_of_mime_types, set_of_exts)}``.
    """
    # We only care about audio decoder elements.
    filt = (
        Gst.ELEMENT_FACTORY_TYPE_DEPAYLOADER
        | Gst.ELEMENT_FACTORY_TYPE_DEMUXER
        | Gst.ELEMENT_FACTORY_TYPE_PARSER
        | Gst.ELEMENT_FACTORY_TYPE_DECODER
        | Gst.ELEMENT_FACTORY_TYPE_MEDIA_AUDIO
    )

    decoders = {}
    mime_types = set()
    for f in Gst.ElementFactory.list_get_elements(filt, Gst.Rank.NONE):
        # Collect the media types each element consumes on its sink pads.
        for pad in f.get_static_pad_templates():
            if pad.direction == Gst.PadDirection.SINK:
                caps = pad.static_caps.get()
                mimes = set()
                for i in range(caps.get_size()):
                    struct = caps.get_structure(i)
                    mime = struct.get_name()
                    if mime == "unknown/unknown":
                        continue
                    mimes.add(mime)
                    mime_types.add(mime)
                if mimes:
                    # Extensions filled in below (second pass).
                    decoders[f.get_name()] = (mimes, set())

    # Check all the TypeFindFactory plugin features from the registry. If they
    # are associated with an audio media type that we found above, get the list
    # of corresponding file extensions.
    mime_extensions = {mime: set() for mime in mime_types}
    for feat in Gst.Registry.get().get_feature_list(Gst.TypeFindFactory):
        caps = feat.get_caps()
        if caps:
            for i in range(caps.get_size()):
                struct = caps.get_structure(i)
                mime = struct.get_name()
                if mime in mime_types:
                    mime_extensions[mime].update(feat.get_extensions())

    # Fill in the slot we left for file extensions.
    for name, (mimes, exts) in decoders.items():
        for mime in mimes:
            exts.update(mime_extensions[mime])

    return decoders
def play_simple(paths):
    """Play *paths* one after another, blocking on each track instead of
    relying on the player's finished callback."""
    player = GstPlayer()
    player.run()
    for track in paths:
        player.play_file(track)
        player.block()
def play_complicated(paths):
    """Play the files in `paths` one after the other by using the
    callback function to advance to the next song.
    """
    my_paths = copy.copy(paths)

    def next_song():
        # Drop the track that just finished and start the next one.
        my_paths.pop(0)
        # Bug fix: the original unconditionally played my_paths[0] here,
        # which raised IndexError in the callback thread after the last
        # track finished (and left the wait loop below unable to exit
        # cleanly).
        if my_paths:
            p.play_file(my_paths[0])

    p = GstPlayer(next_song)
    p.run()
    p.play_file(my_paths[0])
    # Poll until the callback has drained the queue.
    while my_paths:
        time.sleep(1)
if __name__ == "__main__":
    # A very simple command-line player. Just give it names of audio
    # files on the command line; these are all played in sequence.
    # Paths are normalized so the file:// URIs built later are absolute.
    paths = [os.path.abspath(os.path.expanduser(p)) for p in sys.argv[1:]]
    # play_simple(paths)
    play_complicated(paths)
|
femviewprovider | view_base_femconstraint | # ***************************************************************************
# * Copyright (c) 2017 Markus Hovorka <m.hovorka@live.de> *
# * Copyright (c) 2018 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
# Module metadata used by FreeCAD's documentation tooling.
__title__ = "FreeCAD FEM base constraint ViewProvider"
__author__ = "Markus Hovorka, Bernd Hahnebach"
__url__ = "https://www.freecad.org"
## @package view_base_femconstraint
# \ingroup FEM
# \brief view provider for Python base constraint object
from femviewprovider import view_base_femobject
from pivy import coin
class VPBaseFemConstraint(view_base_femobject.VPBaseFemObject):
    """Proxy View Provider for Python base constraint objects."""

    def attach(self, vobj):
        # Register a single "Default" display mode backed by an empty
        # scenegraph group.
        vobj.addDisplayMode(coin.SoGroup(), "Default")
        # Keep a reference to the document object; it is used in various
        # places (claiming children, providing the icon, etc.).
        self.Object = vobj.Object
        # self.ViewObject = vobj  # not used ATM

    def getDisplayModes(self, obj):
        """Return the list of supported display mode names."""
        return ["Default"]

    def getDefaultDisplayMode(self):
        """Return the display mode used when the object is created."""
        return "Default"

    def setDisplayMode(self, mode):
        """Map the requested display mode to one we support (identity)."""
        return mode
|
logic | schema | # encoding: utf-8
import inspect
from functools import wraps
from typing import Any, Callable, Iterable, cast
import ckan.model
import ckan.plugins as plugins
from ckan.logic import get_validator
from ckan.types import (
ComplexSchemaFunc,
PlainSchemaFunc,
Schema,
Validator,
ValidatorFactory,
)
def validator_args(fn: ComplexSchemaFunc) -> PlainSchemaFunc:
    """collect validator names from argument names
    and pass them to wrapped function"""
    names = tuple(inspect.signature(fn).parameters)

    @wraps(fn)
    def wrapper():
        # Resolve each parameter name to the registered validator of the
        # same name at call time, then invoke the schema function.
        return fn(**dict(zip(names, map(get_validator, names))))

    return wrapper
@validator_args
def default_resource_schema(
    ignore_empty: Validator,
    unicode_safe: Validator,
    ignore: Validator,
    ignore_missing: Validator,
    remove_whitespace: Validator,
    if_empty_guess_format: Validator,
    clean_format: Validator,
    isodate: Validator,
    int_validator: Validator,
    extras_valid_json: Validator,
    keep_extras: Validator,
    resource_id_validator: Validator,
    resource_id_does_not_exist: Validator,
) -> Schema:
    """Schema for validating a single resource dict at creation time.

    Each key maps to an ordered chain of validators; order matters.
    """
    return {
        "id": [
            ignore_empty,
            resource_id_validator,
            resource_id_does_not_exist,
            unicode_safe,
        ],
        # The owning package is set by the action, not by the caller.
        "package_id": [ignore],
        "url": [ignore_missing, unicode_safe, remove_whitespace],
        "description": [ignore_missing, unicode_safe],
        "format": [if_empty_guess_format, ignore_missing, clean_format, unicode_safe],
        "hash": [ignore_missing, unicode_safe],
        "state": [ignore],
        "position": [ignore],
        "name": [ignore_missing, unicode_safe],
        "resource_type": [ignore_missing, unicode_safe],
        "url_type": [ignore_missing, unicode_safe],
        "mimetype": [ignore_missing, unicode_safe],
        "mimetype_inner": [ignore_missing, unicode_safe],
        "cache_url": [ignore_missing, unicode_safe],
        "size": [ignore_missing, int_validator],
        "created": [ignore_missing, isodate],
        "last_modified": [ignore_missing, isodate],
        "cache_last_updated": [ignore_missing, isodate],
        "tracking_summary": [ignore_missing],
        "datastore_active": [ignore_missing],
        # Any unknown keys must be valid JSON and are kept as extras.
        "__extras": [ignore_missing, extras_valid_json, keep_extras],
    }
@validator_args
def default_update_resource_schema() -> Schema:
    """Schema for resources on update; currently identical to the
    create-time resource schema."""
    return default_resource_schema()
@validator_args
def default_tags_schema(
    not_missing: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    tag_length_validator: Validator,
    tag_name_validator: Validator,
    ignore_missing: Validator,
    vocabulary_id_exists: Validator,
    ignore: Validator,
) -> Schema:
    """Schema for validating a single tag dict."""
    return {
        "name": [
            not_missing,
            not_empty,
            unicode_safe,
            tag_length_validator,
            tag_name_validator,
        ],
        # Optional: tags may belong to a vocabulary, which must exist.
        "vocabulary_id": [ignore_missing, unicode_safe, vocabulary_id_exists],
        "revision_timestamp": [ignore],
        "state": [ignore],
        "display_name": [ignore],
    }
@validator_args
def default_create_tag_schema(
    not_missing: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    vocabulary_id_exists: Validator,
    tag_not_in_vocabulary: Validator,
    empty: Validator,
) -> Schema:
    """Schema for tag_create(): a vocabulary tag with no caller-set id."""
    schema = default_tags_schema()

    # When creating a tag via the tag_create() logic action function, a
    # vocabulary_id _must_ be given (you cannot create free tags via this
    # function).
    schema["vocabulary_id"] = [
        not_missing,
        not_empty,
        unicode_safe,
        vocabulary_id_exists,
        tag_not_in_vocabulary,
    ]

    # You're not allowed to specify your own ID when creating a tag.
    schema["id"] = [empty]

    return schema
@validator_args
def default_create_package_schema(
    duplicate_extras_key: Validator,
    ignore: Validator,
    empty_if_not_sysadmin: Validator,
    ignore_missing: Validator,
    unicode_safe: Validator,
    package_id_does_not_exist: Validator,
    not_empty: Validator,
    name_validator: Validator,
    package_name_validator: Validator,
    strip_value: Validator,
    if_empty_same_as: ValidatorFactory,
    email_validator: Validator,
    package_version_validator: Validator,
    ignore_not_package_admin: Validator,
    boolean_validator: Validator,
    datasets_with_no_organization_cannot_be_private: Validator,
    empty: Validator,
    tag_string_convert: Validator,
    owner_org_validator: Validator,
    json_object: Validator,
    ignore_not_sysadmin: Validator,
) -> Schema:
    """Schema for validating a dataset (package) dict on creation.

    Nested keys (resources, tags, extras, relationships, groups) use the
    corresponding sub-schemas.
    """
    return {
        # "__before" runs on the whole data dict before per-key validation.
        "__before": [duplicate_extras_key, ignore],
        "id": [
            empty_if_not_sysadmin,
            ignore_missing,
            unicode_safe,
            package_id_does_not_exist,
        ],
        "name": [not_empty, unicode_safe, name_validator, package_name_validator],
        # The title defaults to the name when not supplied.
        "title": [if_empty_same_as("name"), unicode_safe],
        "author": [ignore_missing, unicode_safe],
        "author_email": [ignore_missing, unicode_safe, strip_value, email_validator],
        "maintainer": [ignore_missing, unicode_safe],
        "maintainer_email": [
            ignore_missing,
            unicode_safe,
            strip_value,
            email_validator,
        ],
        "license_id": [ignore_missing, unicode_safe],
        "notes": [ignore_missing, unicode_safe],
        "url": [ignore_missing, unicode_safe],
        "version": [ignore_missing, unicode_safe, package_version_validator],
        "state": [ignore_not_package_admin, ignore_missing],
        "type": [ignore_missing, unicode_safe],
        "owner_org": [owner_org_validator, unicode_safe],
        "private": [
            ignore_missing,
            boolean_validator,
            datasets_with_no_organization_cannot_be_private,
        ],
        "__extras": [ignore],
        "__junk": [empty],
        "resources": default_resource_schema(),
        "tags": default_tags_schema(),
        "tag_string": [ignore_missing, tag_string_convert],
        # Arbitrary JSON blob; only sysadmins may set it.
        "plugin_data": [ignore_missing, json_object, ignore_not_sysadmin],
        "extras": default_extras_schema(),
        "save": [ignore],
        "return_to": [ignore],
        "relationships_as_object": default_relationship_schema(),
        "relationships_as_subject": default_relationship_schema(),
        "groups": {
            "id": [ignore_missing, unicode_safe],
            "name": [ignore_missing, unicode_safe],
            "title": [ignore_missing, unicode_safe],
            "__extras": [ignore],
        },
    }
@validator_args
def default_update_package_schema(
    ignore_missing: Validator,
    package_id_not_changed: Validator,
    name_validator: Validator,
    package_name_validator: Validator,
    unicode_safe: Validator,
    owner_org_validator: Validator,
) -> Schema:
    """Schema for validating a dataset dict on update.

    Derived from the create schema, with id/name/title made optional.
    """
    schema = default_create_package_schema()

    schema["resources"] = default_update_resource_schema()

    # Users can (optionally) supply the package id when updating a package, but
    # only to identify the package to be updated, they cannot change the id.
    schema["id"] = [ignore_missing, package_id_not_changed]

    # Supplying the package name when updating a package is optional (you can
    # supply the id to identify the package instead).
    schema["name"] = [
        ignore_missing,
        name_validator,
        package_name_validator,
        unicode_safe,
    ]

    # Supplying the package title when updating a package is optional, if it's
    # not supplied the title will not be changed.
    schema["title"] = [ignore_missing, unicode_safe]

    schema["owner_org"] = [ignore_missing, owner_org_validator, unicode_safe]

    return schema
@validator_args
def default_show_package_schema(
    keep_extras: Validator,
    ignore_missing: Validator,
    clean_format: Validator,
    unicode_safe: Validator,
    not_empty: Validator,
) -> Schema:
    """Schema used when showing a dataset: mostly relaxes/removes the
    create-time validators so no stored values are stripped."""
    schema = default_create_package_schema()

    # Don't strip ids from package dicts when validating them.
    schema["id"] = []

    schema.update({"tags": {"__extras": [keep_extras]}})

    # Add several keys to the 'resources' subschema so they don't get stripped
    # from the resource dicts by validation.
    cast(Schema, schema["resources"]).update(
        {
            "format": [ignore_missing, clean_format, unicode_safe],
            "created": [ignore_missing],
            "position": [not_empty],
            "last_modified": [],
            "cache_last_updated": [],
            "package_id": [],
            "size": [],
            "state": [],
            "mimetype": [],
            "cache_url": [],
            "name": [],
            "description": [],
            "mimetype_inner": [],
            "resource_type": [],
            "url_type": [],
        }
    )

    schema.update(
        {
            "state": [ignore_missing],
            "isopen": [ignore_missing],
            "license_url": [ignore_missing],
        }
    )

    cast(Schema, schema["groups"]).update(
        {
            "description": [ignore_missing],
            "display_name": [ignore_missing],
            "image_display_url": [ignore_missing],
        }
    )

    # Remove validators for several keys from the schema so validation doesn't
    # strip the keys from the package dicts if the values are 'missing' (i.e.
    # None).
    schema["author"] = []
    schema["author_email"] = []
    schema["maintainer"] = []
    schema["maintainer_email"] = []
    schema["license_id"] = []
    schema["notes"] = []
    schema["url"] = []
    schema["version"] = []

    # Add several keys that are missing from default_create_package_schema(),
    # so validation doesn't strip the keys from the package dicts.
    schema["metadata_created"] = []
    schema["metadata_modified"] = []
    schema["creator_user_id"] = []
    schema["num_resources"] = []
    schema["num_tags"] = []
    schema["organization"] = []
    schema["owner_org"] = []
    schema["private"] = []
    schema["tracking_summary"] = [ignore_missing]
    schema["license_title"] = []

    return schema
@validator_args
def default_group_schema(
    ignore_missing: Validator,
    unicode_safe: Validator,
    ignore: Validator,
    not_empty: Validator,
    name_validator: Validator,
    group_name_validator: Validator,
    package_id_or_name_exists: Validator,
    no_loops_in_hierarchy: Validator,
    ignore_not_group_admin: Validator,
) -> Schema:
    """Schema for validating a group (or organization) dict."""
    return {
        "id": [ignore_missing, unicode_safe],
        "name": [not_empty, unicode_safe, name_validator, group_name_validator],
        "title": [ignore_missing, unicode_safe],
        "description": [ignore_missing, unicode_safe],
        "image_url": [ignore_missing, unicode_safe],
        "image_display_url": [ignore_missing, unicode_safe],
        "type": [ignore_missing, unicode_safe],
        "state": [ignore_not_group_admin, ignore_missing],
        "created": [ignore],
        "is_organization": [ignore_missing],
        "approval_status": [ignore_missing, unicode_safe],
        "extras": default_extras_schema(),
        "__extras": [ignore],
        "__junk": [ignore],
        # Member datasets: each must reference an existing package.
        "packages": {
            "id": [not_empty, unicode_safe, package_id_or_name_exists],
            "title": [ignore_missing, unicode_safe],
            "name": [ignore_missing, unicode_safe],
            "__extras": [ignore],
        },
        "users": {
            "name": [not_empty, unicode_safe],
            "capacity": [ignore_missing],
            "__extras": [ignore],
        },
        # Parent groups: guarded against cycles in the hierarchy.
        "groups": {
            "name": [not_empty, no_loops_in_hierarchy, unicode_safe],
            "capacity": [ignore_missing],
            "__extras": [ignore],
        },
    }
@validator_args
def group_form_schema(
    not_empty: Validator,
    unicode_safe: Validator,
    ignore_missing: Validator,
    ignore: Validator,
):
    """Schema used by the group form; overrides the member sub-schemas."""
    schema = default_group_schema()
    # schema['extras_validation'] = [duplicate_extras_key, ignore]
    schema.update(
        {
            "packages": {
                "name": [not_empty, unicode_safe],
                "title": [ignore_missing],
                "__extras": [ignore],
            },
            "users": {
                "name": [not_empty, unicode_safe],
                "capacity": [ignore_missing],
                "__extras": [ignore],
            },
        }
    )
    return schema
@validator_args
def default_update_group_schema(
    ignore_missing: Validator, group_name_validator: Validator, unicode_safe: Validator
):
    """Schema for updating a group: the name becomes optional."""
    schema = default_group_schema()
    schema.update({"name": [ignore_missing, group_name_validator, unicode_safe]})
    return schema
@validator_args
def default_show_group_schema(keep_extras: Validator, ignore_missing: Validator):
    """Schema for displaying a group.

    Makes the default show schema behave like when run with no validation.
    """
    schema = default_group_schema()
    schema.update(
        {
            "num_followers": [],
            "created": [],
            "display_name": [],
            "extras": {"__extras": [keep_extras]},
            "package_count": [ignore_missing],
            "member_count": [ignore_missing],
            "packages": {"__extras": [keep_extras]},
            "state": [],
            "users": {"__extras": [keep_extras]},
        }
    )
    return schema
@validator_args
def default_extras_schema(
    ignore: Validator,
    not_empty: Validator,
    extra_key_not_in_root_schema: Validator,
    unicode_safe: Validator,
    not_missing: Validator,
    ignore_missing: Validator,
) -> Schema:
    """Validation schema for a package/group "extras" key/value entry."""
    return {
        "id": [ignore],
        # extra keys must not collide with keys in the root schema
        "key": [not_empty, extra_key_not_in_root_schema, unicode_safe],
        "value": [not_missing],
        "state": [ignore],
        "deleted": [ignore_missing],
        "revision_timestamp": [ignore],
        "__extras": [ignore],
    }
@validator_args
def default_relationship_schema(
    ignore_missing: Validator,
    unicode_safe: Validator,
    not_empty: Validator,
    one_of: ValidatorFactory,
    ignore: Validator,
) -> Schema:
    """Base validation schema for package relationship dicts."""
    return {
        "id": [ignore_missing, unicode_safe],
        "subject": [ignore_missing, unicode_safe],
        "object": [ignore_missing, unicode_safe],
        # "type" must be one of the relationship types known to the model
        "type": [not_empty, one_of(ckan.model.PackageRelationship.get_all_types())],
        "comment": [ignore_missing, unicode_safe],
        "state": [ignore],
    }
@validator_args
def default_create_relationship_schema(
    empty: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    package_id_or_name_exists: Validator,
):
    """Schema for creating a package relationship.

    The id must not be supplied; subject/object must reference real packages.
    """
    schema = default_relationship_schema()
    endpoint_validators = [not_empty, unicode_safe, package_id_or_name_exists]
    schema.update(
        {
            "id": [empty],
            "subject": endpoint_validators,
            "object": endpoint_validators,
        }
    )
    return schema
@validator_args
def default_update_relationship_schema(
    ignore_missing: Validator, package_id_not_changed: Validator
):
    """Schema for updating a package relationship."""
    schema = default_relationship_schema()
    # Todo: would like to check subject, object & type haven't changed, but
    # no way to do this in schema
    schema.update(
        {
            "id": [ignore_missing, package_id_not_changed],
            "subject": [ignore_missing],
            "object": [ignore_missing],
            "type": [ignore_missing],
        }
    )
    return schema
@validator_args
def default_user_schema(
    ignore_missing: Validator,
    unicode_safe: Validator,
    name_validator: Validator,
    user_name_validator: Validator,
    user_password_validator: Validator,
    user_password_not_empty: Validator,
    ignore_not_sysadmin: Validator,
    not_empty: Validator,
    strip_value: Validator,
    email_validator: Validator,
    user_about_validator: Validator,
    ignore: Validator,
    boolean_validator: Validator,
    json_object: Validator,
) -> Schema:
    """Base validation schema for user dicts.

    Fields restricted to sysadmins (password_hash, sysadmin, state,
    plugin_extras) are guarded with ``ignore_not_sysadmin``.
    """
    return {
        "id": [ignore_missing, unicode_safe],
        "name": [not_empty, name_validator, user_name_validator, unicode_safe],
        "fullname": [ignore_missing, unicode_safe],
        "password": [
            user_password_validator,
            user_password_not_empty,
            ignore_missing,
            unicode_safe,
        ],
        "password_hash": [ignore_missing, ignore_not_sysadmin, unicode_safe],
        "email": [not_empty, strip_value, email_validator, unicode_safe],
        "about": [ignore_missing, user_about_validator, unicode_safe],
        "created": [ignore],
        "sysadmin": [ignore_missing, ignore_not_sysadmin],
        "reset_key": [ignore],
        "activity_streams_email_notifications": [ignore_missing, boolean_validator],
        "state": [ignore_missing, ignore_not_sysadmin],
        "image_url": [ignore_missing, unicode_safe],
        "image_display_url": [ignore_missing, unicode_safe],
        "plugin_extras": [ignore_missing, json_object, ignore_not_sysadmin],
    }
@validator_args
def create_user_for_user_invite_schema(ignore_missing: Validator):
    """User schema variant for invites: the password is optional (set later)."""
    schema = default_user_schema()
    schema.update({"password": [ignore_missing]})
    return schema
@validator_args
def user_new_form_schema(
    unicode_safe: Validator,
    user_both_passwords_entered: Validator,
    user_password_validator: Validator,
    user_passwords_match: Validator,
):
    """Schema for the new-user form with its two password fields."""
    schema = default_user_schema()
    password_checks = [
        unicode_safe,
        user_both_passwords_entered,
        user_password_validator,
        user_passwords_match,
    ]
    schema.update({"password1": password_checks, "password2": [unicode_safe]})
    return schema
@validator_args
def user_edit_form_schema(
    ignore_missing: Validator,
    unicode_safe: Validator,
    user_password_validator: Validator,
    user_passwords_match: Validator,
):
    """Schema for the user-edit form; all password fields are optional."""
    schema = default_user_schema()
    password_checks = [
        ignore_missing,
        unicode_safe,
        user_password_validator,
        user_passwords_match,
    ]
    schema.update(
        {
            "password": [ignore_missing],
            "password1": password_checks,
            "password2": [ignore_missing, unicode_safe],
        }
    )
    return schema
@validator_args
def default_update_user_schema(
    ignore_missing: Validator,
    name_validator: Validator,
    user_name_validator: Validator,
    unicode_safe: Validator,
    user_password_validator: Validator,
):
    """Schema for updating a user: name and password become optional."""
    schema = default_user_schema()
    schema.update(
        {
            "name": [
                ignore_missing,
                name_validator,
                user_name_validator,
                unicode_safe,
            ],
            "password": [user_password_validator, ignore_missing, unicode_safe],
        }
    )
    return schema
@validator_args
def default_user_invite_schema(
    not_empty: Validator,
    email_validator: Validator,
    email_is_unique: Validator,
    unicode_safe: Validator,
) -> Schema:
    """Validation schema for inviting a new user to a group/organization."""
    return {
        "email": [not_empty, email_validator, email_is_unique, unicode_safe],
        "group_id": [not_empty],
        "role": [not_empty],
    }
@validator_args
def default_task_status_schema(
    ignore: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    ignore_missing: Validator,
) -> Schema:
    """Validation schema for background task status records."""
    return {
        "id": [ignore],
        "entity_id": [not_empty, unicode_safe],
        "entity_type": [not_empty, unicode_safe],
        "task_type": [not_empty, unicode_safe],
        "key": [not_empty, unicode_safe],
        "value": [ignore_missing],
        "state": [ignore_missing],
        "last_updated": [ignore_missing],
        "error": [ignore_missing],
    }
@validator_args
def default_vocabulary_schema(
    ignore_missing: Validator,
    unicode_safe: Validator,
    vocabulary_id_exists: Validator,
    not_empty: Validator,
    vocabulary_name_validator: Validator,
) -> Schema:
    """Base validation schema for tag vocabularies."""
    return {
        "id": [ignore_missing, unicode_safe, vocabulary_id_exists],
        "name": [not_empty, unicode_safe, vocabulary_name_validator],
        # nested tag dicts are validated by the default tags schema
        "tags": default_tags_schema(),
    }
@validator_args
def default_create_vocabulary_schema(empty: Validator):
    """Schema for creating a vocabulary: an id must not be supplied."""
    schema = default_vocabulary_schema()
    schema.update({"id": [empty]})
    return schema
@validator_args
def default_update_vocabulary_schema(
    ignore_missing: Validator,
    vocabulary_id_not_changed: Validator,
    vocabulary_name_validator: Validator,
):
    """Schema for updating a vocabulary; the id must not change."""
    schema = default_vocabulary_schema()
    schema.update(
        {
            "id": [ignore_missing, vocabulary_id_not_changed],
            "name": [ignore_missing, vocabulary_name_validator],
        }
    )
    return schema
@validator_args
def default_follow_user_schema(
    not_missing: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    convert_user_name_or_id_to_id: Validator,
    ignore_missing: Validator,
) -> Schema:
    """Validation schema for following a user (accepts name or id)."""
    return {
        "id": [not_missing, not_empty, unicode_safe, convert_user_name_or_id_to_id],
        "q": [ignore_missing],
    }
@validator_args
def default_follow_dataset_schema(
    not_missing: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    convert_package_name_or_id_to_id: Validator,
) -> Schema:
    """Validation schema for following a dataset (accepts name or id)."""
    return {
        "id": [not_missing, not_empty, unicode_safe, convert_package_name_or_id_to_id]
    }
@validator_args
def member_schema(
    not_missing: Validator,
    group_id_or_name_exists: Validator,
    unicode_safe: Validator,
    user_id_or_name_exists: Validator,
    role_exists: Validator,
) -> Schema:
    """Validation schema for adding a user to a group with a role."""
    return {
        # "id" is the target group (name or id)
        "id": [not_missing, group_id_or_name_exists, unicode_safe],
        "username": [not_missing, user_id_or_name_exists, unicode_safe],
        "role": [not_missing, role_exists, unicode_safe],
    }
@validator_args
def default_follow_group_schema(
    not_missing: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    convert_group_name_or_id_to_id: Validator,
) -> Schema:
    """Validation schema for following a group (accepts name or id)."""
    return {
        "id": [not_missing, not_empty, unicode_safe, convert_group_name_or_id_to_id]
    }
@validator_args
def default_package_list_schema(
    ignore_missing: Validator,
    natural_number_validator: Validator,
    is_positive_integer: Validator,
) -> Schema:
    """Validation schema for package_list pagination parameters."""
    return {
        "limit": [ignore_missing, natural_number_validator],
        "offset": [ignore_missing, natural_number_validator],
        # "page" is 1-based, so it must be strictly positive
        "page": [ignore_missing, is_positive_integer],
    }
@validator_args
def default_pagination_schema(
    ignore_missing: Validator, natural_number_validator: Validator
) -> Schema:
    """Generic limit/offset pagination validation schema."""
    return {
        "limit": [ignore_missing, natural_number_validator],
        "offset": [ignore_missing, natural_number_validator],
    }
@validator_args
def default_autocomplete_schema(
    not_missing: Validator,
    unicode_safe: Validator,
    ignore_missing: Validator,
    natural_number_validator: Validator,
) -> Schema:
    """Validation schema for autocomplete queries ("q" is required)."""
    return {
        "q": [not_missing, unicode_safe],
        "ignore_self": [ignore_missing],
        "limit": [ignore_missing, natural_number_validator],
    }
@validator_args
def default_package_search_schema(
    ignore_missing: Validator,
    unicode_safe: Validator,
    list_of_strings: Validator,
    natural_number_validator: Validator,
    int_validator: Validator,
    convert_to_json_if_string: Validator,
    convert_to_list_if_string: Validator,
    limit_to_configured_maximum: ValidatorFactory,
    default: ValidatorFactory,
) -> Schema:
    """Validation schema for package_search parameters (Solr-style)."""
    return {
        "q": [ignore_missing, unicode_safe],
        "fl": [ignore_missing, convert_to_list_if_string],
        "fq": [ignore_missing, unicode_safe],
        "rows": [
            # defaults to 10 and is capped by the ckan.search.rows_max setting
            default(10),
            natural_number_validator,
            limit_to_configured_maximum("ckan.search.rows_max", 1000),
        ],
        "sort": [ignore_missing, unicode_safe],
        "start": [ignore_missing, natural_number_validator],
        "qf": [ignore_missing, unicode_safe],
        "facet": [ignore_missing, unicode_safe],
        "facet.mincount": [ignore_missing, natural_number_validator],
        "facet.limit": [ignore_missing, int_validator],
        "facet.field": [ignore_missing, convert_to_json_if_string, list_of_strings],
        "extras": [ignore_missing],  # Not used by Solr,
        # but useful for extensions
    }
@validator_args
def default_resource_search_schema(
    ignore_missing: Validator,
    unicode_safe: Validator,
    natural_number_validator: Validator,
) -> Schema:
    """Validation schema for resource_search parameters."""
    return {
        "query": [ignore_missing],  # string or list of strings
        "fields": [ignore_missing],  # dict of fields
        "order_by": [ignore_missing, unicode_safe],
        "offset": [ignore_missing, natural_number_validator],
        "limit": [ignore_missing, natural_number_validator],
    }
def create_schema_for_required_keys(keys: Iterable[str]) -> Schema:
    """Build a schema where every key in *keys* is validated as required.

    Each key is mapped to ``[not_missing]``.
    """
    required = get_validator("not_missing")
    return {key: [required] for key in keys}
def default_create_resource_view_schema(resource_view: Any):
    """Pick the create schema for a resource view plugin.

    Filterable views get the extended schema with filter fields.
    """
    filterable = resource_view.info().get("filterable")
    return (
        default_create_resource_view_schema_filtered()
        if filterable
        else default_create_resource_view_schema_unfiltered()
    )
@validator_args
def default_create_resource_view_schema_unfiltered(
    not_empty: Validator,
    resource_id_exists: Validator,
    unicode_safe: Validator,
    ignore_missing: Validator,
    empty: Validator,
) -> Schema:
    """Create schema for non-filterable resource views."""
    return {
        "resource_id": [not_empty, resource_id_exists],
        "title": [not_empty, unicode_safe],
        "description": [ignore_missing, unicode_safe],
        "view_type": [not_empty, unicode_safe],
        # any unexpected keys are rejected
        "__extras": [empty],
    }
@validator_args
def default_create_resource_view_schema_filtered(
    ignore_missing: Validator,
    convert_to_list_if_string: Validator,
    filter_fields_and_values_should_have_same_length: Validator,
    filter_fields_and_values_exist_and_are_valid: Validator,
):
    """Create schema for filterable resource views (adds filter fields)."""
    schema = default_create_resource_view_schema_unfiltered()
    schema.update(
        {
            "filter_fields": [
                ignore_missing,
                convert_to_list_if_string,
                filter_fields_and_values_should_have_same_length,
                filter_fields_and_values_exist_and_are_valid,
            ],
            "filter_values": [ignore_missing, convert_to_list_if_string],
        }
    )
    return schema
def default_update_resource_view_schema(resource_view: Any):
    """Update schema for a resource view: create schema plus update overrides."""
    schema = default_create_resource_view_schema(resource_view)
    overrides = default_update_resource_view_schema_changes()
    schema.update(overrides)
    return schema
@validator_args
def default_update_resource_view_schema_changes(
    not_missing: Validator,
    not_empty: Validator,
    unicode_safe: Validator,
    resource_id_exists: Validator,
    ignore: Validator,
    ignore_missing: Validator,
) -> Schema:
    """Overrides applied on top of the create schema for view updates."""
    return {
        "id": [not_missing, not_empty, unicode_safe],
        "resource_id": [ignore_missing, resource_id_exists],
        "title": [ignore_missing, unicode_safe],
        "view_type": [ignore],  # cannot change after create
        "package_id": [ignore],
    }
@validator_args
def default_update_configuration_schema(
    unicode_safe: Validator, ignore_missing: Validator
) -> Schema:
    """Default schema of config options editable at runtime."""
    return {
        "ckan.site_title": [ignore_missing, unicode_safe],
        "ckan.site_logo": [ignore_missing, unicode_safe],
        "ckan.site_url": [ignore_missing, unicode_safe],
        "ckan.site_description": [ignore_missing, unicode_safe],
        "ckan.site_about": [ignore_missing, unicode_safe],
        "ckan.site_intro_text": [ignore_missing, unicode_safe],
        "ckan.site_custom_css": [ignore_missing, unicode_safe],
        "ckan.theme": [ignore_missing, unicode_safe],
        "logo_upload": [ignore_missing, unicode_safe],
        "clear_logo_upload": [ignore_missing, unicode_safe],
    }
def update_configuration_schema():
    """
    Returns the schema for the config options that can be edited during runtime
    By default these are the keys of the
    :py:func:`ckan.logic.schema.default_update_configuration_schema`.
    Extensions can add or remove keys from this schema using the
    :py:meth:`ckan.plugins.interfaces.IConfigurer.update_config_schema`
    method.
    These configuration options can be edited during runtime via the web
    interface or using
    the :py:func:`ckan.logic.action.update.config_option_update` API call.
    :returns: a dictionary mapping runtime-editable configuration option keys
        to lists of validator and converter functions to be applied to those
        keys
    :rtype: dictionary
    """
    schema = default_update_configuration_schema()
    # let every IConfigurer plugin that implements the hook amend the schema
    for plugin in plugins.PluginImplementations(plugins.IConfigurer):
        amend = getattr(plugin, "update_config_schema", None)
        if amend is not None:
            schema = amend(schema)
    return schema
@validator_args
def job_list_schema(ignore_missing: Validator, list_of_strings: Validator) -> Schema:
    """Schema for listing background jobs, optionally restricted to queues."""
    queue_validators = [ignore_missing, list_of_strings]
    return {"queues": queue_validators}
@validator_args
def job_clear_schema(ignore_missing: Validator, list_of_strings: Validator) -> Schema:
    """Schema for clearing background jobs, optionally restricted to queues."""
    queue_validators = [ignore_missing, list_of_strings]
    return {"queues": queue_validators}
@validator_args
def default_create_api_token_schema(
    not_empty: Validator,
    unicode_safe: Validator,
    ignore_missing: Validator,
    json_object: Validator,
    ignore_not_sysadmin: Validator,
) -> Schema:
    """Validation schema for creating an API token for a user."""
    return {
        "name": [not_empty, unicode_safe],
        "user": [not_empty, unicode_safe],
        # only sysadmins may attach plugin extras to a token
        "plugin_extras": [ignore_missing, json_object, ignore_not_sysadmin],
    }
@validator_args
def package_revise_schema(
    ignore_missing: Validator,
    list_of_strings: Validator,
    collect_prefix_validate: Callable[[str, str], Validator],
    json_or_string: Validator,
    json_list_or_string: Validator,
    dict_only: Validator,
) -> Schema:
    """Validation schema for the package_revise action.

    ``match__*`` / ``update__*`` prefixed parameters are collected into the
    "match__" / "update__" dicts by the ``__before`` validators.
    """
    return {
        "__before": [
            collect_prefix_validate("match__", "json_or_string"),
            collect_prefix_validate("update__", "json_or_string"),
        ],
        "match": [ignore_missing, json_or_string, dict_only],
        "filter": [ignore_missing, json_list_or_string, list_of_strings],
        "update": [ignore_missing, json_or_string, dict_only],
        "include": [ignore_missing, json_list_or_string, list_of_strings],
        # collect_prefix moves values to these, always dicts:
        "match__": [],
        "update__": [],
    }
@validator_args
def config_declaration_v1(
    ignore_missing: Validator,
    unicode_safe: Validator,
    not_empty: Validator,
    default: ValidatorFactory,
    dict_only: Validator,
    one_of: ValidatorFactory,
    ignore_empty: Validator,
) -> Schema:
    """Validation schema for version 1 of the config declaration format."""
    # Imported locally to avoid import cycles at module load time.
    from ckan.config.declaration import Key
    from ckan.config.declaration.load import option_types

    def key_from_string(s: str):
        """Convert a dotted option string into a declaration Key."""
        return Key.from_string(s)

    def importable_string(value: str):
        """Resolve a dotted import path, or raise Invalid with the reason."""
        from ckan.logic.validators import Invalid
        from werkzeug.utils import ImportStringError, import_string

        try:
            return import_string(value)
        except ImportStringError as e:
            # Chain the original error so the import failure isn't lost
            # from the traceback.
            raise Invalid(str(e)) from e

    return {
        "groups": {
            "annotation": [default(""), unicode_safe],
            "section": [default("app:main"), unicode_safe],
            "options": {
                "key": [not_empty, key_from_string],
                "legacy_key": [ignore_empty, unicode_safe],
                "default": [ignore_missing],
                "default_callable": [ignore_empty, importable_string],
                "placeholder": [default(""), unicode_safe],
                "placeholder_callable": [ignore_empty, importable_string],
                "callable_args": [ignore_empty, dict_only],
                "example": [ignore_missing],
                "description": [default(""), unicode_safe],
                "validators": [default(""), unicode_safe],
                "type": [default("base"), one_of(list(option_types))],
            },
        }
    }
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2019 Russell Johnson (russ4262) <russ4262@gmail.com> *
# * Copyright (c) 2019 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
__title__ = "Path Waterline Operation"
__author__ = "russ4262 (Russell Johnson), sliptonic (Brad Collette)"
__url__ = "http://www.freecad.org"
__doc__ = "Class and implementation of Waterline operation."
__contributors__ = ""
translate = FreeCAD.Qt.translate
# OCL must be installed
try:
    try:
        import ocl
    except ImportError:
        # newer packaging exposes OCL under the 'opencamlib' module name
        import opencamlib as ocl
except ImportError:
    msg = translate(
        "path_waterline", "This operation requires OpenCamLib to be installed."
    )
    FreeCAD.Console.PrintError(msg + "\n")
    # NOTE(review): re-raises a fresh ImportError with no message/cause; the
    # console message printed above carries the explanation for the user.
    raise ImportError
import math
import time
import Path
import Path.Op.Base as PathOp
import Path.Op.SurfaceSupport as PathSurfaceSupport
import PathScripts.PathUtils as PathUtils
# lazily loaded modules
from lazy_loader.lazy_loader import LazyLoader
from PySide.QtCore import QT_TRANSLATE_NOOP
Part = LazyLoader("Part", globals(), "Part")
if FreeCAD.GuiUp:
import FreeCADGui
# Debug switch: flip the constant to True to enable DEBUG-level logging
# (and module tracking) for this module; otherwise log at INFO level.
if False:
    Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule())
    Path.Log.trackModule(Path.Log.thisModule())
else:
    Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
class ObjectWaterline(PathOp.ObjectOp):
"""Proxy object for Surfacing operation."""
def opFeatures(self, obj):
"""opFeatures(obj) ... return all standard features"""
return (
PathOp.FeatureTool
| PathOp.FeatureDepths
| PathOp.FeatureHeights
| PathOp.FeatureStepDown
| PathOp.FeatureCoolant
| PathOp.FeatureBaseFaces
)
@classmethod
def propertyEnumerations(self, dataType="data"):
"""propertyEnumerations(dataType="data")... return property enumeration lists of specified dataType.
Args:
dataType = 'data', 'raw', 'translated'
Notes:
'data' is list of internal string literals used in code
'raw' is list of (translated_text, data_string) tuples
'translated' is list of translated string literals
"""
# Enumeration lists for App::PropertyEnumeration properties
enums = {
"Algorithm": [
(translate("path_waterline", "OCL Dropcutter"), "OCL Dropcutter"),
(translate("path_waterline", "Experimental"), "Experimental"),
],
"BoundBox": [
(translate("path_waterline", "BaseBoundBox"), "BaseBoundBox"),
(translate("path_waterline", "Stock"), "Stock"),
],
"PatternCenterAt": [
(translate("path_waterline", "CenterOfMass"), "CenterOfMass"),
(translate("path_waterline", "CenterOfBoundBox"), "CenterOfBoundBox"),
(translate("path_waterline", "XminYmin"), "XminYmin"),
(translate("path_waterline", "Custom"), "Custom"),
],
"ClearLastLayer": [
(translate("path_waterline", "Off"), "Off"),
(translate("path_waterline", "Circular"), "Circular"),
(translate("path_waterline", "CircularZigZag"), "CircularZigZag"),
(translate("path_waterline", "Line"), "Line"),
(translate("path_waterline", "Offset"), "Offset"),
(translate("path_waterline", "Spiral"), "Spiral"),
(translate("path_waterline", "ZigZag"), "ZigZag"),
],
"CutMode": [
(translate("path_waterline", "Conventional"), "Conventional"),
(translate("path_waterline", "Climb"), "Climb"),
],
"CutPattern": [
(translate("path_waterline", "None"), "None"),
(translate("path_waterline", "Circular"), "Circular"),
(translate("path_waterline", "CircularZigZag"), "CircularZigZag"),
(translate("path_waterline", "Line"), "Line"),
(translate("path_waterline", "Offset"), "Offset"),
(translate("path_waterline", "Spiral"), "Spiral"),
(translate("path_waterline", "ZigZag"), "ZigZag"),
],
"HandleMultipleFeatures": [
(translate("path_waterline", "Collectively"), "Collectively"),
(translate("path_waterline", "Individually"), "Individually"),
],
"LayerMode": [
(translate("path_waterline", "Single-pass"), "Single-pass"),
(translate("path_waterline", "Multi-pass"), "Multi-pass"),
],
}
if dataType == "raw":
return enums
data = list()
idx = 0 if dataType == "translated" else 1
Path.Log.debug(enums)
for k, v in enumerate(enums):
data.append((v, [tup[idx] for tup in enums[v]]))
Path.Log.debug(data)
return data
    def initOperation(self, obj):
        """initOperation(obj) ... Initialize the operation by
        managing property creation and property editor status."""
        # guard used by onChanged() to avoid reacting before properties exist
        self.propertiesReady = False
        self.initOpProperties(obj)  # Initialize operation-specific properties
        # For debugging: editor mode 2 hides the property from the editor
        if Path.Log.getLevel(Path.Log.thisModule()) != 4:
            obj.setEditorMode("ShowTempObjects", 2)  # hide
        if not hasattr(obj, "DoNotSetDefaultValues"):
            self.setEditorProperties(obj)
def initOpProperties(self, obj, warn=False):
"""initOpProperties(obj) ... create operation specific properties"""
self.addNewProps = list()
for prtyp, nm, grp, tt in self.opPropertyDefinitions():
if not hasattr(obj, nm):
obj.addProperty(prtyp, nm, grp, tt)
self.addNewProps.append(nm)
# Set enumeration lists for enumeration properties
if len(self.addNewProps) > 0:
ENUMS = self.propertyEnumerations()
for n in ENUMS:
if n[0] in self.addNewProps:
setattr(obj, n[0], n[1])
if warn:
newPropMsg = translate("PathWaterline", "New property added to")
newPropMsg += ' "{}": {}'.format(obj.Label, self.addNewProps) + ". "
newPropMsg += translate("PathWaterline", "Check default value(s).")
FreeCAD.Console.PrintWarning(newPropMsg + "\n")
self.propertiesReady = True
    def opPropertyDefinitions(self):
        """opPropertyDefinitions() ... return list of tuples containing operation specific properties.

        Each tuple is (property_type, name, group, tooltip) and is consumed by
        initOpProperties() to create the properties on the operation object.
        """
        return [
            (
                "App::PropertyBool",
                "ShowTempObjects",
                "Debug",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Show the temporary path construction objects when module is in DEBUG mode.",
                ),
            ),
            (
                "App::PropertyDistance",
                "AngularDeflection",
                "Mesh Conversion",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Smaller values yield a finer, more accurate the mesh. Smaller values increase processing time a lot.",
                ),
            ),
            (
                "App::PropertyDistance",
                "LinearDeflection",
                "Mesh Conversion",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Smaller values yield a finer, more accurate the mesh. Smaller values do not increase processing time much.",
                ),
            ),
            (
                "App::PropertyInteger",
                "AvoidLastX_Faces",
                "Selected Geometry Settings",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Avoid cutting the last 'N' faces in the Base Geometry list of selected faces.",
                ),
            ),
            (
                "App::PropertyBool",
                "AvoidLastX_InternalFeatures",
                "Selected Geometry Settings",
                QT_TRANSLATE_NOOP(
                    "App::Property", "Do not cut internal features on avoided faces."
                ),
            ),
            (
                "App::PropertyDistance",
                "BoundaryAdjustment",
                "Selected Geometry Settings",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Positive values push the cutter toward, or beyond, the boundary. Negative values retract the cutter away from the boundary.",
                ),
            ),
            (
                "App::PropertyBool",
                "BoundaryEnforcement",
                "Selected Geometry Settings",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "If true, the cutter will remain inside the boundaries of the model or selected face(s).",
                ),
            ),
            (
                "App::PropertyEnumeration",
                "HandleMultipleFeatures",
                "Selected Geometry Settings",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Choose how to process multiple Base Geometry features.",
                ),
            ),
            (
                "App::PropertyDistance",
                "InternalFeaturesAdjustment",
                "Selected Geometry Settings",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Positive values push the cutter toward, or into, the feature. Negative values retract the cutter away from the feature.",
                ),
            ),
            (
                "App::PropertyBool",
                "InternalFeaturesCut",
                "Selected Geometry Settings",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Cut internal feature areas within a larger selected face.",
                ),
            ),
            (
                "App::PropertyEnumeration",
                "Algorithm",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Select the algorithm to use: OCL Dropcutter*, or Experimental (Not OCL based).",
                ),
            ),
            (
                "App::PropertyEnumeration",
                "BoundBox",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property", "Select the overall boundary for the operation."
                ),
            ),
            (
                "App::PropertyEnumeration",
                "ClearLastLayer",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Set to clear last layer in a `Multi-pass` operation.",
                ),
            ),
            (
                "App::PropertyEnumeration",
                "CutMode",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Set the direction for the cutting tool to engage the material: Climb (ClockWise) or Conventional (CounterClockWise)",
                ),
            ),
            (
                "App::PropertyEnumeration",
                "CutPattern",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Set the geometric clearing pattern to use for the operation.",
                ),
            ),
            (
                "App::PropertyFloat",
                "CutPatternAngle",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property", "The yaw angle used for certain clearing patterns"
                ),
            ),
            (
                "App::PropertyBool",
                "CutPatternReversed",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Reverse the cut order of the stepover paths. For circular cut patterns, begin at the outside and work toward the center.",
                ),
            ),
            (
                "App::PropertyDistance",
                "DepthOffset",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Set the Z-axis depth offset from the target surface.",
                ),
            ),
            (
                "App::PropertyDistance",
                "IgnoreOuterAbove",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property", "Ignore outer waterlines above this height."
                ),
            ),
            (
                "App::PropertyEnumeration",
                "LayerMode",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Complete the operation in a single pass at depth, or multiple passes to final depth.",
                ),
            ),
            (
                "App::PropertyVectorDistance",
                "PatternCenterCustom",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property", "Set the start point for the cut pattern."
                ),
            ),
            (
                "App::PropertyEnumeration",
                "PatternCenterAt",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Choose location of the center point for starting the cut pattern.",
                ),
            ),
            (
                "App::PropertyDistance",
                "SampleInterval",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Set the sampling resolution. Smaller values quickly increase processing time.",
                ),
            ),
            (
                "App::PropertyFloat",
                "StepOver",
                "Clearing Options",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Set the stepover percentage, based on the tool's diameter.",
                ),
            ),
            (
                "App::PropertyBool",
                "OptimizeLinearPaths",
                "Optimization",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Enable optimization of linear paths (co-linear points). Removes unnecessary co-linear points from G-code output.",
                ),
            ),
            (
                "App::PropertyBool",
                "OptimizeStepOverTransitions",
                "Optimization",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Enable separate optimization of transitions between, and breaks within, each step over path.",
                ),
            ),
            (
                "App::PropertyDistance",
                "GapThreshold",
                "Optimization",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Collinear and co-radial artifact gaps that are smaller than this threshold are closed in the path.",
                ),
            ),
            (
                "App::PropertyString",
                "GapSizes",
                "Optimization",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Feedback: three smallest gaps identified in the path geometry.",
                ),
            ),
            (
                "App::PropertyVectorDistance",
                "StartPoint",
                "Start Point",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "The custom start point for the path of this operation",
                ),
            ),
            (
                "App::PropertyBool",
                "UseStartPoint",
                "Start Point",
                QT_TRANSLATE_NOOP(
                    "App::Property", "Make True, if specifying a Start Point"
                ),
            ),
        ]
def opPropertyDefaults(self, obj, job):
"""opPropertyDefaults(obj, job) ... returns a dictionary
of default values for the operation's properties."""
defaults = {
"OptimizeLinearPaths": True,
"InternalFeaturesCut": True,
"OptimizeStepOverTransitions": False,
"BoundaryEnforcement": True,
"UseStartPoint": False,
"AvoidLastX_InternalFeatures": True,
"CutPatternReversed": False,
"IgnoreOuterAbove": obj.StartDepth.Value + 0.00001,
"StartPoint": FreeCAD.Vector(0.0, 0.0, obj.ClearanceHeight.Value),
"Algorithm": "OCL Dropcutter",
"LayerMode": "Single-pass",
"CutMode": "Conventional",
"CutPattern": "None",
"HandleMultipleFeatures": "Collectively",
"PatternCenterAt": "CenterOfMass",
"GapSizes": "No gaps identified.",
"ClearLastLayer": "Off",
"StepOver": 100.0,
"CutPatternAngle": 0.0,
"DepthOffset": 0.0,
"SampleInterval": 1.0,
"BoundaryAdjustment": 0.0,
"InternalFeaturesAdjustment": 0.0,
"AvoidLastX_Faces": 0,
"PatternCenterCustom": FreeCAD.Vector(0.0, 0.0, 0.0),
"GapThreshold": 0.005,
"AngularDeflection": 0.25,
"LinearDeflection": 0.0001,
# For debugging
"ShowTempObjects": False,
}
warn = True
if hasattr(job, "GeometryTolerance"):
if job.GeometryTolerance.Value != 0.0:
warn = False
defaults["LinearDeflection"] = job.GeometryTolerance.Value
if warn:
msg = translate(
"PathWaterline", "The GeometryTolerance for this Job is 0.0."
)
msg += translate(
"PathWaterline", "Initializing LinearDeflection to 0.0001 mm."
)
FreeCAD.Console.PrintWarning(msg + "\n")
return defaults
    def setEditorProperties(self, obj):
        # Used to hide inputs in properties list
        # Editor modes: 0 = show/editable, 2 = hidden.
        # The single-letter flags (A, B, C, G) are mode values applied to
        # groups of related properties at the bottom of this method.
        expMode = G = 0
        show = hide = A = B = C = 2
        obj.setEditorMode("BoundaryEnforcement", hide)
        obj.setEditorMode("InternalFeaturesAdjustment", hide)
        obj.setEditorMode("InternalFeaturesCut", hide)
        obj.setEditorMode("AvoidLastX_Faces", hide)
        obj.setEditorMode("AvoidLastX_InternalFeatures", hide)
        obj.setEditorMode("BoundaryAdjustment", hide)
        obj.setEditorMode("HandleMultipleFeatures", hide)
        obj.setEditorMode("OptimizeLinearPaths", hide)
        obj.setEditorMode("OptimizeStepOverTransitions", hide)
        obj.setEditorMode("GapThreshold", hide)
        obj.setEditorMode("GapSizes", hide)
        # Algorithm selects which flag groups become visible
        if obj.Algorithm == "OCL Dropcutter":
            pass
        elif obj.Algorithm == "Experimental":
            A = B = C = 0
            expMode = G = show = hide = 2
        # ClearLastLayer (when active) overrides CutPattern for visibility
        cutPattern = obj.CutPattern
        if obj.ClearLastLayer != "Off":
            cutPattern = obj.ClearLastLayer
        if cutPattern == "None":
            show = hide = A = 2
        elif cutPattern in ["Line", "ZigZag"]:
            show = 0
        elif cutPattern in ["Circular", "CircularZigZag"]:
            show = 2  # hide
            hide = 0  # show
        elif cutPattern == "Spiral":
            G = hide = 0
        obj.setEditorMode("CutPatternAngle", show)
        obj.setEditorMode("PatternCenterAt", hide)
        obj.setEditorMode("PatternCenterCustom", hide)
        obj.setEditorMode("CutPatternReversed", A)
        obj.setEditorMode("ClearLastLayer", C)
        obj.setEditorMode("StepOver", B)
        obj.setEditorMode("IgnoreOuterAbove", B)
        obj.setEditorMode("CutPattern", C)
        obj.setEditorMode("SampleInterval", G)
        obj.setEditorMode("LinearDeflection", expMode)
        obj.setEditorMode("AngularDeflection", expMode)
def onChanged(self, obj, prop):
    """Refresh editor visibility when a mode-controlling property changes.

    Ignores change events fired before property initialization has
    completed (i.e. before self.propertiesReady is set and truthy).
    """
    if not getattr(self, "propertiesReady", False):
        return
    if prop in ("Algorithm", "CutPattern"):
        self.setEditorProperties(obj)
def opOnDocumentRestored(self, obj):
    """Re-initialize operation properties and editor state after a
    document reload.

    Ensures properties added in newer versions exist, re-applies their
    defaults, and repopulates enumeration properties while preserving
    the user's current selections.
    """
    # Block onChanged() side effects until initialization is complete.
    # NOTE(review): presumably set True elsewhere (e.g. initOperation) —
    # confirm against the rest of the class.
    self.propertiesReady = False
    job = PathUtils.findParentJob(obj)
    self.initOpProperties(obj, warn=True)
    self.opApplyPropertyDefaults(obj, job, self.addNewProps)
    # Expose ShowTempObjects only when the module log level is debug (4).
    mode = 2 if Path.Log.getLevel(Path.Log.thisModule()) != 4 else 0
    obj.setEditorMode("ShowTempObjects", mode)
    # Repopulate enumerations in case of changes
    ENUMS = self.propertyEnumerations()
    for n in ENUMS:
        restore = False
        if hasattr(obj, n[0]):
            # Assigning the enumeration list resets the property value,
            # so save the current value first and restore it afterwards.
            val = obj.getPropertyByName(n[0])
            restore = True
        setattr(obj, n[0], n[1])
        if restore:
            setattr(obj, n[0], val)
    self.setEditorProperties(obj)
def opApplyPropertyDefaults(self, obj, job, propList):
# Set standard property defaults
PROP_DFLTS = self.opPropertyDefaults(obj, job)
for n in PROP_DFLTS:
if n in propList:
prop = getattr(obj, n)
val = PROP_DFLTS[n]
setVal = False
if hasattr(prop, "Value"):
if isinstance(val, int) or isinstance(val, float):
setVal = True
if setVal:
setattr(prop, "Value", val)
else:
setattr(obj, n, val)
def opSetDefaultValues(self, obj, job):
    """opSetDefaultValues(obj, job) ... initialize defaults

    Applies property defaults, then derives OpStartDepth/OpFinalDepth
    from the Job Stock shape when available, falling back to +/-10.
    """
    # NOTE(review): the ``job`` parameter is immediately shadowed by the
    # lookup below — confirm whether the parameter is still needed.
    job = PathUtils.findParentJob(obj)
    self.opApplyPropertyDefaults(obj, job, self.addNewProps)
    # need to overwrite the default depth calculations for facing
    d = None
    if job:
        if job.Stock:
            d = PathUtils.guessDepths(job.Stock.Shape, None)
            # Tiny epsilon keeps the stock's own top face from being ignored.
            obj.IgnoreOuterAbove = job.Stock.Shape.BoundBox.ZMax + 0.000001
            Path.Log.debug("job.Stock exists")
        else:
            Path.Log.debug("job.Stock NOT exist")
    else:
        Path.Log.debug("job NOT exist")
    if d is not None:
        obj.OpFinalDepth.Value = d.final_depth
        obj.OpStartDepth.Value = d.start_depth
    else:
        # No stock to measure: fall back to fixed +/-10 mm depths.
        obj.OpFinalDepth.Value = -10
        obj.OpStartDepth.Value = 10
    Path.Log.debug("Default OpFinalDepth: {}".format(obj.OpFinalDepth.Value))
    Path.Log.debug("Default OpStartDepth: {}".format(obj.OpStartDepth.Value))
def opApplyPropertyLimits(self, obj):
    """opApplyPropertyLimits(obj) ... Clamp user-supplied property values
    to their supported ranges before the main operation runs, logging an
    error for each out-of-range value.
    """
    # SampleInterval: clamp to [0.0001, 25.4] mm.  A value can violate at
    # most one bound, so a single clamped assignment and message suffice.
    interval = obj.SampleInterval.Value
    if not (0.0001 <= interval <= 25.4):
        obj.SampleInterval.Value = min(max(interval, 0.0001), 25.4)
        Path.Log.error(
            translate(
                "PathWaterline",
                "Sample interval limits are 0.0001 to 25.4 millimeters.",
            )
        )
    # CutPatternAngle: values outside [-360, 360) reset to zero.
    if obj.CutPatternAngle < -360.0:
        obj.CutPatternAngle = 0.0
        Path.Log.error(
            translate(
                "PathWaterline", "Cut pattern angle limits are +-360 degrees."
            )
        )
    elif obj.CutPatternAngle >= 360.0:
        obj.CutPatternAngle = 0.0
        Path.Log.error(
            translate(
                "PathWaterline", "Cut pattern angle limits are +- 360 degrees."
            )
        )
    # StepOver: silently clamp to a natural-number percentage [1, 100].
    obj.StepOver = max(1.0, min(100.0, obj.StepOver))
    # AvoidLastX_Faces: clamp to [0, 100] with an error message.
    if obj.AvoidLastX_Faces < 0:
        obj.AvoidLastX_Faces = 0
        Path.Log.error(
            translate(
                "PathWaterline",
                "AvoidLastX_Faces: Only zero or positive values permitted.",
            )
        )
    elif obj.AvoidLastX_Faces > 100:
        obj.AvoidLastX_Faces = 100
        Path.Log.error(
            translate(
                "PathWaterline",
                "AvoidLastX_Faces: Avoid last X faces count limited to 100.",
            )
        )
def opUpdateDepths(self, obj):
    """Update obj.OpFinalDepth from the selected base geometry, or from
    the Job model/stock bounding box when no base faces are selected."""
    if hasattr(obj, "Base") and obj.Base:
        # Seed with the first selected sub-shape's ZMax; the loop below
        # revisits every selection (including the first) taking min(ZMin),
        # so the seed value cannot survive and the result is the true
        # minimum ZMin over all selected sub-shapes.
        base, sublist = obj.Base[0]
        fbb = base.Shape.getElement(sublist[0]).BoundBox
        zmin = fbb.ZMax
        for base, sublist in obj.Base:
            for sub in sublist:
                try:
                    fbb = base.Shape.getElement(sub).BoundBox
                    zmin = min(zmin, fbb.ZMin)
                except Part.OCCError as e:
                    # Skip sub-shapes OCC cannot resolve; log and continue.
                    Path.Log.error(e)
        obj.OpFinalDepth = zmin
    elif self.job:
        if hasattr(obj, "BoundBox"):
            if obj.BoundBox == "BaseBoundBox":
                # Lowest Z across all models in the Job.
                models = self.job.Model.Group
                zmin = models[0].Shape.BoundBox.ZMin
                for M in models:
                    zmin = min(zmin, M.Shape.BoundBox.ZMin)
                obj.OpFinalDepth = zmin
            if obj.BoundBox == "Stock":
                models = self.job.Stock
                obj.OpFinalDepth = self.job.Stock.Shape.BoundBox.ZMin
def opExecute(self, obj):
    """opExecute(obj) ... process surface operation

    Top-level driver for the Waterline operation:
      1. reset per-run containers and debug settings,
      2. locate the parent Job and build the OCL cutter,
      3. emit header G-code and move to clearance height,
      4. create a temporary document group for construction objects,
      5. pre-process obj.Base faces and generate waterline paths per model,
      6. restore visibility, delete temps, report gap sizes, clean up state.

    Returns True on success; returns None early when no parent Job is
    found or the OCL cutter cannot be created.
    """
    Path.Log.track()

    # Per-run containers, reset on every execution.
    self.modelSTLs = list()
    self.safeSTLs = list()
    self.modelTypes = list()
    self.boundBoxes = list()
    self.profileShapes = list()
    self.collectiveShapes = list()
    self.individualShapes = list()
    self.avoidShapes = list()
    self.geoTlrnc = None
    self.tempGroup = None
    self.CutClimb = False
    self.closedGap = False
    self.tmpCOM = None
    self.gaps = [0.1, 0.2, 0.3]
    CMDS = list()
    modelVisibility = list()
    FCAD = FreeCAD.ActiveDocument

    # Derive the module name without its package prefix (used by PSF below).
    try:
        dotIdx = __name__.index(".") + 1
    except Exception:
        dotIdx = 0
    self.module = __name__[dotIdx:]

    # make circle for workplane
    self.wpc = Part.makeCircle(2.0)

    # Set debugging behavior
    self.showDebugObjects = False  # Set to true if you want a visual DocObjects created for some path construction objects
    self.showDebugObjects = obj.ShowTempObjects
    deleteTempsFlag = True  # Set to False for debugging
    if Path.Log.getLevel(Path.Log.thisModule()) == 4:
        deleteTempsFlag = False
    else:
        # Debug objects only allowed at debug log level.
        self.showDebugObjects = False

    # mark beginning of operation and identify parent Job
    Path.Log.info("\nBegin Waterline operation...")
    startTime = time.time()

    # Identify parent Job
    JOB = PathUtils.findParentJob(obj)
    if JOB is None:
        Path.Log.error(translate("PathWaterline", "No JOB"))
        return
    self.stockZMin = JOB.Stock.Shape.BoundBox.ZMin

    # set cut mode; reverse as needed
    if obj.CutMode == "Climb":
        self.CutClimb = True
    if obj.CutPatternReversed is True:
        # Reversing the pattern inverts the effective climb/conventional mode.
        if self.CutClimb is True:
            self.CutClimb = False
        else:
            self.CutClimb = True

    # Instantiate additional class operation variables
    self.resetOpVariables()

    # Setup cutter for OCL and cutout value for operation - based on tool controller properties
    oclTool = PathSurfaceSupport.OCL_Tool(ocl, obj)
    self.cutter = oclTool.getOclTool()
    if not self.cutter:
        Path.Log.error(
            translate(
                "PathWaterline",
                "Canceling Waterline operation. Error creating OCL cutter.",
            )
        )
        return
    self.toolDiam = self.cutter.getDiameter()
    self.radius = self.toolDiam / 2.0
    self.cutOut = self.toolDiam * (float(obj.StepOver) / 100.0)
    # Sentinel gap list: entries equal to toolDiam mean "no gap recorded".
    self.gaps = [self.toolDiam, self.toolDiam, self.toolDiam]

    # Begin GCode for operation with basic information
    # ... and move cutter to clearance height and startpoint
    output = ""
    if obj.Comment != "":
        self.commandlist.append(Path.Command("N ({})".format(str(obj.Comment)), {}))
    self.commandlist.append(Path.Command("N ({})".format(obj.Label), {}))
    self.commandlist.append(
        Path.Command("N (Tool type: {})".format(oclTool.toolType), {})
    )
    self.commandlist.append(
        Path.Command(
            "N (Compensated Tool Path. Diameter: {})".format(oclTool.diameter), {}
        )
    )
    self.commandlist.append(
        Path.Command(
            "N (Sample interval: {})".format(str(obj.SampleInterval.Value)), {}
        )
    )
    self.commandlist.append(
        Path.Command("N (Step over %: {})".format(str(obj.StepOver)), {})
    )
    # NOTE(review): ``output`` is always empty here, so this emits a blank
    # N-comment — possibly a placeholder kept for G-code layout.
    self.commandlist.append(Path.Command("N ({})".format(output), {}))
    self.commandlist.append(
        Path.Command("G0", {"Z": obj.ClearanceHeight.Value, "F": self.vertRapid})
    )
    if obj.UseStartPoint:
        self.commandlist.append(
            Path.Command(
                "G0",
                {
                    "X": obj.StartPoint.x,
                    "Y": obj.StartPoint.y,
                    "F": self.horizRapid,
                },
            )
        )

    # Impose property limits
    self.opApplyPropertyLimits(obj)

    # Create temporary group for temporary objects, removing existing
    tempGroupName = "tempPathWaterlineGroup"
    if FCAD.getObject(tempGroupName):
        for to in FCAD.getObject(tempGroupName).Group:
            FCAD.removeObject(to.Name)
        FCAD.removeObject(tempGroupName)  # remove temp directory if already exists
    if FCAD.getObject(tempGroupName + "001"):
        for to in FCAD.getObject(tempGroupName + "001").Group:
            FCAD.removeObject(to.Name)
        FCAD.removeObject(
            tempGroupName + "001"
        )  # remove temp directory if already exists
    tempGroup = FCAD.addObject("App::DocumentObjectGroup", tempGroupName)
    # FreeCAD may rename on collision; keep the actual assigned name.
    tempGroupName = tempGroup.Name
    self.tempGroup = tempGroup
    tempGroup.purgeTouched()
    # Add temp object to temp group folder with following code:
    # ... self.tempGroup.addObject(OBJ)

    # Get height offset values for later use
    self.SafeHeightOffset = JOB.SetupSheet.SafeHeightOffset.Value
    self.ClearHeightOffset = JOB.SetupSheet.ClearanceHeightOffset.Value

    # Set deflection values for mesh generation
    useDGT = False
    try:  # try/except is for Path Jobs created before GeometryTolerance
        self.geoTlrnc = JOB.GeometryTolerance.Value
        if self.geoTlrnc == 0.0:
            useDGT = True
    except AttributeError as ee:
        Path.Log.warning(
            "{}\nPlease set Job.GeometryTolerance to an acceptable value. Using Path.Preferences.defaultGeometryTolerance().".format(
                ee
            )
        )
        useDGT = True
    if useDGT:
        self.geoTlrnc = Path.Preferences.defaultGeometryTolerance()

    # Calculate default depthparams for operation
    self.depthParams = PathUtils.depth_params(
        obj.ClearanceHeight.Value,
        obj.SafeHeight.Value,
        obj.StartDepth.Value,
        obj.StepDown.Value,
        0.0,
        obj.FinalDepth.Value,
    )
    self.midDep = (obj.StartDepth.Value + obj.FinalDepth.Value) / 2.0

    # Save model visibilities for restoration
    if FreeCAD.GuiUp:
        for m in range(0, len(JOB.Model.Group)):
            mNm = JOB.Model.Group[m].Name
            modelVisibility.append(
                FreeCADGui.ActiveDocument.getObject(mNm).Visibility
            )

    # Setup STL, model type, and bound box containers for each model in Job
    for m in range(0, len(JOB.Model.Group)):
        M = JOB.Model.Group[m]
        self.modelSTLs.append(False)
        self.safeSTLs.append(False)
        self.profileShapes.append(False)
        # Set bound box
        if obj.BoundBox == "BaseBoundBox":
            if M.TypeId.startswith("Mesh"):
                self.modelTypes.append("M")  # Mesh
                self.boundBoxes.append(M.Mesh.BoundBox)
            else:
                self.modelTypes.append("S")  # Solid
                self.boundBoxes.append(M.Shape.BoundBox)
        elif obj.BoundBox == "Stock":
            self.modelTypes.append("S")  # Solid
            self.boundBoxes.append(JOB.Stock.Shape.BoundBox)

    # ###### MAIN COMMANDS FOR OPERATION ######

    # Begin processing obj.Base data and creating GCode
    PSF = PathSurfaceSupport.ProcessSelectedFaces(JOB, obj)
    PSF.setShowDebugObjects(tempGroup, self.showDebugObjects)
    PSF.radius = self.radius
    PSF.depthParams = self.depthParams
    pPM = PSF.preProcessModel(self.module)
    # Process selected faces, if available
    if pPM is False:
        Path.Log.error("Unable to pre-process obj.Base.")
    else:
        (FACES, VOIDS) = pPM
        self.modelSTLs = PSF.modelSTLs
        self.profileShapes = PSF.profileShapes

        for m in range(0, len(JOB.Model.Group)):
            # Create OCL.stl model objects
            if obj.Algorithm == "OCL Dropcutter":
                PathSurfaceSupport._prepareModelSTLs(self, JOB, obj, m, ocl)

            Mdl = JOB.Model.Group[m]
            if FACES[m] is False:
                Path.Log.error(
                    "No data for model base: {}".format(JOB.Model.Group[m].Label)
                )
            else:
                if m > 0:
                    # Raise to clearance between models
                    CMDS.append(
                        Path.Command(
                            "N (Transition to base: {}.)".format(Mdl.Label)
                        )
                    )
                    CMDS.append(
                        Path.Command(
                            "G0",
                            {"Z": obj.ClearanceHeight.Value, "F": self.vertRapid},
                        )
                    )
                Path.Log.info(
                    "Working on Model.Group[{}]: {}".format(m, Mdl.Label)
                )
                # make stock-model-voidShapes STL model for avoidance detection on transitions
                if obj.Algorithm == "OCL Dropcutter":
                    PathSurfaceSupport._makeSafeSTL(
                        self, JOB, obj, m, FACES[m], VOIDS[m], ocl
                    )
                # Process model/faces - OCL objects must be ready
                CMDS.extend(
                    self._processWaterlineAreas(JOB, obj, m, FACES[m], VOIDS[m])
                )

        # Save gcode produced
        self.commandlist.extend(CMDS)

    # ###### CLOSING COMMANDS FOR OPERATION ######

    # Delete temporary objects
    # Restore model visibilities for restoration
    if FreeCAD.GuiUp:
        FreeCADGui.ActiveDocument.getObject(tempGroupName).Visibility = False
        for m in range(0, len(JOB.Model.Group)):
            M = JOB.Model.Group[m]
            M.Visibility = modelVisibility[m]

    if deleteTempsFlag is True:
        for to in tempGroup.Group:
            if hasattr(to, "Group"):
                for go in to.Group:
                    FCAD.removeObject(go.Name)
            FCAD.removeObject(to.Name)
        FCAD.removeObject(tempGroupName)
    else:
        if len(tempGroup.Group) == 0:
            FCAD.removeObject(tempGroupName)
        else:
            tempGroup.purgeTouched()

    # Provide user feedback for gap sizes
    gaps = list()
    for g in self.gaps:
        if g != self.toolDiam:
            gaps.append(g)
    if len(gaps) > 0:
        obj.GapSizes = "{} mm".format(gaps)
    else:
        if self.closedGap is True:
            obj.GapSizes = "Closed gaps < Gap Threshold."
        else:
            obj.GapSizes = "No gaps identified."

    # clean up class variables
    self.resetOpVariables()
    self.deleteOpVariables()

    # Release per-run containers so large shapes/STLs can be collected.
    self.modelSTLs = None
    self.safeSTLs = None
    self.modelTypes = None
    self.boundBoxes = None
    self.gaps = None
    self.closedGap = None
    self.SafeHeightOffset = None
    self.ClearHeightOffset = None
    self.depthParams = None
    self.midDep = None
    del self.modelSTLs
    del self.safeSTLs
    del self.modelTypes
    del self.boundBoxes
    del self.gaps
    del self.closedGap
    del self.SafeHeightOffset
    del self.ClearHeightOffset
    del self.depthParams
    del self.midDep

    execTime = time.time() - startTime
    msg = translate("PathWaterline", "operation time is")
    Path.Log.info("Waterline " + msg + " {} sec.".format(execTime))

    return True
# Methods for constructing the cut area and creating path geometry
def _processWaterlineAreas(self, JOB, obj, mdlIdx, FCS, VDS):
    """_processWaterlineAreas(JOB, obj, mdlIdx, FCS, VDS)...
    This method applies any avoided faces or regions to the selected faces.
    It then calls the correct method.

    FCS: selected face shapes (or True meaning "whole model").
    VDS: void/avoid shapes (or False when none).
    Returns the list of Path.Command objects for this model.
    """
    Path.Log.debug("_processWaterlineAreas()")
    final = list()

    # Process faces Collectively or Individually
    if obj.HandleMultipleFeatures == "Collectively":
        if FCS is True:
            # Whole-model request: no face compound to build.
            COMP = False
        else:
            ADD = Part.makeCompound(FCS)
            if VDS is not False:
                # Subtract avoided regions from the selected faces.
                DEL = Part.makeCompound(VDS)
                COMP = ADD.cut(DEL)
            else:
                COMP = ADD

        final.append(
            Path.Command("G0", {"Z": obj.SafeHeight.Value, "F": self.vertRapid})
        )
        if obj.Algorithm == "OCL Dropcutter":
            final.extend(
                self._oclWaterlineOp(JOB, obj, mdlIdx, COMP)
            )  # independent method set for Waterline
        else:
            final.extend(
                self._experimentalWaterlineOp(JOB, obj, mdlIdx, COMP)
            )  # independent method set for Waterline

    elif obj.HandleMultipleFeatures == "Individually":
        # Run the waterline op once per selected face.
        for fsi in range(0, len(FCS)):
            fShp = FCS[fsi]
            # self.deleteOpVariables(all=False)
            self.resetOpVariables(all=False)

            if fShp is True:
                COMP = False
            else:
                ADD = Part.makeCompound([fShp])
                if VDS is not False:
                    DEL = Part.makeCompound(VDS)
                    COMP = ADD.cut(DEL)
                else:
                    COMP = ADD

            final.append(
                Path.Command("G0", {"Z": obj.SafeHeight.Value, "F": self.vertRapid})
            )
            if obj.Algorithm == "OCL Dropcutter":
                final.extend(
                    self._oclWaterlineOp(JOB, obj, mdlIdx, COMP)
                )  # independent method set for Waterline
            else:
                final.extend(
                    self._experimentalWaterlineOp(JOB, obj, mdlIdx, COMP)
                )  # independent method set for Waterline
            COMP = None
    # Eif

    return final
def _getExperimentalWaterlinePaths(self, PNTSET, csHght, cutPattern):
    """_getExperimentalWaterlinePaths(PNTSET, csHght, cutPattern)...
    Switching function for calling the appropriate path-geometry to OCL
    points conversion function for the various cut patterns.

    Converts raw 2D point data into FreeCAD vectors at cut height
    ``csHght``.  Returns a list of step-over lists: linear patterns yield
    (P1, P2) segment tuples, circular patterns yield (start, end, center,
    climbMode) arc tuples.  'BRK' break markers pass through unchanged.
    """
    Path.Log.debug("_getExperimentalWaterlinePaths()")
    SCANS = list()

    # PNTSET is list, by stepover.
    if cutPattern in ["Line", "Spiral", "ZigZag"]:
        stpOvr = list()
        for STEP in PNTSET:
            for SEG in STEP:
                if SEG == "BRK":
                    stpOvr.append(SEG)
                else:
                    (A, B) = SEG  # format is ((p1, p2), (p3, p4))
                    P1 = FreeCAD.Vector(A[0], A[1], csHght)
                    P2 = FreeCAD.Vector(B[0], B[1], csHght)
                    stpOvr.append((P1, P2))
            SCANS.append(stpOvr)
            stpOvr = list()
    elif cutPattern in ["Circular", "CircularZigZag"]:
        # Each stepover is a list containing arc/loop descriptions, (sp, ep, cp)
        for so in range(0, len(PNTSET)):
            stpOvr = list()
            # First tuple element (arc type) is currently unused here.
            (_aTyp, dirFlg, ARCS) = PNTSET[so]
            cMode = dirFlg == 1  # True = climb mode
            for a in range(0, len(ARCS)):
                Arc = ARCS[a]
                if Arc == "BRK":
                    stpOvr.append("BRK")
                else:
                    (sp, ep, cp) = Arc
                    S = FreeCAD.Vector(sp[0], sp[1], csHght)
                    E = FreeCAD.Vector(ep[0], ep[1], csHght)
                    C = FreeCAD.Vector(cp[0], cp[1], csHght)
                    # Dead-code removal: the original guarded against the
                    # freshly-built tuple being False, which can never
                    # happen, so the error flag was never set.
                    stpOvr.append((S, E, C, cMode))
            SCANS.append(stpOvr)
    return SCANS
# Main planar scan functions
def _stepTransitionCmds(self, obj, cutPattern, lstPnt, first, minSTH, tolrnc):
    """Build the G-code commands that transition the cutter between
    step-overs: raise, horizontal shift to ``first``, optional lower.

    lstPnt: last point of the previous step-over.
    first:  first point of the next step-over.
    minSTH: minimum safe transition height for this region.
    tolrnc: Z-comparison tolerance.
    NOTE: near-duplicate of _breakCmds(); they differ only in the ZigZag
    same-height branch and the unconditional raise in _breakCmds().
    """
    cmds = list()
    # rtpd ("return to previous depth") is never reassigned below, so the
    # final lowering block is currently inert.
    rtpd = False
    horizGC = "G0"
    hSpeed = self.horizRapid
    height = obj.SafeHeight.Value

    if cutPattern in ["Line", "Circular", "Spiral"]:
        if obj.OptimizeStepOverTransitions is True:
            height = minSTH + 2.0
    elif cutPattern in ["ZigZag", "CircularZigZag"]:
        if obj.OptimizeStepOverTransitions is True:
            zChng = first.z - lstPnt.z
            if abs(zChng) < tolrnc:  # transitions to same Z height
                if (minSTH - first.z) > tolrnc:
                    height = minSTH + 2.0
                else:
                    # Feed horizontally at cut height instead of rapid.
                    horizGC = "G1"
                    height = first.z
            elif (minSTH + (2.0 * tolrnc)) >= max(first.z, lstPnt.z):
                height = False  # allow end of Zig to cut to beginning of Zag

    # Create raise, shift, and optional lower commands
    if height is not False:
        cmds.append(Path.Command("G0", {"Z": height, "F": self.vertRapid}))
        cmds.append(
            Path.Command(horizGC, {"X": first.x, "Y": first.y, "F": hSpeed})
        )
    if rtpd is not False:  # ReturnToPreviousDepth
        cmds.append(Path.Command("G0", {"Z": rtpd, "F": self.vertRapid}))
    return cmds
def _breakCmds(self, obj, cutPattern, lstPnt, first, minSTH, tolrnc):
    """Build the G-code commands for crossing a 'BRK' break marker:
    always raise to a computed height, shift to ``first``, optional lower.

    Unlike _stepTransitionCmds(), the raise/shift pair is emitted
    unconditionally because ``height`` can never become False here.
    """
    cmds = list()
    # rtpd is never reassigned below, so the final lowering block is inert.
    rtpd = False
    horizGC = "G0"
    hSpeed = self.horizRapid
    height = obj.SafeHeight.Value

    if cutPattern in ["Line", "Circular", "Spiral"]:
        if obj.OptimizeStepOverTransitions is True:
            height = minSTH + 2.0
    elif cutPattern in ["ZigZag", "CircularZigZag"]:
        if obj.OptimizeStepOverTransitions is True:
            zChng = first.z - lstPnt.z
            if abs(zChng) < tolrnc:  # transitions to same Z height
                if (minSTH - first.z) > tolrnc:
                    height = minSTH + 2.0
                else:
                    height = first.z + 2.0  # first.z

    cmds.append(Path.Command("G0", {"Z": height, "F": self.vertRapid}))
    cmds.append(Path.Command(horizGC, {"X": first.x, "Y": first.y, "F": hSpeed}))
    if rtpd is not False:  # ReturnToPreviousDepth
        cmds.append(Path.Command("G0", {"Z": rtpd, "F": self.vertRapid}))
    return cmds
def _planarGetPDC(self, stl, finalDep, SampleInterval, cutter):
    """Create and configure an OCL PathDropCutter for the given STL model,
    target depth, sampling interval, and cutter.  The caller supplies the
    path and runs the drop-cutter."""
    pdc = ocl.PathDropCutter()  # create a pdc [PathDropCutter] object
    pdc.setSTL(stl)  # add stl model
    pdc.setCutter(cutter)  # add cutter
    pdc.setZ(finalDep)  # set minimumZ (final / target depth value)
    pdc.setSampling(SampleInterval)  # set sampling size
    return pdc
# OCL Dropcutter waterline functions
def _oclWaterlineOp(self, JOB, obj, mdlIdx, subShp=None):
    """_oclWaterlineOp(obj, base) ... Main waterline function to perform waterline extraction from model.

    Scans the model STL with an OCL drop-cutter along Y-spaced X lines,
    then extracts one waterline loop set per layer depth and converts
    each to G-code.  ``subShp``, when given, limits the scan area to its
    bounding box; otherwise obj.BoundBox selects stock or base extents.
    """
    commands = []

    base = JOB.Model.Group[mdlIdx]
    bb = self.boundBoxes[mdlIdx]
    stl = self.modelSTLs[mdlIdx]
    depOfst = obj.DepthOffset.Value

    # Prepare global holdpoint and layerEndPnt containers
    if self.holdPoint is None:
        self.holdPoint = FreeCAD.Vector(0.0, 0.0, 0.0)
    if self.layerEndPnt is None:
        self.layerEndPnt = FreeCAD.Vector(0.0, 0.0, 0.0)

    # Set extra offset to diameter of cutter to allow cutter to move around perimeter of model
    if subShp is None:
        # Get correct boundbox
        if obj.BoundBox == "Stock":
            BS = JOB.Stock
            bb = BS.Shape.BoundBox
        elif obj.BoundBox == "BaseBoundBox":
            BS = base
            bb = base.Shape.BoundBox
        xmin = bb.XMin
        xmax = bb.XMax
        ymin = bb.YMin
        ymax = bb.YMax
    else:
        xmin = subShp.BoundBox.XMin
        xmax = subShp.BoundBox.XMax
        ymin = subShp.BoundBox.YMin
        ymax = subShp.BoundBox.YMax

    # Enforce a minimum sampling interval for the OCL scan.
    smplInt = obj.SampleInterval.Value
    minSampInt = 0.001  # value is mm
    if smplInt < minSampInt:
        smplInt = minSampInt

    # Determine bounding box length for the OCL scan
    bbLength = math.fabs(ymax - ymin)
    numScanLines = int(math.ceil(bbLength / smplInt) + 1)  # Number of lines

    # Compute number and size of stepdowns, and final depth
    if obj.LayerMode == "Single-pass":
        depthparams = [obj.FinalDepth.Value]
    else:
        depthparams = [dp for dp in self.depthParams]
    lenDP = len(depthparams)

    # Scan the piece to depth at smplInt
    oclScan = []
    # One scan at the deepest layer serves all layers above it.
    oclScan = self._waterlineDropCutScan(
        stl, smplInt, xmin, xmax, ymin, depthparams[lenDP - 1], numScanLines
    )
    # Apply the user DepthOffset to every scanned point.
    oclScan = [FreeCAD.Vector(P.x, P.y, P.z + depOfst) for P in oclScan]
    lenOS = len(oclScan)
    ptPrLn = int(lenOS / numScanLines)

    # Convert oclScan list of points to multi-dimensional list
    scanLines = []
    for L in range(0, numScanLines):
        scanLines.append([])
        for P in range(0, ptPrLn):
            pi = L * ptPrLn + P
            scanLines[L].append(oclScan[pi])
    lenSL = len(scanLines)
    pntsPerLine = len(scanLines[0])
    msg = "--OCL scan: " + str(lenSL * pntsPerLine) + " points, with "
    msg += str(numScanLines) + " lines and " + str(pntsPerLine) + " pts/line"
    Path.Log.debug(msg)

    # Extract Wl layers per depthparams
    lyr = 0
    cmds = []
    layTime = time.time()
    self.topoMap = []
    for layDep in depthparams:
        cmds = self._getWaterline(obj, scanLines, layDep, lyr, lenSL, pntsPerLine)
        commands.extend(cmds)
        lyr += 1
    Path.Log.debug(
        "--All layer scans combined took " + str(time.time() - layTime) + " s"
    )
    return commands
def _waterlineDropCutScan(self, stl, smplInt, xmin, xmax, ymin, fd, numScanLines):
    """_waterlineDropCutScan(stl, smplInt, xmin, xmax, ymin, fd, numScanLines) ...
    Perform OCL scan for waterline purpose.

    Builds ``numScanLines`` X-direction lines spaced ``smplInt`` apart in
    Y starting at ``ymin``, drops the cutter along them down to final
    depth ``fd``, and returns the resulting cutter-location points.
    """
    pdc = ocl.PathDropCutter()  # create a pdc
    pdc.setSTL(stl)
    pdc.setCutter(self.cutter)
    pdc.setZ(fd)  # set minimumZ (final / target depth value)
    pdc.setSampling(smplInt)

    # Create line object as path
    path = ocl.Path()  # create an empty path object
    for nSL in range(0, numScanLines):
        yVal = ymin + (nSL * smplInt)
        p1 = ocl.Point(xmin, yVal, fd)  # start-point of line
        p2 = ocl.Point(xmax, yVal, fd)  # end-point of line
        path.append(ocl.Line(p1, p2))
        # path.append(l)  # add the line to the path
    pdc.setPath(path)
    pdc.run()  # run drop-cutter on the path

    # return the list of points
    return pdc.getCLPoints()
def _getWaterline(self, obj, scanLines, layDep, lyr, lenSL, pntsPerLine):
    """_getWaterline(obj, scanLines, layDep, lyr, lenSL, pntsPerLine) ... Get waterline.

    Pipeline for one layer depth: build a topo map from the scan, pad it,
    highlight the waterline cells, trace the loops, and convert each loop
    to G-code commands.
    """
    commands = []
    cmds = []
    loopList = []
    self.topoMap = []
    # Create topo map from scanLines (highs and lows)
    self.topoMap = self._createTopoMap(scanLines, layDep, lenSL, pntsPerLine)
    # Add buffer lines and columns to topo map
    self._bufferTopoMap(lenSL, pntsPerLine)
    # Identify layer waterline from OCL scan
    # (4 marks extra material, 9 marks removed inside corners)
    self._highlightWaterline(4, 9)
    # Extract waterline and convert to gcode
    loopList = self._extractWaterlines(obj, scanLines, lyr, layDep)
    # save commands
    for loop in loopList:
        cmds = self._loopToGcode(obj, layDep, loop)
        commands.extend(cmds)
    return commands
def _createTopoMap(self, scanLines, layDep, lenSL, pntsPerLine):
"""_createTopoMap(scanLines, layDep, lenSL, pntsPerLine) ... Create topo map version of OCL scan data."""
topoMap = []
for L in range(0, lenSL):
topoMap.append([])
for P in range(0, pntsPerLine):
if scanLines[L][P].z > layDep:
topoMap[L].append(2)
else:
topoMap[L].append(0)
return topoMap
def _bufferTopoMap(self, lenSL, pntsPerLine):
"""_bufferTopoMap(lenSL, pntsPerLine) ... Add buffer boarder of zeros to all sides to topoMap data."""
pre = [0, 0]
post = [0, 0]
for p in range(0, pntsPerLine):
pre.append(0)
post.append(0)
for i in range(0, lenSL):
self.topoMap[i].insert(0, 0)
self.topoMap[i].append(0)
self.topoMap.insert(0, pre)
self.topoMap.append(post)
return True
def _highlightWaterline(self, extraMaterial, insCorn):
    """_highlightWaterline(extraMaterial, insCorn) ... Highlight the waterline data, separating from extra material.

    Operates in place on self.topoMap (cells: 0 = empty, 2 = material).
    Marks boundary cells with 1 (waterline), interior material columns
    with ``extraMaterial``, and redundant inside-corner cells with
    ``insCorn`` so loop tracing follows a clean single-cell outline.
    """
    TM = self.topoMap
    lastPnt = len(TM[1]) - 1
    lastLn = len(TM) - 1
    highFlag = 0

    # ("--Convert parallel data to ridges")
    # Pass 1 (along each line): a 0-cell adjacent to a 2-cell becomes a
    # waterline cell (1).
    for lin in range(1, lastLn):
        for pt in range(1, lastPnt):  # Ignore first and last points
            if TM[lin][pt] == 0:
                if TM[lin][pt + 1] == 2:  # step up
                    TM[lin][pt] = 1
                if TM[lin][pt - 1] == 2:  # step down
                    TM[lin][pt] = 1

    # ("--Convert perpendicular data to ridges and highlight ridges")
    # Pass 2 (down each column): same ridge conversion across lines, plus
    # marking of deep-interior material (third consecutive 2 in a column
    # whose diagonal neighbors are also material) as extraMaterial.
    for pt in range(1, lastPnt):  # Ignore first and last points
        for lin in range(1, lastLn):
            if TM[lin][pt] == 0:
                highFlag = 0
                if TM[lin + 1][pt] == 2:  # step up
                    TM[lin][pt] = 1
                if TM[lin - 1][pt] == 2:  # step down
                    TM[lin][pt] = 1
            elif TM[lin][pt] == 2:
                highFlag += 1
                if highFlag == 3:
                    if TM[lin - 1][pt - 1] < 2 or TM[lin - 1][pt + 1] < 2:
                        highFlag = 2
                    else:
                        TM[lin - 1][pt] = extraMaterial
                        highFlag = 2

    # ("--Square corners")
    # Pass 3: fill in diagonal steps so the outline is 4-connected.
    for pt in range(1, lastPnt):
        for lin in range(1, lastLn):
            if TM[lin][pt] == 1:  # point == 1
                cont = True
                if TM[lin + 1][pt] == 0:  # forward == 0
                    if TM[lin + 1][pt - 1] == 1:  # forward left == 1
                        if TM[lin][pt - 1] == 2:  # left == 2
                            TM[lin + 1][pt] = 1  # square the corner
                            cont = False
                    if (
                        cont is True and TM[lin + 1][pt + 1] == 1
                    ):  # forward right == 1
                        if TM[lin][pt + 1] == 2:  # right == 2
                            TM[lin + 1][pt] = 1  # square the corner
                # Reset the guard before checking the backward direction.
                cont = True
                if TM[lin - 1][pt] == 0:  # back == 0
                    if TM[lin - 1][pt - 1] == 1:  # back left == 1
                        if TM[lin][pt - 1] == 2:  # left == 2
                            TM[lin - 1][pt] = 1  # square the corner
                            cont = False
                    if cont is True and TM[lin - 1][pt + 1] == 1:  # back right == 1
                        if TM[lin][pt + 1] == 2:  # right == 2
                            TM[lin - 1][pt] = 1  # square the corner

    # remove inside corners
    # Pass 4: demote redundant inside-corner cells to insCorn so loop
    # following does not double back.
    for pt in range(1, lastPnt):
        for lin in range(1, lastLn):
            if TM[lin][pt] == 1:  # point == 1
                if TM[lin][pt + 1] == 1:
                    if TM[lin - 1][pt + 1] == 1 or TM[lin + 1][pt + 1] == 1:
                        TM[lin][pt + 1] = insCorn
                elif TM[lin][pt - 1] == 1:
                    if TM[lin - 1][pt - 1] == 1 or TM[lin + 1][pt - 1] == 1:
                        TM[lin][pt - 1] = insCorn

    return True
def _extractWaterlines(self, obj, oclScan, lyr, layDep):
    """_extractWaterlines(obj, oclScan, lyr, layDep) ... Extract water lines from OCL scan data.

    Repeatedly sweeps self.topoMap for cells marked 1 and follows each
    into a closed loop of scan points via _trackLoop().  Up to maxSrchs
    sweeps are made, since muting one loop can expose another.
    Returns the list of traced loops.
    """
    lastPnt = len(self.topoMap[0]) - 1
    lastLn = len(self.topoMap) - 1
    maxSrchs = 5
    srchCnt = 1
    loopList = []
    loopNum = 0

    # Eight-neighbor direction offsets.  The 8-entry pattern is repeated
    # three times so a window of eight entries may start anywhere in the
    # first sixteen (see _findNextWlPoint).  Line offsets are mirrored
    # between climb and conventional mode; point offsets are identical.
    if self.CutClimb is True:
        lC = [-1, -1, -1, 0, 1, 1, 1, 0] * 3
    else:
        lC = [1, 1, 1, 0, -1, -1, -1, 0] * 3
    pC = [-1, 0, 1, 1, 1, 0, -1, -1] * 3

    srch = True
    while srch is True:
        srch = False
        if srchCnt > maxSrchs:
            Path.Log.debug(
                "Max search scans, "
                + str(maxSrchs)
                + " reached\nPossible incomplete waterline result!"
            )
            break
        for L in range(1, lastLn):
            for P in range(1, lastPnt):
                if self.topoMap[L][P] == 1:
                    # start loop follow
                    srch = True
                    loopNum += 1
                    loop = self._trackLoop(oclScan, lC, pC, L, P, loopNum)
                    self.topoMap[L][P] = 0  # Mute the starting point
                    loopList.append(loop)
        srchCnt += 1
    Path.Log.debug(
        "Search count for layer "
        + str(lyr)
        + " is "
        + str(srchCnt)
        + ", with "
        + str(loopNum)
        + " loops."
    )
    return loopList
def _trackLoop(self, oclScan, lC, pC, L, P, loopNum):
    """_trackLoop(oclScan, lC, pC, L, P, loopNum) ... Track the loop direction.

    Starting at topo-map cell (L, P), repeatedly steps to the next
    waterline cell via _findNextWlPoint(), collecting the corresponding
    scan points.  Topo-map indexes are offset by +1 relative to oclScan
    because of the zero border added by _bufferTopoMap().  Stops when the
    loop closes back on (L, P), when no next point is found, or after
    ptLmt points (runaway guard).
    """
    loop = [oclScan[L - 1][P - 1]]  # Start loop point list
    cur = [L, P, 1]
    prv = [L, P - 1, 1]
    nxt = [L, P + 1, 1]
    follow = True
    ptc = 0
    ptLmt = 200000
    while follow is True:
        ptc += 1
        if ptc > ptLmt:
            Path.Log.debug(
                "Loop number "
                + str(loopNum)
                + " at ["
                + str(nxt[0])
                + ", "
                + str(nxt[1])
                + "] pnt count exceeds, "
                + str(ptLmt)
                + ". Stopped following loop."
            )
            break
        nxt = self._findNextWlPoint(
            lC, pC, cur[0], cur[1], prv[0], prv[1]
        )  # get next point
        loop.append(oclScan[nxt[0] - 1][nxt[1] - 1])  # add it to loop point list
        self.topoMap[nxt[0]][nxt[1]] = nxt[2]  # Mute the point, if not Y stem
        if nxt[0] == L and nxt[1] == P:  # check if loop complete
            follow = False
        elif (
            nxt[0] == cur[0] and nxt[1] == cur[1]
        ):  # check if line cannot be detected
            follow = False
        prv = cur
        cur = nxt
    return loop
def _findNextWlPoint(self, lC, pC, cl, cp, pl, pp):
"""_findNextWlPoint(lC, pC, cl, cp, pl, pp) ...
Find the next waterline point in the point cloud layer provided."""
dl = cl - pl
dp = cp - pp
num = 0
i = 3
s = 0
mtch = 0
found = False
while mtch < 8: # check all 8 points around current point
if lC[i] == dl:
if pC[i] == dp:
s = i - 3
found = True
# Check for y branch where current point is connection between branches
for y in range(1, mtch):
if lC[i + y] == dl:
if pC[i + y] == dp:
num = 1
break
break
i += 1
mtch += 1
if found is False:
# ("_findNext: No start point found.")
return [cl, cp, num]
for r in range(0, 8):
l = cl + lC[s + r]
p = cp + pC[s + r]
if self.topoMap[l][p] == 1:
return [l, p, num]
# ("_findNext: No next pnt found")
return [cl, cp, num]
def _loopToGcode(self, obj, layDep, loop):
    """_loopToGcode(obj, layDep, loop) ... Convert set of loop points to Gcode.

    Emits: rapid to clearance height, rapid to the first loop point's XY,
    feed down to layer depth, then one G1 per loop point at the cut feed.
    """
    # generate the path commands
    output = []
    # prev = FreeCAD.Vector(2135984513.165, -58351896873.17455, 13838638431.861)
    nxt = FreeCAD.Vector(0.0, 0.0, 0.0)

    # Create first point
    pnt = FreeCAD.Vector(loop[0].x, loop[0].y, layDep)

    # Position cutter to begin loop
    output.append(
        Path.Command("G0", {"Z": obj.ClearanceHeight.Value, "F": self.vertRapid})
    )
    output.append(
        Path.Command("G0", {"X": pnt.x, "Y": pnt.y, "F": self.horizRapid})
    )
    output.append(Path.Command("G1", {"Z": pnt.z, "F": self.vertFeed}))

    lenCLP = len(loop)
    lastIdx = lenCLP - 1
    # Cycle through each point on loop
    for i in range(0, lenCLP):
        if i < lastIdx:
            nxt.x = loop[i + 1].x
            nxt.y = loop[i + 1].y
            nxt.z = layDep
        output.append(
            Path.Command("G1", {"X": pnt.x, "Y": pnt.y, "F": self.horizFeed})
        )
        # Rotate point data
        # After the first iteration pnt and nxt are the same object, so
        # mutating nxt above also advances pnt for the next command.
        pnt = nxt

    # Save layer end point for use in transitioning to next layer
    self.layerEndPnt = pnt
    return output
# Experimental waterline functions
def _experimentalWaterlineOp(self, JOB, obj, mdlIdx, subShp=None):
    """_waterlineOp(JOB, obj, mdlIdx, subShp=None) ...
    Main waterline function to perform waterline extraction from model.

    Geometry-based (non-OCL) implementation: slices the model at each
    layer depth, offsets the cross-section areas by the tool radius plus
    BoundaryAdjustment, emits the waterline wires as paths, and clears
    layers per CutPattern / ClearLastLayer.
    """
    Path.Log.debug("_experimentalWaterlineOp()")
    commands = []
    base = JOB.Model.Group[mdlIdx]
    # safeSTL = self.safeSTLs[mdlIdx]
    self.endVector = None

    # Sink final depth slightly (geoTlrnc/10) so the bottom slice succeeds.
    finDep = obj.FinalDepth.Value + (self.geoTlrnc / 10.0)
    depthParams = PathUtils.depth_params(
        obj.ClearanceHeight.Value,
        obj.SafeHeight.Value,
        obj.StartDepth.Value,
        obj.StepDown.Value,
        0.0,
        finDep,
    )

    # Compute number and size of stepdowns, and final depth
    if obj.LayerMode == "Single-pass":
        depthparams = [finDep]
    else:
        depthparams = [dp for dp in depthParams]
    Path.Log.debug("Experimental Waterline depthparams:\n{}".format(depthparams))

    # Prepare PathDropCutter objects with STL data
    # safePDC = self._planarGetPDC(safeSTL, depthparams[lenDP - 1], obj.SampleInterval.Value, self.cutter)

    # Oversized border face around the stock; cut areas are taken from it.
    buffer = self.cutter.getDiameter() * 10.0
    borderFace = Part.Face(
        self._makeExtendedBoundBox(JOB.Stock.Shape.BoundBox, buffer, 0.0)
    )

    # Get correct boundbox
    if obj.BoundBox == "Stock":
        stockEnv = PathSurfaceSupport.getShapeEnvelope(JOB.Stock.Shape)
        bbFace = PathSurfaceSupport.getCrossSection(stockEnv)  # returned at Z=0.0
    elif obj.BoundBox == "BaseBoundBox":
        baseEnv = PathSurfaceSupport.getShapeEnvelope(base.Shape)
        bbFace = PathSurfaceSupport.getCrossSection(baseEnv)  # returned at Z=0.0

    # Region outside the bound box, used to trim cut areas to bounds.
    trimFace = borderFace.cut(bbFace)
    self.showDebugObject(trimFace, "TrimFace")

    # Cycle through layer depths
    CUTAREAS = self._getCutAreas(
        base.Shape, depthparams, bbFace, trimFace, borderFace
    )
    if not CUTAREAS:
        Path.Log.error("No cross-section cut areas identified.")
        return commands

    caCnt = 0
    # Net inward offset: boundary adjustment minus the tool radius.
    ofst = obj.BoundaryAdjustment.Value
    ofst -= self.radius  # (self.radius + (tolrnc / 10.0))
    caLen = len(CUTAREAS)
    lastCA = caLen - 1
    lastClearArea = None
    lastCsHght = None
    clearLastLayer = True
    for ca in range(0, caLen):
        area = CUTAREAS[ca]
        csHght = area.BoundBox.ZMin
        csHght += obj.DepthOffset.Value
        cont = False
        caCnt += 1
        if area.Area > 0.0:
            cont = True
            self.showDebugObject(area, "CutArea_{}".format(caCnt))
        else:
            data = FreeCAD.Units.Quantity(csHght, FreeCAD.Units.Length).UserString
            Path.Log.debug("Cut area at {} is zero.".format(data))

        # get offset wire(s) based upon cross-section cut area
        if cont:
            # Work at Z=0 for the 2D boolean/offset operations.
            area.translate(FreeCAD.Vector(0.0, 0.0, 0.0 - area.BoundBox.ZMin))
            activeArea = area.cut(trimFace)
            self.showDebugObject(activeArea, "ActiveArea_{}".format(caCnt))
            ofstArea = PathUtils.getOffsetArea(activeArea, ofst, self.wpc)
            if not ofstArea:
                data = FreeCAD.Units.Quantity(
                    csHght, FreeCAD.Units.Length
                ).UserString
                Path.Log.debug(
                    "No offset area returned for cut area depth at {}.".format(data)
                )
                cont = False

        if cont:
            # Identify solid areas in the offset data
            if obj.CutPattern == "Offset" or obj.CutPattern == "None":
                ofstSolidFacesList = self._getSolidAreasFromPlanarFaces(ofstArea)
                if ofstSolidFacesList:
                    clearArea = Part.makeCompound(ofstSolidFacesList)
                    self.showDebugObject(clearArea, "ClearArea_{}".format(caCnt))
                else:
                    cont = False
                    data = FreeCAD.Units.Quantity(
                        csHght, FreeCAD.Units.Length
                    ).UserString
                    Path.Log.error(
                        "Could not determine solid faces at {}.".format(data)
                    )
            else:
                clearArea = activeArea

        if cont:
            data = FreeCAD.Units.Quantity(csHght, FreeCAD.Units.Length).UserString
            Path.Log.debug("... Clearning area at {}.".format(data))
            # Make waterline path for current CUTAREA depth (csHght)
            commands.extend(self._wiresToWaterlinePath(obj, clearArea, csHght))
            clearArea.translate(
                FreeCAD.Vector(0.0, 0.0, 0.0 - clearArea.BoundBox.ZMin)
            )
            # Remember the deepest successfully-cleared layer for the
            # ClearLastLayer pass below.
            lastClearArea = clearArea
            lastCsHght = csHght

            # Clear layer as needed
            (clrLyr, clearLastLayer) = self._clearLayer(
                obj, ca, lastCA, clearLastLayer
            )
            if clrLyr == "Offset":
                commands.extend(self._makeOffsetLayerPaths(obj, clearArea, csHght))
            elif clrLyr:
                cutPattern = obj.CutPattern
                if clearLastLayer is False:
                    cutPattern = obj.ClearLastLayer
                commands.extend(
                    self._makeCutPatternLayerPaths(
                        JOB, obj, clearArea, csHght, cutPattern
                    )
                )
    # Efor

    # Final pass: clear the last processed layer when requested.
    if clearLastLayer and obj.ClearLastLayer != "Off":
        Path.Log.debug("... Clearning last layer")
        (clrLyr, cLL) = self._clearLayer(obj, 1, 1, False)
        lastClearArea.translate(
            FreeCAD.Vector(0.0, 0.0, 0.0 - lastClearArea.BoundBox.ZMin)
        )
        if clrLyr == "Offset":
            commands.extend(
                self._makeOffsetLayerPaths(obj, lastClearArea, lastCsHght)
            )
        elif clrLyr:
            commands.extend(
                self._makeCutPatternLayerPaths(
                    JOB, obj, lastClearArea, lastCsHght, obj.ClearLastLayer
                )
            )

    return commands
    def _getCutAreas(self, shape, depthparams, bbFace, trimFace, borderFace):
        """_getCutAreas(shape, depthparams, bbFace, trimFace, borderFace) ...
        Takes shape, depthparams and the border face, and returns a list of
        cut areas - one for each depth - or False when none could be built.
        bbFace and trimFace are currently unused here; kept for interface
        compatibility with the caller."""
        Path.Log.debug("_getCutAreas()")
        CUTAREAS = list()
        isFirst = True
        lenDP = len(depthparams)
        # Cycle through layer depths
        for dp in range(0, lenDP):
            csHght = depthparams[dp]
            # Path.Log.debug('Depth {} is {}'.format(dp + 1, csHght))
            # Get slice at depth of shape
            csFaces = self._getModelCrossSection(shape, csHght)  # returned at Z=0.0
            if csFaces:
                if len(csFaces) > 0:
                    # Merge the planar faces into independent solid regions
                    useFaces = self._getSolidAreasFromPlanarFaces(csFaces)
                else:
                    useFaces = False
                if useFaces:
                    compAdjFaces = Part.makeCompound(useFaces)
                    self.showDebugObject(compAdjFaces, "Solids_{}".format(dp + 1))
                    if isFirst:
                        allPrevComp = compAdjFaces
                        cutArea = borderFace.cut(compAdjFaces)
                    else:
                        preCutArea = borderFace.cut(compAdjFaces)
                        cutArea = preCutArea.cut(
                            allPrevComp
                        )  # cut out higher layers to avoid cutting recessed areas
                        allPrevComp = allPrevComp.fuse(compAdjFaces)
                    # Move the cut area to its layer height before saving it
                    cutArea.translate(
                        FreeCAD.Vector(0.0, 0.0, csHght - cutArea.BoundBox.ZMin)
                    )
                    CUTAREAS.append(cutArea)
                    isFirst = False
                else:
                    Path.Log.error("No waterline at depth: {} mm.".format(csHght))
        # Efor
        if len(CUTAREAS) > 0:
            return CUTAREAS
        return False
    def _wiresToWaterlinePath(self, obj, ofstPlnrShp, csHght):
        """Convert the wires of a planar shape into waterline path commands
        at layer height csHght. The outermost wire (index 0) is skipped
        unless csHght is below obj.IgnoreOuterAbove. Returns a list of
        Path.Command objects, ending with a rapid to SafeHeight."""
        Path.Log.debug("_wiresToWaterlinePath()")
        commands = list()
        # Translate path geometry to layer height
        ofstPlnrShp.translate(
            FreeCAD.Vector(0.0, 0.0, csHght - ofstPlnrShp.BoundBox.ZMin)
        )
        self.showDebugObject(
            ofstPlnrShp, "WaterlinePathArea_{}".format(round(csHght, 2))
        )
        commands.append(Path.Command("N (Cut Area {}.)".format(round(csHght, 2))))
        start = 1
        if csHght < obj.IgnoreOuterAbove:
            # Below the threshold the outer wire is cut as well
            start = 0
        for w in range(start, len(ofstPlnrShp.Wires)):
            wire = ofstPlnrShp.Wires[w]
            V = wire.Vertexes
            if obj.CutMode == "Climb":
                # Climb mode starts from the last vertex to reverse direction
                lv = len(V) - 1
                startVect = FreeCAD.Vector(V[lv].X, V[lv].Y, V[lv].Z)
            else:
                startVect = FreeCAD.Vector(V[0].X, V[0].Y, V[0].Z)
            commands.append(Path.Command("N (Wire {}.)".format(w)))
            (cmds, endVect) = self._wireToPath(obj, wire, startVect)
            commands.extend(cmds)
            commands.append(
                Path.Command("G0", {"Z": obj.SafeHeight.Value, "F": self.vertRapid})
            )
        return commands
    def _makeCutPatternLayerPaths(self, JOB, obj, clrAreaShp, csHght, cutPattern):
        """Generate clearing path commands for one layer of clrAreaShp at
        height csHght using the requested cutPattern. 'Offset' is handled
        directly; all other patterns go through PathGeometryGenerator."""
        Path.Log.debug("_makeCutPatternLayerPaths()")
        commands = []
        clrAreaShp.translate(FreeCAD.Vector(0.0, 0.0, 0.0 - clrAreaShp.BoundBox.ZMin))
        # Convert pathGeom to gcode more efficiently
        if cutPattern == "Offset":
            commands.extend(self._makeOffsetLayerPaths(obj, clrAreaShp, csHght))
        else:
            # Request path geometry from external support class
            PGG = PathSurfaceSupport.PathGeometryGenerator(obj, clrAreaShp, cutPattern)
            if self.showDebugObjects:
                PGG.setDebugObjectsGroup(self.tempGroup)
            self.tmpCOM = PGG.getCenterOfPattern()
            pathGeom = PGG.generatePathGeometry()
            if not pathGeom:
                Path.Log.warning("No path geometry generated.")
                return commands
            pathGeom.translate(
                FreeCAD.Vector(0.0, 0.0, csHght - pathGeom.BoundBox.ZMin)
            )
            self.showDebugObject(pathGeom, "PathGeom_{}".format(round(csHght, 2)))
            # NOTE(review): if cutPattern is none of the patterns below,
            # pntSet is never bound and the next use raises NameError —
            # confirm callers only pass the listed patterns.
            if cutPattern == "Line":
                # pntSet = PathSurfaceSupport.pathGeomToLinesPointSet(obj, pathGeom, self.CutClimb, self.toolDiam, self.closedGap, self.gaps)
                pntSet = PathSurfaceSupport.pathGeomToLinesPointSet(self, obj, pathGeom)
            elif cutPattern == "ZigZag":
                # pntSet = PathSurfaceSupport.pathGeomToZigzagPointSet(obj, pathGeom, self.CutClimb, self.toolDiam, self.closedGap, self.gaps)
                pntSet = PathSurfaceSupport.pathGeomToZigzagPointSet(
                    self, obj, pathGeom
                )
            elif cutPattern in ["Circular", "CircularZigZag"]:
                # pntSet = PathSurfaceSupport.pathGeomToCircularPointSet(obj, pathGeom, self.CutClimb, self.toolDiam, self.closedGap, self.gaps, self.tmpCOM)
                pntSet = PathSurfaceSupport.pathGeomToCircularPointSet(
                    self, obj, pathGeom
                )
            elif cutPattern == "Spiral":
                pntSet = PathSurfaceSupport.pathGeomToSpiralPointSet(obj, pathGeom)
            stpOVRS = self._getExperimentalWaterlinePaths(pntSet, csHght, cutPattern)
            safePDC = False
            cmds = self._clearGeomToPaths(JOB, obj, safePDC, stpOVRS, cutPattern)
            commands.extend(cmds)
        return commands
    def _makeOffsetLayerPaths(self, obj, clrAreaShp, csHght):
        """Clear one layer by repeatedly insetting clrAreaShp by the cut-out
        width and emitting a waterline path for each resulting face, until
        no offset area remains. Returns the accumulated commands."""
        Path.Log.debug("_makeOffsetLayerPaths()")
        cmds = list()
        ofst = 0.0 - self.cutOut
        shape = clrAreaShp
        cont = True
        cnt = 0
        while cont:
            ofstArea = PathUtils.getOffsetArea(shape, ofst, self.wpc)
            if not ofstArea:
                # No inset left to cut; done with this layer
                break
            for F in ofstArea.Faces:
                cmds.extend(self._wiresToWaterlinePath(obj, F, csHght))
            shape = ofstArea
            if cnt == 0:
                # NOTE(review): this is a no-op — ofst already holds
                # 0.0 - self.cutOut; possibly a remnant of a different
                # first-pass offset. Confirm before removing.
                ofst = 0.0 - self.cutOut
            cnt += 1
        Path.Log.debug(
            " -Offset path count: {} at height: {}".format(cnt, round(csHght, 2))
        )
        return cmds
    def _clearGeomToPaths(self, JOB, obj, safePDC, stpOVRS, cutPattern):
        """Convert step-over point-set data (stpOVRS) into gcode commands.

        stpOVRS is indexed [step][item][point]; items are either 'BRK'
        break markers or point tuples (line segments for Line/ZigZag/Spiral,
        (start, end, center, mode) tuples for Circular patterns).
        safePDC is currently always passed as False by the caller; the
        safe-travel-height checks that would use it are commented out and
        obj.SafeHeight.Value is used instead."""
        Path.Log.debug("_clearGeomToPaths()")
        GCODE = [Path.Command("N (Beginning of Single-pass layer.)", {})]
        tolrnc = JOB.GeometryTolerance.Value
        lenstpOVRS = len(stpOVRS)
        # lstSO = lenstpOVRS - 1
        # lstStpOvr = False
        # Arc direction pair; swapped for climb milling
        gDIR = ["G3", "G2"]
        if self.CutClimb is True:
            gDIR = ["G2", "G3"]
        # Send cutter to x,y position of first point on first line
        first = stpOVRS[0][0][0]  # [step][item][point]
        GCODE.append(
            Path.Command("G0", {"X": first.x, "Y": first.y, "F": self.horizRapid})
        )
        # Cycle through step-over sections (line segments or arcs)
        odd = True
        lstStpEnd = None
        for so in range(0, lenstpOVRS):
            cmds = list()
            PRTS = stpOVRS[so]
            lenPRTS = len(PRTS)
            first = PRTS[0][0]  # first point of arc/line stepover group
            last = None
            cmds.append(Path.Command("N (Begin step {}.)".format(so), {}))
            if so > 0:
                if cutPattern == "CircularZigZag":
                    # Alternate direction every step for zig-zag arcs
                    if odd:
                        odd = False
                    else:
                        odd = True
                # minTrnsHght = self._getMinSafeTravelHeight(safePDC, lstStpEnd, first)  # Check safe travel height against fullSTL
                minTrnsHght = obj.SafeHeight.Value
                # cmds.append(Path.Command('N (Transition: last, first: {}, {}: minSTH: {})'.format(lstStpEnd, first, minTrnsHght), {}))
                cmds.extend(
                    self._stepTransitionCmds(
                        obj, cutPattern, lstStpEnd, first, minTrnsHght, tolrnc
                    )
                )
            # Cycle through current step-over parts
            for i in range(0, lenPRTS):
                prt = PRTS[i]
                # Path.Log.debug('prt: {}'.format(prt))
                if prt == "BRK":
                    # Break marker: lift/travel to the next part's start
                    nxtStart = PRTS[i + 1][0]
                    # minSTH = self._getMinSafeTravelHeight(safePDC, last, nxtStart)  # Check safe travel height against fullSTL
                    minSTH = obj.SafeHeight.Value
                    cmds.append(Path.Command("N (Break)", {}))
                    cmds.extend(
                        self._breakCmds(obj, cutPattern, last, nxtStart, minSTH, tolrnc)
                    )
                else:
                    cmds.append(Path.Command("N (part {}.)".format(i + 1), {}))
                    if cutPattern in ["Line", "ZigZag", "Spiral"]:
                        start, last = prt
                        cmds.append(
                            Path.Command(
                                "G1",
                                {
                                    "X": start.x,
                                    "Y": start.y,
                                    "Z": start.z,
                                    "F": self.horizFeed,
                                },
                            )
                        )
                        cmds.append(
                            Path.Command(
                                "G1", {"X": last.x, "Y": last.y, "F": self.horizFeed}
                            )
                        )
                    elif cutPattern in ["Circular", "CircularZigZag"]:
                        # isCircle = True if lenPRTS == 1 else False
                        isZigZag = True if cutPattern == "CircularZigZag" else False
                        Path.Log.debug(
                            "so, isZigZag, odd, cMode: {}, {}, {}, {}".format(
                                so, isZigZag, odd, prt[3]
                            )
                        )
                        gcode = self._makeGcodeArc(prt, gDIR, odd, isZigZag)
                        cmds.extend(gcode)
            cmds.append(Path.Command("N (End of step {}.)".format(so), {}))
            GCODE.extend(cmds)  # save line commands
            lstStpEnd = last
        # Efor
        # Raise to safe height after clearing
        GCODE.append(
            Path.Command("G0", {"Z": obj.SafeHeight.Value, "F": self.vertRapid})
        )
        return GCODE
    def _getSolidAreasFromPlanarFaces(self, csFaces):
        """Combine a list of coplanar faces into independent solid regions.

        Each face is assigned a parent (the face that contains it, if any)
        via _idInternalFeature; chains of containment are then resolved so
        that even-depth faces cut holes (donuts) and odd-depth faces are
        floating solids. Returns the surviving faces, or False if none."""
        Path.Log.debug("_getSolidAreasFromPlanarFaces()")
        holds = list()
        useFaces = list()
        lenCsF = len(csFaces)
        Path.Log.debug("lenCsF: {}".format(lenCsF))
        if lenCsF == 1:
            # Single face: trivially one solid region
            useFaces = csFaces
        else:
            fIds = list()
            aIds = list()
            pIds = list()
            cIds = list()
            for af in range(0, lenCsF):
                fIds.append(af)  # face ids
                aIds.append(af)  # face ids
                pIds.append(-1)  # parent ids
                cIds.append(False)  # cut ids
                holds.append(False)
            # Assign a parent id to every face that is contained by another
            while len(fIds) > 0:
                li = fIds.pop()
                low = csFaces[li]  # senior face
                pIds = self._idInternalFeature(csFaces, fIds, pIds, li, low)
            for af in range(lenCsF - 1, -1, -1):  # cycle from last item toward first
                prnt = pIds[af]
                if prnt == -1:
                    stack = -1
                else:
                    # Build the chain of ancestors from root to this face
                    stack = [af]
                    # get_face_ids_to_parent
                    stack.insert(0, prnt)
                    nxtPrnt = pIds[prnt]
                    # find af value for nxtPrnt
                    while nxtPrnt != -1:
                        stack.insert(0, nxtPrnt)
                        nxtPrnt = pIds[nxtPrnt]
                cIds[af] = stack
            for af in range(0, lenCsF):
                pFc = cIds[af]
                if pFc == -1:
                    # Simple, independent region
                    holds[af] = csFaces[af]  # place face in hold
                else:
                    # Compound region
                    cnt = len(pFc)
                    if cnt % 2.0 == 0.0:
                        # even is donut cut
                        inr = pFc[cnt - 1]
                        otr = pFc[cnt - 2]
                        holds[otr] = holds[otr].cut(csFaces[inr])
                    else:
                        # odd is floating solid
                        holds[af] = csFaces[af]
            for af in range(0, lenCsF):
                if holds[af]:
                    useFaces.append(holds[af])  # save independent solid
        # Eif
        if len(useFaces) > 0:
            return useFaces
        return False
    def _getModelCrossSection(self, shape, csHght):
        """Slice shape horizontally at height csHght and return the
        resulting faces sorted by area (largest first), translated to
        Z=0.0. Returns False if the slice yields no wires or any wire is
        open (an open wire cannot form a face)."""
        Path.Log.debug("_getModelCrossSection()")
        wires = list()
        def byArea(fc):
            # Sort key: face area
            return fc.Area
        for i in shape.slice(FreeCAD.Vector(0, 0, 1), csHght):
            wires.append(i)
        if len(wires) > 0:
            for w in wires:
                if w.isClosed() is False:
                    return False
            FCS = list()
            for w in wires:
                # Move each wire down to Z=0 before making its face
                w.translate(FreeCAD.Vector(0.0, 0.0, 0.0 - w.BoundBox.ZMin))
                FCS.append(Part.Face(w))
            FCS.sort(key=byArea, reverse=True)
            return FCS
        else:
            Path.Log.debug(" -No wires from .slice() method")
            return False
def _isInBoundBox(self, outShp, inShp):
obb = outShp.BoundBox
ibb = inShp.BoundBox
if obb.XMin < ibb.XMin:
if obb.XMax > ibb.XMax:
if obb.YMin < ibb.YMin:
if obb.YMax > ibb.YMax:
return True
return False
    def _idInternalFeature(self, csFaces, fIds, pIds, li, low):
        """Find, among the candidate face ids in fIds, a face that contains
        face li (index of 'low') and record it as li's parent in pIds.
        Containment is confirmed by a bound-box check followed by an actual
        boolean common() with positive area. Returns the updated pIds."""
        Ids = list()
        for i in fIds:
            Ids.append(i)
        while len(Ids) > 0:
            hi = Ids.pop()
            high = csFaces[hi]
            if self._isInBoundBox(high, low):
                cmn = high.common(low)
                if cmn.Area > 0.0:
                    # First genuine overlap wins as the parent
                    pIds[li] = hi
                    break
        return pIds
    def _wireToPath(self, obj, wire, startVect):
        """_wireToPath(obj, wire, startVect) ... wire to path.

        Delegates to Path.fromShapes() with this operation's feed rates and
        heights, starting at startVect. Returns (commands, end_vector) and
        also stores end_vector on self.endVector."""
        Path.Log.track()
        paths = []
        pathParams = {}
        pathParams["shapes"] = [wire]
        pathParams["feedrate"] = self.horizFeed
        pathParams["feedrate_v"] = self.vertFeed
        pathParams["verbose"] = True
        pathParams["resume_height"] = obj.SafeHeight.Value
        pathParams["retraction"] = obj.ClearanceHeight.Value
        pathParams["return_end"] = True
        # Note that emitting preambles between moves breaks some dressups and prevents path optimization on some controllers
        pathParams["preamble"] = False
        pathParams["start"] = startVect
        (pp, end_vector) = Path.fromShapes(**pathParams)
        paths.extend(pp.Commands)
        self.endVector = end_vector
        return (paths, end_vector)
def _makeExtendedBoundBox(self, wBB, bbBfr, zDep):
pl = FreeCAD.Placement()
pl.Rotation = FreeCAD.Rotation(FreeCAD.Vector(0, 0, 1), 0)
pl.Base = FreeCAD.Vector(0, 0, 0)
p1 = FreeCAD.Vector(wBB.XMin - bbBfr, wBB.YMin - bbBfr, zDep)
p2 = FreeCAD.Vector(wBB.XMax + bbBfr, wBB.YMin - bbBfr, zDep)
p3 = FreeCAD.Vector(wBB.XMax + bbBfr, wBB.YMax + bbBfr, zDep)
p4 = FreeCAD.Vector(wBB.XMin - bbBfr, wBB.YMax + bbBfr, zDep)
bb = Part.makePolygon([p1, p2, p3, p4, p1])
return bb
    def _makeGcodeArc(self, prt, gDIR, odd, isZigZag):
        """Convert one arc tuple prt = (start, end, center, mode) into
        gcode: a G1 move to the start point, a G2/G3 arc (direction chosen
        from gDIR by the odd/zig-zag state and the arc's mode flag), and a
        closing G1 to the end point. I/J/K are the vector from start to
        center, per incremental arc-center convention."""
        cmds = list()
        strtPnt, endPnt, cntrPnt, cMode = prt
        gdi = 0
        if odd:
            gdi = 1
        else:
            if not cMode and isZigZag:
                gdi = 1
        gCmd = gDIR[gdi]
        # ijk = self.tmpCOM - strtPnt
        # ijk = self.tmpCOM.sub(strtPnt)  # vector from start to center
        ijk = cntrPnt.sub(strtPnt)  # vector from start to center
        xyz = endPnt
        cmds.append(
            Path.Command(
                "G1",
                {"X": strtPnt.x, "Y": strtPnt.y, "Z": strtPnt.z, "F": self.horizFeed},
            )
        )
        cmds.append(
            Path.Command(
                gCmd,
                {
                    "X": xyz.x,
                    "Y": xyz.y,
                    "Z": xyz.z,
                    "I": ijk.x,
                    "J": ijk.y,
                    "K": ijk.z,  # leave same xyz.z height
                    "F": self.horizFeed,
                },
            )
        )
        cmds.append(
            Path.Command(
                "G1", {"X": endPnt.x, "Y": endPnt.y, "Z": endPnt.z, "F": self.horizFeed}
            )
        )
        return cmds
    def _clearLayer(self, obj, ca, lastCA, clearLastLayer):
        """Decide which cut pattern (if any) to use for clearing the
        current layer. Returns (clrLyr, clearLastLayer) where clrLyr is a
        pattern name or False for no clearing. When ClearLastLayer is
        active, only the bottom layer (ca == lastCA) gets cleared, using
        the ClearLastLayer pattern."""
        Path.Log.debug("_clearLayer()")
        clrLyr = False
        if obj.ClearLastLayer == "Off":
            if obj.CutPattern != "None":
                clrLyr = obj.CutPattern
        else:
            # ClearLastLayer overrides the regular pattern
            obj.CutPattern = "None"
            if ca == lastCA:  # if current iteration is last layer
                Path.Log.debug("... Clearing bottom layer.")
                clrLyr = obj.ClearLastLayer
                clearLastLayer = False
        return (clrLyr, clearLastLayer)
# Support methods
def resetOpVariables(self, all=True):
"""resetOpVariables() ... Reset class variables used for instance of operation."""
self.holdPoint = None
self.layerEndPnt = None
self.onHold = False
self.SafeHeightOffset = 2.0
self.ClearHeightOffset = 4.0
self.layerEndzMax = 0.0
self.resetTolerance = 0.0
self.holdPntCnt = 0
self.bbRadius = 0.0
self.axialFeed = 0.0
self.axialRapid = 0.0
self.FinalDepth = 0.0
self.clearHeight = 0.0
self.safeHeight = 0.0
self.faceZMax = -999999999999.0
if all is True:
self.cutter = None
self.stl = None
self.fullSTL = None
self.cutOut = 0.0
self.useTiltCutter = False
return True
def deleteOpVariables(self, all=True):
"""deleteOpVariables() ... Reset class variables used for instance of operation."""
del self.holdPoint
del self.layerEndPnt
del self.onHold
del self.SafeHeightOffset
del self.ClearHeightOffset
del self.layerEndzMax
del self.resetTolerance
del self.holdPntCnt
del self.bbRadius
del self.axialFeed
del self.axialRapid
del self.FinalDepth
del self.clearHeight
del self.safeHeight
del self.faceZMax
if all is True:
del self.cutter
del self.stl
del self.fullSTL
del self.cutOut
del self.radius
del self.useTiltCutter
return True
    def showDebugObject(self, objShape, objName):
        """Add objShape to the document as a temporary 'tmp_'-prefixed
        Part::Feature inside the debug temp group; no-op unless debug
        object display is enabled."""
        if self.showDebugObjects:
            do = FreeCAD.ActiveDocument.addObject("Part::Feature", "tmp_" + objName)
            do.Shape = objShape
            # purgeTouched() avoids marking the document as dirty
            do.purgeTouched()
            self.tempGroup.addObject(do)
def SetupProperties():
    """SetupProperties() ... Return list of property names required for the
    Waterline operation, extracted from ObjectWaterline's definitions
    (each definition tuple carries the name at index 1)."""
    return [tup[1] for tup in ObjectWaterline.opPropertyDefinitions(False)]
def Create(name, obj=None, parentJob=None):
    """Create(name) ... Creates and returns a Waterline operation.

    obj: existing Path::FeaturePython document object to attach the proxy
    to; when None, a new one is added to the active document.
    parentJob: Job object passed through to the ObjectWaterline proxy."""
    if obj is None:
        obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython", name)
    obj.Proxy = ObjectWaterline(obj, name, parentJob)
    return obj
|
s3 | domain | import logging
import types
from boto.route53.connection import Route53Connection
from boto.route53.record import ResourceRecordSets
from boto.s3.connection import S3Connection
from boto.s3.website import RedirectLocation
# Generic steps for custom domains can be found here
# http://docs.aws.amazon.com/AmazonS3/latest/dev/website-hosting-custom-domain-walkthrough.html
# Hosted zone tables
# https://forums.aws.amazon.com/thread.jspa?threadID=116724
# http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
# Amazon, you owe me four hours of my life...
# Maps each S3 website endpoint domain to Amazon's fixed Route 53 hosted
# zone id for that region; required when creating an alias record that
# points a domain at an S3 website bucket.
HOSTED_ZONES = {
    # US Standard
    "s3-website-us-east-1.amazonaws.com": "Z3AQBSTGFYJSTF",
    # US West (Oregon) Region
    "s3-website-us-west-2.amazonaws.com": "Z3BJ6K6RIION7M",
    # US West (Northern California) Region
    "s3-website-us-west-1.amazonaws.com": "Z2F56UZL2M1ACD",
    # EU (Ireland) Region
    "s3-website-eu-west-1.amazonaws.com": "Z1BKCTXD74EZPE",
    # Asia Pacific (Singapore) Region
    "s3-website-ap-southeast-1.amazonaws.com": "Z3O0J2DXBE1FTB",
    # Asia Pacific (Sydney) Region
    "s3-website-ap-southeast-2.amazonaws.com": "Z1WCIGYICN2BYD",
    # Asia Pacific (Tokyo) Region
    "s3-website-ap-northeast-1.amazonaws.com": "Z2M4EHUR26P7ZW",
    # South America (Sao Paulo) Region
    "s3-website-sa-east-1.amazonaws.com": "Z7KQH4QJS55SO",
    # AWS GovCloud (US)
    "s3-website-us-gov-west-1.amazonaws.com": "Z31GFT0UA1I2HV",
}
class AWSBucket(object):
    """Thin wrapper around one S3 bucket used for static-site hosting."""

    def __init__(self, accessKey, secretKey, name):
        self.name = name
        self.accessKey = accessKey
        self.secretKey = secretKey
        self.connection = S3Connection(
            aws_access_key_id=accessKey,
            aws_secret_access_key=secretKey,
        )
        # Currently unused; kept for parity with AWSDomain.
        self._cache = {}

    def bucket(self):
        """Return the boto bucket object, or None when the bucket does not
        exist or is not accessible with these credentials."""
        try:
            return self.connection.get_bucket(self.name)
        except Exception:
            # Fix: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit are no longer swallowed; any S3/boto error
            # still maps to "no bucket".
            return None

    def create(self):
        """Create the bucket with public-read policy."""
        logging.info("Create bucket %s", self.name)
        self.connection.create_bucket(self.name, policy="public-read")

    def isCreated(self):
        """True when the bucket exists and is reachable."""
        return self.bucket() is not None

    def configureWebsite(self):
        """Enable S3 static-website hosting with standard index/error pages."""
        logging.info("Configuring website endpoint %s", self.name)
        self.bucket().configure_website("index.html", "error.html")

    def configureRedirect(self, url):
        """Redirect all requests for this bucket's website to *url*."""
        logging.info("Setup redirect %s -> %s", self.name, url)
        self.bucket().configure_website(
            redirect_all_requests_to=RedirectLocation(hostname=url)
        )

    def websiteEndpoint(self):
        """Return the region-specific S3 website endpoint for this bucket."""
        return self.bucket().get_website_endpoint()
class AWSDomain(object):
    """Route 53 management for pointing a custom domain at an S3 website
    bucket, including the conventional www-redirect bucket."""

    def __init__(self, accessKey, secretKey, domain):
        self.domain = domain
        self.accessKey = accessKey
        self.secretKey = secretKey
        self.connection = Route53Connection(
            aws_access_key_id=accessKey,
            aws_secret_access_key=secretKey,
        )
        # Caches the hosted-zone lookup for the life of this object.
        self._cache = {}

    @property
    def id(self):
        """Bare hosted-zone id, without the '/hostedzone/' prefix."""
        return self.hostedZone()["HostedZone"]["Id"].replace("/hostedzone/", "")

    @property
    def fullDomain(self):
        """The registrable domain: the last two labels of self.domain."""
        parts = self.domain.split(".")
        return parts[len(parts) - 2] + "." + parts[len(parts) - 1]

    @property
    def dnsDomain(self):
        """Fully-qualified (trailing-dot) form used in Route 53 records."""
        return self.domain + "."

    def isValidDomain(self):
        # Not implemented.
        pass

    def isNakedDomain(self):
        # Not implemented.
        pass

    # Fix: removed a dead ``def records(self): pass`` stub that was
    # silently shadowed by the real records() definition below.

    def createHostedZone(self):
        """Create the Route 53 hosted zone for the registrable domain."""
        logging.info("Creating hosted zone for %s", self.fullDomain)
        self.connection.create_hosted_zone(self.fullDomain)

    def hostedZone(self):
        """Return the cached GetHostedZoneResponse for fullDomain, or None
        when the zone does not exist."""
        if "hostedZone" not in self._cache:
            hostedZone = self.connection.get_hosted_zone_by_name(self.fullDomain)
            if not hostedZone:
                return
            self._cache["hostedZone"] = hostedZone.get("GetHostedZoneResponse", None)
        return self._cache["hostedZone"]

    def nameServers(self):
        """Return the zone's delegation-set name servers, or None."""
        hostedZone = self.hostedZone()
        if hostedZone:
            return self.hostedZone()["DelegationSet"]["NameServers"]
        else:
            return None

    def records(self):
        """Return all resource record sets in the hosted zone."""
        return self.connection.get_all_rrsets(self.id)

    def createRecord(self, name, recordType, values, ttl=60 * 60 * 3):
        self._changeRecord("CREATE", name, recordType, values, ttl)

    def deleteRecord(self, name, recordType, values, ttl=60 * 60 * 3):
        self._changeRecord("DELETE", name, recordType, values, ttl)

    def _changeRecord(self, change, name, recordType, values, ttl):
        """Apply a CREATE/DELETE change for a plain record set."""
        logging.info(
            "%s record %s:%s in zone %s", change, name, recordType, self.domain
        )
        # Fix: replaced Python-2-only ``types.ListType`` comparison with
        # isinstance(), which works on both Python 2 and 3.
        if not isinstance(values, list):
            values = [values]
        changes = ResourceRecordSets(self.connection, self.id)
        change = changes.add_change(change, name, recordType, ttl)
        for value in values:
            change.add_value(value)
        changes.commit()

    def createAlias(
        self,
        name,
        recordType,
        aliasHostedZoneId,
        aliasDNSName,
        identifier=None,
        weight=None,
        comment="",
    ):
        self._changeAlias(
            "CREATE",
            name,
            recordType,
            aliasHostedZoneId,
            aliasDNSName,
            identifier,
            weight,
            comment,
        )

    def deleteAlias(
        self,
        name,
        recordType,
        aliasHostedZoneId,
        aliasDNSName,
        identifier=None,
        weight=None,
        comment="",
    ):
        self._changeAlias(
            "DELETE",
            name,
            recordType,
            aliasHostedZoneId,
            aliasDNSName,
            identifier,
            weight,
            comment,
        )

    def _changeAlias(
        self,
        change,
        name,
        recordType,
        aliasHostedZoneId,
        aliasDNSName,
        identifier,
        weight,
        comment,
    ):
        """Apply a CREATE/DELETE change for an alias record set."""
        logging.info("%s alias %s:%s in zone %s", change, name, recordType, self.domain)
        changes = ResourceRecordSets(self.connection, self.id, comment)
        change = changes.add_change(
            change, name, recordType, identifier=identifier, weight=weight
        )
        change.set_alias(aliasHostedZoneId, aliasDNSName)
        changes.commit()

    def delete(self, record):
        """Delete *record*, dispatching on whether it is an alias."""
        if record.alias_dns_name:
            self.deleteAlias(
                record.name,
                record.type,
                record.alias_hosted_zone_id,
                record.alias_dns_name,
                identifier=record.identifier,
                weight=record.weight,
            )
        else:
            self.deleteRecord(
                record.name, record.type, record.resource_records, ttl=record.ttl
            )

    def pointRootToBucket(self):
        """Replace any root A/CNAME record with an alias to the S3 website
        endpoint of the bucket named after the domain."""
        # Make sure the correct bucket exists and is ours
        bucket = AWSBucket(self.accessKey, self.secretKey, self.domain)
        endpoint = bucket.websiteEndpoint()
        endpointDomain = endpoint[len(self.dnsDomain) :]
        # Remove old A record for the root domain
        for record in self.records():
            if record.type in ["A", "CNAME"] and record.name == self.dnsDomain:
                self.delete(record)
        # Create new root domain record that points to the bucket
        self.createAlias(
            self.dnsDomain, "A", HOSTED_ZONES[endpointDomain], endpointDomain
        )

    def setupRedirect(self):
        """Create a www.<domain> bucket that redirects to the bare domain,
        plus the matching CNAME record; skipped if the bucket exists."""
        redirectDomain = "www.%s" % self.domain
        redirectDNSDomain = redirectDomain + "."
        bucket = AWSBucket(self.accessKey, self.secretKey, redirectDomain)
        if bucket.isCreated():
            logging.info(
                "Bucket with name %s already exists, so skipping redirect bucket setup. \
If you've set this up before, this will still work. Delete the bucket if you want Cactus to\
 set it up again.",
                redirectDomain,
            )
            return
        bucket.create()
        bucket.configureRedirect(self.domain)
        for record in self.records():
            if record.type == "CNAME" and record.name == redirectDNSDomain:
                self.delete(record)
        self.createRecord(redirectDomain, "CNAME", [self.domain])

    def setup(self):
        """End-to-end: ensure the zone exists, point the root at the
        bucket, and set up the www redirect for bare domains."""
        if not self.hostedZone():
            self.createHostedZone()
        self.pointRootToBucket()
        if not self.domain.startswith("www."):
            self.setupRedirect()
|
builtinMarketBrowser | marketTree | import wx
from gui.builtinMarketBrowser.events import RECENTLY_USED_MODULES
from gui.cachingImageList import CachingImageList
from logbook import Logger
pyfalog = Logger(__name__)
_t = wx.GetTranslation
class MarketTree(wx.TreeCtrl):
    """Lazily-populated tree view of the market group hierarchy.

    Unexpanded groups get a single 'dummy' child so they show an expand
    arrow; real children are fetched from the market service on first
    expansion."""

    def __init__(self, parent, marketBrowser):
        wx.TreeCtrl.__init__(self, parent, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT)
        pyfalog.debug("Initialize marketTree")
        self.root = self.AddRoot("root")
        self.imageList = CachingImageList(16, 16)
        self.SetImageList(self.imageList)
        self.sMkt = marketBrowser.sMkt
        self.marketBrowser = marketBrowser
        # Form market tree root
        sMkt = self.sMkt
        for mktGrp in sMkt.getMarketRoot():
            iconId = self.addImage(sMkt.getIconByMarketGroup(mktGrp))
            childId = self.AppendItem(self.root, mktGrp.name, iconId, data=mktGrp.ID)
            # All market groups which were never expanded are dummies, here we assume
            # that all root market groups are expandable
            self.AppendItem(childId, "dummy")
        self.SortChildren(self.root)
        # Add recently used modules node
        rumIconId = self.addImage("market_small", "gui")
        self.AppendItem(
            self.root, _t("Recently Used Items"), rumIconId, data=RECENTLY_USED_MODULES
        )
        # Bind our lookup method to when the tree gets expanded
        self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.expandLookup)
        self.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnCollapsed)

    def addImage(self, iconFile, location="icons"):
        """Register iconFile with the image list; returns its index or -1
        for no icon."""
        if iconFile is None:
            return -1
        return self.imageList.GetImageIndex(iconFile, location)

    def expandLookup(self, event):
        """Process market tree expands"""
        root = event.Item
        child = self.GetFirstChild(root)[0]
        # If child of given market group is a dummy
        if self.GetItemText(child) == "dummy":
            # Delete it
            self.Delete(child)
            # And add real market group contents
            sMkt = self.sMkt
            currentMktGrp = sMkt.getMarketGroup(
                self.GetItemData(root), eager="children"
            )
            for childMktGrp in sMkt.getMarketGroupChildren(currentMktGrp):
                # If market should have items but it doesn't, do not show it
                if sMkt.marketGroupValidityCheck(childMktGrp) is False:
                    continue
                icon = sMkt.getIconByMarketGroup(childMktGrp)
                iconId = -1 if icon is None else self.addImage(icon)
                try:
                    childId = self.AppendItem(
                        root, childMktGrp.name, iconId, data=childMktGrp.ID
                    )
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception as e:
                    pyfalog.debug("Error appending item.")
                    pyfalog.debug(e)
                    continue
                if sMkt.marketGroupHasTypesCheck(childMktGrp) is False:
                    # Group has sub-groups, so give it a dummy for lazy expansion
                    self.AppendItem(childId, "dummy")
            self.SortChildren(root)

    def OnCollapsed(self, event):
        """Collapse the whole subtree so re-expansion starts clean."""
        self.CollapseAllChildren(event.Item)
        event.Skip()

    def jump(self, item):
        """Open market group and meta tab of given item"""
        sMkt = self.sMkt
        mg = sMkt.getMarketGroupByItem(item)
        # Collect the chain of market-group ids from the item up to a root
        jumpList = []
        while mg is not None:
            jumpList.append(mg.ID)
            mg = mg.parent
        for id in sMkt.ROOT_MARKET_GROUPS:
            if id in jumpList:
                # Truncate the chain at the known root group
                jumpList = jumpList[: jumpList.index(id) + 1]
        item = self.root
        # Walk the chain from root to leaf, expanding each level
        for i in range(len(jumpList) - 1, -1, -1):
            target = jumpList[i]
            child, cookie = self.GetFirstChild(item)
            while self.GetItemData(child) != target:
                child, cookie = self.GetNextChild(item, cookie)
            item = child
            self.Expand(item)
        self.SelectItem(item)
|
Mesh | Init | # FreeCAD init script of the Mesh module
# (c) 2004 Werner Mayer LGPL
import FreeCAD
# Append the open handler
# Register the mesh file formats handled by the Mesh module with FreeCAD's
# generic open/import dialog. Note: AMF is registered for export only.
FreeCAD.addImportType("STL Mesh (*.stl *.ast)", "Mesh")
FreeCAD.addImportType("Binary Mesh (*.bms)", "Mesh")
FreeCAD.addImportType("Alias Mesh (*.obj)", "Mesh")
FreeCAD.addImportType("Object File Format Mesh (*.off)", "Mesh")
FreeCAD.addImportType("Stanford Triangle Mesh (*.ply)", "Mesh")
FreeCAD.addImportType("Simple Model Format (*.smf)", "Mesh")
FreeCAD.addImportType("3D Manufacturing Format (*.3mf)", "Mesh")
# Matching export registrations
FreeCAD.addExportType("STL Mesh (*.stl *.ast)", "Mesh")
FreeCAD.addExportType("Binary Mesh (*.bms)", "Mesh")
FreeCAD.addExportType("Alias Mesh (*.obj)", "Mesh")
FreeCAD.addExportType("Object File Format Mesh (*.off)", "Mesh")
FreeCAD.addExportType("Stanford Triangle Mesh (*.ply)", "Mesh")
FreeCAD.addExportType("Additive Manufacturing Format (*.amf)", "Mesh")
FreeCAD.addExportType("Simple Model Format (*.smf)", "Mesh")
FreeCAD.addExportType("3D Manufacturing Format (*.3mf)", "Mesh")
# Register the module's unit tests with FreeCAD's test framework
FreeCAD.__unit_test__ += ["MeshTestsApp"]
|
playlist | export_to_squeezebox | # Copyright 2014-2021 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from quodlibet import _, app, qltk
from quodlibet.ext._shared.squeezebox.base import SqueezeboxPluginMixin
from quodlibet.plugins.playlist import PlaylistPlugin
from quodlibet.qltk import Icons
from quodlibet.qltk.getstring import GetStringDialog
from quodlibet.qltk.notif import Task
from quodlibet.util import copool
from quodlibet.util.dprint import print_d
class SqueezeboxPlaylistPlugin(PlaylistPlugin, SqueezeboxPluginMixin):
    """Exports a Quod Libet playlist to a Squeezebox server playlist.

    The current Squeezebox playlist is saved to a temp name, replaced by
    the exported songs, saved under the chosen name, then restored."""

    PLUGIN_ID = "Export to Squeezebox Playlist"
    PLUGIN_NAME = _("Export to Squeezebox")
    PLUGIN_DESC_MARKUP = (
        _(
            "Dynamically exports a playlist to Logitech Squeezebox "
            "playlist, provided both share a directory structure."
        )
        + "\n"
        + _(
            'Shares configuration with <a href="%(plugin_link)s">Squeezebox '
            "Sync plugin</a>."
        )
        % {"plugin_link": "quodlibet:///prefs/plugins/Squeezebox Output"}
    )
    PLUGIN_ICON = Icons.NETWORK_WORKGROUP
    ELLIPSIZE_NAME = True
    # Weight given to save/restore operations when estimating progress.
    _PERSIST_FUDGE = 100
    # Name used to stash the server's current playlist during export.
    TEMP_PLAYLIST = "_quodlibet"

    def __add_songs(self, task, songs, name):
        """Generator for copool to add songs to the temp playlist"""
        print_d(
            "Backing up current Squeezebox playlist."
            "This can take a while if your current playlist is big..."
        )
        self.__cancel = False
        # Arbitrarily assign playlist operations a value of 2 * addition
        task_total = float(len(songs) + 2 * self._PERSIST_FUDGE + 3 * 2)
        self.server.playlist_save(self.TEMP_PLAYLIST)
        task.update(self._PERSIST_FUDGE / task_total)
        yield True
        self.server.playlist_clear()
        # Fix: was floor division (//), which always reported 0.0 progress
        # here; true division matches the other task.update() calls.
        task.update((self._PERSIST_FUDGE + 2.0) / task_total)
        yield True
        # Check if we're currently playing.
        stopped = self.server.is_stopped()
        total = len(songs)
        print_d(
            "Adding %d song(s) to Squeezebox playlist. "
            "This might take a while..." % total
        )
        for i, song in enumerate(songs):
            if self.__cancel:
                print_d("Cancelled squeezebox export")
                self.__cancel = False
                break
            # Actually do the (slow) call
            self.server.playlist_add(self.get_sb_path(song))
            task.update(float(i) / task_total)
            yield True
        print_d('Saving Squeezebox playlist "%s"' % name)
        self.server.playlist_save(name)
        task.update((task_total - 2) / task_total)
        yield True
        # Resume if we actually stopped
        self.server.playlist_resume(self.TEMP_PLAYLIST, not stopped, True)
        task.finish()

    def __cancel_add(self):
        """Tell the copool to stop (adding songs)"""
        self.__cancel = True

    @staticmethod
    def __get_playlist_name(name="Quod Libet playlist"):
        """Prompt the user for the target playlist name; returns the name
        or a falsy value if cancelled."""
        dialog = GetStringDialog(
            None,
            _("Export playlist to Squeezebox"),
            _("Playlist name (will overwrite existing)"),
            button_label=_("_Save"),
            button_icon=Icons.DOCUMENT_SAVE,
        )
        name = dialog.run(text=name)
        return name

    def plugin_playlist(self, playlist):
        """Entry point: connect to the server and export *playlist*."""
        self.init_server()
        if not self.server.is_connected:
            qltk.ErrorMessage(
                app.window,
                _("Error finding Squeezebox server"),
                _("Error finding %s. Please check settings") % self.server.config,
            ).run()
        else:
            name = self.__get_playlist_name(name=playlist.name)
            if name:
                task = Task(
                    "Squeezebox",
                    _("Export to Squeezebox playlist"),
                    stop=self.__cancel_add,
                )
                copool.add(
                    self.__add_songs,
                    task,
                    playlist.songs,
                    name,
                    funcid="squeezebox-playlist-save",
                )
|
pykakasi | kakasi | # -*- coding: utf-8 -*-
# kakasi.py
#
# Copyright 2011 Hiroshi Miura <miurahr@linux.com>
#
# Original Copyright:
# * KAKASI (Kanji Kana Simple inversion program)
# * $Id: jj2.c,v 1.7 2001-04-12 05:57:34 rug Exp $
# * Copyright (C) 1992
# * Hironobu Takahashi (takahasi@tiny.or.jp)
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either versions 2, or (at your option)
# * any later version.
# *
# * This program is distributed in the hope that it will be useful
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# */
from calibre.ebooks.unihandecode.pykakasi.h2a import H2a
from calibre.ebooks.unihandecode.pykakasi.j2h import J2H
from calibre.ebooks.unihandecode.pykakasi.k2a import K2a
class kakasi(object):
    """Kanji/kana to roman-alphabet transliterator.

    Walks the input string one segment at a time, dispatching to the
    kanji (J2H + H2a), hiragana (H2a), and katakana (K2a) converters, and
    inserting spaces between converted runs."""

    # Converter instances; set in __init__ (class-level defaults are None).
    j2h = None
    h2a = None
    k2a = None

    def __init__(self):
        self.j2h = J2H()
        self.h2a = H2a()
        self.k2a = K2a()

    def do(self, text):
        """Return the transliteration of *text*.

        Each converter's convert() returns (converted, consumed_length);
        the index only advances by consumed_length.
        NOTE(review): in the hiragana/katakana branches a consumed length
        of 0 would not advance i — confirm the converters always consume
        at least one character for their own script."""
        otext = ""
        i = 0
        while True:
            if i >= len(text):
                break
            if self.j2h.isKanji(text[i]):
                # Kanji: convert to hiragana first, then romanize it
                (t, l) = self.j2h.convert(text[i:])
                if l <= 0:
                    # Unconvertible kanji: pass the character through
                    otext = otext + text[i]
                    i = i + 1
                    continue
                i = i + l
                m = 0
                tmptext = ""
                while True:
                    if m >= len(t):
                        break
                    (s, n) = self.h2a.convert(t[m:])
                    if n <= 0:
                        break
                    m = m + n
                    tmptext = tmptext + s
                # Capitalize kanji-derived words; space unless at end
                if i >= len(text):
                    otext = otext + tmptext.capitalize()
                else:
                    otext = otext + tmptext.capitalize() + " "
            elif self.h2a.isHiragana(text[i]):
                # Consume a run of hiragana
                tmptext = ""
                while True:
                    (t, l) = self.h2a.convert(text[i:])
                    tmptext = tmptext + t
                    i = i + l
                    if i >= len(text):
                        otext = otext + tmptext
                        break
                    elif not self.h2a.isHiragana(text[i]):
                        otext = otext + tmptext + " "
                        break
            elif self.k2a.isKatakana(text[i]):
                # Consume a run of katakana
                tmptext = ""
                while True:
                    (t, l) = self.k2a.convert(text[i:])
                    tmptext = tmptext + t
                    i = i + l
                    if i >= len(text):
                        otext = otext + tmptext
                        break
                    elif not self.k2a.isKatakana(text[i]):
                        otext = otext + tmptext + " "
                        break
            else:
                # Non-Japanese character: copy through unchanged
                otext = otext + text[i]
                i += 1
        return otext
|
templatetags | timers | # -*- coding: utf-8 -*-
from core.models import Child, Timer
from django import template
from django.urls import reverse
register = template.Library()
@register.inclusion_tag("core/timer_nav.html", takes_context=True)
def timer_nav(context):
    """
    Get a list of Timer instances to include in the nav menu.
    :param context: Django's context data.
    :returns: a dictionary with timers data (all timers, all children,
              the user's permissions, and the current path as 'next').
    """
    request = context["request"] or None
    # filter() with no arguments selects all Timer rows
    timers = Timer.objects.filter()
    children = Child.objects.all()
    perms = context["perms"] or None
    # The 'next' parameter is currently not used.
    return {
        "timers": timers,
        "children": children,
        "perms": perms,
        "next": request.path,
    }
@register.inclusion_tag("core/quick_timer_nav.html", takes_context=True)
def quick_timer_nav(context):
    """Context for the quick-timer nav fragment: all children plus the
    current user's permissions."""
    children = Child.objects.all()
    perms = context["perms"] or None
    return {"children": children, "perms": perms}
@register.simple_tag(takes_context=True)
def instance_add_url(context, url_name):
    """Build the add-entry URL for the timer in context, forwarding the
    timer id (and the timer's child slug, when set) as query parameters."""
    timer = context["timer"]
    params = ["timer={}".format(timer.id)]
    if timer.child:
        params.append("child={}".format(timer.child.slug))
    return "{}?{}".format(reverse(url_name), "&".join(params))
|
PyObjCTest | test_nsmenu | from AppKit import *
from PyObjCTools.TestSupport import *
# Python 2/3 compatibility: on Python 3 the 'unicode' builtin is gone, so
# alias it to str for the isinstance checks in testConstants.
try:
    unicode
except NameError:
    unicode = str
class TestNSMenuHelper(NSObject):
    """Dummy NSMenu delegate/validation object used only for signature
    introspection in TestNSMenu.testProtocol.

    The method names define the Objective-C selectors PyObjC inspects,
    so they must not be renamed; the return values themselves are
    irrelevant to the tests."""

    def validateMenuItem_(self, item):
        return 1

    def numberOfItemsInMenu_(self, menu):
        return 1

    def menu_updateItem_atIndex_shouldCancel_(self, m, i, d, s):
        return 1

    def menuHasKeyEquivalent_forEvent_target_action_(self, m, e, t, a):
        return 1

    def confinementRectForMenu_onScreen_(self, m, s):
        return 1
class TestNSMenu(TestCase):
    """Verify the PyObjC metadata for NSMenu: BOOL results/arguments,
    NSInteger encodings, struct return types and framework constants."""

    def testProtocol(self):
        # Informal delegate protocol: check the type encodings derived for
        # the helper's selectors (out-pointers encoded as "o^@"/"o^:").
        self.assertResultIsBOOL(TestNSMenuHelper.validateMenuItem_)
        self.assertResultHasType(
            TestNSMenuHelper.numberOfItemsInMenu_, objc._C_NSInteger
        )
        self.assertResultIsBOOL(TestNSMenuHelper.menu_updateItem_atIndex_shouldCancel_)
        self.assertArgHasType(
            TestNSMenuHelper.menu_updateItem_atIndex_shouldCancel_, 2, objc._C_NSInteger
        )
        self.assertArgIsBOOL(TestNSMenuHelper.menu_updateItem_atIndex_shouldCancel_, 3)
        self.assertResultIsBOOL(
            TestNSMenuHelper.menuHasKeyEquivalent_forEvent_target_action_
        )
        self.assertArgHasType(
            TestNSMenuHelper.menuHasKeyEquivalent_forEvent_target_action_, 2, b"o^@"
        )
        self.assertArgHasType(
            TestNSMenuHelper.menuHasKeyEquivalent_forEvent_target_action_, 3, b"o^:"
        )

    def testMethods(self):
        # NSMenu accessors/mutators that take or return BOOL, plus the
        # NSPoint-returning locationForSubmenu:.
        self.assertResultIsBOOL(NSMenu.menuBarVisible)
        self.assertArgIsBOOL(NSMenu.setMenuBarVisible_, 0)
        self.assertResultIsBOOL(NSMenu.autoenablesItems)
        self.assertArgIsBOOL(NSMenu.setAutoenablesItems_, 0)
        self.assertResultIsBOOL(NSMenu.performKeyEquivalent_)
        self.assertResultIsBOOL(NSMenu.autoenablesItems)
        self.assertArgIsBOOL(NSMenu.setMenuChangedMessagesEnabled_, 0)
        self.assertResultIsBOOL(NSMenu.isTornOff)
        self.assertResultIsBOOL(NSMenu.isAttached)
        self.assertResultIsBOOL(NSMenu.showsStateColumn)
        self.assertArgIsBOOL(NSMenu.setShowsStateColumn_, 0)
        self.assertResultIsBOOL(NSMenu.menuChangedMessagesEnabled)
        self.assertArgIsBOOL(NSMenu.setMenuChangedMessagesEnabled_, 0)
        self.assertResultHasType(NSMenu.locationForSubmenu_, NSPoint.__typestr__)

    def testConstants(self):
        # Notification names must be exposed as (unicode) strings.
        self.assertIsInstance(NSMenuWillSendActionNotification, unicode)
        self.assertIsInstance(NSMenuDidSendActionNotification, unicode)
        self.assertIsInstance(NSMenuDidAddItemNotification, unicode)
        self.assertIsInstance(NSMenuDidRemoveItemNotification, unicode)
        self.assertIsInstance(NSMenuDidChangeItemNotification, unicode)
        self.assertIsInstance(NSMenuDidBeginTrackingNotification, unicode)
        self.assertIsInstance(NSMenuDidEndTrackingNotification, unicode)

    @min_os_level("10.6")
    def testMethods10_6(self):
        # APIs introduced in macOS 10.6.
        self.assertResultIsBOOL(NSMenu.popUpMenuPositioningItem_atLocation_inView_)
        self.assertArgHasType(
            NSMenu.popUpMenuPositioningItem_atLocation_inView_, 1, NSPoint.__typestr__
        )
        self.assertResultHasType(NSMenu.size, NSSize.__typestr__)
        self.assertResultIsBOOL(NSMenu.allowsContextMenuPlugIns)
        self.assertArgIsBOOL(NSMenu.setAllowsContextMenuPlugIns_, 0)
        self.assertResultHasType(
            TestNSMenuHelper.confinementRectForMenu_onScreen_, NSRect.__typestr__
        )

    @min_os_level("10.6")
    def testConstants10_6(self):
        # NSMenuProperty bit flags (10.6+).
        self.assertEqual(NSMenuPropertyItemTitle, 1 << 0)
        self.assertEqual(NSMenuPropertyItemAttributedTitle, 1 << 1)
        self.assertEqual(NSMenuPropertyItemKeyEquivalent, 1 << 2)
        self.assertEqual(NSMenuPropertyItemImage, 1 << 3)
        self.assertEqual(NSMenuPropertyItemEnabled, 1 << 4)
        self.assertEqual(NSMenuPropertyItemAccessibilityDescription, 1 << 5)
if __name__ == "__main__":
    # ``main`` comes from the ``PyObjCTools.TestSupport`` star import.
    main()
|
babybuddy | urls | # -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path, reverse_lazy
from . import views
# URL patterns for the "babybuddy" namespace: authentication (login/logout,
# password reset), user administration and per-user settings.
app_patterns = [
    path("login/", auth_views.LoginView.as_view(), name="login"),
    path("logout/", views.LogoutView.as_view(), name="logout"),
    path(
        "reset/",
        auth_views.PasswordResetView.as_view(
            success_url=reverse_lazy("babybuddy:password_reset_done")
        ),
        name="password_reset",
    ),
    path(
        "reset/<uidb64>/<token>/",
        auth_views.PasswordResetConfirmView.as_view(
            success_url=reverse_lazy("babybuddy:password_reset_complete")
        ),
        name="password_reset_confirm",
    ),
    path(
        "reset/done/",
        auth_views.PasswordResetDoneView.as_view(),
        name="password_reset_done",
    ),
    path(
        "reset/complete/",
        auth_views.PasswordResetCompleteView.as_view(),
        name="password_reset_complete",
    ),
    path("", views.RootRouter.as_view(), name="root-router"),
    path("welcome/", views.Welcome.as_view(), name="welcome"),
    path("users/", views.UserList.as_view(), name="user-list"),
    path("users/add/", views.UserAdd.as_view(), name="user-add"),
    path("users/<int:pk>/edit/", views.UserUpdate.as_view(), name="user-update"),
    path("users/<int:pk>/unlock/", views.UserUnlock.as_view(), name="user-unlock"),
    path("users/<int:pk>/delete/", views.UserDelete.as_view(), name="user-delete"),
    path("user/password/", views.UserPassword.as_view(), name="user-password"),
    path("user/settings/", views.UserSettings.as_view(), name="user-settings"),
    path("user/add-device/", views.UserAddDevice.as_view(), name="user-add-device"),
    path("settings/", include("dbsettings.urls")),
]
# Root URL configuration: admin, the API, the app patterns above, the i18n
# language switcher and the feature apps mounted at the site root.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("", include("api.urls", namespace="api")),
    path("", include((app_patterns, "babybuddy"), namespace="babybuddy")),
    path("user/lang", include("django.conf.urls.i18n")),
    path("", include("core.urls", namespace="core")),
    path("", include("dashboard.urls", namespace="dashboard")),
    path("", include("reports.urls", namespace="reports")),
]

# Serve uploaded media through Django only during development.
if settings.DEBUG:  # pragma: no cover
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
downloader | niconico | # coding: utf-8
from __future__ import unicode_literals
# ``threading`` is optional here: builds without thread support fall back to
# a plain download (no heartbeat timer) in NiconicoDmcFD.real_download.
try:
    import threading
except ImportError:
    threading = None
from ..downloader import get_suitable_downloader
from ..extractor.niconico import NiconicoIE
from ..utils import sanitized_Request
from .common import FileDownloader
class NiconicoDmcFD(FileDownloader):
    """Downloading niconico douga from DMC with heartbeat"""

    FD_NAME = "niconico_dmc"

    def real_download(self, filename, info_dict):
        """Download *filename* while POSTing periodic heartbeats so the DMC
        session stays alive; without threading support, download plainly."""
        self.to_screen("[%s] Downloading from DMC" % self.FD_NAME)

        ie = NiconicoIE(self.ydl)
        info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)

        # Delegate the actual transfer to whichever downloader fits the format.
        fd = get_suitable_downloader(info_dict, params=self.params)(
            self.ydl, self.params
        )
        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)

        if not threading:
            # No thread support: attempt the download anyway (the DMC session
            # may time out without heartbeats).
            self.to_screen("[%s] Threading for Heartbeat not available" % self.FD_NAME)
            return fd.real_download(filename, info_dict)

        success = download_complete = False
        timer = [None]  # one-slot cell so heartbeat() can replace the timer
        heartbeat_lock = threading.Lock()
        heartbeat_url = heartbeat_info_dict["url"]
        heartbeat_data = heartbeat_info_dict["data"].encode()
        heartbeat_interval = heartbeat_info_dict.get("interval", 30)
        request = sanitized_Request(heartbeat_url, heartbeat_data)

        def heartbeat():
            # POST the keep-alive payload; failures are reported, not fatal.
            try:
                self.ydl.urlopen(request).read()
            except Exception:
                self.to_screen("[%s] Heartbeat failed" % self.FD_NAME)

            # Re-arm the timer unless the download already finished.
            with heartbeat_lock:
                if not download_complete:
                    timer[0] = threading.Timer(heartbeat_interval, heartbeat)
                    timer[0].start()

        heartbeat_info_dict["ping"]()
        self.to_screen(
            "[%s] Heartbeat with %d second interval ..."
            % (self.FD_NAME, heartbeat_interval)
        )

        try:
            heartbeat()
            if type(fd).__name__ == "HlsFD":
                # For HLS the heartbeat URL points at a playlist; resolve the
                # first concrete format before handing off to the downloader.
                info_dict.update(
                    ie._extract_m3u8_formats(info_dict["url"], info_dict["id"])[0]
                )
            success = fd.real_download(filename, info_dict)
        finally:
            # Stop the heartbeat chain on every exit path (success or error).
            if heartbeat_lock:
                with heartbeat_lock:
                    timer[0].cancel()
                    download_complete = True

        return success
|
libtorrent | settings | from enum import Enum
from typing import Optional
from pydantic import validator
from tribler.core.config.tribler_config_section import TriblerConfigSection
from tribler.core.utilities.network_utils import NetworkUtils
from tribler.core.utilities.osutils import get_home_dir
from tribler.core.utilities.path_util import Path
TRIBLER_DOWNLOADS_DEFAULT = "TriblerDownloads"

# pylint: disable=no-self-argument
# NOTE: this is intentionally a *plain* function, not decorated with
# pydantic's @validator. Decorating it at module level would wrap it in a
# classmethod-style validator descriptor, breaking the explicit reuse below
# (``validator("port", allow_reuse=True)(validate_port_with_minus_one)``).
def validate_port_with_minus_one(v):
    """Validate a port value where None means "unset" and -1 means "disabled".

    :param v: the port value to check (None, or an int in [-1..65535]).
    :returns: the value unchanged, for use as a pydantic validator.
    :raises AssertionError: if the value is outside the accepted range.
    """
    assert (
        v is None or -1 <= v <= NetworkUtils.MAX_PORT
    ), "Port must be in range [-1..65535]"
    return v
class LibtorrentSettings(TriblerConfigSection):
    """Configuration for the libtorrent-based download engine."""

    enabled: bool = True
    # Listen port; None = pick automatically, -1 = disabled (see validator below).
    port: Optional[int] = None
    # Proxy configuration; type 0..5 (see validate_proxy_type), server/auth
    # stored as "host:port" / "user:password" style strings.
    proxy_type: int = 0
    proxy_server: str = ":"
    proxy_auth: str = ":"
    # Rate/connection limits; -1 and 0 presumably mean "unlimited" — confirm
    # against the libtorrent session setup that consumes these values.
    max_connections_download: int = -1
    max_download_rate: int = 0
    max_upload_rate: int = 0
    utp: bool = True
    dht: bool = True
    dht_readiness_timeout: int = 30
    upnp: bool = True
    natpmp: bool = True
    lsd: bool = True

    # Reuse the module-level port validator for the "port" field.
    _port_validator = validator("port", allow_reuse=True)(validate_port_with_minus_one)

    @validator("proxy_type")
    def validate_proxy_type(cls, v):
        """Ensure the proxy type code is within the supported 0..5 range."""
        assert v is None or 0 <= v <= 5, "Proxy type must be in range [0..5]"
        return v
class SeedingMode(str, Enum):
    """How long a completed download keeps seeding."""

    forever = "forever"
    never = "never"
    ratio = "ratio"  # seed until a configured upload/download ratio is reached
    time = "time"  # seed for a configured amount of time
def get_default_download_dir(
    home: Optional[Path] = None, tribler_downloads_name=TRIBLER_DOWNLOADS_DEFAULT
) -> Path:
    """Return the default directory to save downloaded content to.

    The first existing candidate wins, in this order:
    - ``TriblerDownloads`` relative to the current working directory
    - ``$HOME/Downloads/TriblerDownloads``
    - ``$HOME/TriblerDownloads`` (used even if it does not exist yet)
    """
    local_candidate = Path(tribler_downloads_name)
    if local_candidate.is_dir():
        return local_candidate.resolve()

    base = home or get_home_dir()
    downloads_dir = base / "Downloads"
    if downloads_dir.is_dir():
        return downloads_dir.resolve() / tribler_downloads_name
    return base.resolve() / tribler_downloads_name
class DownloadDefaultsSettings(TriblerConfigSection):
    """Default settings applied to newly added downloads."""

    anonymity_enabled: bool = True
    # Number of anonymity tunnel hops; validated to lie in [0..3].
    number_hops: int = 1
    safeseeding_enabled: bool = True
    saveas: str = str(get_default_download_dir())
    seeding_mode: SeedingMode = SeedingMode.forever
    seeding_ratio: float = 2.0
    # Seeding time threshold; units presumed seconds — confirm with consumers.
    seeding_time: float = 60
    channel_download: bool = False
    add_download_to_channel: bool = False

    @validator("number_hops")
    def validate_number_hops(cls, v):
        """Reject hop counts outside the supported tunnel range."""
        assert 0 <= v <= 3, "Number hops must be in range [0..3]"
        return v
|
auth | views | # -*- coding: utf-8 -*-
"""
flaskbb.auth.views
~~~~~~~~~~~~~~~~~~
This view provides user authentication, registration and a view for
resetting the password of a user if he has lost his password
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import logging
from datetime import datetime
from flask import Blueprint, current_app, flash, g, redirect, request, url_for
from flask.views import MethodView
from flask_babelplus import gettext as _
from flask_login import (
confirm_login,
current_user,
login_fresh,
login_required,
login_user,
logout_user,
)
from flaskbb.auth.forms import (
AccountActivationForm,
ForgotPasswordForm,
LoginForm,
LoginRecaptchaForm,
ReauthForm,
RegisterForm,
RequestActivationForm,
ResetPasswordForm,
)
from flaskbb.extensions import db, limiter
from flaskbb.utils.helpers import (
anonymous_required,
enforce_recaptcha,
format_timedelta,
get_available_languages,
redirect_or_next,
register_view,
registration_enabled,
render_template,
requires_unactivated,
)
from flaskbb.utils.settings import flaskbb_config
from ..core.auth.authentication import StopAuthentication
from ..core.auth.registration import UserRegistrationInfo
from ..core.exceptions import PersistenceError, StopValidation, ValidationError
from ..core.tokens import TokenError
from .plugins import impl
from .services import (
account_activator_factory,
authentication_manager_factory,
reauthentication_manager_factory,
registration_service_factory,
reset_service_factory,
)
# Module-level logger named after this module ("flaskbb.auth.views").
logger = logging.getLogger(__name__)
class Logout(MethodView):
    """Log the current user out and return to the forum index."""

    decorators = [limiter.exempt, login_required]

    def get(self):
        logout_user()
        flash(_("Logged out"), "success")
        return redirect(url_for("forum.index"))
class Login(MethodView):
    """Render and process the login form."""

    decorators = [anonymous_required]

    def __init__(self, authentication_manager_factory):
        self.authentication_manager_factory = authentication_manager_factory

    def form(self):
        """Return the recaptcha-protected form when the rate limiter demands it."""
        return LoginRecaptchaForm() if enforce_recaptcha(limiter) else LoginForm()

    def get(self):
        return render_template("auth/login.html", form=self.form())

    def post(self):
        form = self.form()
        if not form.validate_on_submit():
            return render_template("auth/login.html", form=form)

        auth_manager = self.authentication_manager_factory()
        try:
            user = auth_manager.authenticate(
                identifier=form.login.data, secret=form.password.data
            )
            login_user(user, remember=form.remember_me.data)
            return redirect_or_next(url_for("forum.index"), False)
        except StopAuthentication as e:
            flash(e.reason, "danger")
        except Exception:
            flash(_("Unrecoverable error while handling login"))
        return render_template("auth/login.html", form=form)
class Reauth(MethodView):
    """Ask a logged-in user to confirm their password when the session is stale."""

    decorators = [login_required, limiter.exempt]
    form = ReauthForm

    def __init__(self, reauthentication_factory):
        self.reauthentication_factory = reauthentication_factory

    def get(self):
        # A fresh login needs no reauthentication.
        if login_fresh():
            return redirect_or_next(current_user.url)
        return render_template("auth/reauth.html", form=self.form())

    def post(self):
        form = self.form()
        if not form.validate_on_submit():
            return render_template("auth/reauth.html", form=form)

        reauth_manager = self.reauthentication_factory()
        try:
            reauth_manager.reauthenticate(user=current_user, secret=form.password.data)
            confirm_login()
            flash(_("Reauthenticated."), "success")
            return redirect_or_next(current_user.url)
        except StopAuthentication as e:
            flash(e.reason, "danger")
        except Exception:
            flash(_("Unrecoverable error while handling reauthentication"))
            raise
        return render_template("auth/reauth.html", form=form)
class Register(MethodView):
    """Render and process the user registration form."""

    decorators = [anonymous_required, registration_enabled]

    def __init__(self, registration_service_factory):
        self.registration_service_factory = registration_service_factory

    def form(self):
        """Build the registration form (plugin hook, language choices)."""
        current_app.pluggy.hook.flaskbb_form_registration(form=RegisterForm)
        form = RegisterForm()

        form.language.choices = get_available_languages()
        form.language.default = flaskbb_config["DEFAULT_LANGUAGE"]
        form.process(request.form)  # needed because a default is overriden
        return form

    def get(self):
        return render_template("auth/register.html", form=self.form())

    def post(self):
        form = self.form()
        if form.validate_on_submit():
            registration_info = UserRegistrationInfo(
                username=form.username.data,
                password=form.password.data,
                group=4,  # NOTE(review): presumably the default member group id — confirm
                email=form.email.data,
                language=form.language.data,
            )

            service = self.registration_service_factory()

            try:
                service.register(registration_info)
            except StopValidation as e:
                form.populate_errors(e.reasons)
                return render_template("auth/register.html", form=form)
            except PersistenceError:
                logger.exception("Database error while persisting user")
                # BUG FIX: the implicitly concatenated literal was missing a
                # space and rendered as "...dueto an unrecoverable error".
                flash(
                    _(
                        "Could not process registration due "
                        "to an unrecoverable error"
                    ),
                    "danger",
                )
                return render_template("auth/register.html", form=form)

            current_app.pluggy.hook.flaskbb_event_user_registered(
                username=registration_info.username
            )
            return redirect_or_next(url_for("forum.index"))

        return render_template("auth/register.html", form=form)
class ForgotPassword(MethodView):
    """Start the password-reset flow by emailing a reset token."""

    decorators = [anonymous_required]
    form = ForgotPasswordForm

    def __init__(self, password_reset_service_factory):
        self.password_reset_service_factory = password_reset_service_factory

    def get(self):
        return render_template("auth/forgot_password.html", form=self.form())

    def post(self):
        form = self.form()
        if not form.validate_on_submit():
            return render_template("auth/forgot_password.html", form=form)

        try:
            service = self.password_reset_service_factory()
            service.initiate_password_reset(form.email.data)
        except ValidationError:
            flash(
                _(
                    "You have entered an username or email address that "
                    "is not linked with your account."
                ),
                "danger",
            )
        else:
            flash(_("Email sent! Please check your inbox."), "info")
            return redirect(url_for("auth.forgot_password"))
        return render_template("auth/forgot_password.html", form=form)
class ResetPassword(MethodView):
    """Complete a password reset using an emailed token."""

    decorators = [anonymous_required]
    form = ResetPasswordForm

    def __init__(self, password_reset_service_factory):
        self.password_reset_service_factory = password_reset_service_factory

    def get(self, token):
        form = self.form()
        form.token.data = token
        return render_template("auth/reset_password.html", form=form)

    def post(self, token):
        form = self.form()
        if form.validate_on_submit():
            try:
                service = self.password_reset_service_factory()
                service.reset_password(token, form.email.data, form.password.data)
            except TokenError as e:
                flash(e.reason, "danger")
                return redirect(url_for("auth.forgot_password"))
            except StopValidation as e:
                # Validation failed: re-render the form with errors, keeping
                # the token so the user can retry.
                form.populate_errors(e.reasons)
                form.token.data = token
                return render_template("auth/reset_password.html", form=form)
            except Exception:
                logger.exception("Error when resetting password")
                flash(_("Error when resetting password"))
                return redirect(url_for("auth.forgot_password"))
            finally:
                # Runs on every path above: commit whatever the service did
                # and roll back if the commit itself fails.
                try:
                    db.session.commit()
                except Exception:
                    logger.exception(
                        "Error while finalizing database when resetting password"  # noqa
                    )
                    db.session.rollback()
            flash(_("Your password has been updated."), "success")
            return redirect(url_for("auth.login"))
        form.token.data = token
        return render_template("auth/reset_password.html", form=form)
class RequestActivationToken(MethodView):
    """Send a fresh account-activation token to a user's email address."""

    decorators = [requires_unactivated]
    form = RequestActivationForm

    def __init__(self, account_activator_factory):
        self.account_activator_factory = account_activator_factory

    def get(self):
        return render_template("auth/request_account_activation.html", form=self.form())

    def post(self):
        form = self.form()
        if not form.validate_on_submit():
            return render_template("auth/request_account_activation.html", form=form)

        activator = self.account_activator_factory()
        try:
            activator.initiate_account_activation(form.email.data)
        except ValidationError as e:
            form.populate_errors([(e.attribute, e.reason)])
            return render_template("auth/request_account_activation.html", form=form)

        flash(
            _(
                "A new account activation token has been sent to "
                "your email address."
            ),
            "success",
        )
        return redirect(url_for("forum.index"))
class AutoActivateAccount(MethodView):
    """Activate an account directly from the emailed activation link."""

    decorators = [requires_unactivated]

    def __init__(self, account_activator_factory):
        self.account_activator_factory = account_activator_factory

    def get(self, token):
        activator = self.account_activator_factory()

        try:
            activator.activate_account(token)
        except TokenError as e:
            # Bad/expired token: fall through to the redirect at the bottom,
            # which sends the user to the manual activation page.
            flash(e.reason, "danger")
        except ValidationError as e:
            flash(e.reason, "danger")
            return redirect(url_for("forum.index"))
        else:
            try:
                db.session.commit()
            except Exception:  # noqa
                logger.exception("Database error while activating account")
                db.session.rollback()
                flash(
                    _(
                        "Could not activate account due to an unrecoverable error"  # noqa
                    ),
                    "danger",
                )
                return redirect(url_for("auth.request_activation_token"))
            flash(
                _("Your account has been activated and you can now login."), "success"
            )
            return redirect(url_for("forum.index"))
        return redirect(url_for("auth.activate_account"))
class ActivateAccount(MethodView):
    """Activate an account from a manually entered activation token."""

    decorators = [requires_unactivated]
    form = AccountActivationForm

    def __init__(self, account_activator_factory):
        self.account_activator_factory = account_activator_factory

    def get(self):
        return render_template("auth/account_activation.html", form=self.form())

    def post(self):
        form = self.form()
        if form.validate_on_submit():
            token = form.token.data
            activator = self.account_activator_factory()
            try:
                activator.activate_account(token)
            except TokenError as e:
                # Invalid token: attach the error to the form field and
                # re-render the activation page below.
                form.populate_errors([("token", e.reason)])
            except ValidationError as e:
                flash(e.reason, "danger")
                return redirect(url_for("forum.index"))
            else:
                try:
                    db.session.commit()
                except Exception:  # noqa
                    logger.exception("Database error while activating account")
                    db.session.rollback()
                    flash(
                        _(
                            "Could not activate account due to an unrecoverable error"  # noqa
                        ),
                        "danger",
                    )
                    return redirect(url_for("auth.request_activation_token"))
                flash(
                    _("Your account has been activated and you can now login."),
                    "success",
                )
                return redirect(url_for("forum.index"))
        return render_template("auth/account_activation.html", form=form)
@impl(tryfirst=True)
def flaskbb_load_blueprints(app):
    """Plugin hook: build the "auth" blueprint, attach rate limiting and
    register every authentication view on the application."""
    auth = Blueprint("auth", __name__)

    def login_rate_limit():
        """Dynamically load the rate limiting config from the database."""
        # [count] [per|/] [n (optional)] [second|minute|hour|day|month|year]
        return "{count}/{timeout}minutes".format(
            count=flaskbb_config["AUTH_REQUESTS"],
            timeout=flaskbb_config["AUTH_TIMEOUT"],
        )

    def login_rate_limit_message():
        """Display the amount of time left until the user can access the requested
        resource again."""
        current_limit = getattr(g, "view_rate_limit", None)
        if current_limit is not None:
            window_stats = limiter.limiter.get_window_stats(*current_limit)
            reset_time = datetime.utcfromtimestamp(window_stats[0])
            timeout = reset_time - datetime.utcnow()
            return "{timeout}".format(timeout=format_timedelta(timeout))

    @auth.before_request
    def check_rate_limiting():
        """Check the the rate limits for each request for this blueprint."""
        if not flaskbb_config["AUTH_RATELIMIT_ENABLED"]:
            return None
        return limiter.check()

    @auth.errorhandler(429)
    def login_rate_limit_error(error):
        """Register a custom error handler for a 'Too Many Requests'
        (HTTP CODE 429) error."""
        return render_template("errors/too_many_logins.html", timeout=error.description)

    # Activate rate limiting on the whole blueprint
    limiter.limit(login_rate_limit, error_message=login_rate_limit_message)(auth)

    # Wire each MethodView to its route, injecting the service factories.
    register_view(auth, routes=["/logout"], view_func=Logout.as_view("logout"))
    register_view(
        auth,
        routes=["/login"],
        view_func=Login.as_view(
            "login", authentication_manager_factory=authentication_manager_factory
        ),
    )
    register_view(
        auth,
        routes=["/reauth"],
        view_func=Reauth.as_view(
            "reauth", reauthentication_factory=reauthentication_manager_factory
        ),
    )
    register_view(
        auth,
        routes=["/register"],
        view_func=Register.as_view(
            "register", registration_service_factory=registration_service_factory
        ),
    )
    register_view(
        auth,
        routes=["/reset-password"],
        view_func=ForgotPassword.as_view(
            "forgot_password", password_reset_service_factory=reset_service_factory
        ),
    )
    register_view(
        auth,
        routes=["/reset-password/<token>"],
        view_func=ResetPassword.as_view(
            "reset_password", password_reset_service_factory=reset_service_factory
        ),
    )
    register_view(
        auth,
        routes=["/activate"],
        view_func=RequestActivationToken.as_view(
            "request_activation_token",
            account_activator_factory=account_activator_factory,
        ),
    )
    register_view(
        auth,
        routes=["/activate/confirm"],
        view_func=ActivateAccount.as_view(
            "activate_account", account_activator_factory=account_activator_factory
        ),
    )
    register_view(
        auth,
        routes=["/activate/confirm/<token>"],
        view_func=AutoActivateAccount.as_view(
            "autoactivate_account", account_activator_factory=account_activator_factory
        ),
    )

    app.register_blueprint(auth, url_prefix=app.config["AUTH_URL_PREFIX"])
|
navigation | iterativeclosestpoint | # --------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
# --------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
# --------------------------------------------------------------------------
import invesalius.data.bases as db
import invesalius.gui.dialogs as dlg
import invesalius.session as ses
import numpy as np
import wx
from invesalius.utils import Singleton
class IterativeClosestPoint(metaclass=Singleton):
    """Singleton holding the ICP (iterative closest point) refinement state
    for neuronavigation: the enabled flag, the ICP correction matrix and the
    fiducial registration error (FRE) computed after refinement.

    The state is persisted in (and restored from) the InVesalius session
    under the "icp" key.
    """

    def __init__(self):
        self.use_icp = False  # whether the ICP correction is applied
        self.m_icp = None  # ICP transform (numpy array) or None
        self.icp_fre = None  # FRE computed after ICP, or None
        self.LoadState()

    def SaveState(self):
        """Store the current ICP state in the session (matrix as nested lists)."""
        m_icp = self.m_icp.tolist() if self.m_icp is not None else None
        state = {
            "use_icp": self.use_icp,
            "m_icp": m_icp,
            "icp_fre": self.icp_fre,
        }

        session = ses.Session()
        session.SetState("icp", state)

    def LoadState(self):
        """Restore the ICP state from the session, if any was saved."""
        session = ses.Session()
        state = session.GetState("icp")

        if state is None:
            return

        self.use_icp = state["use_icp"]
        # BUG FIX: np.array(None) yields a 0-d object array, which is *not*
        # None and silently defeats the "is None"/"is not None" checks in
        # SaveState and RegisterICP. Preserve None when nothing was saved.
        m_icp = state["m_icp"]
        self.m_icp = np.array(m_icp) if m_icp is not None else None
        self.icp_fre = state["icp_fre"]

    def RegisterICP(self, navigation, tracker):
        """Run the ICP registration dialogs and store their result.

        No-op when ICP is already active or the user declines the dialog.
        """
        # If ICP is already in use, return.
        if self.use_icp:
            return

        # Show dialog to ask whether to use ICP. If not, return.
        if not dlg.ICPcorregistration(navigation.fre):
            return

        # Show dialog to register ICP.
        dialog = dlg.ICPCorregistrationDialog(navigation=navigation, tracker=tracker)
        success = dialog.ShowModal()
        (
            self.m_icp,
            point_coord,
            transformed_points,
            prev_error,
            final_error,
        ) = dialog.GetValue()
        dialog.Destroy()

        if success != wx.ID_OK or self.m_icp is None:
            self.use_icp = False
            return

        # TODO: checkbox in the dialog to transfer the icp points to 3D viewer
        # create markers
        # for i in range(len(point_coord)):
        #     img_coord = point_coord[i][0],-point_coord[i][1],point_coord[i][2], 0, 0, 0
        #     transf_coord = transformed_points[i][0],-transformed_points[i][1],transformed_points[i][2], 0, 0, 0
        #     Publisher.sendMessage('Create marker', coord=img_coord, marker_id=None, colour=(1,0,0))
        #     Publisher.sendMessage('Create marker', coord=transf_coord, marker_id=None, colour=(0,0,1))
        self.use_icp = True
        dlg.ReportICPerror(prev_error, final_error)

        # Compute FRE (fiducial registration error).
        ref_mode_id = navigation.GetReferenceMode()
        self.icp_fre = db.calculate_fre(
            tracker.tracker_fiducials_raw,
            navigation.all_fiducials,
            ref_mode_id,
            navigation.m_change,
            self.m_icp,
        )

        self.SetICP(navigation, self.use_icp)

    def SetICP(self, navigation, use_icp):
        """Enable/disable ICP, push the state to the navigation queue, persist."""
        self.use_icp = use_icp
        navigation.icp_queue.put_nowait([self.use_icp, self.m_icp])
        self.SaveState()

    def ResetICP(self):
        """Clear all ICP state and persist the cleared state."""
        self.use_icp = False
        self.m_icp = None
        self.icp_fre = None
        self.SaveState()

    def GetFreForUI(self):
        """Return the FRE formatted for display; empty string when unset (or 0)."""
        return "{:.2f}".format(self.icp_fre) if self.icp_fre else ""
|
hogql | parse_string | from antlr4 import ParserRuleContext
from posthog.hogql.errors import HogQLException
# Single-character escape sequences accepted inside ClickHouse-style string
# literals (mirrors clickhouse_driver/util/escape.py).
_ESCAPE_SEQUENCES = {
    "b": "\b",
    "f": "\f",
    "r": "\r",
    "n": "\n",
    "t": "\t",
    "0": "\0",
    "a": "\a",
    "v": "\v",
}


def parse_string(text: str) -> str:
    """Converts a string received from antlr via ctx.getText() into a Python string.

    Accepts literals quoted with ', ", ` or wrapped in {...}. Inside the
    literal the active quote may be escaped either by doubling it or with a
    backslash; the usual backslash escapes (\\n, \\t, ...) are decoded and
    unknown escapes are kept verbatim.

    BUG FIX: the previous implementation applied str.replace() for each escape
    sequentially with "\\\\" handled last, so an escaped backslash followed by
    an escape letter (e.g. the four characters \\\\n) was wrongly decoded as a
    backslash plus newline. A single left-to-right scan handles each escape
    exactly once.

    :raises HogQLException: if the text is not wrapped in a recognized quote pair.
    """
    if text.startswith("'") and text.endswith("'"):
        quote = "'"
    elif text.startswith('"') and text.endswith('"'):
        quote = '"'
    elif text.startswith("`") and text.endswith("`"):
        quote = "`"
    elif text.startswith("{") and text.endswith("}"):
        quote = "{"
    else:
        raise HogQLException(
            f"Invalid string literal, must start and end with the same quote type: {text}"
        )

    inner = text[1:-1]
    chars: list = []
    i = 0
    length = len(inner)
    while i < length:
        char = inner[i]
        nxt = inner[i + 1] if i + 1 < length else ""
        if char == "\\" and nxt:
            if nxt == "\\":
                chars.append("\\")
                i += 2
                continue
            if nxt == quote:
                chars.append(quote)
                i += 2
                continue
            mapped = _ESCAPE_SEQUENCES.get(nxt)
            if mapped is not None:
                chars.append(mapped)
                i += 2
                continue
            # Unknown escape (e.g. "\x"): keep the backslash verbatim, then
            # process the following character normally.
            chars.append(char)
            i += 1
            continue
        if char == quote and nxt == quote:
            # A doubled quote escapes the quote character.
            chars.append(quote)
            i += 2
            continue
        chars.append(char)
        i += 1
    return "".join(chars)
def parse_string_literal(ctx: ParserRuleContext) -> str:
    """Converts a STRING_LITERAL received from antlr via ctx.getText() into a Python string"""
    return parse_string(ctx.getText())
|
searx | testing | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Shared testing code."""
# pylint: disable=missing-function-docstring,consider-using-with
import os
import subprocess
import traceback
from os.path import abspath, dirname, join, realpath
from unittest import TestCase
from splinter import Browser
class SearxTestLayer:
    """Base layer for non-robot tests.

    All hooks are deliberately empty; the class only exists so test runners
    that expect layer set-up/tear-down hooks have something to call.
    """

    __name__ = "SearxTestLayer"

    @classmethod
    def setUp(cls):
        pass

    @classmethod
    def tearDown(cls):
        pass

    @classmethod
    def testSetUp(cls):
        pass

    @classmethod
    def testTearDown(cls):
        pass
class SearxRobotLayer:
    """Searx Robot Test Layer"""

    def setUp(self):
        """Start the searx webapp in a subprocess for browser-driven tests."""
        os.setpgrp()  # create new process group, become its leader

        # get program paths
        webapp = join(abspath(dirname(realpath(__file__))), "webapp.py")
        exe = "python"

        # The Flask app is started by Flask.run(...), don't enable Flask's debug
        # mode, the debugger from Flask will cause wired process model, where
        # the server never dies. Further read:
        #
        # - debug mode: https://flask.palletsprojects.com/quickstart/#debug-mode
        # - Flask.run(..): https://flask.palletsprojects.com/api/#flask.Flask.run
        os.environ["SEARX_DEBUG"] = "0"

        # set robot settings path
        os.environ["SEARX_SETTINGS_PATH"] = abspath(
            dirname(__file__) + "/settings_robot.yml"
        )

        # run the server
        self.server = subprocess.Popen(
            [exe, webapp], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        if hasattr(self.server.stdout, "read1"):
            # echo the server's first output chunk to help with debugging
            print(self.server.stdout.read1(1024).decode())

    def tearDown(self):
        """Kill the webapp subprocess and restore the environment."""
        os.kill(self.server.pid, 9)
        # remove previously set environment variable
        del os.environ["SEARX_SETTINGS_PATH"]
# SEARXROBOTLAYER = SearxRobotLayer()
def run_robot_tests(tests):
    """Run each robot test callable inside its own headless Firefox browser."""
    print("Running {0} tests".format(len(tests)))
    for robot_test in tests:
        with Browser("firefox", headless=True) as browser:
            robot_test(browser)
class SearxTestCase(TestCase):
    """Base test case for non-robot tests."""

    layer = SearxTestLayer

    def setattr4test(self, obj, attr, value):
        """Temporarily set ``obj.attr`` to ``value``.

        The previous value is restored automatically during test cleanup.
        """
        original_value = getattr(obj, attr)
        self.addCleanup(lambda: setattr(obj, attr, original_value))
        setattr(obj, attr, value)
if __name__ == "__main__":
    import sys

    # test cases
    from tests import robot

    base_dir = abspath(join(dirname(__file__), "../tests"))
    if sys.argv[1] == "robot":
        # Run every "test_*" callable from tests.robot inside the robot
        # layer; exit non-zero if anything failed.
        test_layer = SearxRobotLayer()
        errors = False
        try:
            test_layer.setUp()
            run_robot_tests(
                [getattr(robot, x) for x in dir(robot) if x.startswith("test_")]
            )
        except Exception:  # pylint: disable=broad-except
            errors = True
            print("Error occurred: {0}".format(traceback.format_exc()))
        test_layer.tearDown()
        sys.exit(1 if errors else 0)
|
ToDos | ToDosDocument | #
# ToDosDocument.py
# ToDos
#
# Converted by u.fiedler on 09.02.05.
#
# The original version was written in Objective-C by Malcolm Crawford
# at http://homepage.mac.com/mmalc/CocoaExamples/controllers.html
from AppKit import *
from Category import Category
from OverdueTransformer import OverdueTransformer
from PriorityToColourTransformer import PriorityToColourTransformer
class ToDosDocument(NSDocument):
    """NSDocument subclass holding a mutable array of to-do items.

    Persistence uses NSKeyedArchiver/NSKeyedUnarchiver over ``toDos``;
    the UI is loaded from the "ToDosDocument" nib.
    """

    nix = objc.IBOutlet()  # outlet not referenced in this file
    toDos = objc.ivar()  # NSMutableArray of to-do items

    def init(self):
        # Cocoa-style designated initializer: may return None on failure.
        self = super(ToDosDocument, self).init()
        if self is None:
            return None
        self.toDos = NSMutableArray.array()
        return self  # if this line is missing you will get the
        # simple message: "Can't create new document"

    def categories(self):
        """Return all known categories (delegated to the Category model)."""
        return Category.allCategories()

    def windowNibName(self):
        """Name of the nib file holding this document's window."""
        return "ToDosDocument"

    def dataRepresentationOfType_(self, aType):
        """Archive the to-do list for saving."""
        return NSKeyedArchiver.archivedDataWithRootObject_(self.toDos)

    def loadDataRepresentation_ofType_(self, data, aType):
        """Restore the to-do list from archived data."""
        self.toDos = NSKeyedUnarchiver.unarchiveObjectWithData_(data)
        return True
# Register the value transformers at import time (presumably before the nib
# bindings that reference them by name are resolved — confirm).
priorityTransformer = PriorityToColourTransformer.alloc().init()
NSValueTransformer.setValueTransformer_forName_(
    priorityTransformer, "PriorityToColourTransformer"
)
overdueTransformer = OverdueTransformer.alloc().init()
NSValueTransformer.setValueTransformer_forName_(
    overdueTransformer, "OverdueTransformer"
)
|
session | _xsmp | # Copyright 2018 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import ctypes
import enum
from gi.repository import GLib, GObject
# Load the X11 Session Management library; turn a missing library into an
# ImportError so importing this module fails cleanly on unsupported systems.
try:
    h = ctypes.cdll.LoadLibrary("libSM.so.6")
except OSError as e:
    raise ImportError(e)
# Using ctypes.POINTER as a decorator rebinds the name: SmcConn becomes an
# opaque pointer-to-struct type, mirroring libSM's ``SmcConn`` handle.
@ctypes.POINTER
class SmcConn(ctypes.Structure):
    pass


SmPointer = ctypes.c_void_p
Bool = ctypes.c_int

# Callback function prototypes (return type first, then argument types).
SmcSaveYourselfProc = ctypes.CFUNCTYPE(
    None, SmcConn, SmPointer, ctypes.c_int, Bool, ctypes.c_int, Bool
)
SmcDieProc = ctypes.CFUNCTYPE(None, SmcConn, SmPointer)
SmcSaveCompleteProc = ctypes.CFUNCTYPE(None, SmcConn, SmPointer)
SmcShutdownCancelledProc = ctypes.CFUNCTYPE(None, SmcConn, SmPointer)
class save_yourself(ctypes.Structure):
_fields_ = [
("callback", SmcSaveYourselfProc),
("client_data", SmPointer),
]
class die(ctypes.Structure):
_fields_ = [
("callback", SmcDieProc),
("client_data", SmPointer),
]
class save_complete(ctypes.Structure):
_fields_ = [
("callback", SmcSaveCompleteProc),
("client_data", SmPointer),
]
class shutdown_cancelled(ctypes.Structure):
_fields_ = [
("callback", SmcShutdownCancelledProc),
("client_data", SmPointer),
]
class SmcCallbacks(ctypes.Structure):
    # Aggregate callback table passed to SmcOpenConnection; member order
    # must match the C SmcCallbacks struct.
    _fields_ = [
        ("save_yourself", save_yourself),
        ("die", die),
        ("save_complete", save_complete),
        ("shutdown_cancelled", shutdown_cancelled),
    ]
# XSMP protocol version and the callback-selection bit masks.
SmProtoMajor = 1
SmProtoMinor = 0
SmcSaveYourselfProcMask = 1 << 0
SmcDieProcMask = 1 << 1
SmcSaveCompleteProcMask = 1 << 2
SmcShutdownCancelledProcMask = 1 << 3
# Return codes of SmcCloseConnection.
SmcCloseStatus = ctypes.c_int
SmcClosedNow = 0
SmcClosedASAP = 1
SmcConnectionInUse = 2
# argtypes/restype prototypes for the libSM entry points used below.
SmcOpenConnection = h.SmcOpenConnection
SmcOpenConnection.argtypes = [
    ctypes.c_char_p,
    SmPointer,
    ctypes.c_int,
    ctypes.c_int,
    ctypes.c_ulong,
    ctypes.POINTER(SmcCallbacks),
    ctypes.c_char_p,
    ctypes.POINTER(ctypes.c_char_p),
    ctypes.c_int,
    ctypes.c_char_p,
]
SmcOpenConnection.restype = SmcConn
SmcCloseConnection = h.SmcCloseConnection
SmcCloseConnection.argtypes = [SmcConn, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p)]
SmcCloseConnection.restype = SmcCloseStatus
SmcSaveYourselfDone = h.SmcSaveYourselfDone
SmcSaveYourselfDone.argtypes = [SmcConn, Bool]
SmcSaveYourselfDone.restype = None
@ctypes.POINTER
class IceConn(ctypes.Structure):
    # Opaque handle: as with SmcConn above, the decorator turns the name
    # IceConn into a pointer type to this empty structure.
    pass
# libICE typedefs and connection-watch prototypes.
IcePointer = ctypes.c_void_p
IceWatchProc = ctypes.CFUNCTYPE(
    None, IceConn, IcePointer, Bool, ctypes.POINTER(IcePointer)
)
Status = ctypes.c_int
IceAddConnectionWatch = h.IceAddConnectionWatch
IceAddConnectionWatch.argtypes = [IceWatchProc, IcePointer]
IceAddConnectionWatch.restype = Status
IceRemoveConnectionWatch = h.IceRemoveConnectionWatch
IceRemoveConnectionWatch.argtypes = [IceWatchProc, IcePointer]
IceRemoveConnectionWatch.restype = None
IceConnectionNumber = h.IceConnectionNumber
IceConnectionNumber.argtypes = [IceConn]
IceConnectionNumber.restype = ctypes.c_int
# Status codes returned by IceProcessMessages.
IceProcessMessagesStatus = ctypes.c_int
IceProcessMessagesSuccess = 0
IceProcessMessagesIOError = 1
IceProcessMessagesConnectionClosed = 2
@ctypes.POINTER
class FIXMEPtr(ctypes.Structure):
    # Placeholder pointer type for IceProcessMessages' two out-parameters,
    # which this module always passes as NULL (None).
    pass
IceProcessMessages = h.IceProcessMessages
IceProcessMessages.argtypes = [IceConn, FIXMEPtr, FIXMEPtr]
IceProcessMessages.restype = IceProcessMessagesStatus
IceSetShutdownNegotiation = h.IceSetShutdownNegotiation
IceSetShutdownNegotiation.argtypes = [IceConn, Bool]
IceSetShutdownNegotiation.restype = None
# Close status codes are not inspected by this module.
IceCloseStatus = ctypes.c_int
IceCloseConnection = h.IceCloseConnection
IceCloseConnection.argtypes = [IceConn]
IceCloseConnection.restype = IceCloseStatus
# Save scope sent with a save-yourself request; the numeric values come
# over the wire from the session manager and must stay 0/1/2.
SaveType = enum.IntEnum("SaveType", {"GLOBAL": 0, "LOCAL": 1, "BOTH": 2})
# Allowed user-interaction level sent with a save-yourself request;
# numeric values are protocol-defined and must stay 0/1/2.
InteractStyle = enum.IntEnum("InteractStyle", {"NONE": 0, "ERRORS": 1, "ANY": 2})
class XSMPError(Exception):
    """Raised for failures in the XSMP/ICE session-management layer."""
class XSMPSource:
    """Dispatches SM messages in the glib mainloop"""

    def __init__(self):
        # GLib source id for the poll on the ICE connection fd, if any.
        self._watch_id = None
        # Reference to the registered IceWatchProc; ctypes callbacks must be
        # kept alive for as long as libICE may invoke them.
        self._watch_proc = None

    def open(self):
        """Register an ICE connection watch so new connections get polled.

        Raises XSMPError if already open or if libICE refuses the watch.
        """
        if self._watch_proc is not None:
            raise XSMPError("already open")

        @IceWatchProc
        def watch_proc(conn, client_data, opening, watch_data):
            # Invoked by libICE whenever a connection is opened or closed.
            if opening:
                # Poll the connection's fd in the GLib main loop.
                fd = IceConnectionNumber(conn)
                channel = GLib.IOChannel.unix_new(fd)
                self._watch_id = GLib.io_add_watch(
                    channel,
                    GLib.PRIORITY_DEFAULT,
                    (GLib.IOCondition.ERR | GLib.IOCondition.HUP | GLib.IOCondition.IN),
                    self._process_func,
                    conn,
                )
            else:
                if self._watch_id is not None:
                    GObject.source_remove(self._watch_id)
                    self._watch_id = None

        self._watch_proc = watch_proc
        status = IceAddConnectionWatch(watch_proc, None)
        if status == 0:
            # zero status means libICE could not register the watch
            raise XSMPError("IceAddConnectionWatch failed with %d" % status)

    def close(self):
        """Unregister the watch and remove any active GLib source. Idempotent."""
        if self._watch_proc is not None:
            IceRemoveConnectionWatch(self._watch_proc, None)
            self._watch_proc = None
        if self._watch_id is not None:
            GObject.source_remove(self._watch_id)
            self._watch_id = None

    def _process_func(self, channel, condition, conn):
        # GLib IO callback: pump pending ICE messages for this connection.
        status = IceProcessMessages(conn, None, None)
        if status != IceProcessMessagesSuccess:
            if status != IceProcessMessagesConnectionClosed:
                IceCloseConnection(conn)
            self._watch_id = None
            # returning False removes this source from the main loop
            return False
        return True
class XSMPClient(GObject.Object):
    """GObject wrapper around an XSMP (X session management) client.

    Opens a connection to the session manager via libSM and re-emits its
    four protocol callbacks as GObject signals.
    """

    __gsignals__ = {
        # args: (SaveType, bool shutdown, InteractStyle, bool fast)
        "save-yourself": (
            GObject.SignalFlags.RUN_LAST,
            None,
            (object, object, object, object),
        ),
        "die": (GObject.SignalFlags.RUN_LAST, None, tuple()),
        "save-complete": (GObject.SignalFlags.RUN_LAST, None, tuple()),
        "shutdown-cancelled": (GObject.SignalFlags.RUN_LAST, None, tuple()),
    }

    def __init__(self):
        super().__init__()
        self._source = None
        # The callback table is handed to libSM by reference, so it (and the
        # ctypes function objects stored in it) must stay referenced here.
        self._callbacks = SmcCallbacks()
        self._conn = None
        self._id = None

        def wrap_cb(func_type, cb):
            # Wrap a bound method in a ctypes callback of the given type.
            # NOTE(review): the C-side conn/client_data arguments are dropped
            # and cb receives (self, func_type) in their place; the _on_*
            # handlers below ignore those two parameters, so this is harmless.
            def c_callback(*args):
                return cb(self, func_type, *args[2:])

            return func_type(c_callback)

        self._callbacks.save_yourself.callback = wrap_cb(
            SmcSaveYourselfProc, self._on_save_yourself
        )
        self._callbacks.die.callback = wrap_cb(SmcDieProc, self._on_die)
        self._callbacks.save_complete.callback = wrap_cb(
            SmcSaveCompleteProc, self._on_save_complete
        )
        self._callbacks.shutdown_cancelled.callback = wrap_cb(
            SmcShutdownCancelledProc, self._on_shutdown_cancelled
        )

    def _on_save_yourself(
        self, conn, client_data, save_type, shutdown, interact_style, fast
    ):
        # Convert the raw C ints into the friendlier enum/bool types.
        self.emit(
            "save-yourself",
            SaveType(save_type),
            bool(shutdown),
            InteractStyle(interact_style),
            bool(fast),
        )

    def _on_die(self, conn, client_data):
        self.emit("die")

    def _on_save_complete(self, conn, client_data):
        self.emit("save-complete")

    def _on_shutdown_cancelled(self, conn, client_data):
        self.emit("shutdown-cancelled")

    @property
    def client_id(self):
        """The client id assigned by the session manager.

        Raises XSMPError if the connection is not open.
        """
        if self._conn is None:
            raise XSMPError("connection closed")
        return self._id

    def open(self):
        """Connect to the session manager.

        Raises XSMPError if already open or if SmcOpenConnection fails.
        """
        if self._conn is not None:
            raise XSMPError("connection already open")

        self._source = XSMPSource()
        self._source.open()

        error_string = ctypes.create_string_buffer(250)
        id_ = ctypes.c_char_p()
        self._conn = SmcOpenConnection(
            None,
            None,
            SmProtoMajor,
            SmProtoMinor,
            (
                SmcDieProcMask
                | SmcSaveCompleteProcMask
                | SmcSaveYourselfProcMask
                | SmcShutdownCancelledProcMask
            ),
            ctypes.byref(self._callbacks),
            None,
            ctypes.byref(id_),
            len(error_string),
            error_string,
        )

        # On failure SmcOpenConnection returns a NULL pointer; a NULL ctypes
        # pointer is an object (only falsy), so normalise it to None and undo
        # the partial setup.  (This collapses the previous redundant
        # double check-and-assign into a single failure path.)
        if not self._conn:
            self._conn = None
            self._source.close()
            self._source = None
            raise XSMPError("open failed: %r" % error_string.value.decode("utf-8"))

        # FIXME: id_ should be freed with free()
        self._id = id_.value.decode("utf-8")

    def save_yourself_done(self, success):
        """Acknowledge a save-yourself request; success is a bool.

        Raises XSMPError if the connection is not open.
        """
        if self._conn is None:
            raise XSMPError("connection closed")
        SmcSaveYourselfDone(self._conn, success)

    def close(self):
        """Close the SM connection and tear down the ICE watch. Idempotent."""
        if self._conn is not None:
            SmcCloseConnection(self._conn, 0, None)
            self._conn = None
        if self._source is not None:
            self._source.close()
            self._source = None

    def __del__(self):
        self.close()
        # Drop the callback table only after closing: libSM may call into it
        # until the connection is gone.
        self._callbacks = None
|
snippet | actions | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Functions to access the built-in and user defined templates.
"""
from PyQt5.QtWidgets import QAction
from . import snippets
def action(name, parent=None, collection=None):
    """Create a QAction carrying the title and icon of the snippet `name`.

    Returns None if no snippet with that name exists.  When a shortcut
    collection is given, its shortcuts for the snippet are applied.
    """
    title = snippets.title(name)
    if not title:
        return None
    ac = QAction(parent)
    ac.setObjectName(name)
    # double literal ampersands so they are not treated as mnemonics
    ac.setText(title.replace("&", "&&"))
    ico = snippets.icon(name)
    if ico:
        ac.setIcon(ico)
    if collection:
        keys = collection.shortcuts(name)
        if keys:
            ac.setShortcuts(keys)
    return ac
|
versions | 018_05a0778051ca_adjust_licenses | # encoding: utf-8
"""018 Adjust licenses
Revision ID: 05a0778051ca
Revises: 1250b2ff3e36
Create Date: 2018-09-04 18:48:54.288030
"""
import sqlalchemy as sa
from alembic import op
from ckan.migration import skip_based_on_legacy_engine_version
# revision identifiers, used by Alembic.
revision = "05a0778051ca"
down_revision = "1250b2ff3e36"
branch_labels = None
depends_on = None
# Both the live table and its revision shadow table carry license_id.
tables = ["package", "package_revision"]
def upgrade():
    """Replace the license_id FK column with a free-form text column.

    Dropping and re-adding the column discards any existing license values.
    """
    if skip_based_on_legacy_engine_version(op, __name__):
        return
    for table in tables:
        op.drop_column(table, "license_id")
        op.add_column(table, sa.Column("license_id", sa.UnicodeText))
    op.drop_table("license")
def downgrade():
    """Restore the license table and the integer license_id FK columns."""
    op.create_table(
        "license",
        sa.Column("id", sa.Integer, primary_key=True, nullable=False),
        sa.Column("name", sa.Unicode(100)),
        sa.Column("state", sa.UnicodeText),
    )
    for table in tables:
        op.drop_column(table, "license_id")
        op.add_column(
            table, sa.Column("license_id", sa.Integer, sa.ForeignKey("license.id"))
        )
|
PyObjCTest | test_nslayoutconstraint | from AppKit import *
from PyObjCTools.TestSupport import *
class TestNSLayoutContraintManual(TestCase):
    """Manual (non-generated) checks for the NSLayoutConstraint bindings."""

    def testNSDictionaryOfVariableBindings(self):
        # The helper captures local variables by name from the caller's frame.
        var1 = "foo"
        var2 = "bar"
        self.assertEqual(
            NSDictionaryOfVariableBindings("var1", "var2"),
            {"var1": "foo", "var2": "bar"},
        )
        # unknown local names must raise KeyError
        self.assertRaises(KeyError, NSDictionaryOfVariableBindings, "var1", "var3")

    @min_os_level("10.7")
    def testConstants10_7(self):
        # NOTE(review): "NoInstrinsic" matches the (misspelled) constant name
        # exposed by the framework -- do not "fix" the spelling here.
        self.assertIsInstance(NSViewNoInstrinsicMetric, float)
        # NSLayoutRelation values
        self.assertEqual(NSLayoutRelationLessThanOrEqual, -1)
        self.assertEqual(NSLayoutRelationEqual, 0)
        self.assertEqual(NSLayoutRelationGreaterThanOrEqual, 1)
        # NSLayoutAttribute values
        self.assertEqual(NSLayoutAttributeLeft, 1)
        self.assertEqual(NSLayoutAttributeRight, 2)
        self.assertEqual(NSLayoutAttributeTop, 3)
        self.assertEqual(NSLayoutAttributeBottom, 4)
        self.assertEqual(NSLayoutAttributeLeading, 5)
        self.assertEqual(NSLayoutAttributeTrailing, 6)
        self.assertEqual(NSLayoutAttributeWidth, 7)
        self.assertEqual(NSLayoutAttributeHeight, 8)
        self.assertEqual(NSLayoutAttributeCenterX, 9)
        self.assertEqual(NSLayoutAttributeCenterY, 10)
        self.assertEqual(NSLayoutAttributeBaseline, 11)
        self.assertEqual(NSLayoutAttributeNotAnAttribute, 0)
        # format-option bit masks are derived from the attribute values
        self.assertEqual(NSLayoutFormatAlignAllLeft, (1 << NSLayoutAttributeLeft))
        self.assertEqual(NSLayoutFormatAlignAllRight, (1 << NSLayoutAttributeRight))
        self.assertEqual(NSLayoutFormatAlignAllTop, (1 << NSLayoutAttributeTop))
        self.assertEqual(NSLayoutFormatAlignAllBottom, (1 << NSLayoutAttributeBottom))
        self.assertEqual(NSLayoutFormatAlignAllLeading, (1 << NSLayoutAttributeLeading))
        self.assertEqual(
            NSLayoutFormatAlignAllTrailing, (1 << NSLayoutAttributeTrailing)
        )
        self.assertEqual(NSLayoutFormatAlignAllCenterX, (1 << NSLayoutAttributeCenterX))
        self.assertEqual(NSLayoutFormatAlignAllCenterY, (1 << NSLayoutAttributeCenterY))
        self.assertEqual(
            NSLayoutFormatAlignAllBaseline, (1 << NSLayoutAttributeBaseline)
        )
        self.assertEqual(NSLayoutFormatAlignmentMask, 0xFFFF)
        # layout direction options live in bits 16-17
        self.assertEqual(NSLayoutFormatDirectionLeadingToTrailing, 0 << 16)
        self.assertEqual(NSLayoutFormatDirectionLeftToRight, 1 << 16)
        self.assertEqual(NSLayoutFormatDirectionRightToLeft, 2 << 16)
        self.assertEqual(NSLayoutFormatDirectionMask, 0x3 << 16)
        self.assertEqual(NSLayoutConstraintOrientationHorizontal, 0)
        self.assertEqual(NSLayoutConstraintOrientationVertical, 1)
        # standard layout priorities
        self.assertEqual(NSLayoutPriorityRequired, 1000)
        self.assertEqual(NSLayoutPriorityDefaultHigh, 750)
        self.assertEqual(NSLayoutPriorityDragThatCanResizeWindow, 510)
        self.assertEqual(NSLayoutPriorityWindowSizeStayPut, 500)
        self.assertEqual(NSLayoutPriorityDragThatCannotResizeWindow, 490)
        self.assertEqual(NSLayoutPriorityDefaultLow, 250)
        self.assertEqual(NSLayoutPriorityFittingSizeCompression, 50)

    @min_os_level("10.7")
    def testRecords10_7(self):
        # a default-constructed NSEdgeInsets is all zeros
        v = NSEdgeInsets()
        self.assertEqual(v.top, 0.0)
        self.assertEqual(v.left, 0.0)
        self.assertEqual(v.bottom, 0.0)
        self.assertEqual(v.right, 0.0)
        # NOTE(review): `objc` is not imported explicitly in this file;
        # presumably `from AppKit import *` re-exports it -- confirm.
        self.assertEqual(
            NSEdgeInsets.__typestr__,
            b"{_NSEdgeInsets="
            + objc._C_CGFloat
            + objc._C_CGFloat
            + objc._C_CGFloat
            + objc._C_CGFloat
            + b"}",
        )

    @min_os_level("10.7")
    def testFunctions10_7(self):
        # argument order is (top, left, bottom, right)
        v = NSEdgeInsetsMake(1, 2, 3, 4)
        self.assertIsInstance(v, NSEdgeInsets)
        self.assertEqual(v.top, 1.0)
        self.assertEqual(v.left, 2.0)
        self.assertEqual(v.bottom, 3.0)
        self.assertEqual(v.right, 4.0)

    @min_os_level("10.7")
    def testMethods10_7(self):
        # verify BOOL result/argument metadata on the bridged methods
        self.assertResultIsBOOL(NSLayoutConstraint.shouldBeArchived)
        self.assertArgIsBOOL(NSLayoutConstraint.setShouldBeArchived_, 0)
        self.assertResultIsBOOL(NSView.needsUpdateConstraints)
        self.assertArgIsBOOL(NSView.setNeedsUpdateConstraints_, 0)
        self.assertResultIsBOOL(NSView.needsLayout)
        self.assertArgIsBOOL(NSView.setNeedsLayout_, 0)
        self.assertResultIsBOOL(NSView.translatesAutoresizingMaskIntoConstraints)
        self.assertArgIsBOOL(NSView.setTranslatesAutoresizingMaskIntoConstraints_, 0)
        self.assertResultIsBOOL(NSView.requiresConstraintBasedLayout)
        self.assertResultIsBOOL(NSView.hasAmbiguousLayout)
if __name__ == "__main__":
    # main() comes from PyObjCTools.TestSupport (star import above)
    main()
|
Gui | FreeCADGuiInit | # ***************************************************************************
# * Copyright (c) 2002,2003 Jürgen Riegel <juergen.riegel@web.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************/
# FreeCAD gui init module
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
from enum import IntEnum
# imports the one and only
import FreeCAD
import FreeCADGui
# shortcuts
Gui = FreeCADGui
# legacy aliases kept so old code keeps working
Gui.listCommands = Gui.Command.listAll
Gui.isCommandActive = lambda cmd: Gui.Command.get(cmd).isActive()
# The values must match with that of the C++ enum class ResolveMode
# Built via the functional enum API; the numeric values are fixed because
# they must match the C++ enum class ResolveMode.
ResolveMode = IntEnum(
    "ResolveMode",
    {
        "NoResolve": 0,
        "OldStyleElement": 1,
        "NewStyleElement": 2,
        "FollowLink": 3,
    },
)
# publish on the Selection module for use from scripts and the C++ side
Gui.Selection.ResolveMode = ResolveMode
# The values must match with that of the C++ enum class SelectionStyle
# Built via the functional enum API; the numeric values are fixed because
# they must match the C++ enum class SelectionStyle.
SelectionStyle = IntEnum(
    "SelectionStyle",
    {"NormalSelection": 0, "GreedySelection": 1},
)
Gui.Selection.SelectionStyle = SelectionStyle
# Important definitions
class Workbench:
    """The workbench base class."""

    # UI metadata shown in the workbench selector; subclasses override these.
    MenuText = ""
    ToolTip = ""
    Icon = None

    def Initialize(self):
        """Initializes this workbench."""
        # App is the FreeCAD application object (presumably injected as a
        # builtin alias by the host -- it is not imported in this file).
        App.Console.PrintWarning(
            str(self) + ": Workbench.Initialize() not implemented in subclass!"
        )

    def ContextMenu(self, recipient):
        """Hook for populating the context menu; default does nothing."""
        pass

    # The methods below delegate to the C++ workbench object that the
    # framework stores in self.__Workbench__ once the workbench is active.
    def appendToolbar(self, name, cmds):
        self.__Workbench__.appendToolbar(name, cmds)

    def removeToolbar(self, name):
        self.__Workbench__.removeToolbar(name)

    def listToolbars(self):
        return self.__Workbench__.listToolbars()

    def getToolbarItems(self):
        return self.__Workbench__.getToolbarItems()

    def appendCommandbar(self, name, cmds):
        self.__Workbench__.appendCommandbar(name, cmds)

    def removeCommandbar(self, name):
        self.__Workbench__.removeCommandbar(name)

    def listCommandbars(self):
        return self.__Workbench__.listCommandbars()

    def appendMenu(self, name, cmds):
        self.__Workbench__.appendMenu(name, cmds)

    def removeMenu(self, name):
        self.__Workbench__.removeMenu(name)

    def listMenus(self):
        return self.__Workbench__.listMenus()

    def appendContextMenu(self, name, cmds):
        self.__Workbench__.appendContextMenu(name, cmds)

    def removeContextMenu(self, name):
        self.__Workbench__.removeContextMenu(name)

    def reloadActive(self):
        self.__Workbench__.reloadActive()

    def name(self):
        return self.__Workbench__.name()

    def GetClassName(self):
        """Return the name of the associated C++ class."""
        # as default use this to simplify writing workbenches in Python
        return "Gui::PythonWorkbench"
class StandardWorkbench(Workbench):
    """A workbench defines the tool bars, command bars, menus,
    context menu and dockable windows of the main window.
    """

    def Initialize(self):
        """Initialize this workbench."""
        # load the module
        Log("Init: Loading FreeCAD GUI\n")

    def GetClassName(self):
        """Return the name of the associated C++ class."""
        return "Gui::StdWorkbench"
class NoneWorkbench(Workbench):
    """An empty workbench, used as the fallback until a real one is chosen."""

    MenuText = "<none>"
    ToolTip = "The default empty workbench"

    def Initialize(self):
        """Initialize this workbench."""
        # load the module
        Log("Init: Loading FreeCAD GUI\n")

    def GetClassName(self):
        """Return the name of the associated C++ class."""
        return "Gui::NoneWorkbench"
def InitApplications():
    """Find and GUI-initialize all installed modules and addons.

    Two mechanisms are supported: classic modules providing an InitGui.py
    (optionally described by a package.xml metadata file), and namespace
    packages under the ``freecad`` package providing an ``init_gui``
    submodule.
    """
    # NOTE: these imports stay function-local on purpose -- InitGui.py files
    # are exec'd in this scope and may rely on the names being visible.
    import io as cStringIO
    import os
    import sys
    import traceback

    # Searching modules dirs +++++++++++++++++++++++++++++++++++++++++++++++++++
    # (additional module paths are already cached)
    ModDirs = FreeCAD.__ModDirs__
    Log("Init: Searching modules...\n")

    def RunInitGuiPy(Dir) -> bool:
        """Execute Dir/InitGui.py if present; return True iff it was found."""
        InstallFile = os.path.join(Dir, "InitGui.py")
        if os.path.exists(InstallFile):
            try:
                with open(InstallFile, "rt", encoding="utf-8") as f:
                    exec(compile(f.read(), InstallFile, "exec"))
            except Exception as inst:
                Log("Init: Initializing " + Dir + "... failed\n")
                Log("-" * 100 + "\n")
                Log(traceback.format_exc())
                Log("-" * 100 + "\n")
                Err(
                    'During initialization the error "'
                    + str(inst)
                    + '" occurred in '
                    + InstallFile
                    + "\n"
                )
                Err("Please look into the log file for further information\n")
            else:
                Log("Init: Initializing " + Dir + "... done\n")
            return True
        else:
            Log("Init: Initializing " + Dir + "(InitGui.py not found)... ignore\n")
            return False

    def processMetadataFile(Dir, MetadataFile):
        """Initialize the workbenches declared in a package.xml file."""
        meta = FreeCAD.Metadata(MetadataFile)
        if not meta.supportsCurrentFreeCAD():
            return None
        content = meta.Content
        if "workbench" in content:
            FreeCAD.Gui.addIconPath(Dir)
            workbenches = content["workbench"]
            for workbench_metadata in workbenches:
                if not workbench_metadata.supportsCurrentFreeCAD():
                    return None
                subdirectory = (
                    workbench_metadata.Name
                    if not workbench_metadata.Subdirectory
                    else workbench_metadata.Subdirectory
                )
                subdirectory = subdirectory.replace("/", os.path.sep)
                subdirectory = os.path.join(Dir, subdirectory)
                ran_init = RunInitGuiPy(subdirectory)
                if ran_init:
                    # Try to generate a new icon from the metadata-specified information
                    classname = workbench_metadata.Classname
                    if classname:
                        try:
                            wb_handle = FreeCAD.Gui.getWorkbench(classname)
                        except Exception:
                            Log(
                                f"Failed to get handle to {classname} -- no icon"
                                " can be generated,\n check classname in package.xml\n"
                            )
                        else:
                            # BUGFIX: previously passed the *builtin* `dir`
                            # here instead of the addon directory being
                            # processed.
                            GeneratePackageIcon(
                                Dir, subdirectory, workbench_metadata, wb_handle
                            )

    def tryProcessMetadataFile(Dir, MetadataFile):
        # Never let one broken package.xml abort the whole startup scan.
        try:
            processMetadataFile(Dir, MetadataFile)
        except Exception as exc:
            Err(str(exc))

    for Dir in ModDirs:
        # skip placeholder entries (was a bitwise `&` chain on booleans)
        if Dir not in ("", "CVS", "__init__.py"):
            # A stopfile lets users disable an addon without removing it.
            stopFile = os.path.join(Dir, "ADDON_DISABLED")
            if os.path.exists(stopFile):
                Msg(
                    f'NOTICE: Addon "{Dir}" disabled by presence of ADDON_DISABLED stopfile\n'
                )
                continue
            MetadataFile = os.path.join(Dir, "package.xml")
            if os.path.exists(MetadataFile):
                tryProcessMetadataFile(Dir, MetadataFile)
            else:
                RunInitGuiPy(Dir)

    Log("All modules with GUIs using InitGui.py are now initialized\n")

    try:
        import importlib
        import pkgutil

        import freecad

        freecad.gui = FreeCADGui
        for _, freecad_module_name, freecad_module_ispkg in pkgutil.iter_modules(
            freecad.__path__, "freecad."
        ):
            # Check for a stopfile
            stopFile = os.path.join(
                FreeCAD.getUserAppDataDir(),
                "Mod",
                freecad_module_name[8:],  # strip the "freecad." prefix
                "ADDON_DISABLED",
            )
            if os.path.exists(stopFile):
                continue

            # Make sure that package.xml (if present) does not exclude this version of FreeCAD
            MetadataFile = os.path.join(
                FreeCAD.getUserAppDataDir(),
                "Mod",
                freecad_module_name[8:],
                "package.xml",
            )
            if os.path.exists(MetadataFile):
                meta = FreeCAD.Metadata(MetadataFile)
                if not meta.supportsCurrentFreeCAD():
                    continue

            if freecad_module_ispkg:
                Log("Init: Initializing " + freecad_module_name + "\n")
                try:
                    freecad_module = importlib.import_module(freecad_module_name)
                    if any(
                        module_name == "init_gui"
                        for _, module_name, ispkg in pkgutil.iter_modules(
                            freecad_module.__path__
                        )
                    ):
                        importlib.import_module(freecad_module_name + ".init_gui")
                        Log("Init: Initializing " + freecad_module_name + "... done\n")
                    else:
                        Log(
                            "Init: No init_gui module found in "
                            + freecad_module_name
                            + ", skipping\n"
                        )
                except Exception as inst:
                    Err(
                        'During initialization the error "'
                        + str(inst)
                        + '" occurred in '
                        + freecad_module_name
                        + "\n"
                    )
                    Err("-" * 80 + "\n")
                    Err(traceback.format_exc())
                    Err("-" * 80 + "\n")
                    Log(
                        "Init: Initializing "
                        + freecad_module_name
                        + "... failed\n"
                    )
                    Log("-" * 80 + "\n")
                    Log(traceback.format_exc())
                    Log("-" * 80 + "\n")
    except ImportError as inst:
        Err('During initialization the error "' + str(inst) + '" occurred\n')

    Log("All modules with GUIs initialized using pkgutil are now initialized\n")
def GeneratePackageIcon(
    dir: str,
    subdirectory: str,
    workbench_metadata: FreeCAD.Metadata,
    wb_handle: Workbench,
) -> None:
    """Set the workbench's Icon from its package.xml metadata.

    `dir` is unused but kept for interface compatibility with callers.
    The icon path from the metadata always wins over a class-specified one.
    """
    # BUGFIX: `os` is not imported at module level in this file, so resolve
    # it locally to avoid a NameError when this function is called.
    import os

    relative_filename = workbench_metadata.Icon
    if not relative_filename:
        # Although a required element, this content item does not have an icon. Just bail out
        return
    absolute_filename = os.path.join(subdirectory, relative_filename)
    if hasattr(wb_handle, "Icon") and wb_handle.Icon:
        # implicit string concatenation replaces a fragile backslash-continued
        # f-string that embedded layout whitespace in the message
        Log(
            f"Init: Packaged workbench {workbench_metadata.Name} specified icon"
            f" in class {workbench_metadata.Classname}"
        )
        # was an f-string with no placeholders (lint F541)
        Log(" ... replacing with icon from package.xml data.\n")
    wb_handle.__dict__["Icon"] = absolute_filename
Log("Init: Running FreeCADGuiInit.py start script...\n")

# init the gui

# signal that the gui is up
App.GuiUp = 1
App.Gui = FreeCADGui
FreeCADGui.Workbench = Workbench

# register the fallback workbench so the GUI always has one to activate
Gui.addWorkbench(NoneWorkbench())

# Monkey patching pivy.coin.SoGroup.removeAllChildren to work around a bug
# https://bitbucket.org/Coin3D/coin/pull-requests/119/fix-sochildlist-auditing/diff
def _SoGroup_init(self, *args):
    """Replacement SoGroup.__init__ installing a safe removeAllChildren."""
    import types

    # chain to the original constructor saved below before patching
    _SoGroup_init_orig(self, *args)
    # bind the workaround implementation as a per-instance method
    self.removeAllChildren = types.MethodType(FreeCADGui.coinRemoveAllChildren, self)
# Install the SoGroup workaround; pivy is optional, so failures are ignored.
try:
    from pivy import coin

    _SoGroup_init_orig = coin.SoGroup.__init__
    coin.SoGroup.__init__ = _SoGroup_init
except Exception:
    pass

# init modules
InitApplications()

# set standard workbench (needed as fallback)
Gui.activateWorkbench("NoneWorkbench")

# Register .py, .FCScript and .FCMacro
FreeCAD.addImportType("Inventor V2.1 (*.iv)", "FreeCADGui")
FreeCAD.addImportType("VRML V2.0 (*.wrl *.vrml *.wrz *.wrl.gz)", "FreeCADGui")
FreeCAD.addImportType("Python (*.py *.FCMacro *.FCScript)", "FreeCADGui")
FreeCAD.addExportType("Inventor V2.1 (*.iv)", "FreeCADGui")
FreeCAD.addExportType("VRML V2.0 (*.wrl *.vrml *.wrz *.wrl.gz)", "FreeCADGui")
FreeCAD.addExportType("X3D Extensible 3D (*.x3d *.x3dz)", "FreeCADGui")
FreeCAD.addExportType("WebGL/X3D (*.xhtml)", "FreeCADGui")
# FreeCAD.addExportType("IDTF (for 3D PDF) (*.idtf)","FreeCADGui")
# FreeCAD.addExportType("3D View (*.svg)","FreeCADGui")
FreeCAD.addExportType("Portable Document Format (*.pdf)", "FreeCADGui")

# drop names that are only needed during startup
del InitApplications
del NoneWorkbench
del StandardWorkbench

Log("Init: Running FreeCADGuiInit.py start script... done\n")
|
utils | http_utils | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
from datetime import datetime
import pytz
# strptime/strftime patterns for the three HTTP date formats tried below.
DATE_RFC822 = "%a, %d %b %Y %H:%M:%S %Z"
DATE_RFC850 = "%A, %d-%b-%y %H:%M:%S %Z"
DATE_ANSI = "%a %b %d %H:%M:%S %Y"
def read_http_date(date_str):
    """Parse an HTTP date header in RFC 822, RFC 850 or ANSI C format.

    Returns a GMT-aware datetime, or None if the string matches no format.
    (Flattens the previous three-level nested try/except pyramid.)
    """
    for fmt in (DATE_RFC822, DATE_RFC850, DATE_ANSI):
        try:
            date = datetime.strptime(date_str, fmt)
        except ValueError:
            continue
        # GMT has no DST transitions, so tzinfo replace is safe here.
        return date.replace(tzinfo=pytz.timezone("GMT"))
    return None
def http_date_str(date):
    """Format an aware datetime as an RFC 822 HTTP date string in GMT."""
    return date.astimezone(pytz.timezone("GMT")).strftime(DATE_RFC822)
def get_requests_resp_json(resp):
    """Kludge so we can use `requests` versions below or above 1.x"""
    # old requests exposed .json as a property, new ones as a method
    if not callable(resp.json):
        return resp.json
    return resp.json()
|
plugins | htv | """
$description Vietnamese live TV channels owned by the People's Committee of Ho Chi Minh City.
$url htv.com.vn
$type live
$region Vietnam
"""
import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
from streamlink.utils.times import localnow
log = logging.getLogger(__name__)
@pluginmatcher(
    re.compile(
        r"https?://(?:www\.)?htv\.com\.vn/truc-tuyen(?:\?channel=(?P<channel>\w+)&?|$)",
    )
)
class HTV(Plugin):
    """Streamlink plugin for htv.com.vn live TV channels."""

    def get_channels(self):
        """Scrape the live page; return a dict of channel id -> channel code."""
        data = self.session.http.get(
            self.url,
            schema=validate.Schema(
                validate.parse_html(),
                validate.xml_xpath(".//*[contains(@class,'channel-list')]//a[@data-id][@data-code]"),
                [
                    validate.union_get("data-id", "data-code"),
                ],
            ),
        )
        return dict(data)

    def _get_streams(self):
        """Resolve the requested (or default) channel to an HLS stream."""
        channels = self.get_channels()
        if not channels:
            log.error("No channels found")
            return

        log.debug(f"channels={channels}")
        channel_id = self.match.group("channel")
        if channel_id is None:
            # no ?channel= query parameter: default to the first listed channel
            channel_id, channel_code = next(iter(channels.items()))
        elif channel_id in channels:
            channel_code = channels[channel_id]
        else:
            log.error(f"Unknown channel ID: {channel_id}")
            return
        log.info(f"Channel: {channel_code}")

        json = self.session.http.post(
            "https://www.htv.com.vn/HTVModule/Services/htvService.aspx",
            data={
                "method": "GetScheduleList",
                "channelid": channel_id,
                "template": "AjaxSchedules.xslt",
                "channelcode": channel_code,
                "date": localnow().strftime("%d-%m-%Y"),
            },
            schema=validate.Schema(
                validate.parse_json(),
                {
                    "success": bool,
                    # "chanelUrl" (sic) is the API's own key spelling
                    "chanelUrl": validate.url(),
                },
            ),
        )
        if not json["success"]:
            log.error("API error: success not true")
            return

        # The player page embeds the HLS URL in an inline script as `iosUrl`.
        hls_url = self.session.http.get(
            json["chanelUrl"],
            headers={"Referer": self.url},
            schema=validate.Schema(
                validate.parse_html(),
                validate.xml_xpath_string(".//script[contains(text(), 'playlist.m3u8')]/text()"),
                validate.none_or_all(
                    re.compile(r"""var\s+iosUrl\s*=\s*(?P<q>")(?P<url>.+?)(?P=q)"""),
                    validate.none_or_all(
                        validate.get("url"),
                        validate.url(),
                    ),
                ),
            ),
        )
        if hls_url:
            return HLSStream.parse_variant_playlist(
                self.session,
                hls_url,
                headers={"Referer": "https://hplus.com.vn/"},
            )


# entry point used by Streamlink's plugin loader
__plugin__ = HTV
|
utils | fields | # -*- coding: utf-8 -*-
"""
flaskbb.utils.fields
~~~~~~~~~~~~~~~~~~~~
Additional fields and widgets for wtforms.
The reCAPTCHA Field was taken from Flask-WTF and modified
to use our own settings system.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import logging
import urllib
from flask import current_app, json, request
from flaskbb.utils.helpers import to_bytes, to_unicode
from flaskbb.utils.settings import flaskbb_config
from markupsafe import Markup
from werkzeug.urls import url_encode
from wtforms import ValidationError
from wtforms.fields import Field
logger = logging.getLogger(__name__)
JSONEncoder = json.JSONEncoder

# Google reCAPTCHA endpoints and widget markup template.
RECAPTCHA_SCRIPT = "https://www.google.com/recaptcha/api.js"
RECAPTCHA_TEMPLATE = """
<script src='%s' async defer></script>
<div class="g-recaptcha" %s></div>
"""

RECAPTCHA_VERIFY_SERVER = "https://www.google.com/recaptcha/api/siteverify"
# error codes returned by the siteverify API, mapped to readable messages
RECAPTCHA_ERROR_CODES = {
    "missing-input-secret": "The secret parameter is missing.",
    "invalid-input-secret": "The secret parameter is invalid or malformed.",
    "missing-input-response": "The response parameter is missing.",
    "invalid-input-response": "The response parameter is invalid or malformed.",
}
class RecaptchaValidator(object):
    """Validates a ReCaptcha."""

    def __init__(self, message=None):
        # default to Google's "missing response" error text
        if message is None:
            message = RECAPTCHA_ERROR_CODES["missing-input-response"]
        self.message = message

    def __call__(self, form, field):
        """Validate the submitted reCAPTCHA response for `field`.

        Raises ValidationError when the response is missing or rejected.
        """
        # skip validation in tests or when the feature is disabled
        if current_app.testing or not flaskbb_config["RECAPTCHA_ENABLED"]:
            return True

        if request.json:
            response = request.json.get("g-recaptcha-response", "")
        else:
            response = request.form.get("g-recaptcha-response", "")
        remote_ip = request.remote_addr

        if not response:
            raise ValidationError(field.gettext(self.message))

        if not self._validate_recaptcha(response, remote_ip):
            field.recaptcha_error = "incorrect-captcha-sol"
            raise ValidationError(field.gettext(self.message))

    def _validate_recaptcha(self, response, remote_addr):
        """Performs the actual validation."""
        # BUGFIX: the module only does `import urllib`, which does not
        # guarantee that the `urllib.request` submodule is loaded; import it
        # explicitly so urlopen is always available.
        import urllib.request

        try:
            private_key = flaskbb_config["RECAPTCHA_PRIVATE_KEY"]
        except KeyError:
            raise RuntimeError("No RECAPTCHA_PRIVATE_KEY config set")

        data = url_encode(
            {"secret": private_key, "remoteip": remote_addr, "response": response}
        )

        http_response = urllib.request.urlopen(RECAPTCHA_VERIFY_SERVER, to_bytes(data))
        if http_response.code != 200:
            return False

        json_resp = json.loads(to_unicode(http_response.read()))
        if json_resp["success"]:
            return True

        # surface known API error codes as validation errors
        for error in json_resp.get("error-codes", []):
            if error in RECAPTCHA_ERROR_CODES:
                raise ValidationError(RECAPTCHA_ERROR_CODES[error])
        return False
class RecaptchaWidget(object):
    """Renders the Google reCAPTCHA markup for a form field."""

    def recaptcha_html(self, public_key):
        # A fully custom RECAPTCHA_HTML config value overrides the template.
        html = current_app.config.get("RECAPTCHA_HTML")
        if html:
            return Markup(html)
        params = current_app.config.get("RECAPTCHA_PARAMETERS")
        script = RECAPTCHA_SCRIPT
        if params:
            script += "?" + url_encode(params)
        # NOTE(review): when RECAPTCHA_DATA_ATTRS is set in the config this
        # mutates the config's dict in place (adds "sitekey") -- confirm
        # that is intended.
        attrs = current_app.config.get("RECAPTCHA_DATA_ATTRS", {})
        attrs["sitekey"] = public_key
        snippet = " ".join(['data-%s="%s"' % (k, attrs[k]) for k in attrs])
        return Markup(RECAPTCHA_TEMPLATE % (script, snippet))

    def __call__(self, field, error=None, **kwargs):
        """Returns the recaptcha input HTML."""
        # deliberately renders nothing (returns None) when disabled
        if not flaskbb_config["RECAPTCHA_ENABLED"]:
            return
        try:
            public_key = flaskbb_config["RECAPTCHA_PUBLIC_KEY"]
        except KeyError:
            raise RuntimeError("RECAPTCHA_PUBLIC_KEY config not set")
        return self.recaptcha_html(public_key)
class RecaptchaField(Field):
    """A wtforms field that renders and validates a Google reCAPTCHA."""

    widget = RecaptchaWidget()

    # error message if recaptcha validation fails
    recaptcha_error = None

    def __init__(self, label="", validators=None, **kwargs):
        # fall back to the default validator when none (or an empty
        # sequence) is supplied
        if not validators:
            validators = [RecaptchaValidator()]
        super(RecaptchaField, self).__init__(label, validators, **kwargs)
|
hogql | constants | from datetime import date, datetime
from typing import List, Literal, Optional, Tuple, TypeAlias
from uuid import UUID
from pydantic import BaseModel, ConfigDict
# Names of the data types a HogQL constant may carry.
ConstantDataType: TypeAlias = Literal[
    "int",
    "float",
    "str",
    "bool",
    "array",
    "tuple",
    "date",
    "datetime",
    "uuid",
    "unknown",
]
# Python runtime types accepted as scalar constant values.
ConstantSupportedPrimitive: TypeAlias = (
    int | float | str | bool | date | datetime | UUID | None
)
# Scalars plus homogeneous list/tuple containers of them.
ConstantSupportedData: TypeAlias = (
    ConstantSupportedPrimitive
    | List[ConstantSupportedPrimitive]
    | Tuple[ConstantSupportedPrimitive, ...]
)
# Keywords passed to ClickHouse without transformation
KEYWORDS = ["true", "false", "null"]
# Keywords you can't alias to
RESERVED_KEYWORDS = KEYWORDS + ["team_id"]
# Limit applied to SELECT statements without LIMIT clause when queried via the API
DEFAULT_RETURNED_ROWS = 100
# Max limit for all SELECT queries, and the default for CSV exports.
MAX_SELECT_RETURNED_ROWS = 10000
# Settings applied on top of all HogQL queries.
class HogQLSettings(BaseModel):
    """Settings applied on top of all HogQL queries."""

    # forbid unknown keys so arbitrary ClickHouse settings can't be injected
    model_config = ConfigDict(extra="forbid")

    # ClickHouse "readonly" setting; 2 presumably permits changing query-level
    # settings while still forbidding writes -- confirm against ClickHouse docs
    readonly: Optional[int] = 2
    # hard cap on query runtime, in seconds
    max_execution_time: Optional[int] = 60
    # enables ClickHouse's experimental Object column type
    allow_experimental_object_type: Optional[bool] = True
|
comicapi | comicinfoxml | """A class to encapsulate ComicRack's ComicInfo.xml data"""
# Copyright 2012-2014 Anthony Beville
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
import utils
from genericmetadata import GenericMetadata
# from datetime import datetime
# from pprint import pprint
# import zipfile
class ComicInfoXml:
    """Encapsulates ComicRack's ComicInfo.xml format.

    Converts between ComicInfo.xml documents (strings, ElementTrees or files)
    and GenericMetadata objects.
    """

    writer_synonyms = ["writer", "plotter", "scripter"]
    penciller_synonyms = ["artist", "penciller", "penciler", "breakdowns"]
    inker_synonyms = ["inker", "artist", "finishes"]
    colorist_synonyms = ["colorist", "colourist", "colorer", "colourer"]
    letterer_synonyms = ["letterer"]
    cover_synonyms = ["cover", "covers", "coverartist", "cover artist"]
    editor_synonyms = ["editor"]

    def getParseableCredits(self):
        """Return a flat list of every credit role name this class can map."""
        parsable_credits = []
        parsable_credits.extend(self.writer_synonyms)
        parsable_credits.extend(self.penciller_synonyms)
        parsable_credits.extend(self.inker_synonyms)
        parsable_credits.extend(self.colorist_synonyms)
        parsable_credits.extend(self.letterer_synonyms)
        parsable_credits.extend(self.cover_synonyms)
        parsable_credits.extend(self.editor_synonyms)
        return parsable_credits

    def metadataFromString(self, string):
        """Parse a ComicInfo.xml document string into GenericMetadata."""
        tree = ET.ElementTree(ET.fromstring(string))
        return self.convertXMLToMetadata(tree)

    def stringFromMetadata(self, metadata):
        """Serialize *metadata* to a ComicInfo.xml document string."""
        header = '<?xml version="1.0"?>\n'
        # BUG FIX: `self` was previously passed as the (unused) filename
        # argument of convertMetadataToXML
        tree = self.convertMetadataToXML(None, metadata)
        return header + ET.tostring(tree.getroot())

    def indent(self, elem, level=0):
        """Indent *elem* and its children in place for readable XML output."""
        i = "\n" + level * " "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + " "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            child = None
            for child in elem:
                self.indent(child, level + 1)
            # dedent the last child's tail so the closing tag lines up
            if not child.tail or not child.tail.strip():
                child.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    def convertMetadataToXML(self, filename, metadata):
        """Build an ElementTree for *metadata*.

        *filename* is unused; it is kept for interface compatibility.
        """
        md = metadata

        root = ET.Element("ComicInfo")
        root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
        root.attrib["xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema"

        def assign(cix_entry, md_entry):
            # only emit elements for fields that are actually set
            if md_entry is not None:
                ET.SubElement(root, cix_entry).text = "{0}".format(md_entry)

        assign("Title", md.title)
        assign("Series", md.series)
        assign("Number", md.issue)
        assign("Count", md.issueCount)
        assign("Volume", md.volume)
        assign("AlternateSeries", md.alternateSeries)
        assign("AlternateNumber", md.alternateNumber)
        assign("StoryArc", md.storyArc)
        assign("SeriesGroup", md.seriesGroup)
        assign("AlternateCount", md.alternateCount)
        assign("Summary", md.comments)
        assign("Notes", md.notes)
        assign("Year", md.year)
        assign("Month", md.month)
        assign("Day", md.day)

        # Map each credit onto the CIX role elements; the synonym sets are
        # built once here instead of once per credit. A credit whose role
        # matches several lists (e.g. "artist") lands in all of them, as
        # before.
        role_map = [
            ("Writer", frozenset(self.writer_synonyms)),
            ("Penciller", frozenset(self.penciller_synonyms)),
            ("Inker", frozenset(self.inker_synonyms)),
            ("Colorist", frozenset(self.colorist_synonyms)),
            ("Letterer", frozenset(self.letterer_synonyms)),
            ("CoverArtist", frozenset(self.cover_synonyms)),
            ("Editor", frozenset(self.editor_synonyms)),
        ]
        credits_by_tag = {tag: [] for tag, _ in role_map}
        for credit in metadata.credits:
            role = credit["role"].lower()
            person = credit["person"].replace(",", "")
            for tag, synonyms in role_map:
                if role in synonyms:
                    credits_by_tag[tag].append(person)

        # emit one comma-joined element per non-empty role, in CIX order
        for tag, _ in role_map:
            names = credits_by_tag[tag]
            if names:
                ET.SubElement(root, tag).text = utils.listToString(names)

        assign("Publisher", md.publisher)
        assign("Imprint", md.imprint)
        assign("Genre", md.genre)
        assign("Web", md.webLink)
        assign("PageCount", md.pageCount)
        assign("LanguageISO", md.language)
        assign("Format", md.format)
        assign("AgeRating", md.maturityRating)
        if md.blackAndWhite is not None and md.blackAndWhite:
            ET.SubElement(root, "BlackAndWhite").text = "Yes"
        assign("Manga", md.manga)
        assign("Characters", md.characters)
        assign("Teams", md.teams)
        assign("Locations", md.locations)
        assign("ScanInformation", md.scanInfo)

        # loop and add the page entries under a Pages node
        if len(md.pages) > 0:
            pages_node = ET.SubElement(root, "Pages")
            for page_dict in md.pages:
                page_node = ET.SubElement(pages_node, "Page")
                page_node.attrib = page_dict

        # self pretty-print, then wrap in an ElementTree instance
        self.indent(root)
        return ET.ElementTree(root)

    def convertXMLToMetadata(self, tree):
        """Convert an ElementTree into GenericMetadata.

        Returns None when the document root is not <ComicInfo>.
        """
        root = tree.getroot()
        if root.tag != "ComicInfo":
            # BUG FIX: this was `raise 1` (a TypeError at runtime) followed by
            # an unreachable `return None`; returning None was clearly the
            # original intent.
            return None

        md = GenericMetadata()

        def xlate(tag):
            # text of the first matching child element, or None when absent
            node = root.find(tag)
            return node.text if node is not None else None

        md.series = xlate("Series")
        md.title = xlate("Title")
        md.issue = xlate("Number")
        md.issueCount = xlate("Count")
        md.volume = xlate("Volume")
        md.alternateSeries = xlate("AlternateSeries")
        md.alternateNumber = xlate("AlternateNumber")
        md.alternateCount = xlate("AlternateCount")
        md.comments = xlate("Summary")
        md.notes = xlate("Notes")
        md.year = xlate("Year")
        md.month = xlate("Month")
        md.day = xlate("Day")
        md.publisher = xlate("Publisher")
        md.imprint = xlate("Imprint")
        md.genre = xlate("Genre")
        md.webLink = xlate("Web")
        md.language = xlate("LanguageISO")
        md.format = xlate("Format")
        md.manga = xlate("Manga")
        md.characters = xlate("Characters")
        md.teams = xlate("Teams")
        md.locations = xlate("Locations")
        md.pageCount = xlate("PageCount")
        md.scanInfo = xlate("ScanInformation")
        md.storyArc = xlate("StoryArc")
        md.seriesGroup = xlate("SeriesGroup")
        md.maturityRating = xlate("AgeRating")

        tmp = xlate("BlackAndWhite")
        md.blackAndWhite = tmp is not None and tmp.lower() in ["yes", "true", "1"]

        # credit elements hold comma-separated name lists
        credit_tags = ("Writer", "Penciller", "Inker", "Colorist", "Letterer", "Editor")
        for n in root:
            if n.text is None:
                continue
            if n.tag in credit_tags:
                for name in n.text.split(","):
                    md.addCredit(name.strip(), n.tag)
            elif n.tag == "CoverArtist":
                for name in n.text.split(","):
                    md.addCredit(name.strip(), "Cover")

        # parse page data now
        pages_node = root.find("Pages")
        if pages_node is not None:
            for page in pages_node:
                md.pages.append(page.attrib)

        md.isEmpty = False
        return md

    def writeToExternalFile(self, filename, metadata):
        """Write *metadata* as a ComicInfo.xml file at *filename* (UTF-8)."""
        # BUG FIX: `self` was previously passed as the (unused) filename
        # argument of convertMetadataToXML
        tree = self.convertMetadataToXML(None, metadata)
        tree.write(filename, encoding="utf-8")

    def readFromExternalFile(self, filename):
        """Parse the ComicInfo.xml file at *filename* into GenericMetadata."""
        tree = ET.parse(filename)
        return self.convertXMLToMetadata(tree)
|
dialogs | filldlg | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import wal
from sk1 import _, config
from sk1.pwidgets import GradientFill, PatternFill, SolidFill
from uc2 import sk2const
class FillDialog(wal.OkCancelDialog):
    """Modal dialog for choosing an object fill: solid, gradient or pattern.

    Hosts one notebook tab per fill type; show() runs the dialog modally and
    returns the selected fill style, or None when the dialog is cancelled.
    """

    # set in __init__/build(); declared here as class-level defaults
    presenter = None
    nb = None
    tabs = None
    orig_fill = []
    start = True

    def __init__(self, parent, title, presenter, fill_style):
        self.presenter = presenter
        self.app = presenter.app
        self.cms = presenter.cms
        # fill style the dialog was opened with; tabs re-activate from it
        self.orig_fill = fill_style
        size = config.fill_dlg_size
        wal.OkCancelDialog.__init__(
            self,
            parent,
            title,
            style=wal.VERTICAL,
            resizable=True,
            size=size,
            add_line=False,
            action_button=wal.BUTTON_APPLY,
        )
        self.set_minsize(config.fill_dlg_minsize)

    def build(self):
        """Create the three fill tabs and select the one matching orig_fill."""
        self.nb = wal.Notebook(self, on_change=self.on_change)
        self.tabs = [
            SolidFill(self.nb, self, self.cms),
            GradientFill(self.nb, self, self.cms),
            PatternFill(self.nb, self, self.cms),
        ]
        for item in self.tabs:
            self.nb.add_page(item, item.name)
            # NOTE(review): on GTK3 every tab is activated eagerly while being
            # added, instead of activating only the selected tab below --
            # presumably a GTK3 workaround; confirm before changing
            if wal.IS_GTK3:
                item.activate(self.orig_fill)
        self.pack(self.nb, fill=True, expand=True)
        # choose the tab index from the fill type of the current style
        index = 0
        if self.orig_fill:
            if self.orig_fill[1] == sk2const.FILL_GRADIENT:
                index = 1
            elif self.orig_fill[1] == sk2const.FILL_PATTERN:
                index = 2
        if not wal.IS_GTK3:
            self.tabs[index].activate(self.orig_fill)
        self.nb.set_active_index(index)
        # build is done; on_change() may now propagate colors between tabs
        self.start = False

    def on_change(self, index):
        """Tab-switch handler: re-activate the new tab, carrying the solid
        color over to the gradient/pattern tabs when one was picked."""
        new_color = None
        if self.tabs[0].active_panel and not self.start:
            new_color = self.tabs[0].active_panel.get_color()
        if index in (1, 2) and new_color:
            self.nb.get_active_page().activate(self.orig_fill, new_color)
        else:
            self.nb.get_active_page().activate(self.orig_fill)

    def get_result(self):
        """Return the fill style constructed by the currently active tab."""
        return self.nb.get_active_page().get_result()

    def show(self):
        """Run modally; persist the dialog size and return the result or None."""
        ret = None
        if self.show_modal() == wal.BUTTON_OK:
            ret = self.get_result()
        w, h = self.get_size()
        # NOTE(review): compensates for what looks like extra window-decoration
        # height on Unity 16.04 -- confirm the 28px constant
        if wal.is_unity_16_04():
            h = max(h - 28, config.fill_dlg_minsize[1])
        config.fill_dlg_size = (w, h)
        self.destroy()
        return ret
def fill_dlg(parent, presenter, fill_style, title=_("Fill")):
    """Open a modal fill dialog and return the chosen fill style (or None)."""
    dialog = FillDialog(parent, title, presenter, fill_style)
    return dialog.show()
|
updaters | pip | __author__ = "Gina Häußge <osd@foosel.net>"
__license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import collections
import logging
import threading
from octoprint.util.pip import (
UnknownPip,
create_pip_caller,
is_already_installed,
is_egg_problem,
)
from octoprint.util.version import get_comparable_version
from .. import exceptions
# Plugin-internal logger for diagnostics.
logger = logging.getLogger("octoprint.plugins.softwareupdate.updaters.pip")
# Separate logger for pip's raw console output (presumably surfaced in the
# software-update frontend console -- see the softwareupdate plugin).
console_logger = logging.getLogger(
    "octoprint.plugins.softwareupdate.updaters.pip.console"
)
# Cache of pip callers keyed by pip command ("__default" for None), each key
# guarded by its own re-entrant lock so lazy creation is thread safe.
_pip_callers = {}
_pip_caller_mutex = collections.defaultdict(threading.RLock)
def can_perform_update(target, check, online=True):
    """Return True if *check* describes a pip-updatable target we can service.

    Requires a "pip" entry in the check config, connectivity (unless the
    check is flagged as offline-capable) and a working pip of at least
    MINIMUM_PIP.
    """
    from .. import MINIMUM_PIP

    # Cheap config checks first: don't create (and potentially spawn) a pip
    # caller for targets that aren't pip-based or can't update right now.
    if "pip" not in check:
        return False
    if not (online or check.get("offline", False)):
        return False

    pip_caller = _get_pip_caller(command=check.get("pip_command"))
    return (
        pip_caller is not None
        and pip_caller.available
        and pip_caller.version >= get_comparable_version(MINIMUM_PIP)
    )
def _get_pip_caller(command=None):
    """Return the cached pip caller for *command*, creating it on first use.

    Returns None when no usable pip could be found for that command.
    """
    key = "__default" if command is None else command
    with _pip_caller_mutex[key]:
        if key not in _pip_callers:
            try:
                _pip_callers[key] = create_pip_caller(command=command)
            except UnknownPip:
                # leave the key absent so the .get() below yields None
                pass
        return _pip_callers.get(key)
def perform_update(
    target, check, target_version, log_cb=None, online=True, force=False
):
    """Install *target_version* of *target* via ``pip install``.

    Retries once when pip hits the known "egg" upgrade problem, and performs
    a forced reinstall when pip reports the version as already installed.

    Returns "ok" on success; raises CannotUpdateOffline when offline and the
    check isn't offline-capable, or UpdateError when pip fails.
    """
    pip_command = check.get("pip_command")
    pip_working_directory = check.get("pip_cwd")

    if not online and not check.get("offline", False):
        raise exceptions.CannotUpdateOffline()

    force = force or check.get("force_reinstall", False)

    pip_caller = _get_pip_caller(command=pip_command)
    if pip_caller is None:
        raise exceptions.UpdateError("Can't run pip", None)

    # thin wrappers tagging log lines with their origin for the UI console
    def _log_call(*lines):
        _log(lines, prefix=" ", stream="call")

    def _log_stdout(*lines):
        _log(lines, prefix=">", stream="stdout")

    def _log_stderr(*lines):
        _log(lines, prefix="!", stream="stderr")

    def _log_message(*lines):
        _log(lines, prefix="#", stream="message")

    def _log(lines, prefix=None, stream=None):
        if log_cb is None:
            return
        log_cb(lines, prefix=prefix, stream=stream)

    if log_cb is not None:
        pip_caller.on_log_call = _log_call
        pip_caller.on_log_stdout = _log_stdout
        pip_caller.on_log_stderr = _log_stderr

    # {target} is kept as an alias of {target_version} for configured
    # install URLs that still use the old placeholder
    install_arg = check["pip"].format(
        target_version=target_version, target=target_version
    )

    # lazy %-style args instead of eager f-string formatting
    logger.debug("Target: %s, executing pip install %s", target, install_arg)
    pip_args = ["--disable-pip-version-check", "install", install_arg, "--no-cache-dir"]
    pip_kwargs = {
        "env": {"PYTHONWARNINGS": "ignore:DEPRECATION::pip._internal.cli.base_command"}
    }
    if pip_working_directory is not None:
        pip_kwargs.update(cwd=pip_working_directory)

    if check.get("dependency_links"):
        pip_args += ["--process-dependency-links"]

    returncode, stdout, stderr = pip_caller.execute(*pip_args, **pip_kwargs)
    if returncode != 0:
        if is_egg_problem(stdout) or is_egg_problem(stderr):
            _log_message(
                'This looks like an error caused by a specific issue in upgrading Python "eggs"',
                "via current versions of pip.",
                "Performing a second install attempt as a work around.",
            )
            returncode, stdout, stderr = pip_caller.execute(*pip_args, **pip_kwargs)
        # re-check after the potential retry (collapses the duplicated raise)
        if returncode != 0:
            raise exceptions.UpdateError(
                "Error while executing pip install", (stdout, stderr)
            )

    if not force and is_already_installed(stdout):
        _log_message(
            "Looks like we were already installed in this version. Forcing a reinstall."
        )
        force = True

    if force:
        # BUG FIX: the old message said "--ignore-reinstalled", but the flag
        # actually appended below is "--ignore-installed"
        logger.debug(
            "Target: %s, executing pip install %s --ignore-installed --force-reinstall --no-deps",
            target,
            install_arg,
        )
        pip_args += ["--ignore-installed", "--force-reinstall", "--no-deps"]

        returncode, stdout, stderr = pip_caller.execute(*pip_args, **pip_kwargs)
        if returncode != 0:
            raise exceptions.UpdateError(
                "Error while executing pip install --force-reinstall", (stdout, stderr)
            )

    return "ok"
|
gst | sink_windows | # Copyright (C) 2013-2015 Dustin Spicuzza
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import ctypes
import ctypes.wintypes as cwin
import logging
from gi.repository import Gst
logger = logging.getLogger(__name__)
def get_priority_booster():
    """
    This hack allows us to boost the priority of GStreamer task threads on
    Windows. See https://github.com/exaile/exaile/issues/76 and
    https://bugzilla.gnome.org/show_bug.cgi?id=781998
    """
    avrt = ctypes.windll.LoadLibrary("avrt.dll")

    set_mm_characteristics = avrt.AvSetMmThreadCharacteristicsW
    set_mm_characteristics.argtypes = [cwin.LPCWSTR, ctypes.POINTER(cwin.DWORD)]
    set_mm_characteristics.restype = cwin.HANDLE

    revert_mm_characteristics = avrt.AvRevertMmThreadCharacteristics
    revert_mm_characteristics.argtypes = [cwin.HANDLE]
    revert_mm_characteristics.restype = cwin.BOOL

    def on_stream_status(bus, message):
        """
        Called synchronously from GStreamer processing threads -- do what
        we need to do and then get out ASAP
        """
        status = message.parse_stream_status()
        if status.type == Gst.StreamStatusType.ENTER:
            # A gstreamer thread starts: register it with MMCSS.
            # note that we use "Pro Audio" because it gives a higher priority,
            # and that's what Chrome does anyways...
            task = message.get_stream_status_object()
            task_index = cwin.DWORD()
            task.task_handle = set_mm_characteristics(
                "Pro Audio", ctypes.byref(task_index)
            )
        elif status.type == Gst.StreamStatusType.LEAVE:
            # A gstreamer thread ends: undo the boost if we applied one.
            task = message.get_stream_status_object()
            handle = getattr(task, "task_handle", None)
            if handle:
                revert_mm_characteristics(handle)

    def attach_priority_hook(player):
        """Hook *player*'s bus so its task threads get the priority boost."""
        bus = player.get_bus()
        bus.connect("sync-message::stream-status", on_stream_status)
        bus.enable_sync_message_emission()

    return attach_priority_hook
|
src | helper_sent | """
Insert values into sent table
"""
import time
import uuid
from addresses import decodeAddress
from bmconfigparser import config
from helper_ackPayload import genAckPayload
from helper_sql import sqlExecute, sqlQuery
# pylint: disable=too-many-arguments
def insert(
    msgid=None,
    toAddress="[Broadcast subscribers]",
    fromAddress=None,
    subject=None,
    message=None,
    status="msgqueued",
    ripe=None,
    ackdata=None,
    sentTime=None,
    lastActionTime=None,
    sleeptill=0,
    retryNumber=0,
    encoding=2,
    ttl=None,
    folder="sent",
):
    """Perform an insert into the `sent` table.

    Missing `ripe`/`ackdata` values are derived from the relevant address
    (the sender's for broadcasts, the recipient's otherwise). Returns the
    ackdata of the inserted row, or None when that address fails to decode.
    """
    # pylint: disable=unused-variable
    if not ripe or not ackdata:
        # broadcasts carry the sender's own stream/ripe information
        addr = fromAddress if toAddress == "[Broadcast subscribers]" else toAddress
        new_status, addressVersionNumber, streamNumber, new_ripe = decodeAddress(addr)
        if new_status != "success":
            # undecodable address: nothing sensible to insert
            return None
        if not ripe:
            ripe = new_ripe
        if not ackdata:
            stealthLevel = config.safeGetInt("bitmessagesettings", "ackstealthlevel")
            ackdata = genAckPayload(streamNumber, stealthLevel)

    now = int(time.time())
    msgid = msgid if msgid else uuid.uuid4().bytes
    sentTime = sentTime if sentTime else now  # sentTime doesn't change later
    lastActionTime = lastActionTime if lastActionTime else now
    ttl = ttl if ttl else config.getint("bitmessagesettings", "ttl")

    t = (
        msgid,
        toAddress,
        ripe,
        fromAddress,
        subject,
        message,
        ackdata,
        sentTime,
        lastActionTime,
        sleeptill,
        status,
        retryNumber,
        folder,
        encoding,
        ttl,
    )
    sqlExecute("""INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)""", *t)
    return ackdata
def delete(ack_data):
    """Perform Delete query"""
    query = "DELETE FROM sent WHERE ackdata = ?"
    sqlExecute(query, ack_data)
def retrieve_message_details(ack_data):
    """Retrieving Message details

    NOTE(review): the WHERE clause filters on msgid although the parameter is
    named ack_data -- confirm callers actually pass a msgid here.
    """
    query = (
        "select toaddress, fromaddress, subject, message, received"
        " from inbox where msgid = ?"
    )
    return sqlQuery(query, ack_data)
def trash(ackdata):
    """Mark a message in the `sent` as `trash`"""
    # sqlExecute's rowcount is forwarded directly to the caller
    return sqlExecute("""UPDATE sent SET folder='trash' WHERE ackdata=?""", ackdata)
|
writer8 | header | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = "GPL v3"
__copyright__ = "2012, Kovid Goyal <kovid@kovidgoyal.net>"
__docformat__ = "restructuredtext en"
import random
from collections import OrderedDict
from io import BytesIO
from struct import pack
from calibre.ebooks.mobi.utils import align_block
# Sentinel used by header definitions for "no value": all bits set in a
# 32-bit field.
NULL = 0xFFFFFFFF


# PEP 8: named functions instead of lambdas assigned to names, so tracebacks
# and repr() show useful names. Signatures and behavior are unchanged.
def zeroes(x):
    """Return *x* zero bytes."""
    return b"\0" * x


def nulls(x):
    """Return *x* 0xFF bytes."""
    return b"\xff" * x


def short(x):
    """Pack *x* as a big-endian unsigned 16-bit integer."""
    return pack(b">H", x)
class Header(OrderedDict):
    """Declarative serializer for a MOBI binary header record.

    Subclasses describe their layout in DEFINITION: one ``name = value`` line
    per field, evaluated in a tiny namespace (zeroes/nulls/short/NULL/random;
    DYN marks a field that must be supplied at call time). Calling the
    instance packs all fields into bytes.

    NOTE(review): py2-era code -- relies on iteritems() and long; confirm the
    target runtime before modernizing.
    """

    HEADER_NAME = b""

    DEFINITION = """
    """

    # If True, the serialized record is padded via align_block().
    ALIGN_BLOCK = False

    POSITIONS = {}  # Mapping of position field to field whose position should
    # be stored in the position field

    # Names of fields packed as 16-bit (">H") instead of 32-bit (">I").
    SHORT_FIELDS = set()

    def __init__(self):
        OrderedDict.__init__(self)
        # Parse DEFINITION into ordered name -> default-value entries.
        for line in self.DEFINITION.splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            name, val = [x.strip() for x in line.partition("=")[0::2]]
            if val:
                # DEFINITION values are trusted class-level literals,
                # evaluated in a restricted namespace (DYN -> None marks a
                # dynamic field).
                val = eval(
                    val,
                    {
                        "zeroes": zeroes,
                        "NULL": NULL,
                        "DYN": None,
                        "nulls": nulls,
                        "short": short,
                        "random": random,
                    },
                )
            else:
                # a bare name with no "= value" defaults to 0
                val = 0
            if name in self:
                raise ValueError("Duplicate field in definition: %r" % name)
            self[name] = val

    @property
    def dynamic_fields(self):
        """Names of DYN fields that must be passed when calling the header."""
        return tuple(k for k, v in self.iteritems() if v is None)

    def __call__(self, **kwargs):
        """Serialize to bytes, overriding fields via keyword arguments.

        Raises KeyError for unknown field names and ValueError for DYN
        fields that were never given a value.
        """
        positions = {}
        for name, val in kwargs.iteritems():
            if name not in self:
                raise KeyError("Not a valid header field: %r" % name)
            self[name] = val
        buf = BytesIO()
        buf.write(bytes(self.HEADER_NAME))
        for name, val in self.iteritems():
            val = self.format_value(name, val)
            # remember each field's offset for the POSITIONS back-patch below
            positions[name] = buf.tell()
            if val is None:
                raise ValueError("Dynamic field %r not set" % name)
            if isinstance(val, (int, long)):
                fmt = b"H" if name in self.SHORT_FIELDS else b"I"
                val = pack(b">" + fmt, val)
            buf.write(val)
        # back-patch position fields now that all offsets are known
        for pos_field, field in self.POSITIONS.iteritems():
            buf.seek(positions[pos_field])
            buf.write(pack(b">I", positions[field]))
        ans = buf.getvalue()
        if self.ALIGN_BLOCK:
            ans = align_block(ans)
        return ans

    def format_value(self, name, val):
        """Hook for subclasses to transform a field value before packing."""
        return val
|
preferences | general | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Keyboard shortcuts settings page.
"""
import app
import appinfo
import i18n
import icons
import language_names
import preferences
import remote
import sessions
import util
from PyQt5.QtCore import QSettings
from PyQt5.QtWidgets import (
QApplication,
QCheckBox,
QComboBox,
QGridLayout,
QHBoxLayout,
QLabel,
QLineEdit,
QRadioButton,
QStyleFactory,
QTabWidget,
QVBoxLayout,
QWidget,
)
from widgets.urlrequester import UrlRequester
class GeneralPrefs(preferences.ScrolledGroupsPage):
    """The scrollable "General Preferences" page, stacking its option groups."""

    def __init__(self, dialog):
        super().__init__(dialog)
        vbox = QVBoxLayout()
        self.scrolledWidget.setLayout(vbox)
        for group in (
            General(self),
            SessionsAndFiles(self),
            ExperimentalFeatures(self),
        ):
            vbox.addWidget(group)
class General(preferences.Group):
    """The "General Preferences" group: language, GUI style and basic toggles."""

    def __init__(self, page):
        super().__init__(page)
        grid = QGridLayout()
        self.setLayout(grid)
        # row 0: interface language
        self.langLabel = QLabel()
        self.lang = QComboBox(currentIndexChanged=self.changed)
        grid.addWidget(self.langLabel, 0, 0)
        grid.addWidget(self.lang, 0, 1)
        # row 1: Qt widget style
        self.styleLabel = QLabel()
        self.styleCombo = QComboBox(currentIndexChanged=self.changed)
        grid.addWidget(self.styleLabel, 1, 0)
        grid.addWidget(self.styleCombo, 1, 1)
        # remaining rows: simple checkbox options spanning all columns
        self.systemIcons = QCheckBox(toggled=self.changed)
        grid.addWidget(self.systemIcons, 2, 0, 1, 3)
        self.tabsClosable = QCheckBox(toggled=self.changed)
        grid.addWidget(self.tabsClosable, 3, 0, 1, 3)
        self.splashScreen = QCheckBox(toggled=self.changed)
        grid.addWidget(self.splashScreen, 4, 0, 1, 3)
        self.allowRemote = QCheckBox(toggled=self.changed)
        grid.addWidget(self.allowRemote, 5, 0, 1, 3)
        grid.setColumnStretch(2, 1)

        # fill in the language combo
        # index 0 = "C" (no translation), index 1 = "" (system default);
        # the two placeholder item texts are set in translateUI()
        self._langs = ["C", ""]
        self.lang.addItems(("", ""))
        langnames = [
            (language_names.languageName(lang, lang), lang) for lang in i18n.available()
        ]
        langnames.sort()
        for name, lang in langnames:
            self._langs.append(lang)
            self.lang.addItem(name)
        # fill in style combo; index 0 is the "Default" entry (text set in
        # translateUI), the rest are the installed QStyle names
        self.styleCombo.addItem("")
        self.styleCombo.addItems(QStyleFactory.keys())
        app.translateUI(self)

    def loadSettings(self):
        """Load the widgets' state from QSettings."""
        s = QSettings()
        lang = s.value("language", "", str)
        try:
            index = self._langs.index(lang)
        except ValueError:
            # unknown stored language: fall back to "system default"
            index = 1
        self.lang.setCurrentIndex(index)
        style = s.value("guistyle", "", str).lower()
        styles = [name.lower() for name in QStyleFactory.keys()]
        try:
            index = styles.index(style) + 1  # +1: combo index 0 is "Default"
        except ValueError:
            index = 0
        self.styleCombo.setCurrentIndex(index)
        self.systemIcons.setChecked(s.value("system_icons", True, bool))
        self.tabsClosable.setChecked(s.value("tabs_closable", True, bool))
        self.splashScreen.setChecked(s.value("splash_screen", True, bool))
        self.allowRemote.setChecked(remote.enabled())

    def saveSettings(self):
        """Write the widgets' state back to QSettings."""
        s = QSettings()
        s.setValue("language", self._langs[self.lang.currentIndex()])
        s.setValue("system_icons", self.systemIcons.isChecked())
        s.setValue("tabs_closable", self.tabsClosable.isChecked())
        s.setValue("splash_screen", self.splashScreen.isChecked())
        s.setValue("allow_remote", self.allowRemote.isChecked())
        if self.styleCombo.currentIndex() == 0:
            # "Default" selected: remove the override instead of storing it
            s.remove("guistyle")
        else:
            s.setValue("guistyle", self.styleCombo.currentText())
        # update all top-level windows, so icon changes are picked up
        for w in QApplication.topLevelWidgets():
            if w.isVisible():
                w.update()

    def translateUI(self):
        """Set all user-visible strings (called on startup and language change)."""
        self.setTitle(_("General Preferences"))
        self.langLabel.setText(_("Language:"))
        self.lang.setItemText(0, _("No Translation"))
        self.lang.setItemText(1, _("System Default Language (if available)"))
        self.styleLabel.setText(_("Style:"))
        self.styleCombo.setItemText(0, _("Default"))
        self.systemIcons.setText(_("Use System Icons"))
        self.systemIcons.setToolTip(
            _(
                "If checked, icons of the desktop icon theme "
                "will be used instead of the bundled icons."
            )
        )
        self.splashScreen.setText(_("Show Splash Screen on Startup"))
        self.tabsClosable.setText(_("Show Close Button on Document tabs"))
        self.allowRemote.setText(_("Open Files in Running Instance"))
        self.allowRemote.setToolTip(
            _(
                "If checked, files will be opened in a running Frescobaldi "
                "application if available, instead of starting a new instance."
            )
        )
class SessionsAndFiles(preferences.Group):
def __init__(self, page):
super().__init__(page)
layout = QVBoxLayout()
self.setLayout(layout)
def changed():
self.changed.emit()
self.new_combo.setEnabled(self.template.isChecked())
self.session_combo.setEnabled(self.session_custom.isChecked())
def customchanged():
self.changed.emit()
self.filenameTemplate.setEnabled(self.customFilename.isChecked())
self.verbose_toolbuttons = QCheckBox(toggled=self.changed)
layout.addWidget(self.verbose_toolbuttons)
self.tabs = QTabWidget()
layout.addWidget(self.tabs)
# New Documents Tab
self.new_tab = QWidget()
self.tabs.addTab(self.new_tab, "")
new_layout_wrap = QVBoxLayout()
self.new_tab.setLayout(new_layout_wrap)
new_layout = QGridLayout()
new_layout_wrap.addLayout(new_layout)
self.emptyDocument = QRadioButton(toggled=changed)
self.lilyVersion = QRadioButton(toggled=changed)
self.template = QRadioButton(toggled=changed)
self.new_combo = QComboBox(currentIndexChanged=changed)
new_layout.addWidget(self.emptyDocument, 0, 0, 1, 2)
new_layout.addWidget(self.lilyVersion, 1, 0, 1, 2)
new_layout.addWidget(self.template, 2, 0, 1, 1)
new_layout.addWidget(self.new_combo, 2, 1, 1, 1)
new_layout_wrap.addStretch()
# Saving Files Tab
self.save_tab = QWidget()
self.tabs.addTab(self.save_tab, "")
save_layout = QVBoxLayout()
self.save_tab.setLayout(save_layout)
self.stripwsp = QCheckBox(toggled=self.changed)
self.backup = QCheckBox(toggled=self.changed)
self.metainfo = QCheckBox(toggled=self.changed)
self.format = QCheckBox(toggled=self.changed)
save_layout.addWidget(self.stripwsp)
save_layout.addWidget(self.backup)
save_layout.addWidget(self.metainfo)
save_layout.addWidget(self.format)
basedir_layout = QHBoxLayout()
save_layout.addLayout(basedir_layout)
self.basedirLabel = l = QLabel()
self.basedir = UrlRequester()
basedir_layout.addWidget(self.basedirLabel)
basedir_layout.addWidget(self.basedir)
self.basedir.changed.connect(self.changed)
filename_layout = QHBoxLayout()
save_layout.addLayout(filename_layout)
self.customFilename = QCheckBox(toggled=customchanged)
self.filenameTemplate = QLineEdit(textEdited=self.changed)
filename_layout.addWidget(self.customFilename)
filename_layout.addWidget(self.filenameTemplate)
# Sessions Tab
self.session_tab = QWidget()
self.tabs.addTab(self.session_tab, "")
session_layout = QGridLayout()
session_layout_wrap = QVBoxLayout()
self.session_tab.setLayout(session_layout_wrap)
self.session_label = QLabel()
session_layout_wrap.addWidget(self.session_label)
session_layout_wrap.addLayout(session_layout)
self.session_none = QRadioButton(toggled=changed)
self.session_lastused = QRadioButton(toggled=changed)
self.session_custom = QRadioButton(toggled=changed)
self.session_combo = QComboBox(currentIndexChanged=changed)
session_layout.addWidget(self.session_none, 0, 0, 1, 2)
session_layout.addWidget(self.session_lastused, 1, 0, 1, 2)
session_layout.addWidget(self.session_custom, 2, 0, 1, 1)
session_layout.addWidget(self.session_combo, 2, 1, 1, 1)
session_layout_wrap.addStretch()
self.loadNewCombo()
self.page().parent().finished.connect(self.saveTabIndex)
app.translateUI(self)
def translateUI(self):
self.setTitle(_("Sessions and Files"))
self.verbose_toolbuttons.setText(_("Add pull-down menus in main toolbar"))
self.verbose_toolbuttons.setToolTip(
"<font>{}</font>".format(
_(
"If set the file related buttons in the main toolbar will "
"provide pull-down menus with additional functions."
)
)
)
# New Documents Tab
self.tabs.setTabText(0, _("New Document"))
self.emptyDocument.setText(_("Create an empty document"))
self.lilyVersion.setText(
_("Create a document that contains the LilyPond version statement")
)
self.template.setText(_("Create a document from a template:"))
from snippet import snippets
for i, name in enumerate(self._names):
self.new_combo.setItemText(i, snippets.title(name))
# Saving Files Tab
self.tabs.setTabText(1, _("Saving"))
self.stripwsp.setText(_("Strip trailing whitespace"))
self.stripwsp.setToolTip(
_(
"If checked, Frescobaldi will remove unnecessary whitespace at the "
"end of lines (but not inside multi-line strings)."
)
)
self.backup.setText(_("Keep backup copy"))
self.backup.setToolTip(
_(
"Frescobaldi always backups a file before overwriting it "
"with a new version.\n"
"If checked those backup copies are retained."
)
)
self.metainfo.setText(_("Remember cursor position, bookmarks, etc."))
self.format.setText(_("Format document"))
self.basedirLabel.setText(_("Default directory:"))
self.basedirLabel.setToolTip(
_("The default folder for your LilyPond documents (optional).")
)
self.customFilename.setText(_("Use custom default file name:"))
self.customFilename.setToolTip(
_(
"If checked, Frescobaldi will use the template to generate default file name.\n"
"{title} and {composer} will be replaced by title and composer of that document"
)
)
# Sessions Tab
self.tabs.setTabText(2, _("Sessions"))
self.session_label.setText(
_("Session to load if Frescobaldi is started without arguments")
)
self.session_none.setText(_("Start with no session"))
self.session_lastused.setText(_("Start with last used session"))
self.session_custom.setText(_("Start with session:"))
def loadNewCombo(self):
from snippet import snippets
self._names = [
name
for name in snippets.names()
if snippets.get(name).variables.get("template")
]
self.new_combo.clear()
self.new_combo.addItems([""] * len(self._names))
def loadSettings(self):
s = QSettings()
self.verbose_toolbuttons.setChecked(s.value("verbose_toolbuttons", False, bool))
# New Documents Tab
ndoc = s.value("new_document", "empty", str)
template = s.value("new_document_template", "", str)
if template in self._names:
self.new_combo.setCurrentIndex(self._names.index(template))
if ndoc == "template":
self.template.setChecked(True)
elif ndoc == "version":
self.lilyVersion.setChecked(True)
else:
self.emptyDocument.setChecked(True)
# Saving Files Tab
self.stripwsp.setChecked(s.value("strip_trailing_whitespace", False, bool))
self.backup.setChecked(s.value("backup_keep", False, bool))
self.metainfo.setChecked(s.value("metainfo", True, bool))
self.format.setChecked(s.value("format", False, bool))
self.basedir.setPath(s.value("basedir", "", str))
self.customFilename.setChecked(s.value("custom_default_filename", False, bool))
self.filenameTemplate.setText(
s.value("default_filename_template", "{composer}-{title}", str)
)
self.filenameTemplate.setEnabled(self.customFilename.isChecked())
# Sessions Tab
s.beginGroup("session")
startup = s.value("startup", "none", str)
if startup == "lastused":
self.session_lastused.setChecked(True)
elif startup == "custom":
self.session_custom.setChecked(True)
else:
self.session_none.setChecked(True)
sessionNames = sessions.sessionNames()
self.session_combo.clear()
self.session_combo.addItems(sessionNames)
custom = s.value("custom", "", str)
if custom in sessionNames:
self.session_combo.setCurrentIndex(sessionNames.index(custom))
s.endGroup()
self.tabs.setCurrentIndex(s.value("prefs_general_file_tab_index", 0, int))
def saveSettings(self):
s = QSettings()
s.setValue("verbose_toolbuttons", self.verbose_toolbuttons.isChecked())
# New Documents Tab
if self._names and self.template.isChecked():
s.setValue("new_document", "template")
s.setValue(
"new_document_template", self._names[self.new_combo.currentIndex()]
)
elif self.lilyVersion.isChecked():
s.setValue("new_document", "version")
else:
s.setValue("new_document", "empty")
# Saving Files Tab
s.setValue("strip_trailing_whitespace", self.stripwsp.isChecked())
s.setValue("backup_keep", self.backup.isChecked())
s.setValue("metainfo", self.metainfo.isChecked())
s.setValue("format", self.format.isChecked())
s.setValue("basedir", self.basedir.path())
s.setValue("custom_default_filename", self.customFilename.isChecked())
s.setValue("default_filename_template", self.filenameTemplate.text())
# Sessions Tab
s.beginGroup("session")
s.setValue("custom", self.session_combo.currentText())
if self.session_custom.isChecked():
startup = "custom"
elif self.session_lastused.isChecked():
startup = "lastused"
else:
startup = "none"
s.setValue("startup", startup)
def saveTabIndex(self):
s = app.settings("")
s.setValue("prefs_general_file_tab_index", self.tabs.currentIndex())
class ExperimentalFeatures(preferences.Group):
    """Preference group exposing a single switch for experimental features."""

    def __init__(self, page):
        super().__init__(page)
        self.experimentalFeatures = QCheckBox(toggled=self.changed)
        box = QVBoxLayout()
        box.addWidget(self.experimentalFeatures)
        self.setLayout(box)
        app.translateUI(self)

    def translateUI(self):
        """Set the translated texts on the group and its checkbox."""
        self.setTitle(_("Experimental Features"))
        self.experimentalFeatures.setText(_("Enable Experimental Features"))
        tip = _(
            "If checked, features that are not yet finished are enabled.\n"
            "You need to restart Frescobaldi to see the changes."
        )
        self.experimentalFeatures.setToolTip("<qt>" + tip)

    def loadSettings(self):
        """Reflect the stored setting in the checkbox."""
        self.experimentalFeatures.setChecked(
            QSettings().value("experimental-features", False, bool)
        )

    def saveSettings(self):
        """Persist the checkbox state."""
        QSettings().setValue(
            "experimental-features", self.experimentalFeatures.isChecked()
        )
|
mylar | rsscheck | # This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import gzip
import os
import random
import re
import sys
import time
from datetime import datetime, timedelta
import cfscrape
import feedparser
import ftpsshup
import mylar
import requests
import torrent.clients.deluge as deluge
import torrent.clients.qbittorrent as qbittorrent
import torrent.clients.transmission as transmission
import urlparse
from bs4 import BeautifulSoup
from mylar import auth32p, db, ftpsshup, helpers, logger, utorrent
from StringIO import StringIO
def _start_newznab_attr(self, attrsD):
    """feedparser hook recording <newznab:attr> name/value pairs.

    Monkey-patched onto feedparser's parser mixin below so that newznab
    attributes (e.g. size, category) survive into each parsed entry
    under the 'newznab' key of the entry context.
    """
    ctx = self._getContext()
    newznab = ctx.setdefault("newznab", feedparser.FeedParserDict())
    newznab.setdefault("tags", feedparser.FeedParserDict())
    attr_name = attrsD.get("name")
    attr_value = attrsD.get("value")
    if attr_name == "category":
        # categories may repeat, so collect them in a list
        newznab.setdefault("categories", []).append(attr_value)
    else:
        newznab[attr_name] = attr_value


feedparser._FeedParserMixin._start_newznab_attr = _start_newznab_attr
def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
    """Fetch and cache torrent RSS results from the configured trackers.

    pickfeed selects the feed to poll:
      '1'      : 32P new-releases feed (requires feedinfo auth values)
      '2'/'3'  : defunct TP.SE feeds (skipped)
      '4'      : 32P search -- returns immediately (handled elsewhere)
      '5'      : Demonoid search (seriesname/issue build the query)
      '6'      : Demonoid new-releases feed
      '999'    : WorldWideTorrents new-releases feed
      >= '7'   : personal 32P notification feeds
      'Public' : maps to '999' (WWT), since DEM is dead

    When seriesname is None the collected entries are written to the
    rss cache db via rssdbupdate(); otherwise a dict with an 'entries'
    list is returned for backlog searching.
    """
    if pickfeed is None:
        return

    srchterm = None
    if seriesname:
        srchterm = re.sub(" ", "%20", seriesname)
    if issue:
        srchterm += "%20" + str(issue)

    # only one public tracker (WWT) is cycled now; loopit == 2 is kept for
    # the historical DEM+WWT dual-feed cycle
    loopit = 1
    if pickfeed == "Public":
        # since DEM is dead, 'Public' now means the WWT feed only
        pickfeed = "999"

    lp = 0
    totalcount = 0

    feeddata = []
    myDB = db.DBConnection()
    torthetpse = []
    torthe32p = []
    torinfo = {}

    while lp < loopit:
        if lp == 0 and loopit == 2:
            pickfeed = "6"  # DEM RSS
        elif lp == 1 and loopit == 2:
            pickfeed = "999"  # WWT RSS

        feedtype = None

        if pickfeed == "1" and mylar.CONFIG.ENABLE_32P is True:
            # 32pages new releases feed.
            feed = (
                "https://32pag.es/feeds.php?feed=torrents_all&user="
                + feedinfo["user"]
                + "&auth="
                + feedinfo["auth"]
                + "&passkey="
                + feedinfo["passkey"]
                + "&authkey="
                + feedinfo["authkey"]
            )
            feedtype = " from the New Releases RSS Feed for comics"
            verify = bool(mylar.CONFIG.VERIFY_32P)
        elif pickfeed == "2" and srchterm is not None:
            # TP.SE search / RSS -- defunct
            lp += 1
            continue
        elif pickfeed == "3":
            # TP.SE rss feed -- defunct
            lp += 1
            continue
        elif pickfeed == "4":  # 32p search
            if any(
                [
                    mylar.CONFIG.USERNAME_32P is None,
                    mylar.CONFIG.USERNAME_32P == "",
                    mylar.CONFIG.PASSWORD_32P is None,
                    mylar.CONFIG.PASSWORD_32P == "",
                ]
            ):
                logger.error(
                    "[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option."
                )
                # fix: original read 'lp = +1' (assignment, not increment)
                lp += 1
                continue
            if mylar.CONFIG.MODE_32P is False:
                logger.warn(
                    "[32P] Searching is not available in 32p Legacy mode. Switch to Auth mode to use the search functionality."
                )
                # fix: original read 'lp = +1' (assignment, not increment)
                lp += 1
                continue
            # Auth-mode 32P searching is handled by the auth32p module.
            return
        elif pickfeed == "5" and srchterm is not None:  # demonoid search / non-RSS
            feed = (
                mylar.DEMURL
                + "files/?category=10&subcategory=All&language=0&seeded=2&external=2&query="
                + str(srchterm)
                + "&uid=0&out=rss"
            )
            verify = bool(mylar.CONFIG.PUBLIC_VERIFY)
        elif pickfeed == "6":  # demonoid rss feed
            feed = mylar.DEMURL + "rss/10.xml"
            feedtype = " from the New Releases RSS Feed from Demonoid"
            verify = bool(mylar.CONFIG.PUBLIC_VERIFY)
        elif pickfeed == "999":  # WWT rss feed
            feed = mylar.WWTURL + "rss.php?cat=132,50"
            feedtype = " from the New Releases RSS Feed from WorldWideTorrents"
            verify = bool(mylar.CONFIG.PUBLIC_VERIFY)
        elif (
            int(pickfeed) >= 7
            and feedinfo is not None
            and mylar.CONFIG.ENABLE_32P is True
        ):
            # personal 32P notification feeds.
            feed = (
                "https://32pag.es/feeds.php?feed="
                + feedinfo["feed"]
                + "&user="
                + feedinfo["user"]
                + "&auth="
                + feedinfo["auth"]
                + "&passkey="
                + feedinfo["passkey"]
                + "&authkey="
                + feedinfo["authkey"]
                + "&name="
                + feedinfo["feedname"]
            )
            feedtype = " from your Personal Notification Feed : " + feedinfo["feedname"]
            verify = bool(mylar.CONFIG.VERIFY_32P)
        else:
            logger.error("invalid pickfeed denoted...")
            return

        if pickfeed == "5" or pickfeed == "6":
            picksite = "DEM"
        elif pickfeed == "999":
            picksite = "WWT"
        elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) >= 7:
            # fix: was '> 7', which left picksite undefined for pickfeed '7'
            # even though the '>= 7' fetch branch above builds a 32P feed for it
            picksite = "32P"

        if all([pickfeed != "4", pickfeed != "3", pickfeed != "5"]):
            # random delay so the tracker doesn't see clockwork requests
            ddos_protection = round(random.uniform(0, 15), 2)
            time.sleep(ddos_protection)

            logger.info("Now retrieving feed from %s" % picksite)
            try:
                headers = {
                    "Accept-encoding": "gzip",
                    "User-Agent": mylar.CV_HEADERS["User-Agent"],
                }
                cf_cookievalue = None
                scraper = cfscrape.create_scraper()
                if pickfeed == "999":
                    if mylar.WWT_CF_COOKIEVALUE is None:
                        # first hit: solve the CloudFlare challenge and
                        # cache the cookie for subsequent polls
                        try:
                            cf_cookievalue, cf_user_agent = scraper.get_tokens(
                                feed, user_agent=mylar.CV_HEADERS["User-Agent"]
                            )
                        except Exception as e:
                            logger.warn(
                                "[WWT-RSSFEED] Unable to retrieve RSS properly: %s" % e
                            )
                            lp += 1
                            continue
                        else:
                            mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
                            cookievalue = cf_cookievalue
                    else:
                        # reuse the previously solved CloudFlare cookie
                        # (original had a redundant `elif pickfeed == "999"`)
                        cookievalue = mylar.WWT_CF_COOKIEVALUE

                    r = scraper.get(
                        feed, verify=verify, cookies=cookievalue, headers=headers
                    )
                else:
                    r = scraper.get(feed, verify=verify, headers=headers)
            except Exception as e:
                logger.warn("Error fetching RSS Feed Data from %s: %s" % (picksite, e))
                lp += 1
                continue

            feedme = feedparser.parse(r.content)

        i = 0

        if pickfeed == "4":
            # NOTE(review): effectively dead -- pickfeed '4' returns earlier
            # and 'searchresults' is not defined in this scope; kept as-is.
            for entry in searchresults["entries"]:
                # size/seeders are not available in the follow-list rss feed
                justdigits = entry["file_size"]
                seeddigits = entry["seeders"]
                if int(seeddigits) >= int(mylar.CONFIG.MINSEEDS):
                    torthe32p.append(
                        {
                            "site": picksite,
                            "title": entry["torrent_seriesname"].lstrip()
                            + " "
                            + entry["torrent_seriesvol"]
                            + " #"
                            + entry["torrent_seriesiss"],
                            "volume": entry["torrent_seriesvol"],  # not stored by mylar yet.
                            "issue": entry["torrent_seriesiss"],  # not stored by mylar yet.
                            "link": entry["torrent_id"],  # just the id for the torrent
                            "pubdate": entry["pubdate"],
                            "size": entry["file_size"],
                            "seeders": entry["seeders"],
                            "files": entry["num_files"],
                        }
                    )
                i += 1
        elif pickfeed == "3":
            # TP.SE RSS FEED (parse) -- defunct
            pass
        elif pickfeed == "5":
            # DEMONOID SEARCH RESULT (parse) -- not implemented
            pass
        elif pickfeed == "999":
            # WWT / FEED
            for entry in feedme.entries:
                tmpsz = entry.description
                tmpsz_st = tmpsz.find("Size:") + 6
                if "GB" in tmpsz[tmpsz_st:]:
                    szform = "GB"
                    sz = "G"
                elif "MB" in tmpsz[tmpsz_st:]:
                    szform = "MB"
                    sz = "M"
                linkwwt = urlparse.parse_qs(urlparse.urlparse(entry.link).query)["id"]
                feeddata.append(
                    {
                        "site": picksite,
                        "title": entry.title,
                        "link": "".join(linkwwt),
                        "pubdate": entry.updated,
                        "size": helpers.human2bytes(
                            str(tmpsz[tmpsz_st : tmpsz.find(szform, tmpsz_st) - 1])
                            + str(sz)
                        ),
                    }
                )
                i += 1
        else:
            for entry in feedme["entries"]:
                # DEMONOID / FEED
                if pickfeed == "6":
                    tmpsz = feedme.entries[i].description
                    tmpsz_st = tmpsz.find("Size")
                    if tmpsz_st != -1:
                        tmpsize = tmpsz[tmpsz_st : tmpsz_st + 14]
                        if any(
                            [
                                "GB" in tmpsize,
                                "MB" in tmpsize,
                                "KB" in tmpsize,
                                "TB" in tmpsize,
                            ]
                        ):
                            tmp1 = tmpsz.find("MB", tmpsz_st)
                            if tmp1 == -1:
                                tmp1 = tmpsz.find("GB", tmpsz_st)
                            if tmp1 == -1:
                                tmp1 = tmpsz.find("TB", tmpsz_st)
                            if tmp1 == -1:
                                tmp1 = tmpsz.find("KB", tmpsz_st)
                            tmpsz_end = tmp1 + 2
                            tmpsz_st += 7
                    else:
                        # limit it to the first 80 so it doesn't pick up
                        # alt covers mistakingly
                        tmpsz = tmpsz[:80]
                        tmpsz_st = tmpsz.rfind("|")
                        if tmpsz_st != -1:
                            tmpsz_end = tmpsz.find("<br />", tmpsz_st)
                            tmpsize = tmpsz[tmpsz_st:tmpsz_end]
                            if any(
                                [
                                    "GB" in tmpsize,
                                    "MB" in tmpsize,
                                    "KB" in tmpsize,
                                    "TB" in tmpsize,
                                ]
                            ):
                                tmp1 = tmpsz.find("MB", tmpsz_st)
                                if tmp1 == -1:
                                    tmp1 = tmpsz.find("GB", tmpsz_st)
                                if tmp1 == -1:
                                    tmp1 = tmpsz.find("TB", tmpsz_st)
                                if tmp1 == -1:
                                    tmp1 = tmpsz.find("KB", tmpsz_st)
                                tmpsz_end = tmp1 + 2
                                tmpsz_st += 2

                    if "KB" in tmpsz[tmpsz_st:tmpsz_end]:
                        szform = "KB"
                        sz = "K"
                    elif "GB" in tmpsz[tmpsz_st:tmpsz_end]:
                        szform = "GB"
                        sz = "G"
                    elif "MB" in tmpsz[tmpsz_st:tmpsz_end]:
                        szform = "MB"
                        sz = "M"
                    elif "TB" in tmpsz[tmpsz_st:tmpsz_end]:
                        szform = "TB"
                        sz = "T"

                    tsize = helpers.human2bytes(
                        str(tmpsz[tmpsz_st : tmpsz.find(szform, tmpsz_st) - 1])
                        + str(sz)
                    )

                    # timestamp is in YYYY-MM-DDTHH:MM:SS+TZ :/
                    dt = feedme.entries[i].updated
                    try:
                        pd = datetime.strptime(dt[0:19], "%Y-%m-%dT%H:%M:%S")
                        pdate = (
                            pd.strftime("%a, %d %b %Y %H:%M:%S")
                            + " "
                            + re.sub(":", "", dt[19:]).strip()
                        )
                    except Exception:
                        # fall back to the raw feed timestamp on any parse error
                        pdate = feedme.entries[i].updated

                    feeddata.append(
                        {
                            "site": picksite,
                            "title": feedme.entries[i].title,
                            # the torrent id lives in the link's query string
                            "link": str(
                                re.sub(
                                    "genid=",
                                    "",
                                    urlparse.urlparse(feedme.entries[i].link)[4],
                                ).strip()
                            ),
                            "pubdate": pdate,
                            "size": tsize,
                        }
                    )
                # 32p / FEEDS
                elif pickfeed == "1" or int(pickfeed) >= 7:
                    # fix: was '> 7' -- personal feeds are dispatched with
                    # pickfeed >= 7 above, so include '7' here as well
                    tmpdesc = feedme.entries[i].description
                    st_pub = feedme.entries[i].title.find("(")
                    st_end = feedme.entries[i].title.find(")")
                    # +1 to not include (
                    pub = feedme.entries[i].title[st_pub + 1 : st_end]
                    vol_find = feedme.entries[i].title.find("vol.")
                    series = feedme.entries[i].title[st_end + 1 : vol_find].strip()
                    series = re.sub("&", "&", series).strip()
                    iss_st = feedme.entries[i].title.find(" - ", vol_find)
                    vol = re.sub(
                        "\.", "", feedme.entries[i].title[vol_find:iss_st]
                    ).strip()
                    issue = feedme.entries[i].title[iss_st + 3 :].strip()
                    try:
                        justdigits = feedme.entries[i].torrent_contentlength
                    except Exception:
                        justdigits = "0"
                    seeddigits = 0
                    if int(mylar.CONFIG.MINSEEDS) >= int(seeddigits):
                        # new releases has it as '&id', notification feeds have it as %ampid (possibly even &id
                        link = feedme.entries[i].link
                        link = re.sub("&", "&", link)
                        link = re.sub("&", "&", link)
                        linkst = link.find("&id")
                        linken = link.find("&", linkst + 1)
                        if linken == -1:
                            linken = len(link)
                        newlink = re.sub("&id=", "", link[linkst:linken]).strip()
                        feeddata.append(
                            {
                                "site": picksite,
                                "title": series.lstrip() + " " + vol + " #" + issue,
                                "volume": vol,  # not stored by mylar yet.
                                "issue": issue,  # not stored by mylar yet.
                                "link": newlink,  # just the id for the torrent
                                "pubdate": feedme.entries[i].updated,
                                "size": justdigits,
                            }
                        )
                i += 1

        if feedtype is None:
            logger.info("[" + picksite + "] there were " + str(i) + " results..")
        else:
            logger.info(
                "[" + picksite + "] there were " + str(i) + " results" + feedtype
            )
        totalcount += i
        lp += 1

    if not seriesname:
        # rss poll: cache everything collected
        rssdbupdate(feeddata, totalcount, "torrent")
    else:
        # backlog (parsing) search results
        if pickfeed == "4":
            torinfo["entries"] = torthe32p
        else:
            torinfo["entries"] = torthetpse
        return torinfo
    return
def ddl(forcerss=False):
    """Poll the GetComics RSS feed and cache its entries in the rss db.

    Returns False on any fetch/status failure, None after a successful
    (possibly empty) update. forcerss is accepted for interface parity
    with the other pollers but is unused here.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1"
    }
    ddl_feed = "https://getcomics.info/feed/"
    try:
        r = requests.get(ddl_feed, verify=True, headers=headers)
    except Exception as e:
        logger.warn("Error fetching RSS Feed Data from DDL: %s" % (e))
        return False
    else:
        if r.status_code != 200:
            # typically 403 will not return results, but just catch anything other than a 200
            if r.status_code == 403:
                logger.warn("ERROR - status code:%s" % r.status_code)
                return False
            else:
                # fix: original supplied one value to a two-placeholder format
                # ('% (r.status_code)'), which raised TypeError at runtime
                logger.warn("[%s] Status code returned: %s" % ("DDL", r.status_code))
                return False

        feedme = feedparser.parse(r.content)
        results = []
        for entry in feedme.entries:
            soup = BeautifulSoup(entry.summary, "html.parser")
            orig_find = soup.find("p", {"style": "text-align: center;"})
            i = 0
            option_find = orig_find
            # walk the text nodes until the node after the 'Size' label is
            # reached; that node carries the human-readable size
            while True:
                prev_option = option_find
                option_find = option_find.findNext(text=True)
                if "Year" in option_find:
                    year = option_find.findNext(text=True)
                    year = re.sub("\|", "", year).strip()
                else:
                    if "Size" in prev_option:
                        size = option_find
                        if "- MB" in size:
                            size = "0 MB"
                        possible_more = orig_find.next_sibling
                        break
                i += 1

            title = entry.title
            updated = entry.updated
            if updated.endswith("+0000"):
                updated = updated[:-5].strip()
            tmpid = entry.id
            # renamed from 'id' to avoid shadowing the builtin
            entry_id = tmpid[tmpid.find("=") + 1 :]
            if "KB" in size:
                szform = "KB"
                sz = "K"
            elif "GB" in size:
                szform = "GB"
                sz = "G"
            elif "MB" in size:
                szform = "MB"
                sz = "M"
            elif "TB" in size:
                szform = "TB"
                sz = "T"
            tsize = helpers.human2bytes(re.sub("[^0-9]", "", size).strip() + sz)

            # link can be referenced with the ?p=id url
            results.append(
                {
                    "Title": title,
                    "Size": tsize,
                    "Link": entry_id,
                    "Site": "DDL",
                    "Pubdate": updated,
                }
            )

        if len(results) > 0:
            logger.info(
                "[RSS][DDL] %s entries have been indexed and are now going to be stored for caching."
                % len(results)
            )
            rssdbupdate(results, len(results), "ddl")
    return
def nzbs(provider=None, forcerss=False):
    """Poll all enabled nzb/newznab RSS feeds and cache their entries.

    provider is unused (kept for callers); forcerss requests a larger
    result window on the providers that support it. Feeds that return a
    503 are disabled via helpers.disable_provider().
    """
    feedthis = []

    def _parse_feed(site, url, verify, payload=None):
        """Fetch one feed and append the parsed result to feedthis.

        Returns False on a 403, 'disable' on a 503 (site down),
        None otherwise.
        """
        logger.fdebug("[RSS] Fetching items from " + site)
        headers = {"User-Agent": str(mylar.USER_AGENT)}
        try:
            r = requests.get(url, params=payload, verify=verify, headers=headers)
        except Exception as e:
            logger.warn("Error fetching RSS Feed Data from %s: %s" % (site, e))
            return

        if r.status_code != 200:
            # typically 403 will not return results, but just catch anything other than a 200
            if r.status_code == 403:
                return False
            else:
                logger.warn("[%s] Status code returned: %s" % (site, r.status_code))
                if r.status_code == 503:
                    logger.warn(
                        "[%s] Site appears unresponsive/down. Disabling..." % (site)
                    )
                    return "disable"
                else:
                    return

        feedme = feedparser.parse(r.content)
        feedthis.append({"site": site, "feed": feedme})

    newznab_hosts = []
    if mylar.CONFIG.NEWZNAB is True:
        for newznab_host in mylar.CONFIG.EXTRA_NEWZNABS:
            # index 5 is the host's enabled flag
            if str(newznab_host[5]) == "1":
                newznab_hosts.append(newznab_host)

    providercount = (
        len(newznab_hosts)
        + int(mylar.CONFIG.EXPERIMENTAL is True)
        + int(mylar.CONFIG.NZBSU is True)
        + int(mylar.CONFIG.DOGNZB is True)
    )
    logger.fdebug(
        "[RSS] You have enabled " + str(providercount) + " NZB RSS search providers."
    )

    if providercount > 0:
        if mylar.CONFIG.EXPERIMENTAL == 1:
            max_entries = "250" if forcerss else "50"
            params = {"sort": "agedesc", "max": max_entries, "more": "1"}
            check = _parse_feed(
                "experimental",
                "http://nzbindex.nl/rss/alt.binaries.comics.dcp",
                False,
                params,
            )
            if check == "disable":
                # fix: 'site' was undefined here (NameError); pass the name
                helpers.disable_provider("experimental")

        if mylar.CONFIG.NZBSU == 1:
            num_items = "&num=100" if forcerss else ""  # default is 25
            params = {
                "t": "7030",
                "dl": "1",
                "i": mylar.CONFIG.NZBSU_UID,
                "r": mylar.CONFIG.NZBSU_APIKEY,
                "num_items": num_items,
            }
            check = _parse_feed(
                "nzb.su", "https://api.nzb.su/rss", mylar.CONFIG.NZBSU_VERIFY, params
            )
            if check == "disable":
                # fix: 'site' was undefined here (NameError); pass the name
                helpers.disable_provider("nzb.su")

        if mylar.CONFIG.DOGNZB == 1:
            num_items = "&num=100" if forcerss else ""  # default is 25
            params = {
                "t": "7030",
                "r": mylar.CONFIG.DOGNZB_APIKEY,
                "num_items": num_items,
            }
            check = _parse_feed(
                "dognzb",
                "https://dognzb.cr/rss.cfm",
                mylar.CONFIG.DOGNZB_VERIFY,
                params,
            )
            if check == "disable":
                # fix: 'site' was undefined here (NameError); pass the name
                helpers.disable_provider("dognzb")

        for newznab_host in newznab_hosts:
            site = newznab_host[0].rstrip()
            (newznabuid, _, newznabcat) = (newznab_host[4] or "").partition("#")
            newznabuid = newznabuid or "1"
            newznabcat = newznabcat or "7030"
            if site[-10:] == "[nzbhydra]":
                # to allow nzbhydra to do category search by most recent (ie. rss)
                url = newznab_host[1].rstrip() + "/api"
                params = {
                    "t": "search",
                    "cat": str(newznabcat),
                    "dl": "1",
                    "apikey": newznab_host[3].rstrip(),
                    "num": "100",
                }
                check = _parse_feed(site, url, bool(newznab_host[2]), params)
            else:
                url = newznab_host[1].rstrip() + "/rss"
                params = {
                    "t": str(newznabcat),
                    "dl": "1",
                    "i": str(newznabuid),
                    "r": newznab_host[3].rstrip(),
                    "num": "100",
                }
                check = _parse_feed(site, url, bool(newznab_host[2]), params)

            if check is False and "rss" in url[-3:]:
                logger.fdebug(
                    "RSS url returning 403 error. Attempting to use API to get most recent items in lieu of RSS feed"
                )
                url = newznab_host[1].rstrip() + "/api"
                params = {
                    "t": "search",
                    "cat": str(newznabcat),
                    "dl": "1",
                    "apikey": newznab_host[3].rstrip(),
                    "num": "100",
                }
                check = _parse_feed(site, url, bool(newznab_host[2]), params)

            if check == "disable":
                helpers.disable_provider(site, newznab=True)

    feeddata = []

    for ft in feedthis:
        site = ft["site"]
        logger.fdebug("[RSS] (" + site + ") now being updated...")

        for entry in ft["feed"].entries:
            # Size
            if site == "dognzb":
                # because the rss of dog doesn't carry the enclosure item, we'll use the newznab size value
                size = 0
                if "newznab" in entry and "size" in entry["newznab"]:
                    size = entry["newznab"]["size"]
            else:
                # experimental, nzb.su, newznab
                size = entry.enclosures[0]["length"]

            # Link
            if site == "experimental":
                link = entry.enclosures[0]["url"]
            else:
                # dognzb, nzb.su, newznab
                link = entry.link
                # Remove the API keys from the url to allow for possible api key changes
                if site == "dognzb":
                    link = re.sub(mylar.CONFIG.DOGNZB_APIKEY, "", link).strip()
                else:
                    link = link[: link.find("&i=")].strip()

            feeddata.append(
                {
                    "Site": site,
                    "Title": entry.title,
                    "Link": link,
                    "Pubdate": entry.updated,
                    "Size": size,
                }
            )

        logger.info(
            "[RSS] ("
            + site
            + ") "
            + str(len(ft["feed"].entries))
            + " entries indexed."
        )

    i = len(feeddata)
    if i:
        logger.info(
            "[RSS] "
            + str(i)
            + " entries have been indexed and are now going to be stored for caching."
        )
        rssdbupdate(feeddata, i, "usenet")
    return
def rssdbupdate(feeddata, i, type):
    """Cache a batch of parsed feed entries in the rssdb table.

    feeddata -- list of entry dicts as built by torrents()/nzbs()/ddl()
    i        -- entry count; unused here but kept for the callers
    type     -- 'torrent' entries use lowercase keys; anything else
                ('usenet', 'ddl') uses the capitalized keys
    """
    myDB = db.DBConnection()

    # add the entries into the db so as to save on searches
    # (also builds up the cached torrent/nzb IDs)
    for dataval in feeddata:
        if type == "torrent":
            # we just store the torrent ID's now.
            newVal = {
                "Link": dataval["link"],
                "Pubdate": dataval["pubdate"],
                "Site": dataval["site"],
                "Size": dataval["size"],
            }
            ctrlVal = {"Title": dataval["title"]}
        else:
            newVal = {
                "Link": dataval["Link"],
                "Pubdate": dataval["Pubdate"],
                "Site": dataval["Site"],
                "Size": dataval["Size"],
            }
            ctrlVal = {"Title": dataval["Title"]}
        myDB.upsert("rssdb", newVal, ctrlVal)

    logger.fdebug(
        "Completed adding new data to RSS DB. Next add in "
        + str(mylar.CONFIG.RSS_CHECKINTERVAL)
        + " minutes"
    )
    return
def ddl_dbsearch(seriesname, issue, comicid=None, nzbprov=None, oneoff=False):
    """Search the cached DDL (GetComics) rss entries for a series.

    Returns the string 'no results' when nothing matches, otherwise a
    dict whose 'entries' key holds the matching cached rows.
    """
    myDB = db.DBConnection()
    seriesname_alt = None
    if not any([comicid is None, comicid == "None", oneoff is True]):
        snm = myDB.selectone(
            "SELECT * FROM comics WHERE comicid=?", [comicid]
        ).fetchone()
        if snm is None:
            logger.fdebug("Invalid ComicID of %s. Aborting search" % comicid)
            return "no results"
        seriesname = snm["ComicName"]
        seriesname_alt = snm["AlternateSearch"]

    # collapse 'and'/'the', whitespace and punctuation into SQL wildcards
    cleaned = re.sub("\\band\\b", "%", seriesname.lower())
    cleaned = re.sub("\\bthe\\b", "%", cleaned.lower())
    cleaned = re.sub("\s+", " ", cleaned)
    dsearch_seriesname = re.sub(
        "['\!\@\#\$\%\:\-\;\/\\=\?\&\.\s\,]", "%", cleaned
    )
    dsearch = "%" + dsearch_seriesname + "%"

    dresults = myDB.select(
        "SELECT * FROM rssdb WHERE Title like ? AND Site='DDL'", [dsearch]
    )
    if not dresults:
        return "no results"

    matches = []
    for dl in dresults:
        matches.append(
            {
                "title": dl["Title"],
                "link": dl["Link"],
                "pubdate": dl["Pubdate"],
                "site": dl["Site"],
                "length": dl["Size"],
            }
        )
    return {"entries": matches}
def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None, oneoff=False):
    """Search the cached torrent rss entries for a series/issue.

    Builds SQL LIKE patterns from the series name (and any alternate
    names stored for the comic), queries the rssdb cache for the
    providers enabled via nzbprov, filters by preferred file quality,
    and returns a dict with an 'entries' list -- or 'no results'.
    """
    myDB = db.DBConnection()
    seriesname_alt = None
    if any([comicid is None, comicid == "None", oneoff is True]):
        pass
    else:
        snm = myDB.selectone(
            "SELECT * FROM comics WHERE comicid=?", [comicid]
        ).fetchone()
        if snm is None:
            logger.fdebug("Invalid ComicID of " + str(comicid) + ". Aborting search.")
            return
        else:
            seriesname = snm["ComicName"]
            seriesname_alt = snm["AlternateSearch"]

    # remove 'and' and 'the', then turn punctuation into SQL wildcards:
    tsearch_rem1 = re.sub("\\band\\b", "%", seriesname.lower())
    tsearch_rem2 = re.sub("\\bthe\\b", "%", tsearch_rem1.lower())
    tsearch_removed = re.sub("\s+", " ", tsearch_rem2)
    tsearch_seriesname = re.sub(
        "['\!\@\#\$\%\:\-\;\/\\=\?\&\.\s\,]", "%", tsearch_removed
    )
    # append the preferred-quality extension to the LIKE pattern
    if mylar.CONFIG.PREFERRED_QUALITY == 0:
        tsearch = tsearch_seriesname + "%"
    elif mylar.CONFIG.PREFERRED_QUALITY == 1:
        tsearch = tsearch_seriesname + "%cbr%"
    elif mylar.CONFIG.PREFERRED_QUALITY == 2:
        tsearch = tsearch_seriesname + "%cbz%"
    else:
        tsearch = tsearch_seriesname + "%"

    if seriesname == "0-Day Comics Pack - %s" % (issue[:4]):
        # 0-day packs encode vol (month) / # (day) in the issue value
        tsearch += "vol%s" % issue[5:7]
        tsearch += "%"
        tsearch += "#%s" % issue[8:10]
        tsearch += "%"

    AS_Alt = []
    tresults = []
    tsearch = "%" + tsearch

    if mylar.CONFIG.ENABLE_32P and nzbprov == "32P":
        tresults = myDB.select(
            "SELECT * FROM rssdb WHERE Title like ? AND Site='32P'", [tsearch]
        )
    if mylar.CONFIG.ENABLE_PUBLIC and nzbprov == "Public Torrents":
        tresults += myDB.select(
            "SELECT * FROM rssdb WHERE Title like ? AND (Site='DEM' OR Site='WWT')",
            [tsearch],
        )

    if seriesname_alt is None or seriesname_alt == "None":
        if not tresults:
            logger.fdebug("no Alternate name given. Aborting search.")
            return "no results"
    else:
        chkthealt = seriesname_alt.split("##")
        # fix: original compared the list itself to 0 ('chkthealt == 0'),
        # which is always False; the intent was an emptiness check
        if len(chkthealt) == 0:
            AS_Alternate = seriesname_alt
            AS_Alt.append(seriesname_alt)
        for calt in chkthealt:
            AS_Alter = re.sub("##", "", calt)
            u_altsearchcomic = AS_Alter.encode("ascii", "ignore").strip()
            AS_Altrem = re.sub("\\band\\b", "", u_altsearchcomic.lower())
            AS_Altrem = re.sub("\\bthe\\b", "", AS_Altrem.lower())
            AS_Alternate = re.sub("[\_\#\,\/\:\;\.\-\!\$\%\+'\&\?\@\s]", "%", AS_Altrem)
            AS_Altrem_mod = re.sub("[\&]", " ", AS_Altrem)
            AS_formatrem_seriesname = re.sub(
                "['\!\@\#\$\%\:\;\/\\=\?\.\,]", "", AS_Altrem_mod
            )
            AS_formatrem_seriesname = re.sub("\s+", " ", AS_formatrem_seriesname)
            if AS_formatrem_seriesname[:1] == " ":
                AS_formatrem_seriesname = AS_formatrem_seriesname[1:]
            AS_Alt.append(AS_formatrem_seriesname)
            if mylar.CONFIG.PREFERRED_QUALITY == 0:
                AS_Alternate += "%"
            elif mylar.CONFIG.PREFERRED_QUALITY == 1:
                AS_Alternate += "%cbr%"
            elif mylar.CONFIG.PREFERRED_QUALITY == 2:
                AS_Alternate += "%cbz%"
            else:
                AS_Alternate += "%"
            AS_Alternate = "%" + AS_Alternate
            if mylar.CONFIG.ENABLE_32P and nzbprov == "32P":
                tresults += myDB.select(
                    "SELECT * FROM rssdb WHERE Title like ? AND Site='32P'",
                    [AS_Alternate],
                )
            if mylar.CONFIG.ENABLE_PUBLIC and nzbprov == "Public Torrents":
                tresults += myDB.select(
                    "SELECT * FROM rssdb WHERE Title like ? AND (Site='DEM' OR Site='WWT')",
                    [AS_Alternate],
                )

    if not tresults:
        logger.fdebug("torrent search returned no results for %s" % seriesname)
        return "no results"

    tortheinfo = []
    torinfo = {}

    for tor in tresults:
        # & have been brought into the title field incorretly occassionally - patched now, but to include those entries already in the
        # cache db that have the incorrect entry, we'll adjust.
        torTITLE = re.sub("&", "&", tor["Title"]).strip()

        if mylar.CONFIG.PREFERRED_QUALITY == 1:
            if "cbr" not in torTITLE:
                continue
        elif mylar.CONFIG.PREFERRED_QUALITY == 2:
            if "cbz" not in torTITLE:
                continue

        if nzbprov is not None:
            # skip results from a different provider than was requested
            if nzbprov != tor["Site"] and not any(
                [mylar.CONFIG.ENABLE_PUBLIC, tor["Site"] != "WWT", tor["Site"] != "DEM"]
            ):
                continue

        # normalize both names the same way before the substring match
        seriesname_mod = seriesname
        foundname_mod = torTITLE
        seriesname_mod = re.sub("\\band\\b", " ", seriesname_mod.lower())
        foundname_mod = re.sub("\\band\\b", " ", foundname_mod.lower())
        seriesname_mod = re.sub("\\bthe\\b", " ", seriesname_mod.lower())
        foundname_mod = re.sub("\\bthe\\b", " ", foundname_mod.lower())
        seriesname_mod = re.sub("[\&]", " ", seriesname_mod)
        foundname_mod = re.sub("[\&]", " ", foundname_mod)

        formatrem_seriesname = re.sub("['\!\@\#\$\%\:\;\=\?\.\,]", "", seriesname_mod)
        formatrem_seriesname = re.sub("[\-]", " ", formatrem_seriesname)
        formatrem_seriesname = re.sub("[\/]", " ", formatrem_seriesname)
        formatrem_seriesname = re.sub("\s+", " ", formatrem_seriesname)
        if formatrem_seriesname[:1] == " ":
            formatrem_seriesname = formatrem_seriesname[1:]

        formatrem_torsplit = re.sub("['\!\@\#\$\%\:\;\\=\?\.\,]", "", foundname_mod)
        # replace '-' with space so we'll get hits if differnces
        formatrem_torsplit = re.sub("[\-]", " ", formatrem_torsplit)
        formatrem_torsplit = re.sub("[\/]", " ", formatrem_torsplit)
        formatrem_torsplit = re.sub("\s+", " ", formatrem_torsplit)

        if formatrem_seriesname.lower() in formatrem_torsplit.lower() or any(
            x.lower() in formatrem_torsplit.lower() for x in AS_Alt
        ):
            # (dead post-match title/extension parsing removed -- it
            # produced values that were never used)
            tortheinfo.append(
                {
                    "title": torTITLE,
                    "link": tor["Link"],
                    "pubdate": tor["Pubdate"],
                    "site": tor["Site"],
                    "length": tor["Size"],
                }
            )

    torinfo["entries"] = tortheinfo
    return torinfo
def nzbdbsearch(
    seriesname,
    issue,
    comicid=None,
    nzbprov=None,
    searchYear=None,
    ComicVersion=None,
    oneoff=False,
):
    """Search the local rssdb cache for nzb entries matching a series.

    Returns a dict with an 'entries' key (list of result dicts), the string
    "no results" when nothing matched, or None when the ComicID is invalid.
    """
    myDB = db.DBConnection()
    seriesname_alt = None
    # A valid comicid (and not a one-off) lets us pull the canonical series
    # name and its alternate names from the comics table.
    if not any([comicid is None, comicid == "None", oneoff is True]):
        snm = myDB.selectone(
            "SELECT * FROM comics WHERE comicid=?", [comicid]
        ).fetchone()
        if snm is None:
            logger.info("Invalid ComicID of " + str(comicid) + ". Aborting search.")
            return
        seriesname = snm["ComicName"]
        seriesname_alt = snm["AlternateSearch"]

    # Replace punctuation/whitespace with SQL wildcards for the LIKE match.
    nsearch_seriesname = re.sub(r"['!@#$%:;/\\=?.\-\s]", "%", seriesname)
    formatrem_seriesname = re.sub(r"['!@#$%:;/\\=?.]", "", seriesname)
    nsearch = "%" + nsearch_seriesname + "%"
    nresults = myDB.select(
        "SELECT * FROM rssdb WHERE Title like ? AND Site=?", [nsearch, nzbprov]
    )
    # FIXED: this previously tested "nresults is None", which is never true for
    # an empty result list (and, had it been None, "nresults += ..." below
    # would have raised TypeError).  The dead "if chkthealt == 0:
    # AS_Alternate = AlternateSearch" guard (NameError on an undefined name)
    # has been dropped as well.
    if not nresults:
        logger.fdebug("nzb search returned no results for " + seriesname)
        if seriesname_alt is None:
            logger.fdebug("no nzb Alternate name given. Aborting search.")
            return "no results"
        # Retry with each '##'-separated alternate name.
        chkthealt = seriesname_alt.split("##")
        nresults = []
        for calt in chkthealt:
            AS_Alternate = "%" + re.sub("##", "", calt) + "%"
            nresults += myDB.select(
                "SELECT * FROM rssdb WHERE Title like ? AND Site=?",
                [AS_Alternate, nzbprov],
            )
        if not nresults:
            logger.fdebug("nzb alternate name search returned no results.")
            return "no results"

    nzbtheinfo = []
    nzbinfo = {}

    if nzbprov == "experimental":
        # Phrases that denote scene junk rather than a real issue title.
        except_list = ["releases", "gold line", "distribution", "0-day", "0 day"]

        # NOTE(review): every branch below ends with ComVersChk = 0, so the
        # volume-number check effectively never fires; structure preserved
        # as-is to avoid a behavior change.
        if ComicVersion:
            ComVersChk = re.sub("[^0-9]", "", ComicVersion)
            if ComVersChk == "":
                ComVersChk = 0
            else:
                ComVersChk = 0
        else:
            ComVersChk = 0

        filetype = None
        if mylar.CONFIG.PREFERRED_QUALITY == 1:
            filetype = "cbr"
        elif mylar.CONFIG.PREFERRED_QUALITY == 2:
            filetype = "cbz"

        _digits = re.compile(r"\d")  # hoisted: compiled once, not per result
        for results in nresults:
            title = results["Title"]
            splitTitle = title.split('"')
            noYear = "False"
            for subs in splitTitle:
                if (
                    len(subs) >= len(seriesname)
                    and not any(d in subs.lower() for d in except_list)
                    and bool(_digits.search(subs)) is True
                ):
                    if subs.lower().startswith("for"):
                        # need to filter down alternate names in here at some point...
                        if seriesname.lower().startswith("for"):
                            pass
                        else:
                            # this is the crap we ignore. Continue
                            logger.fdebug(
                                "this starts with FOR : "
                                + str(subs)
                                + ". This is not present in the series - ignoring."
                            )
                            continue
                    if ComVersChk == 0:
                        noYear = "False"
                    if ComVersChk != 0 and searchYear not in subs:
                        noYear = "True"
                        noYearline = subs
                    # FIXED: check noYear first so "searchYear in subs" is only
                    # evaluated when a year was actually expected; previously a
                    # None searchYear raised TypeError here.
                    if noYear == "True" and searchYear in subs:
                        # year was missing on the previous sub-title; append it
                        subs = noYearline + " (" + searchYear + ")"
                        noYear = "False"
                    if noYear == "False":
                        # honour the preferred file-type filter, if any
                        if filetype is not None and filetype not in subs.lower():
                            continue
                        nzbtheinfo.append(
                            {
                                "title": subs,
                                "link": re.sub(
                                    "/release/", "/download/", results["Link"]
                                ),
                                "pubdate": str(results["PubDate"]),
                                "site": str(results["Site"]),
                                "length": str(results["Size"]),
                            }
                        )
    else:
        for nzb in nresults:
            # no need to parse here, just compile and throw it back ....
            nzbtheinfo.append(
                {
                    "title": nzb["Title"],
                    "link": nzb["Link"],
                    "pubdate": nzb["Pubdate"],
                    "site": nzb["Site"],
                    "length": nzb["Size"],
                }
            )

    nzbinfo["entries"] = nzbtheinfo
    return nzbinfo
def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
    """Fetch the torrent for one issue and hand it to the configured client.

    seriesname/issue/seriesyear -- used only to build the torrent filename
    linkit -- torrent id (32P/WWT), direct url, or magnet link
    site   -- '32P', 'DEM', 'WWT', 'Public Torrents', or anything else
              (treated as a direct link)

    Returns a torrent_info dict (hash/clientmode/link/...) on success, the
    string "fail" on any error, or the ftp/ssh upload result in seedbox
    watchdir mode.
    """
    logger.info("matched on " + seriesname)
    filename = helpers.filesafe(seriesname)
    filename = re.sub(" ", "_", filename)
    filename += "_" + str(issue) + "_" + str(seriesyear)
    if linkit[-7:] != "torrent":
        filename += ".torrent"

    # Decide where the .torrent file gets written: cache dir for real torrent
    # clients, local watch dir (local mode) or cache dir (seedbox mode).
    if any(
        [
            mylar.USE_UTORRENT,
            mylar.USE_RTORRENT,
            mylar.USE_TRANSMISSION,
            mylar.USE_DELUGE,
            mylar.USE_QBITTORRENT,
        ]
    ):
        filepath = os.path.join(mylar.CONFIG.CACHE_DIR, filename)
        logger.fdebug("filename for torrent set to : " + filepath)
    elif mylar.USE_WATCHDIR:
        if mylar.CONFIG.TORRENT_LOCAL and mylar.CONFIG.LOCAL_WATCHDIR is not None:
            filepath = os.path.join(mylar.CONFIG.LOCAL_WATCHDIR, filename)
            logger.fdebug("filename for torrent set to : " + filepath)
        elif mylar.CONFIG.TORRENT_SEEDBOX and mylar.CONFIG.SEEDBOX_WATCHDIR is not None:
            filepath = os.path.join(mylar.CONFIG.CACHE_DIR, filename)
            logger.fdebug("filename for torrent set to : " + filepath)
        else:
            logger.error(
                "No Local Watch Directory or Seedbox Watch Directory specified. Set it and try again."
            )
            return "fail"

    cf_cookievalue = None
    if site == "32P":
        # 32P downloads go through their authenticated API rather than a
        # plain http fetch below.
        url = "https://32pag.es/torrents.php"
        if mylar.CONFIG.ENABLE_32P is False:
            return "fail"
        if mylar.CONFIG.VERIFY_32P == 1 or mylar.CONFIG.VERIFY_32P == True:
            verify = True
        else:
            verify = False
        logger.fdebug("[32P] Verify SSL set to : " + str(verify))

        if mylar.CONFIG.MODE_32P is False:
            # legacy RSS-feed key mode
            if mylar.KEYS_32P is None or mylar.CONFIG.PASSKEY_32P is None:
                logger.warn(
                    "[32P] Unable to retrieve keys from provided RSS Feed. Make sure you have provided a CURRENT RSS Feed from 32P"
                )
                mylar.KEYS_32P = helpers.parse_32pfeed(mylar.FEED_32P)
                if mylar.KEYS_32P is None or mylar.KEYS_32P == "":
                    return "fail"
                else:
                    logger.fdebug(
                        "[32P-AUTHENTICATION] 32P (Legacy) Authentication Successful. Re-establishing keys."
                    )
                    mylar.AUTHKEY_32P = mylar.KEYS_32P["authkey"]
            else:
                logger.fdebug(
                    "[32P-AUTHENTICATION] 32P (Legacy) Authentication already done. Attempting to use existing keys."
                )
                mylar.AUTHKEY_32P = mylar.KEYS_32P["authkey"]
        else:
            # username/password (auth) mode
            if any(
                [
                    mylar.CONFIG.USERNAME_32P is None,
                    mylar.CONFIG.USERNAME_32P == "",
                    mylar.CONFIG.PASSWORD_32P is None,
                    mylar.CONFIG.PASSWORD_32P == "",
                ]
            ):
                logger.error(
                    "[RSS] Unable to sign-on to 32P to validate settings and initiate download sequence. Please enter/check your username password in the configuration."
                )
                return "fail"
            elif (
                mylar.CONFIG.PASSKEY_32P is None
                or mylar.AUTHKEY_32P is None
                or mylar.KEYS_32P is None
            ):
                logger.fdebug(
                    "[32P-AUTHENTICATION] 32P (Auth Mode) Authentication enabled. Keys have not been established yet, attempting to gather."
                )
                feed32p = auth32p.info32p(reauthenticate=True)
                feedinfo = feed32p.authenticate()
                if feedinfo == "disable":
                    helpers.disable_provider("32P")
                    return "fail"
                if (
                    mylar.CONFIG.PASSKEY_32P is None
                    or mylar.AUTHKEY_32P is None
                    or mylar.KEYS_32P is None
                ):
                    logger.error(
                        "[RSS] Unable to sign-on to 32P to validate settings and initiate download sequence. Please enter/check your username password in the configuration."
                    )
                    return "fail"
            else:
                logger.fdebug(
                    "[32P-AUTHENTICATION] 32P (Auth Mode) Authentication already done. Attempting to use existing keys."
                )

        payload = {
            "action": "download",
            "torrent_pass": mylar.CONFIG.PASSKEY_32P,
            "authkey": mylar.AUTHKEY_32P,
            "id": linkit,
        }
        dfile = auth32p.info32p()
        file_download = dfile.downloadfile(payload, filepath)
        if file_download is False:
            return "fail"
        logger.fdebug("[%s] Saved torrent file to : %s" % (site, filepath))

    elif site == "DEM":
        url = helpers.torrent_create("DEM", linkit)
        if url.startswith("https"):
            dem_referrer = mylar.DEMURL + "files/download/"
        else:
            dem_referrer = "http" + mylar.DEMURL[5:] + "files/download/"
        headers = {
            "Accept-encoding": "gzip",
            "User-Agent": str(mylar.USER_AGENT),
            "Referer": dem_referrer,
        }
        logger.fdebug("Grabbing torrent from url:" + str(url))
        payload = None
        verify = False

    elif site == "WWT":
        url = helpers.torrent_create("WWT", linkit)
        if url.startswith("https"):
            wwt_referrer = mylar.WWTURL
        else:
            wwt_referrer = "http" + mylar.WWTURL[5:]
        headers = {
            "Accept-encoding": "gzip",
            "User-Agent": mylar.CV_HEADERS["User-Agent"],
            "Referer": wwt_referrer,
        }
        logger.fdebug("Grabbing torrent [id:" + str(linkit) + "] from url:" + str(url))
        payload = {"id": linkit}
        verify = False

    else:
        # anything else is a direct link to a .torrent file / magnet
        headers = {"Accept-encoding": "gzip", "User-Agent": str(mylar.USER_AGENT)}
        url = linkit
        payload = None
        verify = False

    if site != "Public Torrents" and site != "32P":
        if not verify:
            # disable SSL warnings - too many 'warning' messages about
            # invalid certificates when verification is off
            try:
                from requests.packages.urllib3 import disable_warnings

                disable_warnings()
            except ImportError:
                # this is probably not necessary and redundant, but leaving in for the time being.
                from requests.packages.urllib3.exceptions import InsecureRequestWarning

                requests.packages.urllib3.disable_warnings()
                try:
                    from urllib3.exceptions import InsecureRequestWarning

                    urllib3.disable_warnings()
                except ImportError:
                    logger.warn("[EPIC FAILURE] Cannot load the requests module")
                    return "fail"

        try:
            scraper = cfscrape.create_scraper()
            if site == "WWT":
                if mylar.WWT_CF_COOKIEVALUE is None:
                    # solve the Cloudflare challenge once and cache the cookie
                    cf_cookievalue, cf_user_agent = scraper.get_tokens(
                        url, user_agent=mylar.CV_HEADERS["User-Agent"]
                    )
                    mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
                r = scraper.get(
                    url,
                    params=payload,
                    cookies=mylar.WWT_CF_COOKIEVALUE,
                    verify=verify,
                    stream=True,
                    headers=headers,
                )
            else:
                r = scraper.get(
                    url, params=payload, verify=verify, stream=True, headers=headers
                )
        # FIXED: py2-only "except Exception, e:" syntax; also previously
        # execution fell through here with 'r' unbound and crashed on the
        # status-code check below - bail out instead.  (A large commented-out
        # 32P retry block that lived here has been removed.)
        except Exception as e:
            logger.warn("Error fetching data from %s (%s): %s" % (site, url, e))
            return "fail"

        if any([site == "DEM", site == "WWT"]) and any(
            [
                str(r.status_code) == "403",
                str(r.status_code) == "404",
                str(r.status_code) == "503",
            ]
        ):
            if str(r.status_code) != "503":
                logger.warn(
                    "Unable to download from " + site + " [" + str(r.status_code) + "]"
                )
                # retry with the alternate torrent link.
                url = helpers.torrent_create(site, linkit, True)
                logger.fdebug("Trying alternate url: " + str(url))
                try:
                    r = requests.get(
                        url, params=payload, verify=verify, stream=True, headers=headers
                    )
                except Exception as e:
                    return "fail"
            else:
                logger.warn(
                    "Cloudflare protection online for "
                    + site
                    + ". Attempting to bypass..."
                )
                try:
                    scraper = cfscrape.create_scraper()
                    cf_cookievalue, cf_user_agent = cfscrape.get_cookie_string(url)
                    headers = {"Accept-encoding": "gzip", "User-Agent": cf_user_agent}
                    r = scraper.get(
                        url,
                        verify=verify,
                        cookies=cf_cookievalue,
                        stream=True,
                        headers=headers,
                    )
                except Exception as e:
                    return "fail"

        # NOTE(review): the original wrapped r.content in a gzip.GzipFile here
        # for gzip-encoded DEM/WWT responses and then immediately rebound the
        # variable, so it never had any effect; requests decompresses
        # Content-Encoding in iter_content anyway.  Dead code removed.
        with open(filepath, "wb") as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        logger.fdebug("[" + site + "] Saved torrent file to : " + filepath)
    else:
        if site != "32P":
            # public torrents are magnet links only - pass the link itself on
            filepath = linkit

    # Hand the saved file (or magnet link) off to the configured client.
    if mylar.USE_UTORRENT:
        uTC = utorrent.utorrentclient()
        ti = uTC.addfile(filepath, filename)
        if ti == "fail":
            return ti
        else:
            # if ti is value, it will return the hash
            torrent_info = {}
            torrent_info["hash"] = ti
            torrent_info["clientmode"] = "utorrent"
            torrent_info["link"] = linkit
            return torrent_info

    elif mylar.USE_RTORRENT:
        # 'test' is the historical name of the local rtorrent wrapper module
        import test

        rp = test.RTorrent()
        torrent_info = rp.main(filepath=filepath)
        if torrent_info:
            torrent_info["clientmode"] = "rtorrent"
            torrent_info["link"] = linkit
            return torrent_info
        else:
            return "fail"

    elif mylar.USE_TRANSMISSION:
        try:
            rpc = transmission.TorrentClient()
            if not rpc.connect(
                mylar.CONFIG.TRANSMISSION_HOST,
                mylar.CONFIG.TRANSMISSION_USERNAME,
                mylar.CONFIG.TRANSMISSION_PASSWORD,
            ):
                return "fail"
            torrent_info = rpc.load_torrent(filepath)
            if torrent_info:
                torrent_info["clientmode"] = "transmission"
                torrent_info["link"] = linkit
                return torrent_info
            else:
                return "fail"
        except Exception as e:
            logger.error(e)
            return "fail"

    elif mylar.USE_DELUGE:
        # NOTE: an unreachable 'Unable to connect to Deluge!' log line that
        # followed the if/else below (both branches return) has been removed.
        try:
            dc = deluge.TorrentClient()
            if not dc.connect(
                mylar.CONFIG.DELUGE_HOST,
                mylar.CONFIG.DELUGE_USERNAME,
                mylar.CONFIG.DELUGE_PASSWORD,
            ):
                logger.info("Not connected to Deluge!")
                return "fail"
            else:
                logger.info("Connected to Deluge! Will try to add torrent now!")
                torrent_info = dc.load_torrent(filepath)
                if torrent_info:
                    torrent_info["clientmode"] = "deluge"
                    torrent_info["link"] = linkit
                    return torrent_info
                else:
                    return "fail"
        except Exception as e:
            logger.error(e)
            return "fail"

    elif mylar.USE_QBITTORRENT:
        try:
            qc = qbittorrent.TorrentClient()
            if not qc.connect(
                mylar.CONFIG.QBITTORRENT_HOST,
                mylar.CONFIG.QBITTORRENT_USERNAME,
                mylar.CONFIG.QBITTORRENT_PASSWORD,
            ):
                logger.info(
                    "Not connected to qBittorrent - Make sure the Web UI is enabled and the port is correct!"
                )
                return "fail"
            else:
                logger.info("Connected to qBittorrent! Will try to add torrent now!")
                torrent_info = qc.load_torrent(filepath)
                if torrent_info["status"] is True:
                    torrent_info["clientmode"] = "qbittorrent"
                    torrent_info["link"] = linkit
                    return torrent_info
                else:
                    logger.info("Unable to add torrent to qBittorrent")
                    return "fail"
        except Exception as e:
            logger.error(e)
            return "fail"

    elif mylar.USE_WATCHDIR:
        if mylar.CONFIG.TORRENT_LOCAL:
            # get the hash so the snatch can be tracked afterwards
            torrent_info = helpers.get_the_hash(filepath)
            torrent_info["clientmode"] = "watchdir"
            torrent_info["link"] = linkit
            torrent_info["filepath"] = filepath
            return torrent_info
        else:
            tssh = ftpsshup.putfile(filepath, filename)
            return tssh
def delete_cache_entry(id):
    """Remove a single cached 32P entry from the rssdb table by its link."""
    connection = db.DBConnection()
    connection.action("DELETE FROM rssdb WHERE link=? AND Site='32P'", [id])
if __name__ == "__main__":
    # Manual invocation for debugging a single provider:
    #   python <this module> <provider>
    # torrents(sys.argv[1])
    # torrentdbsearch(sys.argv[1], sys.argv[2], sys.argv[3])
    nzbs(provider=sys.argv[1])
|
Import | stepZ | # -*- coding: utf-8 -*-
# ****************************************************************************
# * Copyright (c) 2018 Maurice <easyw@katamail.com> *
# * *
# * StepZ Import Export compressed STEP files for FreeCAD *
# * License: LGPLv2+ *
# * *
# ****************************************************************************
# workaround for unicode in gzipping filename
# OCC7 doesn't support non-ASCII characters at the moment
# https://forum.freecad.org/viewtopic.php?t=20815
import os
import re
import shutil
import tempfile
import FreeCAD
import FreeCADGui
import ImportGui
import PySide
from PySide import QtCore, QtGui
___stpZversion___ = "1.4.0"
# support both gz and zipfile archives
# Catia seems to use gz, Inventor zipfile
# improved import, open and export
import builtins
import gzip as gz
import importlib
import zipfile as zf
# import stepZ; import importlib; importlib.reload(stepZ); stepZ.open(u"C:/Temp/brick.stpz")
def mkz_string(input):
    """Return *input* unchanged if it is already a str; otherwise return its
    UTF-8 encoding (py2-era helper for normalizing text)."""
    return input if isinstance(input, str) else input.encode("utf-8")
####
def mkz_unicode(input):
    """Return *input* unchanged if it is already a str; otherwise decode it
    from UTF-8 (py2-era helper for normalizing text)."""
    return input if isinstance(input, str) else input.decode("utf-8")
####
def sayz(msg):
    """Print *msg* to the FreeCAD report view as a normal message, newline-terminated."""
    FreeCAD.Console.PrintMessage(msg)
    FreeCAD.Console.PrintMessage("\n")
####
def sayzw(msg):
    """Print *msg* to the FreeCAD report view as a warning, newline-terminated."""
    FreeCAD.Console.PrintWarning(msg)
    FreeCAD.Console.PrintWarning("\n")
####
def sayzerr(msg):
    """Print *msg* to the FreeCAD report view as an error, newline-terminated.

    Fixed: the trailing newline was emitted with PrintWarning, inconsistent
    with the message itself and with the sayz/sayzw helpers.
    """
    FreeCAD.Console.PrintError(msg)
    FreeCAD.Console.PrintError("\n")
####
def import_stpz(fn, fc, doc):
    """Write the decompressed STEP payload to a temp file and import it.

    fn  -- original compressed-STEP path (only its basename is used)
    fc  -- decompressed STEP file content, as bytes
    doc -- target FreeCAD document, or None to open into a new document

    Fixed: dropped the unused locals ext/basepath/filepath from the original.
    """
    fname = os.path.splitext(os.path.basename(fn))[0]
    # The temp dir gives an ASCII-safe path (OCC7 cannot handle non-ASCII
    # characters in file names - see the header note).
    tempdir = tempfile.gettempdir()
    tempfilepath = os.path.join(tempdir, fname + ".stp")
    with builtins.open(tempfilepath, "wb") as f:
        f.write(fc)
    if doc is None:
        ImportGui.open(tempfilepath)
    else:
        ImportGui.open(tempfilepath, doc.Name)
    FreeCADGui.SendMsgToActiveView("ViewFit")
    try:
        os.remove(tempfilepath)
    except OSError:
        sayzerr("error on removing " + tempfilepath + " file")
###
def open(filename, doc=None):
    """FreeCAD importer hook: load a compressed STEP file into *doc*.

    Supports both zip-style archives (e.g. Inventor) and plain gzip streams
    (e.g. Catia).  Note: intentionally shadows the builtin open().
    """
    if zf.is_zipfile(filename):
        # zip archive: import every member it contains
        with zf.ZipFile(filename, "r") as archive:
            for member in archive.namelist():
                sayz(member)
                with archive.open(member) as handle:
                    payload = handle.read()
                import_stpz(filename, payload, doc)
    else:
        # single gzip stream
        with gz.open(filename, "rb") as handle:
            sayz(os.path.splitext(os.path.basename(filename))[0])
            import_stpz(filename, handle.read(), doc)
####
def insert(filename, doc):
    """FreeCAD importer hook: insert *filename* into the active document.

    NOTE(review): the *doc* argument is ignored and overwritten with the
    active document - confirm this is intentional before relying on it.
    """
    doc = FreeCAD.ActiveDocument
    open(filename, doc)
####
def export(objs, filename):
    """Export *objs* as a gzip-compressed STEP (.stpZ) file next to *filename*.

    Fixed vs. the original: removed eight unused path variables, the
    redundant close() calls inside the with-blocks, and the duplicated
    if/else whose branches both performed the same shutil.move().
    """
    sayz("stpZ version " + ___stpZversion___)
    fname = os.path.splitext(os.path.basename(filename))[0]
    basepath = os.path.split(filename)[0]
    # The plain STEP file is first written to the temp dir (ASCII-safe path
    # for OCC), gzipped in place, then moved to the requested location.
    tempdir = tempfile.gettempdir()
    outfpath = os.path.join(basepath, fname) + ".stpZ"
    tempstep = os.path.join(tempdir, fname) + ".stp"

    if os.path.exists(tempstep):
        os.remove(tempstep)
        sayzw("Old temp file with the same name removed '" + tempstep + "'")

    ImportGui.export(objs, tempstep)
    with builtins.open(tempstep, "rb") as f_in:
        step_data = f_in.read()
    # Re-opening the same temp path with gz.open replaces the uncompressed
    # STEP file with its gzipped form; the move then renames it to .stpZ.
    with gz.open(tempstep, "wb") as f_out:
        f_out.write(step_data)
    shutil.move(tempstep, outfpath)
####
|
FeatureHole | ViewProviderHole | # /******************************************************************************
# * Copyright (c) 2012 Jan Rheinländer <jrheinlaender@users.sourceforge.net> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ******************************************************************************/
import FreeCAD
import FreeCADGui
from TaskHole import TaskHole
class ViewProviderHole:
    """View provider proxy for the Hole feature."""

    def __init__(self, obj):
        """Register this proxy on the view object and remember the feature."""
        obj.Proxy = self
        self.Object = obj.Object

    def attach(self, obj):
        """Set up the scene sub-graph (mandatory hook); nothing to build here."""
        return

    def claimChildren(self):
        """Group the groove feature and its datum geometry under the hole."""
        if self is None:
            return
        # Guard against restored objects that never got an 'Object' attribute;
        # a plain self.Object access here raised a PyCXX TypeError.
        if not hasattr(self, "Object"):
            return
        if self.Object is None:
            return
        sketch_plane = self.Object.HoleGroove.Sketch.Support[0]
        return [
            self.Object.HoleGroove,  # the groove feature
            sketch_plane,  # the groove sketch plane (datum plane) feature
            sketch_plane.References[0][0],  # the plane's first reference (datum line)
        ]

    def updateData(self, fp, prop):
        """React to feature property changes; nothing to do."""
        return

    def getDisplayModes(self, obj):
        """Return the list of custom display modes (none defined)."""
        return []

    def getDefaultDisplayMode(self):
        """Return the default display mode name (must be in getDisplayModes)."""
        return "Shaded"

    def onChanged(self, vp, prop):
        """React to view-property changes; nothing to do."""
        pass

    def setEdit(self, vp, mode):
        """Open the hole task dialog; abort the edit if its UI setup fails."""
        panel = TaskHole(self.Object)
        FreeCADGui.Control.showDialog(panel)
        if panel.setupUi():
            return True
        FreeCADGui.Control.closeDialog(panel)
        return False

    def unsetEdit(self, vp, mode):
        """Leave edit mode; nothing to clean up."""
        return

    def getIcon(self):
        """Return the XPM tree-view icon; empty string means the default icon."""
        return ""

    def __getstate__(self):
        """Nothing picklable to store when the document is saved."""
        return None

    def __setstate__(self, state):
        """Nothing was stored, so nothing to restore."""
        return None
|
src | utils | # -*- coding: utf-8 -*-
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import sys
import time
import types
from collections import deque
import better_exchook
# Python 2/3 compatibility shims: one set of names (NumberTypes, unicode,
# py2_str, to_bytes, reload_module) used throughout this module.
PY3 = sys.version_info[0] >= 3
if PY3:
    NumberTypes = (int, float)
    unicode = str  # py2-style alias for the text type
    py2_str = bytes  # py2 'str' was a byte string

    def to_bytes(s):
        # Encode text to UTF-8; bytes pass through unchanged.
        if isinstance(s, str):
            return s.encode("utf8")
        assert isinstance(s, bytes)
        return s

    from importlib import reload as reload_module
else:  # Python <= 2
    NumberTypes = (types.IntType, types.LongType, types.FloatType)
    py2_unicode = unicode
    unicode = py2_unicode
    py2_str = str

    def to_bytes(s):
        # buffer() gives a read-only byte view without copying.
        return buffer(s)

    reload_module = reload

# some global variable which indicates that we are quitting just right now
quit = False
class Id:
    """Unique token object, optionally carrying a human-readable name."""

    name = None

    def __init__(self, name=None):
        self.name = name

    def __repr__(self):
        # Named tokens show their name; anonymous ones show their identity.
        return "<Id %s>" % self.name if self.name else "<Id %i>" % id(self)
class Uninitialized:
    """Sentinel type marking a value that has not been set yet."""
class initBy(object):
    """Lazy-initialization descriptor: initFunc computes the value on first
    access and the result is cached on the instance under '_<name>'.

    If the cached value is itself a descriptor (defines __get__/__set__),
    reads and writes are delegated to it, so e.g. properties can be created
    lazily.
    """

    def __init__(self, initFunc, name=None):
        self.initFunc = initFunc
        # func_name is the py2 spelling of __name__
        self.name = name or (initFunc.__name__ if PY3 else initFunc.func_name)
        self.attrName = "_" + self.name

    def load(self, inst):
        # Compute and cache the value once per instance.
        if not hasattr(inst, self.attrName):
            setattr(inst, self.attrName, self.initFunc(inst))

    def __get__(self, inst, type=None):
        if inst is None:  # access through class
            return self
        self.load(inst)
        # Delegate to a nested descriptor if the cached value is one.
        if hasattr(getattr(inst, self.attrName), "__get__"):
            return getattr(inst, self.attrName).__get__(inst, type)
        return getattr(inst, self.attrName)

    def __set__(self, inst, value):
        self.load(inst)
        # Same delegation on assignment.
        if hasattr(getattr(inst, self.attrName), "__set__"):
            return getattr(inst, self.attrName).__set__(inst, value)
        setattr(inst, self.attrName, value)
class oneOf(object):
    """Descriptor restricting assignment to a fixed set of constants.

    The current value lives on the descriptor itself, so it is shared by
    every instance of the owning class; reading the attribute yields the
    descriptor (inspect .value for the current constant).
    """

    def __init__(self, *consts):
        assert len(consts) > 0
        self.consts = consts
        # start out at the first allowed constant
        self.value = consts[0]

    def __get__(self, inst, type=None):
        # Both class-level and instance access return the descriptor itself.
        return self

    def __set__(self, inst, value):
        assert value in self.consts
        self.value = value
class safe_property(object):
    """Property wrapper that never lets an AttributeError escape.

    An AttributeError leaking out of a property getter/setter would be
    misread by attribute-lookup machinery as "attribute missing"; report it
    through sys.excepthook instead and degrade gracefully.
    """

    def __init__(self, prop):
        self.prop = prop

    def __get__(self, instance, owner):
        if instance is None:
            return self
        try:
            value = self.prop.__get__(instance, owner)
        except AttributeError:
            # Never re-raise this particular exception; report and give up.
            sys.excepthook(*sys.exc_info())
            return None  # the best we can do
        return value

    def __set__(self, inst, value):
        try:
            self.prop.__set__(inst, value)
        except AttributeError:
            # Same policy as __get__: report instead of re-raising.
            sys.excepthook(*sys.exc_info())

    def __getattr__(self, attr):
        # forward prop.setter, prop.deleter, etc.
        return getattr(self.prop, attr)
def formatDate(t):
    """Format a unix timestamp (or an existing struct_time) as
    'YYYY-MM-DD HH:MM:SS +0000' in UTC."""
    when = time.gmtime(t) if isinstance(t, NumberTypes) else t
    return time.strftime("%Y-%m-%d %H:%M:%S +0000", when)
def formatTime(t):
    """Render a duration in seconds as MM:SS, or HH:MM:SS once it reaches an
    hour; returns "?" for an unknown (None) duration."""
    if t is None:
        return "?"
    total = round(t)
    minutes = int(total // 60)
    secs = total - minutes * 60
    hours, minutes = divmod(minutes, 60)
    if hours:
        return "%02i:%02i:%02.0f" % (hours, minutes, secs)
    return "%02i:%02.0f" % (minutes, secs)
def formatFilesize(s):
    """Human-readable file size; switches to the next unit at 800 so the
    mantissa keeps at most three significant digits."""
    threshold = 800
    units = ["byte", "KB", "MB", "GB", "TB"]
    idx = 0
    while s >= threshold and idx < len(units) - 1:
        s /= 1024.0
        idx += 1
    return "%.3g %s" % (s, units[idx])
def betterRepr(o):
    """Deterministic repr: lists/deques/tuples/dicts are rendered recursively,
    with dict items sorted by key (plain dict repr order is undefined)."""
    if isinstance(o, list):
        inner = "".join(betterRepr(item) + ",\n" for item in o)
        return "[\n" + inner + "]"
    if isinstance(o, deque):
        inner = "".join(betterRepr(item) + ",\n" for item in o)
        return "deque([\n" + inner + "])"
    if isinstance(o, tuple):
        return "(" + ", ".join(betterRepr(item) for item in o) + ")"
    if isinstance(o, dict):
        inner = "".join(
            betterRepr(k) + ": " + betterRepr(v) + ",\n" for k, v in sorted(o.items())
        )
        return "{\n" + inner + "}"
    # anything else falls back to the builtin repr
    return repr(o)
def takeN(iterator, n):
    """Take up to *n* items from *iterator*; the result is shorter if the
    iterator is exhausted early, and nothing is consumed when n <= 0."""
    taken = []
    while len(taken) < n:
        try:
            taken.append(next(iterator))
        except StopIteration:
            break
    return taken
def attrChain(base, *attribs, **kwargs):
    """Resolve base.a.b.c..., returning kwargs['default'] (None by default)
    as soon as any link in the chain is missing or None."""
    default = kwargs.get("default", None)
    node = base
    for attrib in attribs:
        if node is None:
            return default
        node = getattr(node, attrib, None)
    return default if node is None else node
def ObjectProxy(lazyLoader, customAttribs={}, baseType=object, typeName="ObjectProxy"):
    """Build a lazily-loaded proxy object.

    The real object is created by lazyLoader() on first attribute access and
    cached; reads and writes are then forwarded to it.  The proxy also acts
    as a descriptor: proxy.__get__(None) yields the underlying object.
    customAttribs entries shadow attributes of the real object.

    NOTE: customAttribs uses a mutable default, but it is only copied here,
    never mutated, so the shared-default pitfall does not apply.
    """

    class Value:
        # Cell whose 'value' attribute holds the lazily created object.
        pass

    obj = Value()
    attribs = customAttribs.copy()

    def load():
        # Create the underlying object exactly once.
        if not hasattr(obj, "value"):
            obj.value = lazyLoader()

    def obj_getattribute(self, key):
        # Prefer attributes on the proxy itself; fall back to the real object.
        try:
            return object.__getattribute__(self, key)
        except AttributeError:
            load()
            return getattr(obj.value, key)

    def obj_setattr(self, key, value):
        # All writes go straight to the real object.
        load()
        return setattr(obj.value, key, value)

    def obj_desc_get(self, inst, type=None):
        # Descriptor access with inst=None unwraps to the real object.
        if inst is None:
            load()
            return obj.value
        return self

    def obj_desc_set(self, inst, value):
        if hasattr(value, "__get__"):
            # In case the value is itself some sort of ObjectProxy, try to get its
            # underlying object and use our proxy instead.
            obj.value = value.__get__(None)
        else:
            obj.value = value

    attribs.update(
        {
            "__getattribute__": obj_getattribute,
            "__setattr__": obj_setattr,
            "__get__": obj_desc_get,
            "__set__": obj_desc_set,
        }
    )
    # just set them so that we have them in the class. needed for __len__, __str__, etc.
    for a in dir(baseType):
        if a == "__new__":
            continue
        if a == "__init__":
            continue
        if a in attribs.keys():
            continue

        # Special methods bypass __getattribute__, so each dunder of baseType
        # is exposed through a descriptor that forwards to the loaded object.
        class WrapProp(object):
            # attrib=a binds the loop variable early (late-binding closure trap).
            def __get__(self, inst, type=None, attrib=a):
                if inst is lazyObjInst:
                    load()
                    return object.__getattribute__(obj.value, attrib)
                return getattr(baseType, attrib)

        attribs[a] = WrapProp()
    LazyObject = type(typeName, (object,), attribs)
    # lazyObjInst is referenced by the WrapProp closures above.
    lazyObjInst = LazyObject()
    return lazyObjInst
def PersistentObject(
    baseType,
    filename,
    defaultArgs=(),
    persistentRepr=False,
    namespace=None,
    installAutosaveWrappersOn=(),
    autosaveOnDel=True,
    customAttribs={},
):
    """ObjectProxy for a baseType instance persisted as a repr() text file in
    the application's user directory.

    The file is eval()'ed lazily at first access (only trusted local state
    must be stored there); .save() writes it back, optionally automatically
    on __del__ and after every method named in installAutosaveWrappersOn.
    """
    betterRepr = globals()["betterRepr"]  # save local copy
    import appinfo

    fullfn = appinfo.userdir + "/" + filename

    def load():
        # Read and evaluate the stored repr; any failure falls back to a
        # freshly constructed default instance.
        try:
            f = open(fullfn)
        except IOError:  # e.g. file-not-found. that's ok
            return baseType(*defaultArgs)

        # some common types
        g = {baseType.__name__: baseType}  # the baseType itself
        if namespace is None:
            g.update(globals())  # all what we have here
            if baseType.__module__:
                # the module of the basetype
                import sys

                m = sys.modules[baseType.__module__]
                g.update([(varname, getattr(m, varname)) for varname in dir(m)])
        else:
            g.update(namespace)
        try:
            obj = eval(f.read(), g)
        except Exception:
            import sys

            sys.excepthook(*sys.exc_info())
            return baseType(*defaultArgs)

        # Try to convert.
        if not isinstance(obj, baseType):
            obj = baseType(obj)
        return obj

    def save(obj):
        # Serialize the unwrapped object via the deterministic betterRepr.
        s = betterRepr(obj.__get__(None))
        f = open(fullfn, "w")
        f.write(s)
        f.write("\n")
        f.close()

    def obj_repr(obj):
        # persistentRepr reproduces the constructor call instead of the data.
        if persistentRepr:
            return "PersistentObject(%s, %r, persistentRepr=True)" % (
                baseType.__name__,
                filename,
            )
        return betterRepr(obj.__get__(None))

    _customAttribs = {
        "save": save,
        "_isPersistentObject": True,
        "_filename": filename,
        "_persistentRepr": persistentRepr,
        "__repr__": obj_repr,
    }
    if autosaveOnDel:

        def obj_del(obj):
            save(obj)

        _customAttribs["__del__"] = obj_del

    def makeWrapper(funcAttrib):
        # Wrap a mutating method so every call is followed by a save().
        def wrapped(self, *args, **kwargs):
            obj = self.__get__(None)
            f = getattr(obj, funcAttrib)
            ret = f(*args, **kwargs)
            save(self)
            return ret

        return wrapped

    for attr in installAutosaveWrappersOn:
        _customAttribs[attr] = makeWrapper(attr)
    _customAttribs.update(customAttribs)
    return ObjectProxy(
        load,
        baseType=baseType,
        customAttribs=_customAttribs,
        typeName="PersistentObject(%s)" % filename,
    )
def test_ObjectProxy():
    """Self-test: ObjectProxy must defer construction until first attribute
    access, and customAttribs must shadow attributes of the real object."""
    expectedLoad = False

    class Test:
        def __init__(self):
            # Trips (via the closed-over flag) if constructed too early.
            assert expectedLoad

        obj1 = object()
        obj2 = object()

    proxy = ObjectProxy(Test)
    expectedLoad = True
    # Attribute access forces the lazy construction and forwards to Test.
    assert proxy.obj1 is Test.obj1

    class Test(object):
        def __init__(self):
            assert expectedLoad

        obj1 = object()
        obj2 = object()

    proxy = ObjectProxy(Test, customAttribs={"obj1": 42})
    expectedLoad = True
    # NOTE: 'is 42' relies on CPython small-int interning.
    assert proxy.obj1 is 42
    assert proxy.obj2 is Test.obj2
    from collections import deque

    proxy = ObjectProxy(deque, customAttribs={"append": 42})
    assert proxy.append is 42
class DictObj(dict):
    """Dict whose entries are also reachable and assignable as attributes."""

    def __getattr__(self, item):
        # Missing keys raise KeyError (not AttributeError), like plain indexing.
        return self.__getitem__(item)

    def __setattr__(self, key, value):
        self.__setitem__(key, value)
def objc_disposeClassPair(className):
    """Destroy the Objective-C class named *className* via the runtime.

    Returns False if no such class is registered; otherwise disposes it.
    Only meaningful where the ObjC runtime is linked into the Python
    process (macOS) - TODO confirm before use elsewhere.
    """
    # Be careful using this!
    # Any objects holding refs to the old class will be invalid
    # and will probably crash!
    # Creating a new class after it will not make them valid because
    # the new class will be at a different address.
    # some discussion / example:
    # http://stackoverflow.com/questions/7361847/pyobjc-how-to-delete-existing-objective-c-class
    # https://github.com/albertz/chromehacking/blob/master/disposeClass.py
    import ctypes

    # Declare the C signatures before calling through ctypes.
    ctypes.pythonapi.objc_lookUpClass.restype = ctypes.c_void_p
    ctypes.pythonapi.objc_lookUpClass.argtypes = (ctypes.c_char_p,)
    addr = ctypes.pythonapi.objc_lookUpClass(className)
    if not addr:
        return False
    ctypes.pythonapi.objc_disposeClassPair.restype = None
    ctypes.pythonapi.objc_disposeClassPair.argtypes = (ctypes.c_void_p,)
    ctypes.pythonapi.objc_disposeClassPair(addr)
def objc_setClass(obj, clazz):
    """Swap the Objective-C class of *obj* to *clazz* at runtime, and update
    the Python-side __class__ to match.

    Requires pyobjc; assumes the target class is registered with the ObjC
    runtime under clazz.__name__ - TODO confirm for renamed classes.
    """
    import objc

    objAddr = objc.pyobjc_id(
        obj
    )  # returns the addr and also ensures that it is an objc object
    assert objAddr != 0
    import ctypes

    # Declare the C signatures before calling through ctypes.
    ctypes.pythonapi.objc_lookUpClass.restype = ctypes.c_void_p
    ctypes.pythonapi.objc_lookUpClass.argtypes = (ctypes.c_char_p,)
    className = clazz.__name__  # this should be correct I guess
    classAddr = ctypes.pythonapi.objc_lookUpClass(className)
    assert classAddr != 0
    # Class object_setClass(id object, Class cls)
    ctypes.pythonapi.object_setClass.restype = ctypes.c_void_p
    ctypes.pythonapi.object_setClass.argtypes = (ctypes.c_void_p, ctypes.c_void_p)
    ctypes.pythonapi.object_setClass(objAddr, classAddr)
    obj.__class__ = clazz
def ObjCClassAutorenamer(name, bases, dict):
    """Metaclass-style factory: if an ObjC class called *name* already exists,
    append a numeric suffix until the name is free, then create the type."""
    import objc

    def lookUpClass(name):
        # None means the name is still available in the ObjC runtime.
        try:
            return objc.lookUpClass(name)
        except objc.nosuchclass_error:
            return None

    if lookUpClass(name):
        numPostfix = 1
        while lookUpClass("%s_%i" % (name, numPostfix)):
            numPostfix += 1
        name = "%s_%i" % (name, numPostfix)
    return type(name, bases, dict)
def getMusicPathsFromDirectory(dir):
    """Recursively collect paths under *dir* whose suffix is a supported format."""
    import os

    import appinfo

    suffixes = tuple(appinfo.formats)
    found = []
    for root, _dirnames, filenames in os.walk(dir):
        found.extend(
            os.path.join(root, name) for name in filenames if name.endswith(suffixes)
        )
    return found
def getSongsFromDirectory(dir):
    """Build a ``Song`` object for every music file found below *dir*."""
    from Song import Song
    return [Song(path) for path in getMusicPathsFromDirectory(dir)]
# A fuzzy set is a dict of values to [0,1] numbers.
def unionFuzzySets(*fuzzySets):
    """Fuzzy-set union: for every key in any input dict, take the maximum
    membership value across all inputs; keys whose maximum is 0 are dropped.
    Requires at least one input set.
    """
    allKeys = set()
    for fuzzySet in fuzzySets:
        allKeys.update(fuzzySet)
    union = {}
    for key in allKeys:
        level = max(fuzzySet.get(key, 0) for fuzzySet in fuzzySets)
        if level > 0:
            union[key] = level
    return union
def intersectFuzzySets(*fuzzySets):
    """Fuzzy-set intersection: for every key present in all input dicts, take
    the minimum membership value; keys whose minimum is 0 are dropped.
    Requires at least one input set.
    """
    commonKeys = set.intersection(*[set(fuzzySet) for fuzzySet in fuzzySets])
    intersection = {}
    for key in commonKeys:
        level = min(fuzzySet[key] for fuzzySet in fuzzySets)
        if level > 0:
            intersection[key] = level
    return intersection
def convertToUnicode(value):
    """Decode *value* to a unicode string (Python 2 only: relies on the
    ``unicode`` builtin and ``str.decode``).

    Tries UTF-8 first, then the interpreter default encoding, then Latin-1;
    finally falls back to UTF-8 with replacement characters.

    :type value: str|unicode
    :rtype: unicode
    """
    if isinstance(value, unicode):
        return value
    assert isinstance(value, str)
    try:
        value = value.decode("utf-8")
    except UnicodeError:
        try:
            value = value.decode()  # default
        except UnicodeError:
            try:
                # NOTE(review): Latin-1 maps every byte value, so this decode
                # should never raise -- the branch below looks unreachable.
                value = value.decode("iso-8859-1")
            except UnicodeError:
                value = value.decode("utf-8", "replace")
                # value = value.replace(u"\ufffd", "?")
    assert isinstance(value, unicode)
    return value
def fixValue(value, type):
    """Coerce *value* to *type* when *type* is ``unicode`` (Python 2 only);
    otherwise return the value unchanged.

    A falsy *type* disables any conversion. Types other than ``unicode`` are
    only checked via ``isinstance`` -- no conversion is attempted for them.
    NOTE(review): the parameter shadows the builtin ``type``; kept unchanged
    because callers may pass it by keyword.
    """
    if not type:
        return value
    if isinstance(value, type):
        return value
    if type is unicode:
        if isinstance(value, str):
            return convertToUnicode(value)
        return unicode(value)
    # Requested type that we don't know how to convert to: pass through
    return value
def getTempNameInScope(scope):
    """Return a fresh identifier of the form ``_tmp_<10 digits>`` that is not
    already present in *scope* (anything supporting ``in``).
    """
    import random
    candidate = None
    while candidate is None or candidate in scope:
        digits = [str(random.randrange(0, 10)) for _ in range(10)]
        candidate = "_tmp_" + "".join(digits)
    return candidate
def iterGlobalsUsedInFunc(f, fast=False, loadsOnly=True):
    """Yield names of globals referenced by *f* (Python 2 only).

    :param f: a function, bound method or code object
    :param fast: if True, approximate via ``co_names`` (also includes
        attribute names, so it over-reports)
    :param loadsOnly: if False, also report STORE_GLOBAL / DELETE_GLOBAL
        targets, not just LOAD_GLOBAL

    Recurses into nested code objects (inner functions, lambdas, etc.).
    """
    # Unwrap function -> code object (py2 attribute names)
    if hasattr(f, "func_code"):
        code = f.func_code
    elif hasattr(f, "im_func"):
        code = f.im_func.func_code
    else:
        code = f
    if fast:
        # co_names is the list of all names which are used.
        # These are mostly the globals. These are also attrib names, so these are more...
        for name in code.co_names:
            yield name
    else:
        # Use the disassembly. Note that this will still not
        # find dynamic lookups to `globals()`
        # (which is anyway not possible to detect always).
        import dis
        ops = ["LOAD_GLOBAL"]
        if not loadsOnly:
            ops += ["STORE_GLOBAL", "DELETE_GLOBAL"]
        ops = map(dis.opmap.__getitem__, ops)
        # Manual bytecode walk. Assumes Python 2: co_code is a str and
        # instructions are variable-width with 2-byte little-endian opargs
        # (this layout changed in CPython 3.6).
        i = 0
        while i < len(code.co_code):
            op = ord(code.co_code[i])
            i += 1
            if op >= dis.HAVE_ARGUMENT:
                oparg = ord(code.co_code[i]) + ord(code.co_code[i + 1]) * 256
                i += 2
            else:
                oparg = None
            if op in ops:
                name = code.co_names[oparg]
                yield name
    # iterate through sub code objects
    import types
    for subcode in code.co_consts:
        if isinstance(subcode, types.CodeType):
            for g in iterGlobalsUsedInFunc(subcode, fast=fast, loadsOnly=loadsOnly):
                yield g
def iterGlobalsUsedInClass(clazz, module=None):
    """Yield global names used by the methods/properties of *clazz*.

    Properties are unwrapped down to their getter function before analysis;
    ``safe_property`` is a project-defined property wrapper declared elsewhere
    in this file.

    :param module: if set, only consider attributes whose ``__module__``
        matches this module name.
    """
    import types
    for attrName in dir(clazz):
        attr = getattr(clazz, attrName)
        while True:  # resolve props
            if isinstance(attr, safe_property):
                attr = attr.prop
                continue
            if isinstance(attr, property):
                attr = attr.fget
                continue
            break
        if isinstance(attr, (types.FunctionType, types.MethodType)):
            if module:
                if attr.__module__ != module:
                    continue
            for g in iterGlobalsUsedInFunc(attr):
                yield g
def ExceptionCatcherDecorator(func):
    """Decorator: call *func*, routing any exception to ``sys.excepthook``
    instead of propagating it.

    The wrapped call returns *func*'s result, or None when an exception was
    caught. Uses ``functools.wraps`` so the wrapper keeps the original
    function's name/docstring (the original implementation lost them).
    """
    import functools

    @functools.wraps(func)
    def decoratedFunc(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Report via the excepthook (better_exchook may have installed a
            # custom one at file level) but keep the caller running.
            sys.excepthook(*sys.exc_info())
    return decoratedFunc
def killMeHard():
    """Terminate the current process (group) immediately and uncleanly.

    ``os.kill(0, ...)`` delivers the signal to every process in the caller's
    process group (POSIX semantics), so children die too. SIGKILL cannot be
    caught: no atexit hooks, finally blocks or cleanup handlers run.
    """
    import os
    import signal
    # Removed an unused `import sys` that the original carried.
    os.kill(0, signal.SIGKILL)
def dumpAllThreads():
    """Print the name, ident and current stack of every running thread.

    Uses ``better_exchook`` (imported at file level) for traceback printing.
    No-op with a warning on interpreters lacking ``sys._current_frames``.
    """
    import sys
    if not hasattr(sys, "_current_frames"):
        print("Warning: dumpAllThreads: no sys._current_frames")
        return
    import threading
    namesByIdent = dict((th.ident, th.name) for th in threading.enumerate())
    for ident, frame in sys._current_frames().items():
        print("\n# Thread: %s(%d)" % (namesByIdent.get(ident, ""), ident))
        better_exchook.print_traceback(frame)
def dumpThread(threadId):
    """Print the current stack of the thread with ident *threadId*.

    Prints a message and returns when the thread is unknown, or on
    interpreters lacking ``sys._current_frames``.
    """
    import sys
    if not hasattr(sys, "_current_frames"):
        print("Warning: dumpThread: no sys._current_frames")
        return
    # Take a single snapshot so lookup and membership agree
    frame = sys._current_frames().get(threadId)
    if frame is None:
        print("Thread %d not found" % threadId)
        return
    better_exchook.print_traceback(frame)
def debugFindThread(threadName):
    """Return the running ``threading.Thread`` named *threadName*, else None.

    If several threads share the name, an arbitrary matching one is returned.
    """
    import threading
    matches = (th for th in threading.enumerate() if th.name == threadName)
    return next(matches, None)
def debugGetThreadStack(threadName):
    """Resolve a thread by name and return ``(thread, topmost_frame)``.

    Raises AssertionError when no running thread carries that name.
    """
    thread = debugFindThread(threadName)
    assert thread, "thread not found"
    frame = sys._current_frames()[thread.ident]
    return thread, frame
def debugGetLocalVarFromThread(threadName, funcName, varName):
    """Search the named thread's stack for a frame running *funcName* and
    return ``(frame, frame.f_locals[varName])``; ``(None, None)`` if absent.

    The walk handles both a frame chain (stepping outward via ``f_back``,
    which is what ``debugGetThreadStack`` yields) and a traceback chain
    (stepping via ``tb_next``).
    """
    th, stack = debugGetThreadStack(threadName)
    _tb = stack
    limit = None  # no cap on the number of visited entries
    n = 0
    from inspect import isframe
    while _tb is not None and (limit is None or n < limit):
        # Cursor may be a frame or a traceback entry; extract the frame
        if isframe(_tb):
            f = _tb
        else:
            f = _tb.tb_frame
        if f.f_code.co_name == funcName:
            if varName in f.f_locals:
                return f, f.f_locals[varName]
        # Frames link outward via f_back; tracebacks link inward via tb_next
        if isframe(_tb):
            _tb = _tb.f_back
        else:
            _tb = _tb.tb_next
        n += 1
    return None, None
def NSAutoreleasePoolDecorator(func):
    """Decorator: run each call of *func* inside its own NSAutoreleasePool so
    autoreleased ObjC objects created during the call are drained promptly.

    Fix over the original: the pool is now released in a ``finally`` block.
    Previously, if *func* raised, the frame kept alive by the traceback held
    the pool reference, so it (and everything autoreleased into it) leaked.
    """
    def decoratedFunc(*args, **kwargs):
        import AppKit
        pool = AppKit.NSAutoreleasePool.alloc().init()
        try:
            return func(*args, **kwargs)
        finally:
            del pool
    return decoratedFunc
def simplifyString(s):
    """Normalize *s* for fuzzy matching (Python 2 only, via convertToUnicode):
    lowercase, strip accents/combining marks through NFD decomposition, then
    transliterate a handful of special letters to ASCII lookalikes.
    """
    s = convertToUnicode(s)
    s = s.lower()
    import unicodedata
    # Decompose accented characters, then drop the combining marks ("Mn")
    s = unicodedata.normalize("NFD", s)
    s = "".join([c for c in s if unicodedata.category(c) != "Mn"])
    # Hand-picked replacements for letters NFD cannot decompose.
    # NOTE(review): mappings reflect the original author's visual/phonetic
    # choices (e.g. Cyrillic "я" -> "r") -- confirm before changing.
    for base, repl in (
        ("я", "r"),
        ("æ", "a"),
        ("œ", "o"),
        ("ø", "o"),
        ("ɲ", "n"),
        ("ß", "ss"),
        ("©", "c"),
        ("ð", "d"),
        ("đ", "d"),
        ("ɖ", "d"),
        ("þ", "th"),
    ):
        s = s.replace(base, repl)
    return s
def uniqList(l):
    """Return a copy of *l* with duplicates removed, keeping only the first
    occurrence of each element (elements must be hashable).
    """
    seen = set()
    unique = []
    for item in l:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def isPymoduleAvailable(mod):
    """True if module *mod* can be imported, False otherwise.

    Note: actually imports the module, so import side effects apply.
    """
    try:
        __import__(mod)
        return True
    except ImportError:
        return False
def interactive_py_compile(source, filename="<interactive>"):
    """Compile *source* in 'single' mode, patched so the final expression's
    value is *returned* instead of printed.

    Python 2 only: operates on ``str`` bytecode with ``ord``/``chr`` and the
    pre-3.8 ``types.CodeType`` constructor signature.
    """
    c = compile(source, filename, "single")
    # we expect this at the end:
    #   PRINT_EXPR
    #   LOAD_CONST
    #   RETURN_VALUE
    import dis
    if ord(c.co_code[-5]) != dis.opmap["PRINT_EXPR"]:
        # Tail doesn't match (e.g. a statement, not an expression): leave as-is
        return c
    assert ord(c.co_code[-4]) == dis.opmap["LOAD_CONST"]
    assert ord(c.co_code[-1]) == dis.opmap["RETURN_VALUE"]
    # Strip the PRINT_EXPR (which pops the value) plus the trailing
    # LOAD_CONST/RETURN_VALUE; the value left on the stack gets returned.
    code = c.co_code[:-5]
    code += chr(dis.opmap["RETURN_VALUE"])
    # Field order of the py2 code-object constructor
    CodeArgs = [
        "argcount",
        "nlocals",
        "stacksize",
        "flags",
        "code",
        "consts",
        "names",
        "varnames",
        "filename",
        "name",
        "firstlineno",
        "lnotab",
        "freevars",
        "cellvars",
    ]
    # Rebuild the code object with the patched bytecode, keeping other fields
    c_dict = dict([(arg, getattr(c, "co_" + arg)) for arg in CodeArgs])
    c_dict["code"] = code
    import types
    c = types.CodeType(*[c_dict[arg] for arg in CodeArgs])
    return c
# Lazily-resolved libpthread handles. ``Uninitialized`` is a project sentinel
# (defined elsewhere in this file) meaning "not looked up yet"; None means
# "lookup failed, feature unavailable".
_pthread_setname_np = Uninitialized
_pthread_self = None
def setCurThreadName(name):
    """Set the OS-level name of the current thread via ``pthread_setname_np``.

    Resolves the libpthread symbols once on first call and caches them.
    Silently a no-op (after printing a warning once) when libpthread or the
    symbol is unavailable.
    """
    name = convertToUnicode(name)
    # name = name[:15] # Not sure if needed. If so, we should use shorter names...
    name = name.encode("utf8")
    global _pthread_setname_np, _pthread_self
    if _pthread_setname_np is Uninitialized:
        # First call: resolve and configure the C functions
        _pthread_setname_np = None
        try:
            import ctypes
            import ctypes.util
            libpthread_path = ctypes.util.find_library("pthread")
            if not libpthread_path:
                raise ImportError
            libpthread = ctypes.CDLL(libpthread_path)
            if not hasattr(libpthread, "pthread_setname_np"):
                raise ImportError
            _pthread_setname_np = libpthread.pthread_setname_np
            if sys.platform == "darwin":
                # macOS variant renames only the calling thread: (const char*)
                _pthread_setname_np.argtypes = [ctypes.c_char_p]
                _pthread_setname_np.restype = ctypes.c_int
            else:
                # Elsewhere the signature is (pthread_t, const char*), so we
                # also need pthread_self() to get the current thread handle
                _pthread_self = libpthread.pthread_self
                _pthread_self.argtypes = []
                _pthread_self.restype = ctypes.c_void_p
                _pthread_setname_np.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
                _pthread_setname_np.restype = ctypes.c_int
        except ImportError:
            print("setCurThreadName: failed to import libpthread")
    if _pthread_setname_np is None:
        return
    if sys.platform == "darwin":
        _pthread_setname_np(name)
    else:
        _pthread_setname_np(_pthread_self(), name)
|
Pyfa | db_update | #!/usr/bin/env python3
# ======================================================================
# Copyright (C) 2012 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with eos. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
import functools
import itertools
import json
import os
import re
import sqlite3
import sys
import sqlalchemy.orm
from sqlalchemy import and_, or_
# todo: need to set the EOS language to en, becasuse this assumes it's being run within an English context
# Need to know what that would do if called from pyfa
# Directory containing this script; all paths below are relative to it
ROOT_DIR = os.path.realpath(os.path.dirname(__file__))
# Target SQLite gamedata database
DB_PATH = os.path.join(ROOT_DIR, "eve.db")
# Source JSON dumps (Phobos / fsd_binary / fsd_lite miners)
JSON_DIR = os.path.join(ROOT_DIR, "staticdata")
# Make sibling packages (eos, ...) importable when run as a standalone script
if ROOT_DIR not in sys.path:
    sys.path.insert(0, ROOT_DIR)
# Bump whenever the generated DB layout changes incompatibly; compared against
# the 'schema_version' metadata row by db_needs_update()
GAMEDATA_SCHEMA_VERSION = 4
def db_needs_update():
    """True if needs, false if it does not, none if we cannot check it.

    Compares the 'client_build' value from the Phobos metadata dump against
    the existing DB's metadata table, and this script's
    GAMEDATA_SCHEMA_VERSION against the DB's stored 'schema_version'.
    """
    try:
        with open(os.path.join(JSON_DIR, "phobos", "metadata.0.json")) as f:
            data_version = next(
                (
                    r["field_value"]
                    for r in json.load(f)
                    if r["field_name"] == "client_build"
                )
            )
    except (KeyboardInterrupt, SystemExit):
        raise
    # If we have no source data - return None; should not update in this case
    except:
        return None
    if not os.path.isfile(DB_PATH):
        print("Gamedata DB not found")
        return True
    db_data_version = None
    db_schema_version = None
    try:
        db = sqlite3.connect(DB_PATH)
        cursor = db.cursor()
        cursor.execute(
            "SELECT field_value FROM metadata WHERE field_name = 'client_build'"
        )
        for row in cursor:
            db_data_version = int(row[0])
        cursor.execute(
            "SELECT field_value FROM metadata WHERE field_name = 'schema_version'"
        )
        for row in cursor:
            db_schema_version = int(row[0])
        cursor.close()
        db.close()
    except (KeyboardInterrupt, SystemExit):
        raise
    # Deliberate catch-all: a missing/corrupt DB or metadata table simply
    # means we must rebuild
    except:
        print("Error when fetching gamedata DB metadata")
        return True
    # NOTE(review): data_version keeps whatever type the JSON carried (int or
    # str) while db_data_version is int -- confirm the dump always stores an
    # int, otherwise this comparison would always report a mismatch.
    if data_version != db_data_version:
        print(
            "Gamedata DB data version mismatch: needed {}, DB has {}".format(
                data_version, db_data_version
            )
        )
        return True
    if GAMEDATA_SCHEMA_VERSION != db_schema_version:
        print(
            "Gamedata DB schema version mismatch: needed {}, DB has {}".format(
                GAMEDATA_SCHEMA_VERSION, db_schema_version
            )
        )
        return True
    return False
def update_db():
    """Rebuild the gamedata DB (eve.db) from scratch out of the JSON dumps."""
    print("Building gamedata DB...")
    # Start from an empty file so no stale rows survive the rebuild
    if os.path.isfile(DB_PATH):
        os.remove(DB_PATH)
    # NOTE(review): imported here, after the old DB file is deleted --
    # presumably eos.db binds to the DB on import; verify before reordering.
    import eos.config
    import eos.db
    import eos.gamedata
    # Create the database tables
    eos.db.gamedata_meta.create_all()
def _readData(minerName, jsonName, keyIdName=None):
compiled_data = None
for i in itertools.count(0):
try:
with open(
os.path.join(JSON_DIR, minerName, "{}.{}.json".format(jsonName, i)),
encoding="utf-8",
) as f:
rawData = json.load(f)
if i == 0:
compiled_data = {} if type(rawData) == dict else []
if type(rawData) == dict:
compiled_data.update(rawData)
else:
compiled_data.extend(rawData)
except FileNotFoundError:
break
if not keyIdName:
return compiled_data
# IDs in keys, rows in values
data = []
for k, v in compiled_data.items():
row = {}
row.update(v)
row[keyIdName] = int(k)
data.append(row)
return data
def _addRows(data, cls, fieldMap=None):
if fieldMap is None:
fieldMap = {}
for row in data:
instance = cls()
for k, v in row.items():
if isinstance(v, str):
v = v.strip()
setattr(instance, fieldMap.get(k, k), v)
eos.db.gamedata_session.add(instance)
    def processEveTypes():
        """Load evetypes, override some 'published' flags, stage Item rows.

        Returns the filtered raw row list; later passes treat it as the
        authority on which typeIDs exist.
        NOTE(review): the local name ``map`` shadows the builtin here.
        """
        print("processing evetypes")
        data = _readData("fsd_binary", "types", keyIdName="typeID")
        for row in data:
            if (
                # Apparently people really want Civilian modules available
                (
                    row["typeName_en-us"].startswith("Civilian")
                    and "Shuttle" not in row["typeName_en-us"]
                )
                or row["typeName_en-us"] == "Capsule"
                or row["groupID"] == 4033  # destructible effect beacons
                or re.match("AIR .+Booster.*", row["typeName_en-us"])
            ):
                row["published"] = True
            # Nearly useless and clutter search results too much
            elif (
                row["typeName_en-us"].startswith("Limited Synth ")
                or row["typeName_en-us"].startswith("Expired ")
                or re.match("Mining Blitz .+ Booster Dose .+", row["typeName_en-us"])
                # `and` binds tighter than `or`: the exclusion list below
                # applies only to this " Filament" clause
                or row["typeName_en-us"].endswith(" Filament")
                and (
                    "'Needlejack'" not in row["typeName_en-us"]
                    and "'Devana'" not in row["typeName_en-us"]
                    and "'Pochven'" not in row["typeName_en-us"]
                    and "'Extraction'" not in row["typeName_en-us"]
                    and "'Krai Veles'" not in row["typeName_en-us"]
                    and "'Krai Perun'" not in row["typeName_en-us"]
                    and "'Krai Svarog'" not in row["typeName_en-us"]
                )
            ):
                row["published"] = False
        # Keep published rows plus a few unpublished families pyfa needs
        newData = []
        for row in data:
            if (
                row["published"]
                or
                # group Ship Modifiers, for items like tactical t3 ship modes
                row["groupID"] == 1306
                or
                # Micro Bombs (Fighters)
                row["typeID"] in (41549, 41548, 41551, 41550)
                or
                # Abyssal weather (environment)
                row["groupID"]
                in (
                    1882,
                    1975,
                    1971,
                    1983,
                )  # the "container" for the abyssal environments
            ):
                newData.append(row)
        map = {"typeName_en-us": "typeName", "description_en-us": "_description"}
        map.update(
            {
                "description" + v: "_description" + v
                for (k, v) in eos.config.translation_mapping.items()
                if k != "en"
            }
        )
        _addRows(newData, eos.gamedata.Item, fieldMap=map)
        return newData
def processEveGroups():
print("processing evegroups")
data = _readData("fsd_binary", "groups", keyIdName="groupID")
map = {"groupName_en-us": "name"}
map.update(
{
"groupName" + v: "name" + v
for (k, v) in eos.config.translation_mapping.items()
if k != "en"
}
)
_addRows(data, eos.gamedata.Group, fieldMap=map)
return data
def processEveCategories():
print("processing evecategories")
data = _readData("fsd_binary", "categories", keyIdName="categoryID")
map = {"categoryName_en-us": "name"}
map.update(
{
"categoryName" + v: "name" + v
for (k, v) in eos.config.translation_mapping.items()
if k != "en"
}
)
_addRows(data, eos.gamedata.Category, fieldMap=map)
def processDogmaAttributes():
print("processing dogmaattributes")
data = _readData("fsd_binary", "dogmaattributes", keyIdName="attributeID")
map = {
"displayName_en-us": "displayName",
# 'tooltipDescription_en-us': 'tooltipDescription'
}
_addRows(data, eos.gamedata.AttributeInfo, fieldMap=map)
    def processDogmaTypeAttributes(eveTypesData):
        """Stage per-type Attribute rows; returns the staged row dicts.

        Two sources feed in, deduplicated first-writer-wins per
        (typeID, attributeID): explicit typedogma attributes are processed
        first and therefore take precedence over the basic attributes
        (mass/capacity/volume/radius) carried directly on the type rows.
        """
        print("processing dogmatypeattributes")
        data = _readData("fsd_binary", "typedogma", keyIdName="typeID")
        eveTypeIds = set(r["typeID"] for r in eveTypesData)
        newData = []
        seenKeys = set()
        def checkKey(key):
            # True exactly once per (typeID, attributeID) pair
            if key in seenKeys:
                return False
            seenKeys.add(key)
            return True
        for typeData in data:
            if typeData["typeID"] not in eveTypeIds:
                continue
            for row in typeData.get("dogmaAttributes", ()):
                row["typeID"] = typeData["typeID"]
                if checkKey((row["typeID"], row["attributeID"])):
                    newData.append(row)
        # Synthesize rows for the basic attributes stored on the type row
        # itself (only where typedogma did not already provide a value)
        for row in eveTypesData:
            for attrId, attrName in {
                4: "mass",
                38: "capacity",
                161: "volume",
                162: "radius",
            }.items():
                if attrName in row and checkKey((row["typeID"], attrId)):
                    newData.append(
                        {
                            "typeID": row["typeID"],
                            "attributeID": attrId,
                            "value": row[attrName],
                        }
                    )
        _addRows(newData, eos.gamedata.Attribute)
        return newData
def processDynamicItemAttributes():
print("processing dynamicitemattributes")
data = _readData("fsd_binary", "dynamicitemattributes")
for mutaID, mutaData in data.items():
muta = eos.gamedata.DynamicItem()
muta.typeID = mutaID
muta.resultingTypeID = mutaData["inputOutputMapping"][0]["resultingType"]
eos.db.gamedata_session.add(muta)
for x in mutaData["inputOutputMapping"][0]["applicableTypes"]:
item = eos.gamedata.DynamicItemItem()
item.typeID = mutaID
item.applicableTypeID = x
eos.db.gamedata_session.add(item)
for attrID, attrData in mutaData["attributeIDs"].items():
attr = eos.gamedata.DynamicItemAttribute()
attr.typeID = mutaID
attr.attributeID = attrID
attr.min = attrData["min"]
attr.max = attrData["max"]
eos.db.gamedata_session.add(attr)
def processDogmaEffects():
print("processing dogmaeffects")
data = _readData("fsd_binary", "dogmaeffects", keyIdName="effectID")
_addRows(
data,
eos.gamedata.Effect,
fieldMap={"resistanceAttributeID": "resistanceID"},
)
def processDogmaTypeEffects(eveTypesData):
print("processing dogmatypeeffects")
data = _readData("fsd_binary", "typedogma", keyIdName="typeID")
eveTypeIds = set(r["typeID"] for r in eveTypesData)
newData = []
for typeData in data:
if typeData["typeID"] not in eveTypeIds:
continue
for row in typeData.get("dogmaEffects", ()):
row["typeID"] = typeData["typeID"]
newData.append(row)
_addRows(newData, eos.gamedata.ItemEffect)
return newData
def processDogmaUnits():
print("processing dogmaunits")
data = _readData("fsd_binary", "dogmaunits", keyIdName="unitID")
_addRows(
data,
eos.gamedata.Unit,
fieldMap={"name": "unitName", "displayName_en-us": "displayName"},
)
def processMarketGroups():
print("processing marketgroups")
data = _readData("fsd_binary", "marketgroups", keyIdName="marketGroupID")
map = {
"name_en-us": "marketGroupName",
"description_en-us": "_description",
}
map.update(
{
"name" + v: "marketGroupName" + v
for (k, v) in eos.config.translation_mapping.items()
if k != "en"
}
)
map.update(
{
"description" + v: "_description" + v
for (k, v) in eos.config.translation_mapping.items()
if k != "en"
}
)
_addRows(data, eos.gamedata.MarketGroup, fieldMap=map)
def processMetaGroups():
print("processing metagroups")
data = _readData("fsd_binary", "metagroups", keyIdName="metaGroupID")
map = {"name_en-us": "metaGroupName"}
map.update(
{
"name" + v: "metaGroupName" + v
for (k, v) in eos.config.translation_mapping.items()
if k != "en"
}
)
_addRows(data, eos.gamedata.MetaGroup, fieldMap=map)
    def processCloneGrades():
        """Stage Alpha-clone skill data, collapsed to a single clone (ID 1).

        Raises when the per-race skill sets in the source data diverge, since
        pyfa assumes there is only one effective Alpha skill set.
        """
        print("processing clonegrades")
        data = _readData("fsd_lite", "clonegrades")
        newData = []
        # December, 2017 - CCP decided to use only one set of skill levels for alpha clones. However, this is still
        # represented in the data as a skillset per race. To ensure that all skills are the same, we store them in a way
        # that we can check to make sure all races have the same skills, as well as skill levels
        check = {}
        for ID in data:
            for skill in data[ID]["skills"]:
                newData.append(
                    {
                        "alphaCloneID": int(ID),
                        "alphaCloneName": "Alpha Clone",
                        "typeID": skill["typeID"],
                        "level": skill["level"],
                    }
                )
                if ID not in check:
                    check[ID] = {}
                check[ID][int(skill["typeID"])] = int(skill["level"])
        # reduce() yields the common skill dict when all per-race dicts are
        # equal, else False; relies on a non-empty dict being truthy
        if not functools.reduce(
            lambda a, b: a if a == b else False, [v for _, v in check.items()]
        ):
            raise Exception("Alpha Clones not all equal")
        # All races equal, so keep just clone ID 1 as the representative
        newData = [x for x in newData if x["alphaCloneID"] == 1]
        if len(newData) == 0:
            raise Exception("Alpha Clone processing failed")
        # Create the single AlphaClone parent row before its skill rows
        tmp = []
        for row in newData:
            if row["alphaCloneID"] not in tmp:
                cloneParent = eos.gamedata.AlphaClone()
                setattr(cloneParent, "alphaCloneID", row["alphaCloneID"])
                setattr(cloneParent, "alphaCloneName", row["alphaCloneName"])
                eos.db.gamedata_session.add(cloneParent)
                tmp.append(row["alphaCloneID"])
        _addRows(newData, eos.gamedata.AlphaCloneSkill)
    def processTraits():
        """Render per-type trait data into HTML snippets and stage Traits rows.

        Each localization gets a ``traitText<suffix>`` column; sections are
        joined with double ``<br />`` line breaks.
        """
        print("processing traits")
        data = _readData("phobos", "traits")
        def convertSection(sectionData):
            # One bonus section -> "<b>header</b><br/>bonus lines..."
            sectionLines = []
            headerText = "<b>{}</b>".format(sectionData["header"])
            sectionLines.append(headerText)
            for bonusData in sectionData["bonuses"]:
                prefix = (
                    "{} ".format(bonusData["number"]) if "number" in bonusData else ""
                )
                # \u00B7 (middle dot) -> \u2022 (bullet) for nicer rendering
                bonusText = "{}{}".format(
                    prefix, bonusData["text"].replace("\u00B7", "\u2022 ")
                )
                sectionLines.append(bonusText)
            sectionLine = "<br />\n".join(sectionLines)
            return sectionLine
        newData = []
        for row in data:
            try:
                newRow = {
                    "typeID": row["typeID"],
                }
                for k, v in eos.config.translation_mapping.items():
                    if v == "":
                        v = "_en-us"
                    typeLines = []
                    traitData = row["traits{}".format(v)]
                    # Skill sections sorted by header for a stable order
                    for skillData in sorted(
                        traitData.get("skills", ()), key=lambda i: i["header"]
                    ):
                        typeLines.append(convertSection(skillData))
                    if "role" in traitData:
                        typeLines.append(convertSection(traitData["role"]))
                    if "misc" in traitData:
                        typeLines.append(convertSection(traitData["misc"]))
                    traitLine = "<br />\n<br />\n".join(typeLines)
                    newRow["traitText{}".format(v)] = traitLine
                newData.append(newRow)
            # NOTE(review): deliberate best-effort -- any malformed entry
            # silently drops that type's traits entirely
            except:
                pass
        _addRows(
            newData, eos.gamedata.Traits, fieldMap={"traitText_en-us": "traitText"}
        )
def processMetadata():
print("processing metadata")
data = _readData("phobos", "metadata")
_addRows(data, eos.gamedata.MetaData)
def processReqSkills(eveTypesData):
print("processing requiredskillsfortypes")
def composeReqSkills(raw):
reqSkills = {}
for skillTypeID, skillLevel in raw.items():
reqSkills[int(skillTypeID)] = skillLevel
return reqSkills
eveTypeIds = set(r["typeID"] for r in eveTypesData)
data = _readData("fsd_binary", "requiredskillsfortypes")
reqsByItem = {}
itemsByReq = {}
for typeID, skillreqData in data.items():
typeID = int(typeID)
if typeID not in eveTypeIds:
continue
for skillTypeID, skillLevel in composeReqSkills(skillreqData).items():
reqsByItem.setdefault(typeID, {})[skillTypeID] = skillLevel
itemsByReq.setdefault(skillTypeID, {})[typeID] = skillLevel
for item in eos.db.gamedata_session.query(eos.gamedata.Item).all():
if item.typeID in reqsByItem:
item.reqskills = json.dumps(reqsByItem[item.typeID])
if item.typeID in itemsByReq:
item.requiredfor = json.dumps(itemsByReq[item.typeID])
    def processReplacements(
        eveTypesData, eveGroupsData, dogmaTypeAttributesData, dogmaTypeEffectsData
    ):
        """Find functionally-identical items and store each item's replacement
        set as a comma-separated typeID list in ``Item.replacements``.

        Items are bucketed by (group, required-skill types, effect set);
        within a bucket, two items replace each other when their remaining
        attributes match exactly (a few cosmetic/meta attributes ignored).
        """
        print("finding item replacements")
        def compareAttrs(attrs1, attrs2):
            # Consider items as different if they have no attrs
            if len(attrs1) == 0 and len(attrs2) == 0:
                return False
            if set(attrs1) != set(attrs2):
                return False
            if all(attrs1[aid] == attrs2[aid] for aid in attrs1):
                return True
            return False
        # Attribute pairs defining a skill requirement:
        # {skill type attribute ID: matching skill level attribute ID}
        skillReqAttribs = {
            182: 277,
            183: 278,
            184: 279,
            1285: 1286,
            1289: 1287,
            1290: 1288,
        }
        skillReqAttribsFlat = set(skillReqAttribs.keys()).union(
            skillReqAttribs.values()
        )
        # Get data on type groups
        # Format: {type ID: group ID}
        typesGroups = {}
        for row in eveTypesData:
            typesGroups[row["typeID"]] = row["groupID"]
        # Get data on item effects
        # Format: {type ID: set(effect, IDs)}
        typesEffects = {}
        for row in dogmaTypeEffectsData:
            typesEffects.setdefault(row["typeID"], set()).add(row["effectID"])
        # Get data on type attributes
        # Format: {type ID: {attribute ID: attribute value}}
        typesNormalAttribs = {}
        typesSkillAttribs = {}
        for row in dogmaTypeAttributesData:
            attributeID = row["attributeID"]
            if attributeID in skillReqAttribsFlat:
                typeSkillAttribs = typesSkillAttribs.setdefault(row["typeID"], {})
                typeSkillAttribs[row["attributeID"]] = row["value"]
            # Ignore these attributes for comparison purposes
            elif attributeID in (
                # We do not need mass as it affects final ship stats only when carried by ship itself
                # (and we're not going to replace ships), but it's wildly inconsistent for other items,
                # which otherwise would be the same
                4,  # mass
                124,  # mainColor
                162,  # radius
                422,  # techLevel
                633,  # metaLevel
                1692,  # metaGroupID
                1768,  # typeColorScheme
            ):
                continue
            else:
                typeNormalAttribs = typesNormalAttribs.setdefault(row["typeID"], {})
                typeNormalAttribs[row["attributeID"]] = row["value"]
        # Get data on skill requirements
        # Format: {type ID: {skill type ID: skill level}}
        typesSkillReqs = {}
        for typeID, typeAttribs in typesSkillAttribs.items():
            typeSkillAttribs = typesSkillAttribs.get(typeID, {})
            if not typeSkillAttribs:
                continue
            typeSkillReqs = typesSkillReqs.setdefault(typeID, {})
            for skillreqTypeAttr, skillreqLevelAttr in skillReqAttribs.items():
                try:
                    skillType = int(typeSkillAttribs[skillreqTypeAttr])
                    skillLevel = int(typeSkillAttribs[skillreqLevelAttr])
                except (KeyError, ValueError):
                    continue
                typeSkillReqs[skillType] = skillLevel
        # Format: {group ID: category ID}
        groupCategories = {}
        for row in eveGroupsData:
            groupCategories[row["groupID"]] = row["categoryID"]
        # As EVE affects various types mostly depending on their group or skill requirements,
        # we're going to group various types up this way
        # Format: {(group ID, frozenset(skillreq, type, IDs), frozenset(type, effect, IDs): [type ID, {attribute ID: attribute value}]}
        groupedData = {}
        for row in eveTypesData:
            typeID = row["typeID"]
            # Ignore items outside of categories we need
            if groupCategories[typesGroups[typeID]] not in (
                6,  # Ship
                7,  # Module
                8,  # Charge
                18,  # Drone
                20,  # Implant
                22,  # Deployable
                23,  # Starbase
                32,  # Subsystem
                35,  # Decryptors
                65,  # Structure
                66,  # Structure Module
                87,  # Fighter
            ):
                continue
            typeAttribs = typesNormalAttribs.get(typeID, {})
            # Ignore items w/o attributes
            if not typeAttribs:
                continue
            # We need only skill types, not levels for keys
            typeSkillreqs = frozenset(typesSkillReqs.get(typeID, {}))
            typeGroup = typesGroups[typeID]
            typeEffects = frozenset(typesEffects.get(typeID, ()))
            groupData = groupedData.setdefault(
                (typeGroup, typeSkillreqs, typeEffects), []
            )
            groupData.append((typeID, typeAttribs))
        # Format: {type ID: set(type IDs)}
        replacements = {}
        # Now, go through composed groups and for every item within it
        # find items which are the same
        for groupData in groupedData.values():
            for type1, type2 in itertools.combinations(groupData, 2):
                if compareAttrs(type1[1], type2[1]):
                    replacements.setdefault(type1[0], set()).add(type2[0])
                    replacements.setdefault(type2[0], set()).add(type1[0])
        # Update DB session with data we generated
        for item in eos.db.gamedata_session.query(eos.gamedata.Item).all():
            itemReplacements = replacements.get(item.typeID)
            if itemReplacements is not None:
                item.replacements = ",".join(
                    "{}".format(tid) for tid in sorted(itemReplacements)
                )
def processImplantSets(eveTypesData):
print("composing implant sets")
# Includes only implants which can be considered part of sets, not all implants
implant_groups = (300, 1730)
specials = {"Genolution": ("Genolution Core Augmentation", r"CA-\d+")}
implantSets = {}
for row in eveTypesData:
if not row.get("published"):
continue
if row.get("groupID") not in implant_groups:
continue
typeName = row.get("typeName_en-us", "")
# Regular sets matching
m = re.match(
"(?P<grade>(High|Mid|Low)-grade) (?P<set>\w+) (?P<implant>(Alpha|Beta|Gamma|Delta|Epsilon|Omega))",
typeName,
re.IGNORECASE,
)
if m:
implantSets.setdefault((m.group("grade"), m.group("set")), set()).add(
row["typeID"]
)
# Special set matching
for setHandle, (setName, implantPattern) in specials.items():
pattern = "(?P<set>{}) (?P<implant>{})".format(setName, implantPattern)
m = re.match(pattern, typeName)
if m:
implantSets.setdefault((None, setHandle), set()).add(row["typeID"])
break
data = []
for (gradeName, setName), implants in implantSets.items():
if len(implants) < 2:
continue
implants = ",".join("{}".format(tid) for tid in sorted(implants))
row = {"setName": setName, "gradeName": gradeName, "implants": implants}
data.append(row)
_addRows(data, eos.gamedata.ImplantSet)
    # Pipeline: types go first; later passes filter by the surviving typeIDs
    eveTypesData = processEveTypes()
    eveGroupsData = processEveGroups()
    processEveCategories()
    processDogmaAttributes()
    dogmaTypeAttributesData = processDogmaTypeAttributes(eveTypesData)
    processDynamicItemAttributes()
    processDogmaEffects()
    dogmaTypeEffectsData = processDogmaTypeEffects(eveTypesData)
    processDogmaUnits()
    processMarketGroups()
    processMetaGroups()
    processCloneGrades()
    processTraits()
    processMetadata()
    # Flush so the Item queries in the passes below see the staged rows
    eos.db.gamedata_session.flush()
    processReqSkills(eveTypesData)
    processReplacements(
        eveTypesData, eveGroupsData, dogmaTypeAttributesData, dogmaTypeEffectsData
    )
    processImplantSets(eveTypesData)
    # Add schema version to prevent further updates
    metadata_schema_version = eos.gamedata.MetaData()
    metadata_schema_version.field_name = "schema_version"
    metadata_schema_version.field_value = GAMEDATA_SCHEMA_VERSION
    eos.db.gamedata_session.add(metadata_schema_version)
    eos.db.gamedata_session.flush()
    # CCP still has 5 subsystems assigned to T3Cs, even though only 4 are available / usable. They probably have some
    # old legacy requirement or assumption that makes it difficult for them to change this value in the data. But for
    # pyfa, we can do it here as a post-processing step
    for attr in (
        eos.db.gamedata_session.query(eos.gamedata.Attribute)
        .filter(eos.gamedata.Attribute.ID == 1367)
        .all()
    ):
        attr.value = 4.0
    # Unpublish placeholder/abyssal/mutated junk that CCP ships as published
    for item in (
        eos.db.gamedata_session.query(eos.gamedata.Item)
        .filter(
            or_(
                eos.gamedata.Item.name.like("%abyssal%"),
                eos.gamedata.Item.name.like("%mutated%"),
                eos.gamedata.Item.name.like("%_PLACEHOLDER%"),
                # Drifter weapons are published for some reason
                eos.gamedata.Item.name.in_(("Lux Kontos", "Lux Xiphos")),
            )
        )
        .all()
    ):
        # ...but keep these legitimately-named families visible
        if "Asteroid Mining Crystal" in item.name:
            continue
        if "Mutated Drone Specialization" in item.name:
            continue
        item.published = False
    # Drop whole categories pyfa has no use for
    for x in [30]:  # Apparel
        cat = (
            eos.db.gamedata_session.query(eos.gamedata.Category)
            .filter(eos.gamedata.Category.ID == x)
            .first()
        )
        print("Removing Category: {}".format(cat.name))
        eos.db.gamedata_session.delete(cat)
# Unused normally, can be useful for customizing items
def _copyItem(srcName, tgtTypeID, tgtName):
eveType = (
eos.db.gamedata_session.query(eos.gamedata.Item)
.filter(eos.gamedata.Item.name == srcName)
.one()
)
eos.db.gamedata_session.expunge(eveType)
sqlalchemy.orm.make_transient(eveType)
eveType.ID = tgtTypeID
for suffix in eos.config.translation_mapping.values():
setattr(eveType, f"typeName{suffix}", tgtName)
eos.db.gamedata_session.add(eveType)
eos.db.gamedata_session.flush()
    def _hardcodeAttribs(typeID, attrMap):
        """Upsert attribute values, keyed by attribute *name*, onto the item
        with the given *typeID*.
        """
        for attrName, value in attrMap.items():
            try:
                attr = (
                    eos.db.gamedata_session.query(eos.gamedata.Attribute)
                    .filter(
                        and_(
                            eos.gamedata.Attribute.name == attrName,
                            eos.gamedata.Attribute.typeID == typeID,
                        )
                    )
                    .one()
                )
            except sqlalchemy.orm.exc.NoResultFound:
                # No existing row for this attribute: create one, resolving
                # the attribute name to its numeric ID via AttributeInfo
                attrInfo = (
                    eos.db.gamedata_session.query(eos.gamedata.AttributeInfo)
                    .filter(eos.gamedata.AttributeInfo.name == attrName)
                    .one()
                )
                attr = eos.gamedata.Attribute()
                attr.attributeID = attrInfo.ID
                attr.typeID = typeID
                attr.value = value
                eos.db.gamedata_session.add(attr)
            else:
                # Row exists: just overwrite the value
                attr.value = value
def _hardcodeEffects(typeID, effectMap):
item = (
eos.db.gamedata_session.query(eos.gamedata.Item)
.filter(eos.gamedata.Item.ID == typeID)
.one()
)
item.effects.clear()
for effectID, effectName in effectMap.items():
effect = eos.gamedata.Effect()
effect.effectID = effectID
effect.effectName = effectName
item.effects[effectName] = effect
    def hardcodeShapash():
        """Add the Shapash (typeID 1000000) as a copy of the Utu with
        hand-maintained attributes and pyfa-custom effects.
        """
        shapashTypeID = 1000000
        _copyItem(srcName="Utu", tgtTypeID=shapashTypeID, tgtName="Shapash")
        # Resonance values are written as 1 - resist_percentage for readability
        attrMap = {
            # Fitting
            "powerOutput": 50,
            "cpuOutput": 225,
            "capacitorCapacity": 420,
            "rechargeRate": 187500,
            # Slots
            "hiSlots": 3,
            "medSlots": 4,
            "lowSlots": 4,
            "launcherSlotsLeft": 0,
            "turretSlotsLeft": 3,
            # Rigs
            "rigSlots": 2,
            "rigSize": 1,
            "upgradeCapacity": 400,
            # Shield
            "shieldCapacity": 575,
            "shieldRechargeRate": 625000,
            "shieldEmDamageResonance": 1 - 0.0,
            "shieldThermalDamageResonance": 1 - 0.6,
            "shieldKineticDamageResonance": 1 - 0.85,
            "shieldExplosiveDamageResonance": 1 - 0.5,
            # Armor
            "armorHP": 1015,
            "armorEmDamageResonance": 1 - 0.5,
            "armorThermalDamageResonance": 1 - 0.675,
            "armorKineticDamageResonance": 1 - 0.8375,
            "armorExplosiveDamageResonance": 1 - 0.1,
            # Structure
            "hp": 1274,
            "emDamageResonance": 1 - 0.33,
            "thermalDamageResonance": 1 - 0.33,
            "kineticDamageResonance": 1 - 0.33,
            "explosiveDamageResonance": 1 - 0.33,
            "mass": 1215000,
            "volume": 29500,
            "capacity": 165,
            # Navigation
            "maxVelocity": 325,
            "agility": 3.467,
            "warpSpeedMultiplier": 5.5,
            # Drones
            "droneCapacity": 75,
            "droneBandwidth": 25,
            # Targeting
            "maxTargetRange": 49000,
            "maxLockedTargets": 6,
            "scanRadarStrength": 0,
            "scanLadarStrength": 0,
            "scanMagnetometricStrength": 9,
            "scanGravimetricStrength": 0,
            "signatureRadius": 39,
            "scanResolution": 550,
            # Misc
            "energyWarfareResistance": 0,
            "stasisWebifierResistance": 0,
            "weaponDisruptionResistance": 0,
        }
        # pyfa-custom effect IDs/names (presumably implemented in eos'
        # effect code -- verify when renaming)
        effectMap = {
            100100: "pyfaCustomShapashAfArAmount",
            100101: "pyfaCustomShapashAfShtTrackingOptimal",
            100102: "pyfaCustomShapashGfShtDamage",
            100103: "pyfaCustomShapashGfPointRange",
            100104: "pyfaCustomShapashGfPropOverheat",
            100105: "pyfaCustomShapashRolePlateMass",
            100106: "pyfaCustomShapashRoleHeat",
        }
        _hardcodeAttribs(shapashTypeID, attrMap)
        _hardcodeEffects(shapashTypeID, effectMap)
def hardcodeCybele():
    """Register the Cybele AT cruiser as a pyfa-custom item.

    Cloned from the closest existing hull ("Adrestia") under a pyfa-reserved
    type ID, then patched with hand-maintained attributes and custom effects.
    """
    # Reserved ID well outside the range used by real game items
    cybeleTypeID = 1000001
    _copyItem(srcName="Adrestia", tgtTypeID=cybeleTypeID, tgtName="Cybele")
    # NOTE(review): values below are hand-transcribed ship stats; resistances
    # are stored as resonances (1 - resist), hence the "1 - x" expressions.
    attrMap = {
        # Fitting
        "powerOutput": 1284,
        "cpuOutput": 400,
        "capacitorCapacity": 2400,
        "rechargeRate": 334000,
        # Slots
        "hiSlots": 5,
        "medSlots": 4,
        "lowSlots": 6,
        "launcherSlotsLeft": 0,
        "turretSlotsLeft": 5,
        # Rigs
        "rigSlots": 2,
        "rigSize": 2,
        "upgradeCapacity": 400,
        # Shield
        "shieldCapacity": 1200,
        "shieldRechargeRate": 1250000,
        "shieldEmDamageResonance": 1 - 0.0,
        "shieldThermalDamageResonance": 1 - 0.5,
        "shieldKineticDamageResonance": 1 - 0.9,
        "shieldExplosiveDamageResonance": 1 - 0.5,
        # Armor
        "armorHP": 1900,
        "armorEmDamageResonance": 1 - 0.5,
        "armorThermalDamageResonance": 1 - 0.69,
        "armorKineticDamageResonance": 1 - 0.85,
        "armorExplosiveDamageResonance": 1 - 0.1,
        # Structure
        "hp": 2300,
        "emDamageResonance": 1 - 0.33,
        "thermalDamageResonance": 1 - 0.33,
        "kineticDamageResonance": 1 - 0.33,
        "explosiveDamageResonance": 1 - 0.33,
        "mass": 11100000,
        "volume": 112000,
        "capacity": 450,
        # Navigation
        "maxVelocity": 235,
        "agility": 0.457,
        "warpSpeedMultiplier": 4.5,
        # Drones
        "droneCapacity": 100,
        "droneBandwidth": 50,
        # Targeting
        "maxTargetRange": 60000,
        "maxLockedTargets": 6,
        "scanRadarStrength": 0,
        "scanLadarStrength": 0,
        "scanMagnetometricStrength": 15,
        "scanGravimetricStrength": 0,
        "signatureRadius": 115,
        "scanResolution": 330,
        # Misc
        "energyWarfareResistance": 0,
        "stasisWebifierResistance": 0,
        "weaponDisruptionResistance": 0,
    }
    # pyfa-custom effect IDs (100200+) implementing the hull bonuses
    effectMap = {
        100200: "pyfaCustomCybeleHacMhtFalloff",
        100201: "pyfaCustomCybeleHacMhtTracking",
        100202: "pyfaCustomCybeleGcMhtDamage",
        100203: "pyfaCustomCybeleGcArAmount",
        100204: "pyfaCustomCybeleGcPointRange",
        100205: "pyfaCustomCybeleRoleVelocity",
        100206: "pyfaCustomCybeleRolePlateMass",
    }
    _hardcodeAttribs(cybeleTypeID, attrMap)
    _hardcodeEffects(cybeleTypeID, effectMap)
hardcodeShapash()
hardcodeCybele()
eos.db.gamedata_session.commit()
eos.db.gamedata_engine.execute("VACUUM")
print("done")
# Patch the game database with the pyfa-custom items when run as a script.
if __name__ == "__main__":
    update_db()
|
objecteditor | widget | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
An Object Editor widget.
"""
import sys
import app
import objecteditor
from PyQt5 import QtCore
from PyQt5.QtCore import QSettings
from PyQt5.QtGui import QTextCursor
from PyQt5.QtWidgets import QDoubleSpinBox, QLabel, QPushButton, QVBoxLayout, QWidget
from . import defineoffset
class Widget(QWidget):
    """Object editor panel.

    Shows the X/Y offset of the currently selected score object, keeps the
    values in sync with drag operations in the SVG view, and can insert a
    matching offset override into the source document.
    """

    # I think we will work with individual editor objects for different types of objects.
    # Each one will be shown/hidden on demand, i.e. when an element is activated
    # through the SVG view, the music view or the cursor in the source.
    # Each editor object handles its own connections to signals.
    # (PS: The object editor will also work with the source code directly,
    # i.e. independently of graphical SVG editing.)
    def __init__(self, tool):
        """Create the child widgets, restore settings and connect signals."""
        super().__init__(tool)
        self.mainwindow = tool.mainwindow()
        # DefineOffset helper for the currently selected object;
        # None until setObjectFromCursor() has been called.
        self.define = None
        # local import — presumably avoids an import cycle at module load; verify
        import panelmanager
        self.svgview = panelmanager.manager(tool.mainwindow()).svgview.widget().view
        layout = QVBoxLayout(spacing=1)
        self.setLayout(layout)
        self.elemLabel = QLabel()
        self.XOffsetBox = QDoubleSpinBox()
        self.XOffsetBox.setRange(-99, 99)
        self.XOffsetBox.setSingleStep(0.1)
        self.XOffsetLabel = l = QLabel()
        l.setBuddy(self.XOffsetBox)
        self.YOffsetBox = QDoubleSpinBox()
        self.YOffsetBox.setRange(-99, 99)
        self.YOffsetBox.setSingleStep(0.1)
        self.YOffsetLabel = l = QLabel()
        l.setBuddy(self.YOffsetBox)
        self.insertButton = QPushButton("insert offset in source", self)
        self.insertButton.clicked.connect(self.callInsert)
        layout.addWidget(self.elemLabel)
        layout.addWidget(self.XOffsetLabel)
        layout.addWidget(self.XOffsetBox)
        layout.addWidget(self.YOffsetLabel)
        layout.addWidget(self.YOffsetBox)
        layout.addWidget(self.insertButton)
        layout.addStretch(1)
        app.translateUI(self)
        self.loadSettings()
        self.connectSlots()

    def connectSlots(self):
        """Connect to all available editing signals."""
        # On creation we connect to all available signals
        self.connectToSvgView()

    def connectToSvgView(self):
        """Register with signals emitted by the
        SVG viewer for processing graphical editing.
        """
        self.svgview.objectStartDragging.connect(self.startDragging)
        self.svgview.objectDragging.connect(self.Dragging)
        self.svgview.objectDragged.connect(self.Dragged)
        self.svgview.cursor.connect(self.setObjectFromCursor)

    def disconnectFromSvgView(self):
        """Do not process graphical edits when the
        Object Editor isn't visible.

        NOTE(review): disconnect() without arguments removes *all*
        connections on these signals, not only this panel's slots —
        confirm no other consumers are connected.
        """
        self.svgview.objectStartDragging.disconnect()
        self.svgview.objectDragging.disconnect()
        self.svgview.objectDragged.disconnect()
        self.svgview.cursor.disconnect()

    def translateUI(self):
        """Set translated label texts; called by app.translateUI()."""
        self.XOffsetLabel.setText(_("X Offset"))
        self.XOffsetBox.setToolTip(_("Display the X Offset"))
        self.YOffsetLabel.setText(_("Y Offset"))
        self.YOffsetBox.setToolTip(_("Display the Y Offset"))
        # Disabled until an object is selected (see setObjectFromCursor)
        self.insertButton.setEnabled(False)

    def hideEvent(self, event):
        """Disconnect from all graphical editing signals
        when the panel isn't visible
        """
        self.disconnectFromSvgView()
        event.accept()

    def showEvent(self, event):
        """Connect to the graphical editing signals
        when the panel becomes visible
        """
        self.connectToSvgView()
        event.accept()

    def callInsert(self):
        """Insert the override command in the source."""
        if self.define:
            self.define.insertOverride(self.XOffsetBox.value(), self.YOffsetBox.value())

    @QtCore.pyqtSlot(float, float)
    def setOffset(self, x, y):
        """Display the updated offset."""
        self.XOffsetBox.setValue(x)
        self.YOffsetBox.setValue(y)

    @QtCore.pyqtSlot(float, float)
    def startDragging(self, x, y):
        """Set the value of the offset externally."""
        # temporary debug output
        # print("Start dragging with offset", x, y)
        self.setOffset(x, y)

    @QtCore.pyqtSlot(float, float)
    def Dragging(self, x, y):
        """Set the value of the offset externally."""
        # temporary debug output
        # print("Dragging with offset", x, y)
        self.setOffset(x, y)

    @QtCore.pyqtSlot(float, float)
    def Dragged(self, x, y):
        """Set the value of the offset externally."""
        # temporary debug output
        # print("Dragged to", x, y)
        self.setOffset(x, y)

    @QtCore.pyqtSlot(QTextCursor)
    def setObjectFromCursor(self, cursor):
        """Set selected element."""
        self.define = defineoffset.DefineOffset(self.mainwindow.currentDocument())
        self.elemLabel.setText(self.define.getCurrentLilyObject(cursor))
        self.insertButton.setEnabled(True)

    def loadSettings(self):
        """Called on construction. Load settings and set checkboxes state.

        NOTE(review): the settings group is opened but no keys are read
        yet — appears to be a placeholder.
        """
        s = QSettings()
        s.beginGroup("object_editor")

    def saveSettings(self):
        """Called on close. Save settings and checkboxes state.

        NOTE(review): the settings group is opened but no keys are written
        yet — appears to be a placeholder.
        """
        s = QSettings()
        s.beginGroup("object_editor")
|
dialogs | aboutdlg | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import sys
import wal
from sk1 import _, config, events
from sk1.dialogs.aboutdlg_credits import CREDITS
from sk1.dialogs.aboutdlg_license import LICENSE
from sk1.resources import get_bmp, icons
from uc2 import uc2const
class AboutDialog(wal.SimpleDialog):
    """Tabbed "About" dialog: program info, components, credits, license."""

    sizer = None
    app = None

    def __init__(self, app, parent, title, size=config.about_dlg_size):
        self.app = app
        wal.SimpleDialog.__init__(
            self, parent, title, size, margin=0, resizable=False, add_line=False
        )

    def build(self):
        """Populate the dialog body with one notebook tab per info page."""
        notebook = wal.Notebook(self)
        pages = (
            (AboutPage(self.app, notebook), _("About")),
            (ComponentsPage(notebook), _("Components")),
            (TranslatorsPage(notebook), _("Translators")),
            (ThanksPage(notebook), _("Thanks to")),
            (LicensePage(notebook), _("License")),
        )
        for page, caption in pages:
            notebook.add_page(page, caption)
        self.pack(notebook, expand=True, fill=True, padding_all=5)
class AboutPage(wal.HPanel):
    """'About' tab: logo, application name, version/build info, copyright
    and project link.
    """

    def __init__(self, app, parent):
        wal.HPanel.__init__(self, parent)
        hp = wal.HPanel(self)
        self.pack(hp)
        hp.pack((55, 15))
        logo_p = wal.VPanel(hp)
        hp.pack(logo_p, fill=True)
        logo_p.pack(get_bmp(logo_p, icons.SK1_ICON48), padding=5)
        hp.pack((10, 5))
        box = wal.VPanel(hp)
        hp.pack(box, padding=5)
        # Fetch appdata once (the original code redundantly re-assigned it)
        data = app.appdata
        txt = data.app_name + " - " + _("vector graphics editor")
        box.pack(wal.Label(box, txt, True, 2), fill=True)
        # Show the build mark only when a build number is actually set
        mark = "" if not data.build else " %s %s" % (_("build"), data.build)
        txt = "%s: %s %s%s" % (_("Version"), data.version, data.revision, mark)
        box.pack(wal.Label(box, txt), fill=True)
        box.pack((35, 35))
        import datetime

        # Copyright notice always ends with the current year
        year = str(datetime.date.today().year)
        txt = "(C) 2011-%s sK1 Project team" % year + "\n"
        box.pack(wal.Label(box, txt), fill=True)
        p = wal.HPanel(box)
        p.pack(wal.HyperlinkLabel(p, "https://sk1project.net"))
        box.pack(p, fill=True)
class ComponentsPage(wal.VPanel):
    """'Components' tab: table of bundled libraries and their versions."""

    def __init__(self, parent):
        wal.VPanel.__init__(self, parent)
        # Imported lazily so merely loading this module stays cheap
        import reportlab
        from uc2 import cms, libcairo, libimg, libpango

        data = [[_("Component"), _("Version")]]
        mark = "" if not uc2const.BUILD else " build %s" % uc2const.BUILD
        uc_ver = "%s %s%s" % (uc2const.VERSION, uc2const.REVISION, mark)
        data.append(["Python", sys.version])
        data.append(["wxPython", wal.VERSION])
        data.append(["UniConvertor", uc_ver])
        data.append(["LCMS", cms.libcms.get_version()])
        data.append(["Cairo", libcairo.get_version()[0]])
        data.append(["pycairo", libcairo.get_version()[1]])
        data.append(["Pillow", libimg.get_version()])
        data.append(["ImageMagick", libimg.get_magickwand_version()[0]])
        data.append(["Pango", libpango.get_version()])
        data.append(["Reportlab", reportlab.Version])
        # pycups is only used on non-Windows platforms
        if not wal.IS_MSW:
            import cups
            try:
                # HACK: pycups exposes no version constant. cups.require()
                # raises RuntimeError whose message ends with the installed
                # version, so an impossibly high requirement forces the error.
                # If pycups ever reaches 10.0, no row is appended.
                cups.require("10.0")
            except RuntimeError as e:
                data.append(["pycups", str(e).split()[-1]])
        # Colored panel behind the list produces a one-pixel border effect
        vp = wal.VPanel(self)
        vp.set_bg(wal.UI_COLORS["border"])
        slist = wal.ReportList(vp, data, border=False)
        vp.pack(slist, expand=True, fill=True, padding_all=1)
        self.pack(vp, expand=True, fill=True, padding_all=5)
        slist.set_column_width(0, wal.LIST_AUTOSIZE)
# Translator credits for the "Translators" tab: [language, contributor] rows.
# An empty first column continues the previous language's contributor list.
TR_LIST = [
    ["LANG", "Translators"],
    ["Brazilian Portuguese", "João Lima Neto <contato@joaolimaneto.com.br>"],
    ["French", "anonymous <nobody@nowhere.com>"],
    ["German", "Thore Sommer <mail@thson.de>,"],
    ["", "Michael Schorcht"],
    ["Italian", "Tom Spaccavento <spcvntom@gmail.com>"],
    ["Polish", "Krzysztof Broński <krzysztof.p.bronski@gmail.com>"],
    ["Russian", "Ihor Novikov <sk1.project.org@gmail.com>"],
    ["Spanish", "Carlos Jesús Atagua Díaz <atacarlos@gmail.com>"],
    ["Ukrainian", "Maxim Barabash <maxim.s.barabash@gmail.com>,"],
    ["", "Eugeniy Fedirets <evgeniy_fedirets@ukr.net>"],
]
class TranslatorsPage(wal.VPanel):
    """'Translators' tab: table of translation contributors per language."""

    def __init__(self, parent):
        wal.VPanel.__init__(self, parent)
        # Colored panel behind the list produces a one-pixel border effect
        border_panel = wal.VPanel(self)
        border_panel.set_bg(wal.UI_COLORS["border"])
        report = wal.ReportList(border_panel, TR_LIST, border=False)
        border_panel.pack(report, expand=True, fill=True, padding_all=1)
        self.pack(border_panel, expand=True, fill=True, padding_all=5)
        report.set_column_width(0, wal.LIST_AUTOSIZE)
class ThanksPage(wal.VPanel):
    """'Thanks to' tab: read-only multiline text area with the credits."""

    def __init__(self, parent):
        wal.VPanel.__init__(self, parent)
        credits_entry = wal.Entry(self, CREDITS, multiline=True, editable=False)
        self.pack(credits_entry, expand=True, fill=True, padding_all=5)
class LicensePage(wal.VPanel):
    """'License' tab: read-only multiline text area with the license text."""

    def __init__(self, parent):
        wal.VPanel.__init__(self, parent)
        license_entry = wal.Entry(self, LICENSE, multiline=True, editable=False)
        self.pack(license_entry, expand=True, fill=True, padding_all=5)
class EvetLoopMonitor(wal.VPanel):
    """Debug panel: one row per event channel with its connection count.

    NOTE: the class name keeps its historical misspelling ("Evet") because
    renaming would break external references.
    """

    def __init__(self, parent):
        wal.VPanel.__init__(self, parent)
        # Each channel is a sequence whose first element is the name and
        # whose remaining elements are the connected handlers.
        rows = [["EventLoop", "Connections"]]
        rows.extend(
            [channel[0], str(len(channel) - 1)] for channel in events.ALL_CHANNELS
        )
        report = wal.ReportList(self, rows, border=False)
        self.pack(report, expand=True, fill=True)
        report.set_column_width(0, wal.LIST_AUTOSIZE)
def about_dialog(app, parent):
    """Create, refresh and show the About dialog for the application."""
    caption = "%s %s" % (_("About"), app.appdata.app_name)
    dlg = AboutDialog(app, parent, caption)
    dlg.Refresh()
    dlg.show()
|
api | urls | from common.api_helpers.optional_slash_router import (
OptionalSlashRouter,
optional_slash_path,
)
from django.urls import include, path, re_path
from .views import UserNotificationPolicyView, auth
from .views.alert_group import AlertGroupView
from .views.alert_receive_channel import AlertReceiveChannelView
from .views.alert_receive_channel_template import AlertReceiveChannelTemplateView
from .views.alerts import AlertDetailView
from .views.channel_filter import ChannelFilterView
from .views.custom_button import CustomButtonView
from .views.escalation_chain import EscalationChainViewSet
from .views.escalation_policy import EscalationPolicyView
from .views.features import FeaturesAPIView
from .views.integration_heartbeat import IntegrationHeartBeatView
from .views.live_setting import LiveSettingViewSet
from .views.on_call_shifts import OnCallShiftView
from .views.organization import (
CurrentOrganizationView,
GetChannelVerificationCode,
GetTelegramVerificationCode,
SetGeneralChannel,
)
from .views.paging import DirectPagingAPIView
from .views.preview_template_options import PreviewTemplateOptionsView
from .views.public_api_tokens import PublicApiTokenView
from .views.resolution_note import ResolutionNoteView
from .views.route_regex_debugger import RouteRegexDebuggerView
from .views.schedule import ScheduleView
from .views.shift_swap import ShiftSwapViewSet
from .views.slack_channel import SlackChannelView
from .views.slack_team_settings import (
AcknowledgeReminderOptionsAPIView,
SlackTeamSettingsAPIView,
UnAcknowledgeTimeoutOptionsAPIView,
)
from .views.team import TeamViewSet
from .views.telegram_channels import TelegramChannelViewSet
from .views.user import CurrentUserView, UserView
from .views.user_group import UserGroupViewSet
from .views.webhooks import WebhooksView
app_name = "api-internal"

# ViewSet routes; OptionalSlashRouter accepts URLs both with and without a
# trailing slash.
router = OptionalSlashRouter()
router.register(r"users", UserView, basename="user")
router.register(r"teams", TeamViewSet, basename="team")
router.register(r"alertgroups", AlertGroupView, basename="alertgroup")
router.register(
    r"notification_policies", UserNotificationPolicyView, basename="notification_policy"
)
router.register(
    r"escalation_policies", EscalationPolicyView, basename="escalation_policy"
)
router.register(
    r"escalation_chains", EscalationChainViewSet, basename="escalation_chain"
)
router.register(
    r"alert_receive_channels", AlertReceiveChannelView, basename="alert_receive_channel"
)
router.register(
    r"alert_receive_channel_templates",
    AlertReceiveChannelTemplateView,
    basename="alert_receive_channel_template",
)
router.register(r"channel_filters", ChannelFilterView, basename="channel_filter")
router.register(r"schedules", ScheduleView, basename="schedule")
router.register(r"custom_buttons", CustomButtonView, basename="custom_button")
router.register(r"webhooks", WebhooksView, basename="webhooks")
router.register(r"resolution_notes", ResolutionNoteView, basename="resolution_note")
router.register(
    r"telegram_channels", TelegramChannelViewSet, basename="telegram_channel"
)
router.register(r"slack_channels", SlackChannelView, basename="slack_channel")
router.register(r"user_groups", UserGroupViewSet, basename="user_group")
router.register(
    r"heartbeats", IntegrationHeartBeatView, basename="integration_heartbeat"
)
router.register(r"tokens", PublicApiTokenView, basename="api_token")
router.register(r"live_settings", LiveSettingViewSet, basename="live_settings")
router.register(r"oncall_shifts", OnCallShiftView, basename="oncall_shifts")
router.register(r"shift_swaps", ShiftSwapViewSet, basename="shift_swap")
# Non-viewset endpoints. optional_slash_path, like the router above,
# tolerates a missing trailing slash.
urlpatterns = [
    path("", include(router.urls)),
    optional_slash_path("user", CurrentUserView.as_view(), name="api-user"),
    optional_slash_path(
        "set_general_channel",
        SetGeneralChannel.as_view(),
        name="api-set-general-log-channel",
    ),
    optional_slash_path(
        "organization", CurrentOrganizationView.as_view(), name="api-organization"
    ),
    # TODO: remove current_team routes in future release
    optional_slash_path(
        "current_team", CurrentOrganizationView.as_view(), name="api-current-team"
    ),
    optional_slash_path(
        "current_team/get_telegram_verification_code",
        GetTelegramVerificationCode.as_view(),
        name="api-get-telegram-verification-code",
    ),
    optional_slash_path(
        "current_team/get_channel_verification_code",
        GetChannelVerificationCode.as_view(),
        name="api-get-channel-verification-code",
    ),
    optional_slash_path(
        "slack_settings", SlackTeamSettingsAPIView.as_view(), name="slack-settings"
    ),
    optional_slash_path(
        "slack_settings/acknowledge_remind_options",
        AcknowledgeReminderOptionsAPIView.as_view(),
        name="acknowledge-reminder-options",
    ),
    optional_slash_path(
        "slack_settings/unacknowledge_timeout_options",
        UnAcknowledgeTimeoutOptionsAPIView.as_view(),
        name="unacknowledge-timeout-options",
    ),
    optional_slash_path("features", FeaturesAPIView.as_view(), name="features"),
    optional_slash_path(
        "preview_template_options",
        PreviewTemplateOptionsView.as_view(),
        name="preview_template_options",
    ),
    optional_slash_path(
        "route_regex_debugger",
        RouteRegexDebuggerView.as_view(),
        name="route_regex_debugger",
    ),
    # Alert detail uses re_path to allow an optional trailing slash
    re_path(r"^alerts/(?P<id>\w+)/?$", AlertDetailView.as_view(), name="alerts-detail"),
    optional_slash_path(
        "direct_paging", DirectPagingAPIView.as_view(), name="direct_paging"
    ),
]
# Social-auth (Slack login) endpoints.
urlpatterns += [
    # For some reason frontend is using url without / at the end. Hacking here to avoid 301's :(
    path(
        r"login/<backend>",
        auth.overridden_login_slack_auth,
        name="slack-auth-with-no-slash",
    ),
    path(r"login/<backend>/", auth.overridden_login_slack_auth, name="slack-auth"),
    path(
        r"complete/<backend>/",
        auth.overridden_complete_slack_auth,
        name="complete-slack-auth",
    ),
]
|
extractor | cammodels | # coding: utf-8
from __future__ import unicode_literals
from ..utils import int_or_none, url_or_none
from .common import InfoExtractor
class CamModelsIE(InfoExtractor):
    # Extractor for live webcam streams on cammodels.com; stream manifests
    # are served by manifest-server.naiadsystems.com.
    _VALID_URL = r"https?://(?:www\.)?cammodels\.com/cam/(?P<id>[^/?#&]+)"
    _TESTS = [
        {
            "url": "https://www.cammodels.com/cam/AutumnKnight/",
            "only_matching": True,
            "age_limit": 18,
        }
    ]

    def _real_extract(self, url):
        user_id = self._match_id(url)
        # The manifest maps delivery methods (e.g. rtmp/hls/jpeg) to a dict
        # with an "encodings" list; entries of unexpected shape are skipped.
        manifest = self._download_json(
            "https://manifest-server.naiadsystems.com/live/s:%s.json" % user_id, user_id
        )
        formats = []
        thumbnails = []
        for format_id, format_dict in manifest["formats"].items():
            if not isinstance(format_dict, dict):
                continue
            encodings = format_dict.get("encodings")
            if not isinstance(encodings, list):
                continue
            # Codecs are specified per delivery method, not per encoding
            vcodec = format_dict.get("videoCodec")
            acodec = format_dict.get("audioCodec")
            for media in encodings:
                if not isinstance(media, dict):
                    continue
                media_url = url_or_none(media.get("location"))
                if not media_url:
                    continue
                # Format id becomes e.g. "hls-720p" when a height is known
                format_id_list = [format_id]
                height = int_or_none(media.get("videoHeight"))
                if height is not None:
                    format_id_list.append("%dp" % height)
                f = {
                    "url": media_url,
                    "format_id": "-".join(format_id_list),
                    "width": int_or_none(media.get("videoWidth")),
                    "height": height,
                    "vbr": int_or_none(media.get("videoKbps")),
                    "abr": int_or_none(media.get("audioKbps")),
                    "fps": int_or_none(media.get("fps")),
                    "vcodec": vcodec,
                    "acodec": acodec,
                }
                if "rtmp" in format_id:
                    f["ext"] = "flv"
                elif "hls" in format_id:
                    f.update(
                        {
                            "ext": "mp4",
                            # hls skips fragments, preferring rtmp
                            "preference": -1,
                        }
                    )
                else:
                    # Non-A/V delivery: keep jpeg snapshots as thumbnails,
                    # silently drop anything else unknown.
                    if format_id == "jpeg":
                        thumbnails.append(
                            {
                                "url": f["url"],
                                "width": f["width"],
                                "height": f["height"],
                                "format_id": f["format_id"],
                            }
                        )
                    continue
                formats.append(f)
        self._sort_formats(formats)
        return {
            "id": user_id,
            "title": self._live_title(user_id),
            "thumbnails": thumbnails,
            "is_live": True,
            "formats": formats,
            "age_limit": 18,
        }
|
migrations | 0003_add_refresh_help_text | # Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds help_text to Settings.dashboard_refresh_rate; choices and default
    # are restated unchanged, so no real schema change is performed.

    dependencies = [
        ("babybuddy", "0002_add_settings"),
    ]

    operations = [
        migrations.AlterField(
            model_name="settings",
            name="dashboard_refresh_rate",
            field=models.DurationField(
                blank=True,
                choices=[
                    (None, "disabled"),
                    (datetime.timedelta(0, 60), "1 min."),
                    (datetime.timedelta(0, 120), "2 min."),
                    (datetime.timedelta(0, 180), "3 min."),
                    (datetime.timedelta(0, 240), "4 min."),
                    (datetime.timedelta(0, 300), "5 min."),
                    (datetime.timedelta(0, 600), "10 min."),
                    (datetime.timedelta(0, 900), "15 min."),
                    (datetime.timedelta(0, 1800), "30 min."),
                ],
                default=datetime.timedelta(0, 60),
                help_text="This setting will only be used when a browser does not support refresh on focus.",
                null=True,
                verbose_name="Refresh rate",
            ),
        ),
    ]
|
femtaskpanels | task_constraint_magnetization | # ***************************************************************************
# * Copyright (c) 2023 Uwe Stöhr <uwestoehr@lyx.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM constraint magnetization task panel for the document object"
__author__ = "Uwe Stöhr"
__url__ = "https://www.freecad.org"
## @package task_constraint_magnetization
# \ingroup FEM
# \brief task panel for constraint magnetization object
import FreeCAD
import FreeCADGui
from femguiutils import selection_widgets
from femtools import femutils, membertools
class _TaskPanel(object):
    """Task panel for the FEM constraint magnetization document object.

    Combines the Magnetization.ui parameter widget with a geometry
    selection widget and syncs both with the underlying object.
    """

    def __init__(self, obj):
        self._obj = obj
        self._paramWidget = FreeCADGui.PySideUic.loadUi(
            FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/Magnetization.ui"
        )
        self._initParamWidget()
        # geometry selection widget
        # magnetization is always a body force for 3D, therefore only allow solid
        # NOTE(review): "Face" is also allowed below despite the comment
        # above — confirm whether face selection is intentional.
        self._selectionWidget = selection_widgets.GeometryElementsSelection(
            obj.References, ["Solid", "Face"], True, False
        )
        # form made from param and selection widget
        self.form = [self._paramWidget, self._selectionWidget]
        analysis = obj.getParentGroup()
        self._mesh = None
        self._part = None
        if analysis is not None:
            self._mesh = membertools.get_single_member(analysis, "Fem::FemMeshObject")
        if self._mesh is not None:
            self._part = femutils.get_part_to_mesh(self._mesh)
        # Visibility state remembered in open() and restored on close
        self._partVisible = None
        self._meshVisible = None

    def open(self):
        """Hide the mesh and show the part while the panel is open."""
        if self._mesh is not None and self._part is not None:
            self._meshVisible = self._mesh.ViewObject.isVisible()
            self._partVisible = self._part.ViewObject.isVisible()
            self._mesh.ViewObject.hide()
            self._part.ViewObject.show()

    def reject(self):
        """Discard any changes and close the panel."""
        self._restoreVisibility()
        FreeCADGui.ActiveDocument.resetEdit()
        return True

    def accept(self):
        """Apply widget values to the object, recompute and close."""
        if self._obj.References != self._selectionWidget.references:
            self._obj.References = self._selectionWidget.references
        self._applyWidgetChanges()
        self._obj.Document.recompute()
        FreeCADGui.ActiveDocument.resetEdit()
        self._restoreVisibility()
        return True

    def _restoreVisibility(self):
        """Restore mesh/part visibility saved in open()."""
        if self._mesh is not None and self._part is not None:
            if self._meshVisible:
                self._mesh.ViewObject.show()
            else:
                self._mesh.ViewObject.hide()
            if self._partVisible:
                self._part.ViewObject.show()
            else:
                self._part.ViewObject.hide()

    def _initParamWidget(self):
        """Load object values into the quantity boxes and bind expressions."""
        self._paramWidget.realXQSB.setProperty("value", self._obj.Magnetization_re_1)
        FreeCADGui.ExpressionBinding(self._paramWidget.realXQSB).bind(
            self._obj, "Magnetization_re_1"
        )
        self._paramWidget.realYQSB.setProperty("value", self._obj.Magnetization_re_2)
        FreeCADGui.ExpressionBinding(self._paramWidget.realYQSB).bind(
            self._obj, "Magnetization_re_2"
        )
        self._paramWidget.realZQSB.setProperty("value", self._obj.Magnetization_re_3)
        FreeCADGui.ExpressionBinding(self._paramWidget.realZQSB).bind(
            self._obj, "Magnetization_re_3"
        )
        self._paramWidget.imagXQSB.setProperty("value", self._obj.Magnetization_im_1)
        FreeCADGui.ExpressionBinding(self._paramWidget.imagXQSB).bind(
            self._obj, "Magnetization_im_1"
        )
        self._paramWidget.imagYQSB.setProperty("value", self._obj.Magnetization_im_2)
        FreeCADGui.ExpressionBinding(self._paramWidget.imagYQSB).bind(
            self._obj, "Magnetization_im_2"
        )
        self._paramWidget.imagZQSB.setProperty("value", self._obj.Magnetization_im_3)
        FreeCADGui.ExpressionBinding(self._paramWidget.imagZQSB).bind(
            self._obj, "Magnetization_im_3"
        )
        # "unspec" checkboxes mirror the per-component *_Disabled flags
        self._paramWidget.reXunspecBox.setChecked(self._obj.Magnetization_re_1_Disabled)
        self._paramWidget.reYunspecBox.setChecked(self._obj.Magnetization_re_2_Disabled)
        self._paramWidget.reZunspecBox.setChecked(self._obj.Magnetization_re_3_Disabled)
        self._paramWidget.imXunspecBox.setChecked(self._obj.Magnetization_im_1_Disabled)
        self._paramWidget.imYunspecBox.setChecked(self._obj.Magnetization_im_2_Disabled)
        self._paramWidget.imZunspecBox.setChecked(self._obj.Magnetization_im_3_Disabled)

    def _applyMagnetizationChanges(self, enabledBox, magnetizationQSB):
        """Read one ("unspecified" checkbox, quantity box) pair.

        Returns (checkbox state, magnetization value); the callers assign
        the first element to the matching *_Disabled property. Falls back
        to "0.0 A/m" when the quantity box value cannot be parsed.
        """
        enabled = enabledBox.isChecked()
        magnetization = None
        try:
            # NOTE(review): assumes property("value") raises ValueError on
            # unparsable input — confirm against the quantity widget API.
            magnetization = magnetizationQSB.property("value")
        except ValueError:
            FreeCAD.Console.PrintMessage(
                "Wrong input. Not recognised input: '{}' "
                "Magnetization has not been set.\n".format(magnetizationQSB.text())
            )
            magnetization = "0.0 A/m"
        return enabled, magnetization

    def _applyWidgetChanges(self):
        # apply the magnetizations and their enabled state
        (
            self._obj.Magnetization_re_1_Disabled,
            self._obj.Magnetization_re_1,
        ) = self._applyMagnetizationChanges(
            self._paramWidget.reXunspecBox, self._paramWidget.realXQSB
        )
        (
            self._obj.Magnetization_re_2_Disabled,
            self._obj.Magnetization_re_2,
        ) = self._applyMagnetizationChanges(
            self._paramWidget.reYunspecBox, self._paramWidget.realYQSB
        )
        (
            self._obj.Magnetization_re_3_Disabled,
            self._obj.Magnetization_re_3,
        ) = self._applyMagnetizationChanges(
            self._paramWidget.reZunspecBox, self._paramWidget.realZQSB
        )
        (
            self._obj.Magnetization_im_1_Disabled,
            self._obj.Magnetization_im_1,
        ) = self._applyMagnetizationChanges(
            self._paramWidget.imXunspecBox, self._paramWidget.imagXQSB
        )
        (
            self._obj.Magnetization_im_2_Disabled,
            self._obj.Magnetization_im_2,
        ) = self._applyMagnetizationChanges(
            self._paramWidget.imYunspecBox, self._paramWidget.imagYQSB
        )
        (
            self._obj.Magnetization_im_3_Disabled,
            self._obj.Magnetization_im_3,
        ) = self._applyMagnetizationChanges(
            self._paramWidget.imZunspecBox, self._paramWidget.imagZQSB
        )
|
OptionalManager | UiWebsocketPlugin | import html
import os
import re
import time
import gevent
from Config import config
from Plugin import PluginManager
from Translate import Translate
from util import helper
from util.Flag import flag
plugin_dir = os.path.dirname(__file__)

# Load this plugin's translations unless a translator "_" is already defined
# (e.g. when the module is executed again by the plugin manager).
if "_" not in locals():
    _ = Translate(plugin_dir + "/languages/")

# Cache of bigfile sha512 ids keyed by "<site address>/<inner path>", shared
# by all websocket connections to avoid repeated content.json lookups.
bigfile_sha512_cache = {}
@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    def __init__(self, *args, **kwargs):
        # Timestamp of the last peer-number refresh triggered through this
        # websocket; used to throttle updatePeerNumbers() calls.
        self.time_peer_numbers_updated = 0
        super(UiWebsocketPlugin, self).__init__(*args, **kwargs)
    def actionSiteSign(
        self, to, privatekey=None, inner_path="content.json", *args, **kwargs
    ):
        """Record the signed content dir among the user's own optional files,
        then delegate to the regular sign handler.
        """
        # Add file to content.db and set it as pinned
        content_db = self.site.content_manager.contents.db
        content_inner_dir = helper.getDirname(inner_path)
        content_db.my_optional_files[
            self.site.address + "/" + content_inner_dir
        ] = time.time()
        if len(content_db.my_optional_files) > 50:  # Keep only last 50
            # Evict the entry with the oldest sign timestamp
            oldest_key = min(
                iter(content_db.my_optional_files.keys()),
                key=(lambda key: content_db.my_optional_files[key]),
            )
            del content_db.my_optional_files[oldest_key]
        return super(UiWebsocketPlugin, self).actionSiteSign(
            to, privatekey, inner_path, *args, **kwargs
        )
    def updatePeerNumbers(self):
        """Recount optional-file peer numbers and notify the UI.

        Callers run this via gevent.spawn because the recount can be slow.
        """
        self.site.updateHashfield()
        content_db = self.site.content_manager.contents.db
        content_db.updatePeerNumbers()
        self.site.updateWebsocket(peernumber_updated=True)
    def addBigfileInfo(self, row):
        """Augment an optional-file row (in place) with bigfile details.

        Adds piece counts, download progress and seed/leech peer statistics.
        Returns True when the row's site has bigfiles and the file has piece
        info; otherwise returns False and leaves the row unchanged.
        """
        global bigfile_sha512_cache
        content_db = self.site.content_manager.contents.db
        site = content_db.sites[row["address"]]
        if not site.settings.get("has_bigfile"):
            return False
        # Resolve the file's sha512 (the piecefield key), cached per path to
        # avoid repeated getFileInfo() lookups.
        file_key = row["address"] + "/" + row["inner_path"]
        sha512 = bigfile_sha512_cache.get(file_key)
        file_info = None
        if not sha512:
            file_info = site.content_manager.getFileInfo(row["inner_path"])
            if not file_info or not file_info.get("piece_size"):
                return False
            sha512 = file_info["sha512"]
            bigfile_sha512_cache[file_key] = sha512
        if sha512 in site.storage.piecefields:
            piecefield = site.storage.piecefields[sha512].tobytes()
        else:
            piecefield = None
        if piecefield:
            # One byte per piece: b"\x01" marks a downloaded piece
            row["pieces"] = len(piecefield)
            row["pieces_downloaded"] = piecefield.count(b"\x01")
            row["downloaded_percent"] = 100 * row["pieces_downloaded"] / row["pieces"]
            if row["pieces_downloaded"]:
                if row["pieces"] == row["pieces_downloaded"]:
                    row["bytes_downloaded"] = row["size"]
                else:
                    # Approximate: the last piece may be shorter
                    if not file_info:
                        file_info = site.content_manager.getFileInfo(row["inner_path"])
                    row["bytes_downloaded"] = row["pieces_downloaded"] * file_info.get(
                        "piece_size", 0
                    )
            else:
                row["bytes_downloaded"] = 0
            # Download is in progress when any bad_files entry is under this path
            row["is_downloading"] = bool(
                next(
                    (
                        inner_path
                        for inner_path in site.bad_files
                        if inner_path.startswith(row["inner_path"])
                    ),
                    False,
                )
            )
        # Add leech / seed stats
        row["peer_seed"] = 0
        row["peer_leech"] = 0
        for peer in site.peers.values():
            if not peer.time_piecefields_updated or sha512 not in peer.piecefields:
                continue
            peer_piecefield = peer.piecefields[sha512].tobytes()
            if not peer_piecefield:
                continue
            # A peer with every piece is a seed, otherwise a leech
            if peer_piecefield == b"\x01" * len(peer_piecefield):
                row["peer_seed"] += 1
            else:
                row["peer_leech"] += 1
        # Add myself
        if piecefield:
            if row["pieces_downloaded"] == row["pieces"]:
                row["peer_seed"] += 1
            else:
                row["peer_leech"] += 1
        return True
    # Optional file functions

    def actionOptionalFileList(
        self,
        to,
        address=None,
        orderby="time_downloaded DESC",
        limit=10,
        filter="downloaded",
        filter_inner_path=None,
    ):
        """Send the list of optional files for a site (or "all" sites).

        filter may contain the words "bigfile", "downloaded" and "pinned";
        the parameter name shadows the builtin but is part of the public
        websocket API. Listing "all" sites requires the ADMIN permission.
        """
        if not address:
            address = self.site.address
        # Update peer numbers if necessary
        content_db = self.site.content_manager.contents.db
        if (
            time.time() - content_db.time_peer_numbers_updated > 60 * 1
            and time.time() - self.time_peer_numbers_updated > 60 * 5
        ):
            # Start in new thread to avoid blocking
            self.time_peer_numbers_updated = time.time()
            gevent.spawn(self.updatePeerNumbers)
        if address == "all" and "ADMIN" not in self.permissions:
            return self.response(to, {"error": "Forbidden"})
        if not self.hasSitePermission(address):
            return self.response(to, {"error": "Forbidden"})
        # orderby and limit are interpolated into the SQL below, so they are
        # validated strictly to prevent SQL injection.
        if not all(
            [
                re.match("^[a-z_*/+-]+( DESC| ASC|)$", part.strip())
                for part in orderby.split(",")
            ]
        ):
            return self.response(to, "Invalid order_by")
        if type(limit) != int:
            return self.response(to, "Invalid limit")
        back = []
        content_db = self.site.content_manager.contents.db
        wheres = {}
        wheres_raw = []
        if "bigfile" in filter:
            wheres["size >"] = 1024 * 1024 * 1
        if "downloaded" in filter:
            wheres_raw.append("(is_downloaded = 1 OR is_pinned = 1)")
        if "pinned" in filter:
            wheres["is_pinned"] = 1
        if filter_inner_path:
            wheres["inner_path__like"] = filter_inner_path
        if address == "all":
            join = "LEFT JOIN site USING (site_id)"
        else:
            wheres["site_id"] = content_db.site_ids[address]
            join = ""
        if wheres_raw:
            query_wheres_raw = "AND" + " AND ".join(wheres_raw)
        else:
            query_wheres_raw = ""
        # NOTE(review): the "?" placeholder appears to be expanded from the
        # wheres dict by the project's Db wrapper — confirm before changing.
        query = "SELECT * FROM file_optional %s WHERE ? %s ORDER BY %s LIMIT %s" % (
            join,
            query_wheres_raw,
            orderby,
            limit,
        )
        for row in content_db.execute(query, wheres):
            row = dict(row)
            if address != "all":
                row["address"] = address
            if row["size"] > 1024 * 1024:
                has_bigfile_info = self.addBigfileInfo(row)
            else:
                has_bigfile_info = False
            if not has_bigfile_info and "bigfile" in filter:
                continue
            if not has_bigfile_info:
                # Regular files: download progress is all-or-nothing
                if row["is_downloaded"]:
                    row["bytes_downloaded"] = row["size"]
                    row["downloaded_percent"] = 100
                else:
                    row["bytes_downloaded"] = 0
                    row["downloaded_percent"] = 0
            back.append(row)
        self.response(to, back)
def actionOptionalFileInfo(self, to, inner_path):
    """Respond with the file_optional database row for a single optional file.

    Replies with a dict of the row (augmented with bigfile info for files
    over 1MB) or None when the file is unknown.
    """
    db = self.site.content_manager.contents.db
    site_id = db.site_ids[self.site.address]
    # Update peer numbers if necessary (both the db-level and the
    # per-websocket caches must be stale); run in a new greenlet to
    # avoid blocking the response.
    if (
        time.time() - db.time_peer_numbers_updated > 60 * 1
        and time.time() - self.time_peer_numbers_updated > 60 * 5
    ):
        self.time_peer_numbers_updated = time.time()
        gevent.spawn(self.updatePeerNumbers)

    query = "SELECT * FROM file_optional WHERE site_id = :site_id AND inner_path = :inner_path LIMIT 1"
    result = db.execute(query, {"site_id": site_id, "inner_path": inner_path})
    row = next(result, None)
    if not row:
        self.response(to, None)
        return
    row = dict(row)
    if row["size"] > 1024 * 1024:
        # Big files carry extra download-progress information.
        row["address"] = self.site.address
        self.addBigfileInfo(row)
    self.response(to, row)
def setPin(self, inner_path, is_pinned, address=None):
    """Set the pinned state of inner_path on the given (or current) site.

    Returns "ok" on success, or an error dict when the user lacks
    permission for the site.
    """
    if not address:
        address = self.site.address
    if not self.hasSitePermission(address):
        return {"error": "Forbidden"}
    target_site = self.server.sites[address]
    target_site.content_manager.setPin(inner_path, is_pinned)
    return "ok"
@flag.no_multiuser
def actionOptionalFilePin(self, to, inner_path, address=None):
    """Pin one or more optional files and show a "done" notification."""
    paths = inner_path if type(inner_path) is list else [inner_path]
    result = self.setPin(paths, 1, address)
    if result == "ok":
        if len(paths) == 1:
            message = _["Pinned %s"] % html.escape(helper.getFilename(paths[0]))
        else:
            message = _["Pinned %s files"] % len(paths)
        self.cmd("notification", ["done", message, 5000])
    self.response(to, result)
@flag.no_multiuser
def actionOptionalFileUnpin(self, to, inner_path, address=None):
    """Remove the pin from one or more optional files and notify the UI."""
    paths = inner_path if type(inner_path) is list else [inner_path]
    result = self.setPin(paths, 0, address)
    if result == "ok":
        if len(paths) == 1:
            message = _["Removed pin from %s"] % html.escape(
                helper.getFilename(paths[0])
            )
        else:
            message = _["Removed pin from %s files"] % len(paths)
        self.cmd("notification", ["done", message, 5000])
    self.response(to, result)
@flag.no_multiuser
def actionOptionalFileDelete(self, to, inner_path, address=None):
    """Delete a downloaded optional file: update content.db bookkeeping,
    remove it from storage, and notify other websocket clients."""
    if not address:
        address = self.site.address
    if not self.hasSitePermission(address):
        return self.response(to, {"error": "Forbidden"})
    site = self.server.sites[address]
    content_db = site.content_manager.contents.db
    site_id = content_db.site_ids[site.address]
    # NOTE: the bare "?" placeholder is expanded from the wheres dict by the
    # project's custom execute() wrapper, not by sqlite itself.
    res = content_db.execute(
        "SELECT * FROM file_optional WHERE ? LIMIT 1",
        {"site_id": site_id, "inner_path": inner_path, "is_downloaded": 1},
    )
    row = next(res, None)
    if not row:
        return self.response(to, {"error": "Not found in content.db"})
    # Unregister the file from the optional-files hashfield/stats first.
    removed = site.content_manager.optionalRemoved(
        inner_path, row["hash_id"], row["size"]
    )
    # if not removed:
    #     return self.response(to, {"error": "Not found in hash_id: %s" % row["hash_id"]})
    # Mark as not downloaded/pinned and drop our own peer count for the file.
    content_db.execute(
        "UPDATE file_optional SET is_downloaded = 0, is_pinned = 0, peer = peer - 1 WHERE ?",
        {"site_id": site_id, "inner_path": inner_path},
    )
    try:
        site.storage.delete(inner_path)
    except Exception as err:
        return self.response(to, {"error": "File delete error: %s" % err})
    site.updateWebsocket(file_delete=inner_path)
    # Invalidate the pin cache if it mentioned this file.
    if inner_path in site.content_manager.cache_is_pinned:
        site.content_manager.cache_is_pinned = {}
    self.response(to, "ok")
# Limit functions
@flag.admin
def actionOptionalLimitStats(self, to):
back = {}
back["limit"] = config.optional_limit
back["used"] = self.site.content_manager.contents.db.getOptionalUsedBytes()
back["free"] = helper.getFreeSpace()
self.response(to, back)
@flag.no_multiuser
@flag.admin
def actionOptionalLimitSet(self, to, limit):
    """Set the optional file size limit and persist it to the config file."""
    # Strip a trailing ".0…" (e.g. "10.0" -> "10") for the in-memory value.
    config.optional_limit = re.sub(
        r"\.0+$", "", limit
    )  # Remove unnecessary digits from end
    # NOTE(review): the raw, unstripped value is what gets persisted, so the
    # saved value can differ from the in-memory one — confirm this is intended.
    config.saveValue("optional_limit", limit)
    self.response(to, "ok")
# Distribute help functions
def actionOptionalHelpList(self, to, address=None):
if not address:
address = self.site.address
if not self.hasSitePermission(address):
return self.response(to, {"error": "Forbidden"})
site = self.server.sites[address]
self.response(to, site.settings.get("optional_help", {}))
@flag.no_multiuser
def actionOptionalHelp(self, to, directory, title, address=None):
    """Start helping to distribute the optional files under a site directory.

    Replies with {"num": <file count>, "size": <total bytes>} for the directory.
    """
    address = address or self.site.address
    if not self.hasSitePermission(address):
        return self.response(to, {"error": "Forbidden"})
    site = self.server.sites[address]
    content_db = site.content_manager.contents.db
    site_id = content_db.site_ids[address]
    site.settings.setdefault("optional_help", {})

    stats = dict(
        content_db.execute(
            "SELECT COUNT(*) AS num, SUM(size) AS size FROM file_optional WHERE site_id = :site_id AND inner_path LIKE :inner_path",
            {"site_id": site_id, "inner_path": directory + "%"},
        ).fetchone()
    )
    # SUM() yields NULL (None) on an empty result set: normalize to numbers.
    for key in ("size", "num"):
        if not stats[key]:
            stats[key] = 0

    self.cmd(
        "notification",
        [
            "done",
            _[
                "You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>"
            ]
            % (html.escape(title), html.escape(directory)),
            10000,
        ],
    )
    site.settings["optional_help"][directory] = title
    self.response(to, dict(stats))
@flag.no_multiuser
def actionOptionalHelpRemove(self, to, directory, address=None):
    """Stop helping to distribute the given directory of a site."""
    address = address or self.site.address
    if not self.hasSitePermission(address):
        return self.response(to, {"error": "Forbidden"})
    site = self.server.sites[address]
    try:
        del site.settings["optional_help"][directory]
        self.response(to, "ok")
    except Exception:
        # Directory was not registered (or settings key missing).
        self.response(to, {"error": "Not found"})
def cbOptionalHelpAll(self, to, site, value):
    # Confirmation callback: persist the auto-download setting and echo it back.
    site.settings["autodownloadoptional"] = value
    self.response(to, value)
@flag.no_multiuser
def actionOptionalHelpAll(self, to, value, address=None):
    """Enable/disable auto-download of every new optional file on a site.

    Enabling asks for confirmation first, unless the current site has the
    ADMIN permission; disabling takes effect immediately.
    """
    address = address or self.site.address
    if not self.hasSitePermission(address):
        return self.response(to, {"error": "Forbidden"})
    site = self.server.sites[address]

    if not value:
        site.settings["autodownloadoptional"] = False
        self.response(to, False)
        return

    if "ADMIN" in self.site.settings["permissions"]:
        # Trusted site: no confirmation dialog needed.
        self.cbOptionalHelpAll(to, site, True)
        return

    site_title = site.content_manager.contents["content.json"].get("title", address)
    self.cmd(
        "confirm",
        [
            _["Help distribute all new optional files on site <b>%s</b>"]
            % html.escape(site_title),
            _["Yes, I want to help!"],
        ],
        lambda res: self.cbOptionalHelpAll(to, site, True),
    )
|
femobjects | solver_ccxtools | # ***************************************************************************
# * Copyright (c) 2015 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM solver calculix ccx tools document object"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecad.org"
## @package solver_ccxtools
# \ingroup FEM
# \brief solver calculix ccx tools object
import FreeCAD
from femsolver.calculix.solver import add_attributes, on_restore_of_document
from . import base_fempythonobject
class SolverCcxTools(base_fempythonobject.BaseFemPythonObject):
    """The Fem::FemSolver's Proxy python type, add solver specific properties"""

    # Identifier used by the framework to recognize this proxy type.
    Type = "Fem::SolverCcxTools"

    def __init__(self, obj):
        """Attach CalculiX-ccxtools specific properties to obj."""
        # Modernized from super(SolverCcxTools, self) — equivalent on Python 3.
        super().__init__(obj)
        ccx_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem/Ccx")
        # add attributes
        # implemented in framework calculix solver module
        add_attributes(obj, ccx_prefs)
        obj.addProperty(
            "App::PropertyPath",
            "WorkingDir",
            "Fem",
            # Fixed tooltip wording: was "...used it is left blank..." (missing "if").
            "Working directory for calculations, will only be used if it is left blank in preferences",
        )
        # the working directory is not set, the solver working directory is
        # only used if the preferences working directory is left blank

    def onDocumentRestored(self, obj):
        """Re-sync solver attributes from preferences on document restore."""
        ccx_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem/Ccx")
        # implemented in framework calculix solver module
        on_restore_of_document(obj, ccx_prefs)
|
extractor | tudou | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TudouPlaylistIE(InfoExtractor):
    """Extractor for Tudou "listplay" playlist pages."""

    IE_NAME = "tudou:playlist"
    _VALID_URL = r"https?://(?:www\.)?tudou\.com/listplay/(?P<id>[\w-]{11})\.html"
    _TESTS = [
        {
            "url": "http://www.tudou.com/listplay/zzdE77v6Mmo.html",
            "info_dict": {
                "id": "zzdE77v6Mmo",
            },
            "playlist_mincount": 209,
        }
    ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        playlist_data = self._download_json(
            "http://www.tudou.com/tvp/plist.action?lcode=%s" % playlist_id, playlist_id
        )
        # Turn each playlist item into a url_result pointing at the Tudou IE.
        entries = []
        for item in playlist_data["items"]:
            entries.append(
                self.url_result(
                    "http://www.tudou.com/programs/view/%s" % item["icode"],
                    "Tudou",
                    item["icode"],
                    item["kw"],
                )
            )
        return self.playlist_result(entries, playlist_id)
class TudouAlbumIE(InfoExtractor):
    """Extractor for Tudou "albumcover"/"albumplay" album pages."""

    IE_NAME = "tudou:album"
    _VALID_URL = r"https?://(?:www\.)?tudou\.com/album(?:cover|play)/(?P<id>[\w-]{11})"
    _TESTS = [
        {
            "url": "http://www.tudou.com/albumplay/v5qckFJvNJg.html",
            "info_dict": {
                "id": "v5qckFJvNJg",
            },
            "playlist_mincount": 45,
        }
    ]

    def _real_extract(self, url):
        album_id = self._match_id(url)
        album_data = self._download_json(
            "http://www.tudou.com/tvp/alist.action?acode=%s" % album_id, album_id
        )
        # Turn each album item into a url_result pointing at the Tudou IE.
        entries = []
        for item in album_data["items"]:
            entries.append(
                self.url_result(
                    "http://www.tudou.com/programs/view/%s" % item["icode"],
                    "Tudou",
                    item["icode"],
                    item["kw"],
                )
            )
        return self.playlist_result(entries, album_id)
|
signal | decimate | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
import numpy
from friture_extensions.lfilter import pyx_lfilter_float64_1D
def decimate(bdec, adec, x, zi):
    """Low-pass filter x with the IIR filter (bdec, adec) and keep every 2nd sample.

    Args:
        bdec: filter numerator coefficients.
        adec: filter denominator coefficients.
        x: 1-D input signal (must be non-empty).
        zi: initial filter conditions.

    Returns:
        (decimated signal, final filter conditions zf)

    Raises:
        ValueError: if x is empty. (Previously a bare Exception; ValueError is
            a subclass, so callers catching Exception still work.)
    """
    if len(x) == 0:
        raise ValueError("Filter input is too small")
    # could use a polyphase decimator here
    x_dec, zf = pyx_lfilter_float64_1D(bdec, adec, x, zi)
    return x_dec[::2], zf
def decimate_multiple(Ndec, bdec, adec, x, zis):
    """Decimate x Ndec times in a row.

    Args:
        Ndec: number of decimation stages.
        bdec, adec: filter coefficients shared by every stage.
        x: 1-D input signal (numpy array).
        zis: iterable of per-stage initial filter conditions, or None to
            start every stage from zero initial conditions.

    Returns:
        (decimated signal, list of final per-stage conditions — or None when
        zis was None, matching the original contract)
    """
    # FIXME problems when x is smaller than filter coeff
    # do not run on empty arrays, otherwise output contains artifacts
    if x.size == 0:
        return x, zis
    x_dec = x
    if zis is None:
        # Bug fix: the original called decimate(bdec, adec, x_dec) without the
        # required `zi` argument, raising TypeError. Start each stage from a
        # fresh zero state (same length filtic-style state as the zis path).
        state_len = max(len(bdec), len(adec)) - 1
        for _ in range(Ndec):
            x_dec, _zf = decimate(bdec, adec, x_dec, numpy.zeros(state_len))
        return x_dec, None
    zfs = []
    for _, zi in zip(range(Ndec), zis):
        x_dec, zf = decimate(bdec, adec, x_dec, zi=zi)
        # zf can be reused to restart the filter
        zfs.append(zf)
    return x_dec, zfs
def decimate_multiple_filtic(Ndec, bdec, adec):
    """Build zero initial conditions to start each of the Ndec subsampler stages.

    Args:
        Ndec: number of decimation stages.
        bdec, adec: filter coefficients (their lengths fix the state size).

    Returns:
        A list of Ndec zero arrays, each of length max(len(bdec), len(adec)) - 1.
    """
    # The state length does not depend on the stage: compute it once instead of
    # per iteration, and avoid the ambiguous name `l`.
    state_len = max(len(bdec), len(adec)) - 1
    return [numpy.zeros(state_len) for _ in range(Ndec)]
|
gnuradio | _cmake_format | # Copyright 2021 Marcus Müller
# SPDX-License-Identifier: LGPL-3.0-or-later
class _clang_format_options:
def __init__(self, clangfile=None):
if not clangfile:
clangfile = ".clang-format"
self.lines = []
with open(clangfile, encoding="utf-8") as opened:
for line in opened:
if line.strip().startswith("#"):
continue
self.lines.append(line.rstrip().split(":"))
def __getitem__(self, string):
path = string.split(".")
value = None
for crumble in path:
for line in self.lines:
if line[0].strip() == crumble:
if len(line) > 1:
value = line[1].strip().rstrip()
break
return value
# Parse the project's .clang-format once so cmake-format can mirror its layout.
_clang_format = _clang_format_options()

# NOTE: `section` is injected by cmake-format when it executes this config
# file; the variable names below are cmake-format's configuration keys.
with section("parse"):
    # Teach cmake-format the signature of GNU Radio's custom install command.
    additional_commands = {
        "gr_python_install": {
            "flags": [],
            "kwargs": {"PROGRAMS": "*", "FILES": "*", "DESTINATION": "*"},
        },
    }

with section("markup"):
    first_comment_is_literal = True
    enable_markup = False

with section("format"):
    disable = False
    # Mirror clang-format's settings so CMake and C++ code share one layout.
    line_width = int(_clang_format["ColumnLimit"])
    tab_size = int(_clang_format["IndentWidth"])
    min_prefix_chars = tab_size
    max_prefix_chars = 3 * tab_size
    use_tabchars = _clang_format["UseTab"] in (
        "ForIndentation",
        "ForContinuationAndIndentation",
        "Always",
    )
    separate_ctrl_name_with_space = False
    separate_fn_name_with_space = False
    dangle_parens = False
    command_case = "canonical"
    keyword_case = "upper"

with section("lint"):
    max_arguments = 6
    max_localvars = 20
    max_statements = 75
|
dictionary | rtfcre_parse | import re
import sys
from collections import deque
from plover import log
from rtf_tokenize import RtfTokenizer
class RtfParseError(Exception):
    """Parse error carrying a 1-based line/column location."""

    def __init__(self, lnum, cnum, fmt, *fmt_args):
        detail = fmt % fmt_args
        super().__init__("line %u, column %u: %s" % (lnum + 1, cnum + 1, detail))
class BadRtfError(Exception):
    """Raised when the input does not look like an RTF document at all."""

    def __init__(self, fmt, *fmt_args):
        super().__init__(fmt % fmt_args)
def finalize_translation(text):
    """Post-process a finished translation string.

    Wraps isolated leading punctuation in {…} and runs of two or more
    leading/trailing whitespace characters in {^…^} attach operators.
    """
    if not text:
        return text
    # caseCATalyst doesn't put punctuation in \cxp: treat any isolated
    # punctuation at the beginning of the translation as special.
    head, tail = text[0], text[1:]
    if head in ".?!:;," and tail in ("", " "):
        return "{" + head + "}" + tail
    lead = len(text) - len(text.lstrip())
    if lead > 1:
        text = "{^" + text[:lead] + "^}" + text[lead:]
    trail = len(text) - len(text.rstrip())
    if trail > 1:
        text = text[:-trail] + "{^" + text[-trail:] + "^}"
    return text
def parse_rtfcre(text, normalize=lambda s: s, skip_errors=True):
    """Yield (steno, translation) pairs from an RTF/CRE dictionary document.

    Args:
        text: full RTF/CRE document contents.
        normalize: callable applied to each steno string before yielding.
        skip_errors: when True, parse errors are logged and recovered from;
            when False, RtfParseError is raised.

    Raises:
        BadRtfError: if the document does not start with a valid RTF header.
        RtfParseError: on malformed input, when skip_errors is False.
    """
    not_text = r"\{}"
    style_rx = re.compile("s[0-9]+")
    tokenizer = RtfTokenizer(text)
    next_token = tokenizer.next_token
    rewind_token = tokenizer.rewind_token
    # Check header.
    if next_token() != "{" or next_token() != r"\rtf1":
        raise BadRtfError("invalid header")
    # Parse header/document.
    g_destination, g_text = "rtf1", ""
    group_stack = deque()
    stylesheet = {}
    steno = None
    while True:
        token = next_token()
        # EOF.
        if token is None:
            err = RtfParseError(
                tokenizer.lnum, tokenizer.cnum, "unexpected end of file"
            )
            if not skip_errors:
                raise err
            log.error("%s", err)
            break
        # Group start.
        if token == "{":
            # Always rewind the last token?
            rewind = False
            # Is it an ignored group?
            is_ignored = False
            destination = None
            token = next_token()
            # Ignored?
            if token == r"\*":
                token = next_token()
                is_ignored = True
            # Destination?
            if token[0] == "\\":
                destination = token[1:]
                # Steno.
                if destination == "cxs":
                    if group_stack:
                        err = RtfParseError(
                            tokenizer.lnum,
                            tokenizer.cnum,
                            "starting new mapping, but previous is unfinished",
                        )
                        if not skip_errors:
                            raise err
                        log.error("%s", err)
                        # Simulate missing group end(s).
                        assert group_stack[0][0] == "rtf1"
                        rewind_token(token)
                        if is_ignored:
                            rewind_token(r"\*")
                        rewind_token("{")
                        for __ in range(len(group_stack)):
                            rewind_token("}")
                        continue
                    if steno is not None:
                        yield normalize(steno), finalize_translation(g_text)
                        steno = None
                    is_ignored = False
                    # Reset text.
                    g_text = ""
                elif destination in {
                    # Fingerspelling.
                    "cxfing",
                    # Stenovations extensions...
                    "cxsvatdictflags",
                    # Plover macro.
                    "cxplovermacro",
                    # Plover meta.
                    "cxplovermeta",
                }:
                    is_ignored = False
                elif style_rx.fullmatch(destination):
                    pass
                else:
                    # In the case of e.g. `{\par...`,
                    # `\par` must be handled as a
                    # control word.
                    rewind = True
            else:
                rewind = True
            if is_ignored:
                # Skip ignored content.
                stack_depth = 1
                while True:
                    token = next_token()
                    if token is None:
                        err = RtfParseError(
                            tokenizer.lnum, tokenizer.cnum, "unexpected end of file"
                        )
                        if not skip_errors:
                            raise err
                        log.error("%s", err)
                        break
                    if token == "{":
                        stack_depth += 1
                    elif token == "}":
                        stack_depth -= 1
                        if not stack_depth:
                            break
                if stack_depth:
                    break
                continue
            group_stack.append((g_destination, g_text))
            g_destination, g_text = destination, ""
            if rewind:
                rewind_token(token)
            continue
        # Group end.
        if token == "}":
            if not group_stack:
                token = next_token()
                if token is None:
                    # The end...
                    break
                err = RtfParseError(
                    tokenizer.lnum,
                    tokenizer.cnum,
                    "expected end of file, got: %r",
                    token[0],
                )
                if not skip_errors:
                    raise err
                log.error("%s", err)
                rewind_token(token)
                continue
            # Steno.
            if g_destination == "cxs":
                steno = g_text
                text = ""
            # Punctuation.
            elif g_destination == "cxp":
                text = g_text.strip()
                if text in {".", "!", "?", ",", ";", ":"}:
                    text = "{" + text + "}"
                elif text == "'":
                    text = "{^'}"
                elif text in ("-", "/"):
                    text = "{^" + text + "^}"
                else:
                    # Show unknown punctuation as given.
                    text = "{^" + g_text + "^}"
            # Stenovations extensions...
            elif g_destination == "cxsvatdictflags":
                if "N" in g_text:
                    text = "{-|}"
                else:
                    text = ""
            # Fingerspelling.
            elif g_destination == "cxfing":
                text = "{&" + g_text + "}"
            # Plover macro.
            elif g_destination == "cxplovermacro":
                text = "=" + g_text
            # Plover meta.
            elif g_destination == "cxplovermeta":
                text = "{" + g_text + "}"
            # Style declaration.
            elif (
                g_destination is not None
                and style_rx.fullmatch(g_destination)
                and group_stack[-1][0] == "stylesheet"
            ):
                stylesheet[g_destination] = g_text
                # Bug fix: every other branch resets `text`; without this,
                # `text` is unbound (NameError) on the first style group, or
                # stale text from a previous group leaks into the parent.
                text = ""
            else:
                text = g_text
            g_destination, g_text = group_stack.pop()
            g_text += text
            continue
        # Control char/word.
        if token[0] == "\\":
            ctrl = token[1:]
            text = {
                # Ignore.
                "*": "",
                # Hard space.
                "~": "{^ ^}",
                # Non-breaking hyphen.
                "_": "{^-^}",
                # Escaped newline: \par.
                "": "\n\n",
                "\n": "\n\n",
                "\r": "\n\n",
                # Escaped characters.
                "\\": "\\",
                "{": "{",
                "}": "}",
                "-": "-",
                # Line break.
                "line": "\n",
                # Paragraph break.
                "par": "\n\n",
                # Tab.
                "tab": "\t",
                # Force Cap.
                "cxfc": "{-|}",
                # Force Lower Case.
                "cxfl": "{>}",
            }.get(ctrl)
            if text is not None:
                g_text += text
            # Delete Spaces.
            elif ctrl == "cxds":
                token = next_token()
                if token is None or token[0] in not_text:
                    g_text += "{^}"
                    rewind_token(token)
                else:
                    text = token
                    token = next_token()
                    if token == r"\cxds":
                        # Infix
                        g_text += "{^" + text + "^}"
                    else:
                        # Prefix.
                        g_text += "{^" + text + "}"
                        rewind_token(token)
            # Delete Last Stroke.
            elif ctrl == "cxdstroke":
                g_text = "=undo"
            # Fingerspelling.
            elif ctrl == "cxfing":
                token = next_token()
                if token is None or token[0] in not_text:
                    err = RtfParseError(
                        tokenizer.lnum, tokenizer.cnum, "expected text, got: %r", token
                    )
                    if not skip_errors:
                        raise err
                    log.error("%s", err)
                    rewind_token(token)
                else:
                    g_text += "{&" + token + "}"
            elif style_rx.fullmatch(ctrl):
                # Workaround for caseCATalyst declaring
                # new styles without a preceding \par.
                if not g_text.endswith("\n\n"):
                    g_text += "\n\n"
                # Indent continuation styles.
                if stylesheet.get(ctrl, "").startswith("Contin"):
                    g_text += " "
            continue
        # Text.
        text = token
        token = next_token()
        if token == r"\cxds":
            # Suffix.
            text = "{" + text + "^}"
        else:
            rewind_token(token)
        g_text += text
    if steno is not None:
        yield normalize(steno), finalize_translation(g_text)
def main(todo, filename):
    """Debug entry point: tokenize or parse `filename`, optionally dumping output.

    `todo` is one of: tokenize, parse, dump_tokenize, dump_parse.
    """
    with open(filename, "rb") as fp:
        text = fp.read().decode("cp1252")
    if todo == "tokenize":
        # Exhaust the tokenizer, discarding tokens.
        next_token = RtfTokenizer(text).next_token
        while next_token() is not None:
            pass
        return
    if todo == "parse":
        # Exhaust the parser, discarding mappings.
        for __ in parse_rtfcre(text):
            pass
        return
    if todo == "dump_tokenize":
        tokenizer = RtfTokenizer(text)
        token = tokenizer.next_token()
        while token is not None:
            print("%3u:%-3u %r" % (tokenizer.lnum + 1, tokenizer.cnum + 1, token))
            token = tokenizer.next_token()
        return
    if todo == "dump_parse":
        for mapping in parse_rtfcre(text):
            print(mapping)
        return
    raise ValueError(todo)
if __name__ == "__main__":
    # Usage: python rtfcre_parse.py {tokenize|parse|dump_tokenize|dump_parse} <file.rtf>
    assert len(sys.argv) == 3
    main(sys.argv[1], sys.argv[2])
|
canvas | flowgraph | """
Copyright 2007-2011, 2016q Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
import ast
import functools
import random
from itertools import count
from shutil import which as find_executable
from gi.repository import GLib, Gtk
from ...core import Messages
from ...core.FlowGraph import FlowGraph as CoreFlowgraph
from .. import Actions, Bars, Constants, Dialogs, MainWindow, Utils
from ..external_editor import ExternalEditor
from . import colors
from .connection import DummyConnection
from .drawable import Drawable
class _ContextMenu(object):
    """
    Help with drawing the right click context menu
    """

    def __init__(self, main_window):
        # Build the menu from the declarative model and anchor it to the window.
        self._menu = Gtk.Menu.new_from_model(Bars.ContextMenu())
        self._menu.attach_to_widget(main_window)
        # In GTK 3.22 Menu.popup was deprecated, we want to popup at the
        # pointer, so use that new function instead if we can.
        # NOTE: assigning the instance attribute shadows the popup() method
        # below, so on GTK >= 3.22 the fallback method is never called.
        if Gtk.check_version(3, 22, 0) is None:
            self.popup = self._menu.popup_at_pointer

    def popup(self, event):
        # Fallback for GTK < 3.22: pop up at the event's button/time.
        self._menu.popup(None, None, None, None, event.button, event.time)
class FlowGraph(CoreFlowgraph, Drawable):
"""
FlowGraph is the data structure to store graphical signal blocks,
graphical inputs and outputs,
and the connections between inputs and outputs.
"""
def __init__(self, parent, **kwargs):
    """
    FlowGraph constructor.
    Create a list for signal blocks and connections. Connect mouse handlers.
    """
    # Bug fix: super(self.__class__, self) recurses infinitely if this class
    # is ever subclassed; name the class explicitly instead.
    super(FlowGraph, self).__init__(parent, **kwargs)
    Drawable.__init__(self)
    # We need to get the main window object so the context menu can be to the
    # registered actions
    app = Gtk.Application.get_default()
    main_window = None
    for window in app.get_windows():
        if isinstance(window, MainWindow.MainWindow):
            main_window = window
            break
    self.drawing_area = None
    # important vars dealing with mouse event tracking
    self.element_moved = False
    self.mouse_pressed = False
    self.press_coor = (0, 0)
    # selected
    self.selected_elements = set()
    self._old_selected_port = None
    self._new_selected_port = None
    # current mouse hover element
    self.element_under_mouse = None
    # context menu
    self._context_menu = _ContextMenu(main_window)
    self.get_context_menu = lambda: self._context_menu
    # in-progress connection being dragged, if any
    self._new_connection = None
    self._elements_to_draw = []
    self._external_updaters = {}
def _get_unique_id(self, base_id=""):
    """
    Get a unique id starting with the base id.

    Args:
        base_id: the id starts with this and appends a count

    Returns:
        a unique id
    """
    taken = set(blk.name for blk in self.blocks)
    for index in count():
        candidate = "{}_{}".format(base_id, index)
        if candidate not in taken:
            return candidate
def install_external_editor(self, param, parent=None):
    """Open (or re-open) an external editor for a block parameter.

    One ExternalEditor instance is kept per (block name, param key) target;
    edits are applied back on the GLib main loop via idle_add.
    """
    target = (param.parent_block.name, param.key)
    if target in self._external_updaters:
        # Reuse the already-running editor for this parameter.
        editor = self._external_updaters[target]
    else:
        config = self.parent_platform.config
        editor = find_executable(config.editor) or Dialogs.choose_editor(
            parent, config
        )  # todo: pass in parent
        if not editor:
            # User cancelled editor selection: nothing to do.
            return
        updater = functools.partial(
            self.handle_external_editor_change, target=target
        )
        editor = self._external_updaters[target] = ExternalEditor(
            editor=editor,
            name=target[0],
            value=param.get_value(),
            # Marshal change notifications onto the GTK main loop.
            callback=functools.partial(GLib.idle_add, updater),
        )
        editor.start()
    try:
        editor.open_editor()
    except Exception as e:
        # Problem launching the editor. Need to select a new editor.
        Messages.send(
            ">>> Error opening an external editor. Please select a different editor.\n"
        )
        # Reset the editor to force the user to select a new one.
        self.parent_platform.config.editor = ""
        self.remove_external_editor(target=target)
def remove_external_editor(self, target=None, param=None):
    """Stop and forget the external editor for `target` (or for `param`'s target)."""
    if target is None:
        target = (param.parent_block.name, param.key)
    if target in self._external_updaters:
        updater = self._external_updaters[target]
        updater.stop()
        del self._external_updaters[target]
def handle_external_editor_change(self, new_value, target):
    """Apply an externally edited value back to its parameter.

    If the block no longer exists, the editor for `target` is discarded.
    """
    try:
        block_id, param_key = target
        self.get_block(block_id).params[param_key].set_value(new_value)
    except (IndexError, ValueError):  # block no longer exists
        self.remove_external_editor(target=target)
        return
    Actions.EXTERNAL_UPDATE()
def add_new_block(self, key, coor=None):
    """
    Add a block of the given key to this flow graph.

    Args:
        key: the block key
        coor: an optional coordinate or None for random

    Returns:
        the id assigned to the new block
    """
    block_id = self._get_unique_id(key)
    # Derive a position within the visible scroll area when none was given.
    scroll_pane = self.drawing_area.get_parent().get_parent()
    h_adj = scroll_pane.get_hadjustment()
    v_adj = scroll_pane.get_vadjustment()
    if coor is None:
        x = int(random.uniform(0.25, 0.75) * h_adj.get_page_size() + h_adj.get_value())
        y = int(random.uniform(0.25, 0.75) * v_adj.get_page_size() + v_adj.get_value())
        coor = (x, y)
    # get the new block
    block = self.new_block(key)
    block.coordinate = coor
    block.params["id"].set_value(block_id)
    Actions.ELEMENT_CREATE()
    return block_id
def make_connection(self):
    """this selection and the last were ports, try to connect them"""
    # A dragged connection with a real sink supplies both endpoints.
    if self._new_connection and self._new_connection.has_real_sink:
        self._old_selected_port = self._new_connection.source_port
        self._new_selected_port = self._new_connection.sink_port
    source = self._old_selected_port
    sink = self._new_selected_port
    if not (source and sink):
        return False
    try:
        self.connect(source, sink)
        Actions.ELEMENT_CREATE()
    except Exception as e:
        Messages.send_fail_connection(e)
    self._old_selected_port = None
    self._new_selected_port = None
    return True
def update(self):
    """
    Call the top level rewrite and validate.
    Call the top level create labels and shapes.
    """
    self.rewrite()
    self.validate()
    # Refresh the draw list and the cached labels/shapes to match.
    self.update_elements_to_draw()
    self.create_labels()
    self.create_shapes()
def reload(self):
    """
    Reload flow-graph (with updated blocks)

    Returns:
        False if some error occurred during import
    """
    data = self.export_data()
    if not data:
        return False
    self.unselect()
    success = self.import_data(data)
    self.update()
    return success
###########################################################################
# Copy Paste
###########################################################################
def copy_to_clipboard(self):
    """
    Copy the selected blocks and connections into the clipboard.

    Returns:
        the clipboard
    """
    blocks = list(self.selected_blocks())
    if not blocks:
        return None
    # Top-left corner of the selection, used as the paste origin.
    x_min = min(blk.coordinate[0] for blk in blocks)
    y_min = min(blk.coordinate[1] for blk in blocks)
    # Only keep connections whose endpoints are both selected.
    connections = [
        con
        for con in self.connections
        if con.source_block in blocks and con.sink_block in blocks
    ]
    return (
        (x_min, y_min),
        [blk.export_data() for blk in blocks],
        [con.export_data() for con in connections],
    )
def paste_from_clipboard(self, clipboard):
    """
    Paste the blocks and connections from the clipboard.

    Args:
        clipboard: the nested data of blocks, connections
    """
    (x_min, y_min), blocks_n, connections_n = clipboard
    # recalc the position: offset pasted blocks into the visible viewport
    scroll_pane = self.drawing_area.get_parent().get_parent()
    h_adj = scroll_pane.get_hadjustment()
    v_adj = scroll_pane.get_vadjustment()
    x_off = h_adj.get_value() - x_min + h_adj.get_page_size() / 4
    y_off = v_adj.get_value() - y_min + v_adj.get_page_size() / 4
    # Empty (or options-only) flow graph: paste at the original position.
    if len(self.get_elements()) <= 1:
        x_off, y_off = 0, 0
    # create blocks
    pasted_blocks = {}
    for block_n in blocks_n:
        block_key = block_n.get("id")
        if block_key == "options":
            # Never paste the special options block.
            continue
        block_name = block_n.get("name")
        # Verify whether a block with this name exists before adding it
        if block_name in (blk.name for blk in self.blocks):
            block_n = block_n.copy()
            block_n["name"] = self._get_unique_id(block_name)
        block = self.new_block(block_key)
        if not block:
            continue  # unknown block was pasted (e.g. dummy block)
        block.import_data(**block_n)
        # Keyed by the ORIGINAL name so connection data below still resolves.
        pasted_blocks[block_name] = block  # that is before any rename
        block.move((x_off, y_off))
        # Nudge diagonally until the block no longer overlaps an existing one.
        while any(
            Utils.align_to_grid(block.coordinate)
            == Utils.align_to_grid(other.coordinate)
            for other in self.blocks
            if other is not block
        ):
            block.move((Constants.CANVAS_GRID_SIZE, Constants.CANVAS_GRID_SIZE))
            # shift all following blocks
            x_off += Constants.CANVAS_GRID_SIZE
            y_off += Constants.CANVAS_GRID_SIZE
    self.selected_elements = set(pasted_blocks.values())
    # update before creating connections
    self.update()
    # create connections
    for src_block, src_port, dst_block, dst_port in connections_n:
        source = pasted_blocks[src_block].get_source(src_port)
        sink = pasted_blocks[dst_block].get_sink(dst_port)
        connection = self.connect(source, sink)
        self.selected_elements.add(connection)
###########################################################################
# Modify Selected
###########################################################################
def type_controller_modify_selected(self, direction):
    """
    Change the registered type controller for the selected signal blocks.

    Args:
        direction: +1 or -1

    Returns:
        true for change
    """
    # Apply to every selected block (no short-circuiting).
    changed = False
    for blk in self.selected_blocks():
        if blk.type_controller_modify(direction):
            changed = True
    return changed
def port_controller_modify_selected(self, direction):
    """
    Change port controller for the selected signal blocks.

    Args:
        direction: +1 or -1

    Returns:
        true for changed
    """
    # Apply to every selected block (no short-circuiting).
    changed = False
    for blk in self.selected_blocks():
        if blk.port_controller_modify(direction):
            changed = True
    return changed
def change_state_selected(self, new_state):
    """
    Enable/disable the selected blocks.

    Args:
        new_state: a block state

    Returns:
        true if changed
    """
    any_changed = False
    for blk in self.selected_blocks():
        if blk.state != new_state:
            any_changed = True
        blk.state = new_state
    return any_changed
def move_selected(self, delta_coordinate):
    """
    Move the element and by the change in coordinates.

    Args:
        delta_coordinate: the change in coordinates
    """
    blocks = list(self.selected_blocks())
    if not blocks:
        return
    # Find the top-left corner of the selection.
    min_x, min_y = self.selected_block.coordinate
    for blk in blocks:
        x, y = blk.coordinate
        min_x = min(min_x, x)
        min_y = min(min_y, y)
    # Clamp the delta so no block ends up at a negative coordinate.
    dx = max(delta_coordinate[0], -min_x)
    dy = max(delta_coordinate[1], -min_y)
    for blk in blocks:
        blk.move((dx, dy))
    self.element_moved = True
def align_selected(self, calling_action=None):
    """
    Align the selected blocks.

    Args:
        calling_action: the action initiating the alignment

    Returns:
        True if changed, otherwise False
    """
    blocks = list(self.selected_blocks())
    if calling_action is None or not blocks:
        return False
    # compute common boundary of selected objects
    min_x, min_y = max_x, max_y = blocks[0].coordinate
    for blk in blocks:
        x, y = blk.coordinate
        min_x, min_y = min(min_x, x), min(min_y, y)
        # Bottom-right corner of this block extends the boundary.
        x += blk.width
        y += blk.height
        max_x, max_y = max(max_x, x), max(max_y, y)
    ctr_x, ctr_y = (max_x + min_x) / 2, (max_y + min_y) / 2
    # align the blocks as requested; unknown actions leave positions untouched
    transform = {
        Actions.BLOCK_VALIGN_TOP: lambda x, y, w, h: (x, min_y),
        Actions.BLOCK_VALIGN_MIDDLE: lambda x, y, w, h: (x, ctr_y - h / 2),
        Actions.BLOCK_VALIGN_BOTTOM: lambda x, y, w, h: (x, max_y - h),
        Actions.BLOCK_HALIGN_LEFT: lambda x, y, w, h: (min_x, y),
        Actions.BLOCK_HALIGN_CENTER: lambda x, y, w, h: (ctr_x - w / 2, y),
        Actions.BLOCK_HALIGN_RIGHT: lambda x, y, w, h: (max_x - w, y),
    }.get(calling_action, lambda *args: args)
    for blk in blocks:
        x, y = blk.coordinate
        blk.coordinate = transform(x, y, blk.width, blk.height)
    return True
def rotate_selected(self, rotation):
    """
    Rotate the selected blocks by multiples of 90 degrees.

    Args:
        rotation: the rotation in degrees

    Returns:
        true if changed, otherwise false.
    """
    if not any(self.selected_blocks()):
        return False
    # initialize min and max coordinates
    min_x, min_y = max_x, max_y = self.selected_block.coordinate
    # First pass: rotate each block in place and grow the bounding box.
    for blk in self.selected_blocks():
        blk.rotate(rotation)
        x, y = blk.coordinate
        min_x, min_y = min(min_x, x), min(min_y, y)
        max_x, max_y = max(max_x, x), max(max_y, y)
    # Second pass: revolve every block around the selection's center point.
    ctr_x, ctr_y = (max_x + min_x) / 2, (max_y + min_y) / 2
    for blk in self.selected_blocks():
        x, y = blk.coordinate
        rx, ry = Utils.get_rotated_coordinate((x - ctr_x, y - ctr_y), rotation)
        blk.coordinate = (rx + ctr_x, ry + ctr_y)
    return True
def remove_selected(self):
    """
    Remove every currently selected element from the flow graph.

    Returns:
        True if at least one element was removed, otherwise False.
    """
    did_remove = False
    for element in self.selected_elements:
        self.remove_element(element)
        did_remove = True
    return did_remove
def update_selected(self):
    """
    Drop references to elements that no longer exist in the flow graph,
    and refresh highlighting so that exactly the selected elements are
    highlighted.
    """
    selection = self.selected_elements
    live_elements = self.get_elements()
    # purge selections that refer to deleted elements
    for candidate in list(selection):
        if candidate not in live_elements:
            selection.remove(candidate)
    # forget remembered ports whose parent element is gone
    if self._old_selected_port and self._old_selected_port.parent not in live_elements:
        self._old_selected_port = None
    if self._new_selected_port and self._new_selected_port.parent not in live_elements:
        self._new_selected_port = None
    # highlight exactly the current selection
    for element in live_elements:
        element.highlighted = element in selection
###########################################################################
# Draw stuff
###########################################################################
def update_elements_to_draw(self):
    """Rebuild the draw list, honoring the hide-disabled-blocks and
    hide-variables toggles."""
    skip_disabled = Actions.TOGGLE_HIDE_DISABLED_BLOCKS.get_active()
    skip_variables = Actions.TOGGLE_HIDE_VARIABLES.get_active()

    def draw_order(elem):
        # sort ascending: highlighted elements end up last (drawn on top)
        return elem.highlighted, elem.is_block, elem.enabled

    self._elements_to_draw[:] = [
        element
        for element in sorted(self.get_elements(), key=draw_order)
        if not (skip_disabled and not element.enabled)
        and not (skip_variables and (element.is_variable or element.is_import))
    ]
def create_labels(self, cr=None):
    """Create labels for every element in the draw list.

    Args:
        cr: optional cairo context passed through to the elements
    """
    for element in self._elements_to_draw:
        element.create_labels(cr)
def create_shapes(self):
    """Create shapes for all drawable elements, blocks first.

    TODO - this is a workaround for bus ports not having a proper coordinate
    until the shape is drawn. The workaround is to draw blocks before connections
    """
    blocks = [e for e in self._elements_to_draw if e.is_block]
    non_blocks = [e for e in self._elements_to_draw if not e.is_block]
    for element in blocks + non_blocks:
        element.create_shapes()
def _drawables(self):
    """Yield draw callables in paint order: block comments first, then the
    in-progress connection, then unselected elements, then selected
    elements (so the selection is painted on top)."""
    # todo: cache that
    show_comments = Actions.TOGGLE_SHOW_BLOCK_COMMENTS.get_active()
    hide_disabled_blocks = Actions.TOGGLE_HIDE_DISABLED_BLOCKS.get_active()
    # block comments are drawn first so everything else paints over them
    for element in self._elements_to_draw:
        if element.is_block and show_comments and element.enabled:
            yield element.draw_comment
    # the connection currently being dragged, if any
    if self._new_connection is not None:
        yield self._new_connection.draw
    for element in self._elements_to_draw:
        if element not in self.selected_elements:
            yield element.draw
    # selected elements are drawn last so they appear on top
    for element in self.selected_elements:
        if element.enabled or not hide_disabled_blocks:
            yield element.draw
def draw(self, cr):
    """Draw blocks connections comment and select rectangle"""
    for draw_element in self._drawables():
        # isolate each element's cairo state changes from the next
        cr.save()
        draw_element(cr)
        cr.restore()
    # draw the rubber-band rectangle while multi-selecting with the mouse
    draw_multi_select_rectangle = (
        self.mouse_pressed
        and (not self.selected_elements or self.drawing_area.ctrl_mask)
        and not self._new_connection
    )
    if draw_multi_select_rectangle:
        x1, y1 = self.press_coor
        x2, y2 = self.coordinate
        # normalize press/current points to top-left corner plus width/height
        x, y = int(min(x1, x2)), int(min(y1, y2))
        w, h = int(abs(x1 - x2)), int(abs(y1 - y2))
        cr.set_source_rgba(
            colors.HIGHLIGHT_COLOR[0],
            colors.HIGHLIGHT_COLOR[1],
            colors.HIGHLIGHT_COLOR[2],
            0.5,
        )
        # translucent fill first, then the outline
        cr.rectangle(x, y, w, h)
        cr.fill()
        cr.rectangle(x, y, w, h)
        cr.stroke()
##########################################################################
# selection handling
##########################################################################
def update_selected_elements(self):
    """
    Update the selected elements.
    The update behavior depends on the state of the mouse button.
    When the mouse button pressed the selection will change when
    the control mask is set or the new selection is not in the current group.
    When the mouse button is released the selection will change when
    the mouse has moved and the control mask is set or the current group is empty.
    Attempt to make a new connection if the old and ports are filled.
    If the control mask is set, merge with the current elements.
    """
    selected_elements = None  # None means "leave the selection unchanged"
    if self.mouse_pressed:
        new_selections = self.what_is_selected(self.coordinate)
        # update the selections if the new selection is not in the current selections
        # allows us to move entire selected groups of elements
        if not new_selections:
            selected_elements = set()
        elif self.drawing_area.ctrl_mask or self.selected_elements.isdisjoint(
            new_selections
        ):
            selected_elements = new_selections
        # show/hide port labels depending on which port was (de)selected
        if self._old_selected_port:
            self._old_selected_port.force_show_label = False
            self.create_shapes()
            self.drawing_area.queue_draw()
        elif self._new_selected_port:
            self._new_selected_port.force_show_label = True
    else:  # called from a mouse release
        if (
            not self.element_moved
            and (not self.selected_elements or self.drawing_area.ctrl_mask)
            and not self._new_connection
        ):
            # rubber-band (area) selection between press and release points
            selected_elements = self.what_is_selected(
                self.coordinate, self.press_coor
            )
    # this selection and the last were ports, try to connect them
    if self.make_connection():
        return
    # update selected elements
    if selected_elements is None:
        return
    # if ctrl, set the selected elements to the union - intersection of old and new
    if self.drawing_area.ctrl_mask:
        self.selected_elements ^= selected_elements
    else:
        self.selected_elements.clear()
        self.selected_elements.update(selected_elements)
    Actions.ELEMENT_SELECT()
def what_is_selected(self, coor, coor_m=None):
    """
    What is selected?
    At the given coordinate, return the elements found to be selected.
    If coor_m is unspecified, return a list of only the first element found to be selected:
    Iterate though the elements backwards since top elements are at the end of the list.
    If an element is selected, place it at the end of the list so that is is drawn last,
    and hence on top. Update the selected port information.

    Args:
        coor: the coordinate of the mouse click
        coor_m: the coordinate for multi select

    Returns:
        the selected blocks and connections or an empty list
    """
    selected_port = None
    selected = set()
    # check the elements (reversed: top-most drawn elements are hit-tested first)
    for element in reversed(self._elements_to_draw):
        selected_element = element.what_is_selected(coor, coor_m)
        if not selected_element:
            continue
        # update the selected port information
        if selected_element.is_port:
            if not coor_m:
                selected_port = selected_element
            # a clicked port selects its parent block
            selected_element = selected_element.parent_block
        selected.add(selected_element)
        if not coor_m:
            # single-point selection stops at the first (top-most) hit
            break
    if selected_port and selected_port.is_source:
        # clicking a source port starts dragging a new connection
        # rather than selecting the parent block
        selected.remove(selected_port.parent_block)
        self._new_connection = DummyConnection(selected_port, coordinate=coor)
        self.drawing_area.queue_draw()
    # update selected ports
    if selected_port is not self._new_selected_port:
        self._old_selected_port = self._new_selected_port
        self._new_selected_port = selected_port
    return selected
def unselect(self):
    """Reset the selection to the empty set."""
    self.selected_elements.clear()
def select_all(self):
    """Select every element currently in the draw list."""
    selection = self.selected_elements
    selection.clear()
    selection.update(self._elements_to_draw)
def selected_blocks(self):
    """
    Get a group of selected blocks.

    Returns:
        generator over the blocks among the selected elements
        (iterates a snapshot of the selection, so the set may be
        mutated while consuming it)
    """
    snapshot = self.selected_elements.copy()
    return (element for element in snapshot if element.is_block)
@property
def selected_block(self):
    """
    The first selected block when a block (or one of its ports) is
    selected, otherwise None.
    """
    return next(iter(self.selected_blocks()), None)
def get_selected_elements(self):
    """
    Get the group of selected elements.

    Returns:
        the live set of currently selected elements in this flow graph
        (not a copy)
    """
    return self.selected_elements
def get_selected_element(self):
    """
    Get a single selected element (arbitrary when several are selected).

    Returns:
        a block, port, or connection or None
    """
    for element in self.selected_elements:
        return element
    return None
##########################################################################
# Event Handlers
##########################################################################
def handle_mouse_context_press(self, coordinate, event):
    """
    The context mouse button was pressed:
    If no elements were selected, perform re-selection at this coordinate.
    Then, show the context menu at the mouse click location.
    """
    selections = self.what_is_selected(coordinate)
    if not selections.intersection(self.selected_elements):
        # right-clicked outside the current selection: re-select under cursor,
        # simulating a press/release pair so the normal selection path runs
        self.coordinate = coordinate
        self.mouse_pressed = True
        self.update_selected_elements()
        self.mouse_pressed = False
    # discard any connection drag started by what_is_selected() on a source port
    if self._new_connection:
        self._new_connection = None
        self.drawing_area.queue_draw()
    self._context_menu.popup(event)
def handle_mouse_selector_press(self, double_click, coordinate):
    """
    The selector mouse button was pressed:
    Find the selected element. Attempt a new connection if possible.
    Open the block params window on a double click.
    Update the selection state of the flow graph.
    """
    self.press_coor = coordinate
    self.coordinate = coordinate
    self.mouse_pressed = True
    if double_click:
        # a double click re-selects only the element under the cursor
        self.unselect()
    self.update_selected_elements()
    if double_click and self.selected_block:
        # clear the (logical) press state before opening the params dialog
        self.mouse_pressed = False
        Actions.BLOCK_PARAM_MODIFY()
def handle_mouse_selector_release(self, coordinate):
    """
    The selector mouse button was released:
    Update the state, handle motion (dragging).
    And update the selected flowgraph elements.
    """
    self.coordinate = coordinate
    self.mouse_pressed = False
    if self.element_moved:
        # notify that a drag finished, then reset the moved flag
        Actions.BLOCK_MOVE()
        self.element_moved = False
    self.update_selected_elements()
    # drop an unfinished connection drag (no sink was reached)
    if self._new_connection:
        self._new_connection = None
        self.drawing_area.queue_draw()
def handle_mouse_motion(self, coordinate):
    """
    The mouse has moved, respond to mouse dragging or notify elements
    Move a selected element to the new coordinate.
    Auto-scroll the scroll bars at the boundaries.
    """
    # to perform a movement, the mouse must be pressed
    # (no longer checking pending events via Gtk.events_pending() - always true in Windows)
    redraw = False
    if not self.mouse_pressed or self._new_connection:
        # hover handling (element under mouse, port-label auto show/hide);
        # note _handle_mouse_motion_move may return None, treated as falsy here
        redraw = self._handle_mouse_motion_move(coordinate)
    if self.mouse_pressed:
        # drag handling (moving elements or routing a new connection)
        redraw = redraw or self._handle_mouse_motion_drag(coordinate)
    if redraw:
        self.drawing_area.queue_draw()
def _handle_mouse_motion_move(self, coordinate):
    """Track which element is under the mouse and fire mouse_over/mouse_out.

    Returns a truthy value when a redraw is needed; may also return None
    via the early exit below.
    """
    # only continue if mouse-over stuff is enabled (just the auto-hide port label stuff for now)
    redraw = False
    for element in self._elements_to_draw:
        over_element = element.what_is_selected(coordinate)
        if not over_element:
            continue
        if over_element != self.element_under_mouse:  # over sth new
            if self.element_under_mouse:
                # mouse_out()/mouse_over() may return None; coerce to bool
                redraw |= self.element_under_mouse.mouse_out() or False
            self.element_under_mouse = over_element
            redraw |= over_element.mouse_over() or False
        break
    else:
        # no element hit: leave whatever we were hovering over
        if self.element_under_mouse:
            redraw |= self.element_under_mouse.mouse_out() or False
            self.element_under_mouse = None
    # NOTE(review): returns None (not False) when port-label auto-hide is off,
    # skipping the create_shapes() refresh; callers treat None as falsy — confirm intended
    if not Actions.TOGGLE_AUTO_HIDE_PORT_LABELS.get_active():
        return
    if redraw:
        # self.create_labels()
        self.create_shapes()
    return redraw
def _handle_mouse_motion_drag(self, coordinate):
    """Handle mouse motion while the button is held: route an in-progress
    connection, or move the selected elements.

    Returns True when a redraw is needed.
    """
    redraw = False
    # remove the connection if selected in drag event
    if (
        len(self.selected_elements) == 1
        and self.get_selected_element().is_connection
    ):
        Actions.ELEMENT_DELETE()
        redraw = True

    if self._new_connection:
        # while dragging a new connection: snap to a sink port under the
        # mouse, otherwise follow the cursor
        e = self.element_under_mouse
        if e and e.is_port and e.is_sink:
            self._new_connection.update(sink_port=self.element_under_mouse)
        else:
            self._new_connection.update(coordinate=coordinate, rotation=0)
        return True
    # move the selected elements and record the new coordinate
    x, y = coordinate
    if not self.drawing_area.ctrl_mask:
        X, Y = self.coordinate
        dX, dY = x - X, y - Y
        if Actions.TOGGLE_SNAP_TO_GRID.get_active() or self.drawing_area.mod1_mask:
            # quantize the delta to whole grid cells
            dX, dY = (
                int(round(dX / Constants.CANVAS_GRID_SIZE)),
                int(round(dY / Constants.CANVAS_GRID_SIZE)),
            )
            dX, dY = (
                dX * Constants.CANVAS_GRID_SIZE,
                dY * Constants.CANVAS_GRID_SIZE,
            )
        else:
            dX, dY = int(round(dX)), int(round(dY))
        if dX != 0 or dY != 0:
            self.move_selected((dX, dY))
            # only advance the reference point by the applied (snapped) delta
            self.coordinate = (X + dX, Y + dY)
            redraw = True
    return redraw
def get_extents(self):
    """Return (min_x, min_y, max_x, max_y) over all drawn elements,
    including comment extents when block comments are shown."""
    show_comments = Actions.TOGGLE_SHOW_BLOCK_COMMENTS.get_active()
    min_x = min_y = 10000000
    max_x = max_y = 0
    for element in self._elements_to_draw:
        boxes = [element.get_extents()]
        if element.is_block and show_comments and element.enabled:
            boxes.append(element.get_extents_comment())
        for box in boxes:
            min_x = min(min_x, box[0])
            min_y = min(min_y, box[1])
            max_x = max(max_x, box[2])
            max_y = max(max_y, box[3])
    return (min_x, min_y, max_x, max_y)
|
TaskManagement | OnExitCallbackManager | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import TYPE_CHECKING, Callable, List
from UM.Logger import Logger
if TYPE_CHECKING:
from cura.CuraApplication import CuraApplication
#
# This class manages all registered upon-exit checks
# that need to be performed when the application tries to exit.
# For example, show a confirmation dialog when there is USB printing in progress.
# All callbacks will be called in the order of when they were registered.
# If all callbacks "pass", for example:
# if the user clicks "yes" on the exit confirmation dialog
# and nothing else is blocking the exit, then the application will quit.
#
class OnExitCallbackManager:
    """Manages the registered upon-exit checks that run when the application
    tries to quit (e.g. a confirmation dialog while USB printing is active).

    Callbacks are invoked in registration order. If every callback "passes"
    (nothing blocks the exit), the application is told to actually quit.
    """

    def __init__(self, application: "CuraApplication") -> None:
        self._application = application
        self._on_exit_callback_list = []  # type: List[Callable]
        self._current_callback_idx = 0
        self._is_all_checks_passed = False

    def addCallback(self, callback: Callable) -> None:
        """Register another on-exit check."""
        self._on_exit_callback_list.append(callback)
        Logger.log("d", "on-app-exit callback [%s] added.", callback)

    def resetCurrentState(self) -> None:
        """Reset progress so the next exit attempt runs all callbacks again."""
        self._current_callback_idx = 0
        self._is_all_checks_passed = False

    def getIsAllChecksPassed(self) -> bool:
        """Return True when every registered check has passed."""
        return self._is_all_checks_passed

    def triggerNextCallback(self) -> None:
        """Schedule the next callback; when none remain, mark all checks as
        passed and tell the application to quit."""
        pending = self._on_exit_callback_list[self._current_callback_idx:]
        if pending:
            next_callback = pending[0]
            self._current_callback_idx += 1
            Logger.log(
                "d", "Scheduled the next on-app-exit callback [%s]", next_callback
            )
            self._application.callLater(next_callback)
        else:
            Logger.log(
                "d", "No more on-app-exit callbacks to process. Tell the app to exit."
            )
            self._is_all_checks_passed = True
            # Tell the application to exit
            self._application.callLater(self._application.closeApplication)

    def onCurrentCallbackFinished(self, should_proceed: bool = True) -> None:
        """Called by a finished check. A False flag blocks quitting and resets
        progress; otherwise the next registered check is triggered."""
        if should_proceed:
            self.triggerNextCallback()
            return
        Logger.log("d", "on-app-exit callback finished and we should not proceed.")
        # Reset the state
        self.resetCurrentState()
|
context | base | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import wal
from sk1.pwidgets import ActionButton
class CtxPlugin(wal.HPanel):
    """Base class for context-bar plugins: a horizontal panel that builds
    its widgets in build() and refreshes them in update()."""

    app = None
    insp = None
    proxy = None
    actions = None
    name = "Plugin"

    def __init__(self, app, parent):
        # app references must be set before the panel is constructed,
        # because build() (called below) may use self.actions/self.insp
        self.app = app
        self.parent = parent
        self.insp = self.app.insp
        self.actions = self.app.actions
        wal.HPanel.__init__(self, parent)
        self.build()
        # trailing separator line after the plugin's own widgets
        self.pack(wal.PLine(self), fill=True, padding=3)
        self.hide()

    def update(self, *args):
        """Refresh widget state; overridden by subclasses."""
        pass

    def build(self):
        """Create child widgets; overridden by subclasses."""
        pass
class ActionCtxPlugin(CtxPlugin):
    """Context-bar plugin assembled from a flat list of action ids;
    a None entry acts as a separator."""

    ids = []

    def __init__(self, app, parent):
        CtxPlugin.__init__(self, app, parent)

    def build(self):
        for action_id in self.ids:
            if action_id is None:
                # separator between button groups
                self.pack(wal.VLine(self), fill=True, padding=3)
                continue
            self.pack(ActionButton(self, self.actions[action_id]), padding=1)
|
network | bmobject | """
BMObject and it's exceptions.
"""
import logging
import time
import protocol
import state
from addresses import calculateInventoryHash
from inventory import Inventory
from network.dandelion import Dandelion
logger = logging.getLogger("default")
class BMObjectInsufficientPOWError(Exception):
    """Exception indicating the object
    doesn't have sufficient proof of work."""

    # human-readable text describing this failure
    errorCodes = "Insufficient proof of work"
class BMObjectExpiredError(Exception):
    """Exception indicating the object's lifetime has expired."""

    # human-readable text describing this failure
    errorCodes = "Object expired"
class BMObjectUnwantedStreamError(Exception):
    """Exception indicating the object is in a stream
    we didn't advertise as being interested in."""

    # human-readable text describing this failure
    errorCodes = "Object in unwanted stream"
class BMObjectInvalidError(Exception):
    """The object's data does not match object specification."""

    # human-readable text describing this failure
    errorCodes = "Invalid object"
class BMObjectAlreadyHaveError(Exception):
    """We received a duplicate object (one we already have)"""

    # human-readable text describing this failure
    errorCodes = "Already have this object"
class BMObject(object):  # pylint: disable=too-many-instance-attributes
    """Bitmessage Object as a class.

    Wraps a received object message and provides the sanity checks run on
    it: proof of work, TTL, stream membership, duplicate detection, and
    per-type payload checks.
    """

    # max TTL, 28 days and 3 hours
    maxTTL = 28 * 24 * 60 * 60 + 10800
    # min TTL, 3 hours in the past
    minTTL = -3600

    def __init__(
        self, nonce, expiresTime, objectType, version, streamNumber, data, payloadOffset
    ):  # pylint: disable=too-many-arguments
        self.nonce = nonce
        self.expiresTime = expiresTime
        self.objectType = objectType
        self.version = version
        self.streamNumber = streamNumber
        self.inventoryHash = calculateInventoryHash(data)
        # copy to avoid memory issues
        self.data = bytearray(data)
        # first 32 bytes of the payload after the given offset
        self.tag = self.data[payloadOffset : payloadOffset + 32]

    def checkProofOfWorkSufficient(self):
        """Perform a proof of work check for sufficiency.

        Raises:
            BMObjectInsufficientPOWError: when the POW is insufficient.
        """
        # Let us check to make sure that the proof of work is sufficient.
        if not protocol.isProofOfWorkSufficient(self.data):
            logger.info("Proof of work is insufficient.")
            raise BMObjectInsufficientPOWError()

    def checkEOLSanity(self):
        """Check if object's lifetime
        isn't ridiculously far in the past or future.

        Raises:
            BMObjectExpiredError: expiry is outside [minTTL, maxTTL] of now.
        """
        # EOL sanity check: sample the clock once so both comparisons
        # use the same "now" (avoids a tick between the two reads)
        now = int(time.time())
        if self.expiresTime - now > BMObject.maxTTL:
            logger.info(
                "This object's End of Life time is too far in the future."
                " Ignoring it. Time is %i",
                self.expiresTime,
            )
            # .. todo:: remove from download queue
            raise BMObjectExpiredError()

        if self.expiresTime - now < BMObject.minTTL:
            logger.info(
                "This object's End of Life time was too long ago."
                " Ignoring the object. Time is %i",
                self.expiresTime,
            )
            # .. todo:: remove from download queue
            raise BMObjectExpiredError()

    def checkStream(self):
        """Check if object's stream matches streams we are interested in

        Raises:
            BMObjectInvalidError: the stream number is out of valid range.
            BMObjectUnwantedStreamError: valid but not a stream we follow.
        """
        if (
            self.streamNumber < protocol.MIN_VALID_STREAM
            or self.streamNumber > protocol.MAX_VALID_STREAM
        ):
            logger.warning("The object has invalid stream: %s", self.streamNumber)
            raise BMObjectInvalidError()
        if self.streamNumber not in state.streamsInWhichIAmParticipating:
            logger.debug(
                "The streamNumber %i isn't one we are interested in.", self.streamNumber
            )
            raise BMObjectUnwantedStreamError()

    def checkAlreadyHave(self):
        """
        Check if we already have the object
        (so that we don't duplicate it in inventory
        or advertise it unnecessarily)

        Raises:
            BMObjectAlreadyHaveError: the object is already in inventory.
        """
        # if it's a stem duplicate, pretend we don't have it
        if Dandelion().hasHash(self.inventoryHash):
            return
        if self.inventoryHash in Inventory():
            raise BMObjectAlreadyHaveError()

    def checkObjectByType(self):
        """Call a object type specific check
        (objects can have additional checks based on their types)"""
        if self.objectType == protocol.OBJECT_GETPUBKEY:
            self.checkGetpubkey()
        elif self.objectType == protocol.OBJECT_PUBKEY:
            self.checkPubkey()
        elif self.objectType == protocol.OBJECT_MSG:
            self.checkMessage()
        elif self.objectType == protocol.OBJECT_BROADCAST:
            self.checkBroadcast()
        # other objects don't require other types of tests

    def checkMessage(self):  # pylint: disable=no-self-use
        """ "Message" object type checks."""
        return

    def checkGetpubkey(self):
        """ "Getpubkey" object type checks.

        Raises:
            BMObjectInvalidError: payload is too short.
        """
        if len(self.data) < 42:
            logger.info("getpubkey message doesn't contain enough data. Ignoring.")
            raise BMObjectInvalidError()

    def checkPubkey(self):
        """ "Pubkey" object type checks.

        Raises:
            BMObjectInvalidError: payload length is out of range.
        """
        # sanity check
        if len(self.data) < 146 or len(self.data) > 440:
            logger.info("pubkey object too short or too long. Ignoring.")
            raise BMObjectInvalidError()

    def checkBroadcast(self):
        """ "Broadcast" object type checks.

        Raises:
            BMObjectInvalidError: payload too short or unsupported version.
        """
        if len(self.data) < 180:
            logger.debug(
                "The payload length of this broadcast"
                " packet is unreasonably low. Someone is probably"
                " trying funny business. Ignoring message."
            )
            raise BMObjectInvalidError()

        # this isn't supported anymore
        if self.version < 2:
            raise BMObjectInvalidError()
|
migrations | 0337_more_session_recording_fields | # Generated by Django 3.2.19 on 2023-07-15 08:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds denormalized activity/console metrics columns to SessionRecording.
    # All columns are nullable, so existing rows need no backfill.

    dependencies = [
        ("posthog", "0336_alter_survey_type"),
    ]

    operations = [
        migrations.AddField(
            model_name="sessionrecording",
            name="active_seconds",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="sessionrecording",
            name="console_error_count",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="sessionrecording",
            name="console_log_count",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="sessionrecording",
            name="console_warn_count",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="sessionrecording",
            name="inactive_seconds",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="sessionrecording",
            name="mouse_activity_count",
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
core | admin | __package__ = "archivebox.core"
from contextlib import redirect_stdout
from datetime import datetime, timezone
from io import StringIO
from pathlib import Path
from config import OUTPUT_DIR, SNAPSHOTS_PER_PAGE
from core.forms import AddLinkForm
from core.mixins import SearchResultsAdminMixin
from core.models import ArchiveResult, Snapshot, Tag
from django import forms
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.shortcuts import redirect, render
from django.urls import path
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from extractors import archive_links
from index.html import snapshot_icons
from logging_util import printable_filesize
from main import add, remove
from ..util import ansi_to_html, htmldecode, urldecode
# Admin URLs
# /admin/
# /admin/login/
# /admin/core/
# /admin/core/snapshot/
# /admin/core/snapshot/:uuid/
# /admin/core/tag/
# /admin/core/tag/:uuid/
# TODO: https://stackoverflow.com/questions/40760880/add-custom-button-to-django-admin-panel
class ArchiveResultInline(admin.TabularInline):
    """Inline table of ArchiveResults shown on the Snapshot admin page."""

    model = ArchiveResult
class TagInline(admin.TabularInline):
    """Inline for the Snapshot<->Tag many-to-many through table."""

    model = Snapshot.tags.through
from django.contrib.admin.helpers import ActionForm
from django.contrib.admin.widgets import AutocompleteSelectMultiple
class AutocompleteTags:
    """Minimal ModelAdmin stand-in: just enough attributes for the
    AutocompleteSelectMultiple widget to query Tag objects."""

    model = Tag
    search_fields = ["name"]
class AutocompleteTagsAdminStub:
    """Stub admin-site object providing the `name` attribute expected by
    the autocomplete widget."""

    name = "admin"
class SnapshotActionForm(ActionForm):
    """Admin action form with a tag-autocomplete field, used by the
    add_tags/remove_tags bulk actions on SnapshotAdmin."""

    tags = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False,
        widget=AutocompleteSelectMultiple(
            AutocompleteTags(),
            AutocompleteTagsAdminStub(),
        ),
    )

    # TODO: allow selecting actions for specific extractors? is this useful?
    # EXTRACTOR_CHOICES = [
    #     (name, name.title())
    #     for name, _, _ in get_default_archive_methods()
    # ]
    # extractor = forms.ChoiceField(
    #     choices=EXTRACTOR_CHOICES,
    #     required=False,
    #     widget=forms.MultileChoiceField(attrs={'class': "form-control"})
    # )
class SnapshotAdmin(SearchResultsAdminMixin, admin.ModelAdmin):
    """Admin for Snapshots: changelist/grid views plus bulk archiving and
    tagging actions."""

    list_display = ("added", "title_str", "files", "size", "url_str")
    sort_fields = ("title_str", "url_str", "added", "files")
    readonly_fields = ("info", "bookmarked", "added", "updated")
    search_fields = ("id", "url", "timestamp", "title", "tags__name")
    fields = ("timestamp", "url", "title", "tags", *readonly_fields)
    list_filter = ("added", "updated", "tags", "archiveresult__status")
    ordering = ["-added"]
    actions = [
        "add_tags",
        "remove_tags",
        "update_titles",
        "update_snapshots",
        "resnapshot_snapshot",
        "overwrite_snapshots",
        "delete_snapshots",
    ]
    autocomplete_fields = ["tags"]
    inlines = [ArchiveResultInline]
    list_per_page = SNAPSHOTS_PER_PAGE
    action_form = SnapshotActionForm

    def get_urls(self):
        """Prepend the custom /grid/ view to the default admin urls."""
        urls = super().get_urls()
        custom_urls = [
            path("grid/", self.admin_site.admin_view(self.grid_view), name="grid")
        ]
        return custom_urls + urls

    def get_queryset(self, request):
        # remember the request for use in column renderers
        self.request = request
        return super().get_queryset(request).prefetch_related("tags")

    def tag_list(self, obj):
        return ", ".join(obj.tags.values_list("name", flat=True))

    # TODO: figure out a different way to do this, you cant nest forms so this doenst work
    # def action(self, obj):
    #     # csrfmiddlewaretoken: Wa8UcQ4fD3FJibzxqHN3IYrrjLo4VguWynmbzzcPYoebfVUnDovon7GEMYFRgsh0
    #     # action: update_snapshots
    #     # select_across: 0
    #     # _selected_action: 76d29b26-2a88-439e-877c-a7cca1b72bb3
    #     return format_html(
    #         '''
    #             <form action="/admin/core/snapshot/" method="post" onsubmit="e => e.stopPropagation()">
    #                 <input type="hidden" name="csrfmiddlewaretoken" value="{}">
    #                 <input type="hidden" name="_selected_action" value="{}">
    #                 <button name="update_snapshots">Check</button>
    #                 <button name="update_titles">Pull title + favicon</button>
    #                 <button name="update_snapshots">Update</button>
    #                 <button name="overwrite_snapshots">Re-Archive (overwrite)</button>
    #                 <button name="delete_snapshots">Permanently delete</button>
    #             </form>
    #         ''',
    #         csrf.get_token(self.request),
    #         obj.id,
    #     )

    def info(self, obj):
        """Read-only summary panel shown on the snapshot change page."""
        return format_html(
            """
        UUID: <code style="font-size: 10px; user-select: all">{}</code>
        Timestamp: <code style="font-size: 10px; user-select: all">{}</code>
        URL Hash: <code style="font-size: 10px; user-select: all">{}</code><br/>
        Archived: {} ({} files {})
        Favicon: <img src="{}" style="height: 20px"/>
        Status code: {}
        Server: {}
        Content type: {}
        Extension: {}
        <br/><br/>
        <a href="/archive/{}">View Snapshot index ➡️</a>
        <a href="/admin/core/snapshot/?id__exact={}">View actions ⚙️</a>
        """,
            obj.id,
            obj.timestamp,
            obj.url_hash,
            "✅" if obj.is_archived else "❌",
            obj.num_outputs,
            self.size(obj),
            f"/archive/{obj.timestamp}/favicon.ico",
            obj.status_code or "?",
            obj.headers and obj.headers.get("Server") or "?",
            obj.headers and obj.headers.get("Content-Type") or "?",
            obj.extension or "?",
            obj.timestamp,
            obj.id,
        )

    def title_str(self, obj):
        """Favicon + title link for the changelist, with trailing tag chips."""
        canon = obj.as_link().canonical_outputs()
        tags = "".join(
            format_html(
                '<a href="/admin/core/snapshot/?tags__id__exact={}"><span class="tag">{}</span></a> ',
                tag.id,
                tag,
            )
            for tag in obj.tags.all()
            if str(tag).strip()
        )
        return format_html(
            '<a href="/{}">'
            '<img src="/{}/{}" class="favicon" onerror="this.remove()">'
            "</a>"
            '<a href="/{}/index.html">'
            '<b class="status-{}">{}</b>'
            "</a>",
            obj.archive_path,
            obj.archive_path,
            canon["favicon_path"],
            obj.archive_path,
            "fetched" if obj.latest_title or obj.title else "pending",
            urldecode(htmldecode(obj.latest_title or obj.title or ""))[:128]
            or "Pending...",
        ) + mark_safe(f' <span class="tags">{tags}</span>')

    def files(self, obj):
        return snapshot_icons(obj)

    files.admin_order_field = "updated"
    files.short_description = "Files Saved"

    def size(self, obj):
        """Archive size column; bolded above 50 MB."""
        archive_size = (Path(obj.link_dir) / "index.html").exists() and obj.archive_size
        if archive_size:
            size_txt = printable_filesize(archive_size)
            if archive_size > 52428800:
                size_txt = mark_safe(f"<b>{size_txt}</b>")
        else:
            size_txt = mark_safe('<span style="opacity: 0.3">...</span>')
        return format_html(
            '<a href="/{}" title="View all files">{}</a>',
            obj.archive_path,
            size_txt,
        )

    size.admin_order_field = "archiveresult__count"

    def url_str(self, obj):
        return format_html(
            '<a href="{}"><code style="user-select: all;">{}</code></a>',
            obj.url,
            obj.url,
        )

    def grid_view(self, request, extra_context=None):
        """Render the changelist with the grid template by temporarily
        monkey-patching this admin's changelist settings."""
        # cl = self.get_changelist_instance(request)

        # Save before monkey patching to restore for changelist list view
        saved_change_list_template = self.change_list_template
        saved_list_per_page = self.list_per_page
        saved_list_max_show_all = self.list_max_show_all

        # Monkey patch here plus core_tags.py
        self.change_list_template = "private_index_grid.html"
        self.list_per_page = SNAPSHOTS_PER_PAGE
        self.list_max_show_all = self.list_per_page

        # Call monkey patched view
        rendered_response = self.changelist_view(request, extra_context=extra_context)

        # Restore values
        self.change_list_template = saved_change_list_template
        self.list_per_page = saved_list_per_page
        self.list_max_show_all = saved_list_max_show_all

        return rendered_response

    # for debugging, uncomment this to print all requests:
    # def changelist_view(self, request, extra_context=None):
    #     print('[*] Got request', request.method, request.POST)
    #     return super().changelist_view(request, extra_context=None)

    def update_snapshots(self, request, queryset):
        archive_links([snapshot.as_link() for snapshot in queryset], out_dir=OUTPUT_DIR)

    update_snapshots.short_description = "Pull"

    def update_titles(self, request, queryset):
        archive_links(
            [snapshot.as_link() for snapshot in queryset],
            overwrite=True,
            methods=("title", "favicon"),
            out_dir=OUTPUT_DIR,
        )

    update_titles.short_description = "⬇️ Title"

    def resnapshot_snapshot(self, request, queryset):
        # appending a timestamp fragment makes the URL unique, forcing a new snapshot
        for snapshot in queryset:
            timestamp = datetime.now(timezone.utc).isoformat("T", "seconds")
            new_url = snapshot.url.split("#")[0] + f"#{timestamp}"
            add(new_url, tag=snapshot.tags_str())

    resnapshot_snapshot.short_description = "Re-Snapshot"

    def overwrite_snapshots(self, request, queryset):
        archive_links(
            [snapshot.as_link() for snapshot in queryset],
            overwrite=True,
            out_dir=OUTPUT_DIR,
        )

    overwrite_snapshots.short_description = "Reset"

    def delete_snapshots(self, request, queryset):
        remove(snapshots=queryset, yes=True, delete=True, out_dir=OUTPUT_DIR)

    delete_snapshots.short_description = "Delete"

    def add_tags(self, request, queryset):
        tags = request.POST.getlist("tags")
        print("[+] Adding tags", tags, "to Snapshots", queryset)
        for obj in queryset:
            obj.tags.add(*tags)

    add_tags.short_description = "+"

    def remove_tags(self, request, queryset):
        tags = request.POST.getlist("tags")
        # fixed message: tags are removed *from* snapshots, not added "to" them
        print("[-] Removing tags", tags, "from Snapshots", queryset)
        for obj in queryset:
            obj.tags.remove(*tags)

    remove_tags.short_description = "–"

    title_str.short_description = "Title"
    url_str.short_description = "Original URL"

    title_str.admin_order_field = "title"
    url_str.admin_order_field = "url"
class TagAdmin(admin.ModelAdmin):
    """Admin for Tags, with linked counts and recent-snapshot previews."""

    list_display = ("slug", "name", "num_snapshots", "snapshots", "id")
    sort_fields = ("id", "name", "slug")
    readonly_fields = ("id", "num_snapshots", "snapshots")
    search_fields = ("id", "name", "slug")
    fields = (*readonly_fields, "name", "slug")
    actions = ["delete_selected"]
    ordering = ["-id"]

    def num_snapshots(self, obj):
        """Link to the tag-filtered snapshot changelist with the total count."""
        return format_html(
            '<a href="/admin/core/snapshot/?tags__id__exact={}">{} total</a>',
            obj.id,
            obj.snapshot_set.count(),
        )

    def snapshots(self, obj):
        """Preview links for the 10 most recently updated snapshots of this tag."""
        total_count = obj.snapshot_set.count()
        return mark_safe(
            "<br/>".join(
                format_html(
                    '{} <code><a href="/admin/core/snapshot/{}/change"><b>[{}]</b></a> {}</code>',
                    snap.updated.strftime("%Y-%m-%d %H:%M")
                    if snap.updated
                    else "pending...",
                    snap.id,
                    snap.timestamp,
                    snap.url,
                )
                for snap in obj.snapshot_set.order_by("-updated")[:10]
            )
            + (
                f'<br/><a href="/admin/core/snapshot/?tags__id__exact={obj.id}">and {total_count-10} more...<a>'
                # reuse total_count instead of re-running the COUNT query
                if total_count > 10
                else ""
            )
        )
class ArchiveResultAdmin(admin.ModelAdmin):
    """Admin UI for per-extractor ArchiveResult rows attached to a Snapshot."""

    list_display = (
        "id",
        "start_ts",
        "extractor",
        "snapshot_str",
        "tags_str",
        "cmd_str",
        "status",
        "output_str",
    )
    sort_fields = ("start_ts", "extractor", "status")
    readonly_fields = ("id", "uuid", "snapshot_str", "tags_str")
    search_fields = (
        "id",
        "uuid",
        "snapshot__url",
        "extractor",
        "output",
        "cmd_version",
        "cmd",
        "snapshot__timestamp",
    )
    fields = (
        *readonly_fields,
        "snapshot",
        "extractor",
        "status",
        "start_ts",
        "end_ts",
        "output",
        "pwd",
        "cmd",
        "cmd_version",
    )
    autocomplete_fields = ["snapshot"]
    list_filter = ("status", "extractor", "start_ts", "cmd_version")
    ordering = ["-start_ts"]
    list_per_page = SNAPSHOTS_PER_PAGE

    def snapshot_str(self, obj):
        """Linked timestamp plus the parent snapshot's URL (truncated to 128 chars)."""
        return format_html(
            '<a href="/archive/{}/index.html"><b><code>[{}]</code></b></a><br/>'
            "<small>{}</small>",
            obj.snapshot.timestamp,
            obj.snapshot.timestamp,
            obj.snapshot.url[:128],
        )

    def tags_str(self, obj):
        """Tag list of the parent snapshot (delegates to Snapshot.tags_str)."""
        return obj.snapshot.tags_str()

    def cmd_str(self, obj):
        """The extractor command rendered as a single preformatted string."""
        return format_html(
            "<pre>{}</pre>",
            " ".join(obj.cmd) if isinstance(obj.cmd, list) else str(obj.cmd),
        )

    def output_str(self, obj):
        """Output preview with a link to the produced file; falls back to the
        snapshot's index.html when the result failed or the extractor produces
        no standalone file (title / archive_org)."""
        return format_html(
            '<a href="/archive/{}/{}" class="output-link">↗️</a><pre>{}</pre>',
            obj.snapshot.timestamp,
            obj.output
            if (obj.status == "succeeded")
            and obj.extractor not in ("title", "archive_org")
            else "index.html",
            obj.output,
        )

    # Column headers for the list_display callables above.
    tags_str.short_description = "tags"
    snapshot_str.short_description = "snapshot"
class ArchiveBoxAdmin(admin.AdminSite):
    """Custom admin site that adds an 'Add URLs' view for archiving new links."""

    site_header = "ArchiveBox"
    index_title = "Links"
    site_title = "Index"

    def get_urls(self):
        """Prepend the custom add-URLs route to the default admin URLs."""
        return [
            path("core/snapshot/add/", self.add_view, name="Add"),
        ] + super().get_urls()

    def add_view(self, request):
        """GET: show the AddLinkForm. POST: validate it, run the archiver on
        the submitted URL, and show the captured CLI output in the page."""
        if not request.user.is_authenticated:
            return redirect(f"/admin/login/?next={request.path}")
        request.current_app = self.name
        context = {
            **self.each_context(request),
            "title": "Add URLs",
        }
        if request.method == "GET":
            context["form"] = AddLinkForm()
        elif request.method == "POST":
            form = AddLinkForm(request.POST)
            if form.is_valid():
                url = form.cleaned_data["url"]
                print(f"[+] Adding URL: {url}")
                # depth is posted as a string; anything other than "0" means
                # crawl one level deep.
                depth = 0 if form.cleaned_data["depth"] == "0" else 1
                input_kwargs = {
                    "urls": url,
                    "depth": depth,
                    "update_all": False,
                    "out_dir": OUTPUT_DIR,
                }
                # Capture the CLI-style stdout of add() so it can be rendered
                # back to the user in the response.
                add_stdout = StringIO()
                with redirect_stdout(add_stdout):
                    add(**input_kwargs)
                print(add_stdout.getvalue())
                context.update(
                    {
                        "stdout": ansi_to_html(add_stdout.getvalue().strip()),
                        "form": AddLinkForm(),
                    }
                )
            else:
                # Re-render with the bound form so validation errors show.
                context["form"] = form
        return render(template_name="add.html", request=request, context=context)
# Replace Django's default admin site with the ArchiveBox one and register
# all models against it.
admin.site = ArchiveBoxAdmin()
admin.site.register(get_user_model())
admin.site.register(Snapshot, SnapshotAdmin)
admin.site.register(Tag, TagAdmin)
admin.site.register(ArchiveResult, ArchiveResultAdmin)
# NOTE(review): presumably disabled in favor of the custom delete_snapshots
# action, which also removes on-disk archive data — confirm.
admin.site.disable_action("delete_selected")
|
gui | pyfa_gauge | # ===============================================================================
# PyfaGauge is a generic Gauge implementation tailored for pyfa (the Python
# Fitting Assistant). It uses the easeOutQuad equation from
# caurina.transitions.Tweener to do animations
#
# ToDo: make SetGradient(<value, colour start, colour end)
# ToDo: make a solid gradient (not to->from and not dependent on value)
# ToDo: fix 0 range (currently resets range to 0.01, but this causes problems if
# we really set range at 0.01). Perhaps make it -1 and test percentage as
#       a negative, or something.
# ToDo: possibly devise a way to determine transition percents on init
# (currently hardcoded)
#
# ===============================================================================
import copy
import wx
from gui.utils import anim_effects
from gui.utils import color as color_utils
from gui.utils import draw
_t = wx.GetTranslation
class PyGauge(wx.Window):
    """Animated horizontal gauge widget for pyfa.

    The fill colour is either a fixed bar colour (SetBarColour) or, by
    default, a gradient whose colours transition through
    ``transition_colors`` as the value approaches and exceeds 100% of the
    range. Value/range changes are animated with a wx.Timer using the
    easing function in ``anim_effect``.
    """

    def __init__(self, parent, font, max_range=100, size=(-1, 30), *args, **kargs):
        super().__init__(parent, size=size, *args, **kargs)
        self._size = size
        self._border_colour = wx.BLACK
        self._bar_colour = None
        self._bar_gradient = None
        self._border_padding = 0
        self._max_range = max_range
        self._value = 0
        # Number of decimal places shown in the percentage label.
        self._fraction_digits = 0
        self._timer_id = wx.NewId()
        self._timer = None
        self._oldValue = 0
        # Animation timing: total duration and tick period, in milliseconds.
        self._anim_duration = 500
        self._anim_step = 0
        self._period = 20
        # Percentage currently displayed while an animation is running.
        self._anim_value = 0
        self._anim_direction = 0
        self.anim_effect = anim_effects.OUT_QUAD
        # transition colors used based on how full (or overfilled) the gauge is.
        self.transition_colors = [
            (wx.Colour(191, 191, 191), wx.Colour(96, 191, 0)),  # < 0-100%
            (wx.Colour(191, 167, 96), wx.Colour(255, 191, 0)),  # < 100-101%
            (wx.Colour(255, 191, 0), wx.Colour(255, 128, 0)),  # < 101-103%
            (wx.Colour(255, 128, 0), wx.Colour(255, 0, 0)),  # < 103-105%
        ]
        # Percent brightness shift applied to the bar gradient; negative
        # darkens towards the edges.
        self.gradient_effect = -35
        self._percentage = 0
        self._old_percentage = 0
        # True while the mouse hovers the gauge (shows remaining/over text).
        self._show_remaining = False
        self.font = font
        self.SetBackgroundColour(wx.Colour(51, 51, 51))
        self._tooltip = wx.ToolTip("0.00/100.00")
        self.SetToolTip(self._tooltip)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_TIMER, self.OnTimer)
        self.Bind(wx.EVT_ENTER_WINDOW, self.OnWindowEnter)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnWindowLeave)
        self.SetBackgroundStyle(wx.BG_STYLE_PAINT)

    def OnEraseBackground(self, event):
        # Intentionally empty to avoid flicker; note this handler is not
        # bound above (BG_STYLE_PAINT already suppresses erase events).
        pass

    def OnWindowEnter(self, event):
        """Switch the label to remaining/over mode while hovered."""
        self._show_remaining = True
        self.Refresh()

    def OnWindowLeave(self, event):
        """Restore the plain percentage label when the mouse leaves."""
        self._show_remaining = False
        self.Refresh()

    def GetBorderColour(self):
        return self._border_colour

    def SetBorderColour(self, colour):
        self._border_colour = colour

    def GetBarColour(self):
        return self._bar_colour

    def SetBarColour(self, colour):
        """Set a fixed bar colour; None (default) enables gradient mode."""
        self._bar_colour = colour

    def SetFractionDigits(self, digits):
        """Set the number of decimal places shown in the label."""
        self._fraction_digits = digits

    def GetBarGradient(self):
        """Return the first stored gradient, or None if none is set."""
        if self._bar_gradient is None:
            return None
        return self._bar_gradient[0]

    def SetBarGradient(self, gradient=None):
        """Store a gradient (or list of gradients); None clears it."""
        if gradient is None:
            self._bar_gradient = None
        else:
            if not isinstance(gradient, list):
                self._bar_gradient = [gradient]
            else:
                self._bar_gradient = list(gradient)

    def GetBorderPadding(self):
        return self._border_padding

    def SetBorderPadding(self, padding):
        self._border_padding = padding

    def GetRange(self):
        """Returns the maximum value of the gauge."""
        return self._max_range

    def Animate(self):
        """Start (or restart) the animation timer towards the new percentage."""
        # sFit = Fit.getInstance()
        # NOTE(review): constant guard left over from a removed settings
        # check — the else branch below is currently unreachable.
        if True:
            if not self._timer:
                self._timer = wx.Timer(self, self._timer_id)
            self._anim_step = 0
            self._timer.Start(self._period)
        else:
            self._anim_value = self._percentage
            self.Refresh()

    def SetRange(self, range, reinit=False, animate=True):
        """
        Sets the range of the gauge. The gauge length is its
        value as a proportion of the range.
        """
        if self._max_range == range:
            return
        # we cannot have a range of zero (laws of physics, etc), so we set it
        if range <= 0:
            self._max_range = 0.01
        else:
            self._max_range = range
        if reinit is False:
            self._old_percentage = self._percentage
            self._percentage = (self._value / self._max_range) * 100
        else:
            # reinit resets the gauge to empty.
            self._old_percentage = self._percentage
            self._percentage = 0
            self._value = 0
        if animate:
            self.Animate()
        # 0.01 is the sentinel for "zero range" (see ToDo in file header),
        # so show 0 in the tooltip in that case.
        self._tooltip.SetTip(
            "%.2f/%.2f"
            % (self._value, self._max_range if self._max_range > 0.01 else 0)
        )

    def GetValue(self):
        return self._value

    def SetValue(self, value, animate=True):
        """Sets the current position of the gauge."""
        if self._value == value:
            return
        self._old_percentage = self._percentage
        self._value = value
        # Clamp negative values to zero.
        if value < 0:
            self._value = 0
        self._percentage = (self._value / self._max_range) * 100
        if animate:
            self.Animate()
        self._tooltip.SetTip("%.2f/%.2f" % (self._value, self._max_range))

    def SetValueRange(self, value, range, reinit=False):
        """Set both value and range of the gauge."""
        range_ = float(range)
        # Same zero-range sentinel handling as SetRange.
        if range_ <= 0:
            self._max_range = 0.01
        else:
            self._max_range = range_
        value = float(value)
        self._value = value
        if value < 0:
            self._value = float(0)
        if reinit is False:
            self._old_percentage = self._percentage
            self._percentage = (self._value / self._max_range) * 100
        else:
            self._old_percentage = self._percentage
            self._percentage = 0
        self.Animate()
        self._tooltip.SetTip(
            "%.2f/%.2f"
            % (self._value, self._max_range if float(self._max_range) > 0.01 else 0)
        )

    def OnPaint(self, event):
        """Draw background, border, the (possibly animated) bar, and the label."""
        dc = wx.AutoBufferedPaintDC(self)
        rect = self.GetClientRect()
        dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
        dc.Clear()
        colour = self.GetBackgroundColour()
        dc.SetBrush(wx.Brush(colour))
        dc.SetPen(wx.Pen(colour))
        dc.DrawRectangle(rect)
        # While the timer runs, draw the interpolated percentage instead of
        # the target one.
        value = self._percentage
        if self._timer:
            if self._timer.IsRunning():
                value = self._anim_value
        if self._border_colour:
            dc.SetPen(wx.Pen(self.GetBorderColour()))
            dc.DrawRectangle(rect)
            pad = 1 + self.GetBorderPadding()
            rect.Deflate(pad, pad)
        if self.GetBarColour():
            # if we have a bar color set, then we will use this
            colour = self.GetBarColour()
            dc.SetBrush(wx.Brush(colour))
            dc.SetPen(wx.Pen(colour))
            # calculate width of bar and draw it
            if value > 100:
                w = rect.width
            else:
                w = rect.width * (float(value) / 100)
            r = copy.copy(rect)
            r.width = w
            dc.DrawRectangle(r)
        else:
            # if bar color is not set, then we use pre-defined transitions
            # for the colors based on the percentage value
            # calculate width of bar
            if value > 100:
                w = rect.width
            else:
                w = rect.width * (float(value) / 100)
            r = copy.copy(rect)
            r.width = w
            # determine transition range number and calculate xv (which is the
            # progress between the two transition ranges)
            pv = value
            if pv <= 100:
                xv = pv / 100
                transition = 0
            elif pv <= 101:
                xv = pv - 100
                transition = 1
            elif pv <= 103:
                xv = (pv - 101) / 2
                transition = 2
            elif pv <= 105:
                xv = (pv - 103) / 2
                transition = 3
            else:
                # beyond 105%: no transition pair, use a fixed alarm colour.
                pv = 106
                xv = pv - 100
                transition = -1
            if transition != -1:
                start_color, end_color = self.transition_colors[transition]
                color = color_utils.CalculateTransition(start_color, end_color, xv)
            else:
                color = wx.Colour(191, 48, 48)  # dark red
            # Derive the gradient end/mid colours by brightening or
            # darkening the base colour by gradient_effect percent.
            color_factor = self.gradient_effect / 100
            mid_factor = (self.gradient_effect / 2) / 100
            if self.gradient_effect > 0:
                gradient_color = color_utils.Brighten(color, color_factor)
                gradient_mid = color_utils.Brighten(color, mid_factor)
            else:
                gradient_color = color_utils.Darken(color, color_factor * -1)
                gradient_mid = color_utils.Darken(color, mid_factor * -1)
            # draw bar
            gradient_bitmap = draw.DrawGradientBar(
                r.width, r.height, gradient_mid, color, gradient_color
            )
            if gradient_bitmap is not None:
                dc.DrawBitmap(gradient_bitmap, r.left, r.top)
        # font stuff begins here
        dc.SetFont(self.font)
        # determine shadow position
        r = copy.copy(rect)
        r.left += 1
        r.top += 1
        if self._max_range == 0.01 and self._value > 0:
            # Zero-range sentinel with a non-zero value: show infinity.
            format_ = "\u221e"  # infinity symbol
            # drop shadow
            dc.SetTextForeground(wx.Colour(80, 80, 80))  # dark grey
            dc.DrawLabel(format_, r, wx.ALIGN_CENTER)
            # text
            dc.SetTextForeground(wx.WHITE)
            dc.DrawLabel(format_, rect, wx.ALIGN_CENTER)
        else:
            if not self.GetBarColour() and self._show_remaining:
                # we only do these for gradients with mouse over
                range_ = self._max_range if self._max_range > 0.01 else 0
                value = range_ - self._value
                if value < 0:
                    format_ = _t("{{0:.{0}f}} over").format(self._fraction_digits)
                    value = -value
                else:
                    format_ = _t("{{0:.{0}f}} left").format(self._fraction_digits)
            else:
                format_ = "{{0:.{0}f}}%".format(str(self._fraction_digits))
            # drop shadow
            dc.SetTextForeground(wx.Colour(80, 80, 80))
            dc.DrawLabel(format_.format(value), r, wx.ALIGN_CENTER)
            # text
            dc.SetTextForeground(wx.WHITE)
            dc.DrawLabel(format_.format(value), rect, wx.ALIGN_CENTER)

    def OnTimer(self, event):
        """Advance the animation one tick; stop when past the target or
        the animation duration is exhausted."""
        old_value = self._old_percentage
        value = self._percentage
        start = 0
        # -1 = left direction, 1 = right direction
        direction = 1 if old_value < value else -1
        end = direction * (value - old_value)
        self._anim_direction = direction
        # Eased offset for the elapsed animation time.
        step = self.anim_effect(self._anim_step, start, end, self._anim_duration)
        self._anim_step += self._period
        if self._timer_id == event.GetId():
            stop_timer = False
            if self._anim_step > self._anim_duration:
                stop_timer = True
            # add new value to the animation if we haven't reached our goal
            # otherwise, stop animation
            if direction == 1:
                if old_value + step < value:
                    self._anim_value = old_value + step
                else:
                    stop_timer = True
            else:
                if old_value - step > value:
                    self._anim_value = old_value - step
                else:
                    stop_timer = True
            if stop_timer:
                self._timer.Stop()
            self.Refresh()
|
Arch | importGBXML | # ***************************************************************************
# * Copyright (c) 2015 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD GbXml exporter"
__author__ = "Yorik van Havre"
__url__ = "https://www.freecad.org"
import Draft
import FreeCAD
if FreeCAD.GuiUp:
    from draftutils.translate import translate
else:
    # \cond
    def translate(ctx, txt):
        """Fallback no-op translator used when the GUI is not available."""
        return txt
    # \endcond
## @package importGBXML
# \ingroup ARCH
# \brief GBXML file format exporter
#
# This module provides tools to export GBXML files.
def export(objectslist, filename):
    """Export a single Arch Site object to a gbXML file.

    Parameters
    ----------
    objectslist : list
        Must contain exactly one object of Draft type "Site".
    filename : str
        Path of the gbXML file to write.

    Writes the site's location, its buildings, each building's spaces with
    their shell geometry, and one SpaceBoundary per space face. Prints an
    error and returns without completing if the selection is not a single
    Site, or if any space has no Zone assigned.
    """
    if len(objectslist) != 1:
        FreeCAD.Console.PrintError(
            translate("Arch", "This exporter can currently only export one site object")
            + "\n"
        )
        return
    site = objectslist[0]
    if Draft.getType(site) != "Site":
        FreeCAD.Console.PrintError(
            translate("Arch", "This exporter can currently only export one site object")
            + "\n"
        )
        return
    version = FreeCAD.Version()
    # gbXML is a text format: use the builtin open() in text mode (the
    # original used an undefined "pyopen" in binary mode, which cannot
    # accept str writes), and close the file even on error.
    with open(filename, "w") as filestream:
        # header
        filestream.write('<?xml version="1.0"?>\n')
        # BUGFIX: the original applied "%" before "+" so the version digits
        # ended up after the closing "-->".
        filestream.write(
            "<!-- Exported by FreeCAD %s.%s.%s -->\n"
            % (version[0], version[1], version[2])
        )
        filestream.write("<gbXML\n")
        filestream.write(' xmlns="http://www.gbxml.org/schema"\n')
        filestream.write(' xmlns:xhtml="http://www.w3.org/1999/xhtml"\n')
        filestream.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
        filestream.write(' xsi:schemaLocation="http://www.gbxml.org/schema"\n ')
        filestream.write(' temperatureUnit="C"\n')
        filestream.write(' lengthUnit="Meters"\n')
        filestream.write(' areaUnit="SquareMeters"\n')
        filestream.write(' volumeUnit="CubicMeters"\n')
        filestream.write(' useSIUnitsForResults="false" >\n')
        filestream.write("\n")
        # campus
        filestream.write('<Campus id="%s">\n' % site.Name)
        filestream.write("<Location>\n")
        filestream.write(
            "    <ZipcodeOrPostalCode>%s</ZipcodeOrPostalCode>\n" % site.PostalCode
        )
        filestream.write("    <Longitude>%f</Longitude>\n" % site.Longitude)
        filestream.write("    <Latitude>%f</Latitude>\n" % site.Latitude)
        # BUGFIX: closing tag was malformed ("%f/Elevation>").
        filestream.write("    <Elevation>%f</Elevation>\n" % site.Elevation.Value)
        filestream.write("    <Name>%s</Name>\n" % site.Label)
        # filestream.write( '    <CADModelAzimuth>0</CADModelAzimuth>\n' )
        # filestream.write( '    <StationId IDType="WMO">53158_2004</StationId>\n' )
        filestream.write("</Location>\n")
        # buildings
        for building in site.Group:
            if Draft.getType(building) != "Building":
                continue
            # BUGFIX: the original used "$s"/"$f", which are not printf-style
            # placeholders and raised TypeError at format time.
            filestream.write(
                '    <Building id="%s" buildingType="%s">\n'
                % (building.Name, building.BuildingType)
            )
            filestream.write(
                "    <Area>%f</Area>\n" % float(building.Area.getValueAs("m^2"))
            )
            # spaces
            for space in Draft.getObjectsOfType(
                Draft.get_group_contents(building.Group, addgroups=True), "Space"
            ):
                if not space.Zone:
                    FreeCAD.Console.PrintError(
                        translate("Arch", "Error: Space '%s' has no Zone. Aborting.")
                        % space.Label
                        + "\n"
                    )
                    return
                # NOTE(review): Conditioning is formatted with %s here (it was
                # %f) — it appears to be an enumeration label, not a number.
                filestream.write(
                    '        <Space id="%s" spaceType="%s" zoneIdRef="%s" conditionType="%s">\n'
                    % (space.Name, space.SpaceType, space.Zone.Name, space.Conditioning)
                )
                filestream.write("        <Name>%s</Name>\n" % space.Label)
                filestream.write(
                    "        <Description>%s</Description>\n" % space.Description
                )
                filestream.write(
                    '        <PeopleNumber unit="NumberOfPeople">%i</PeopleNumber>\n'
                    % space.NumberOfPeople
                )
                area = float(space.Area.getValueAs("m^2"))
                # BUGFIX: "%" binds tighter than "/", so the original tried to
                # divide the already-formatted string by the area.
                filestream.write(
                    '        <LightPowerPerArea unit="WattPerSquareMeter">%f</LightPowerPerArea>\n'
                    % (space.LightingPower / area)
                )
                filestream.write(
                    '        <EquipPowerPerArea unit="WattPerSquareMeter">%f</EquipPowerPerArea>\n'
                    % (space.EquipmentPower / area)
                )
                filestream.write("        <Area>%f</Area>\n" % area)
                filestream.write(
                    "        <Volume>%f</Volume>\n"
                    % float(
                        FreeCAD.Units.Quantity(
                            space.Shape.Volume, FreeCAD.Units.Volume
                        ).getValueAs("m^3")
                    )
                )
                # shells
                filestream.write(
                    '        <ShellGeometry id="%s_geometry">\n' % space.Name
                )
                for solid in space.Shape.Solids:
                    filestream.write("            <ClosedShell>\n")
                    for face in solid.Faces:
                        _write_poly_loop(
                            filestream, face.OuterWire, "                "
                        )
                    filestream.write("            </ClosedShell>\n")
                filestream.write("        </ShellGeometry>\n")
                # surfaces: SpaceBoundary elements are children of <Space>, so
                # the space is closed only after writing them (the original
                # wrote </Space> both before and after this loop).
                for i, face in enumerate(space.Shape.Faces):
                    # BUGFIX: the original passed the loop index as an extra
                    # positional argument to write() instead of including it
                    # in the format tuple, and omitted the closing ">".
                    filestream.write(
                        '        <SpaceBoundary isSecondLevelBoundary="false" surfaceIdRef="%s_Face%i">\n'
                        % (space.Name, i)
                    )
                    filestream.write("            <PlanarGeometry>\n")
                    _write_poly_loop(filestream, face.OuterWire, "                ")
                    filestream.write("            </PlanarGeometry>\n")
                    filestream.write("        </SpaceBoundary>\n")
                filestream.write("        </Space>\n")
            filestream.write("    </Building>\n")
        filestream.write("</Campus>\n")
        filestream.write("</gbXML>")


def _write_poly_loop(filestream, wire, indent):
    """Write a gbXML <PolyLoop> built from the CartesianPoints of *wire*'s
    vertices, indented by the string *indent*."""
    filestream.write("%s<PolyLoop>\n" % indent)
    for v in wire.Vertexes:
        filestream.write("%s    <CartesianPoint>\n" % indent)
        for coord in (v.Point.x, v.Point.y, v.Point.z):
            filestream.write(
                "%s        <Coordinate>%f</Coordinate>\n" % (indent, coord)
            )
        filestream.write("%s    </CartesianPoint>\n" % indent)
    filestream.write("%s</PolyLoop>\n" % indent)
"""
<Area>18000.00000</Area>
<Space id="sp1_LabandCorridor_Labcorridor" spaceType="LaboratoryOffice" zoneIdRef="z1_LabandCorridor">
<Name>Lab corridor</Name>
<Description/>
<PeopleNumber unit="NumberOfPeople">1.00000</PeopleNumber>
<LightPowerPerArea unit="WattPerSquareFoot">1.50000</LightPowerPerArea>
<EquipPowerPerArea unit="WattPerSquareFoot">0.00000</EquipPowerPerArea>
<Area>800.00000</Area>
<Volume>6400.00000</Volume>
<ShellGeometry id="geo_sp1_LabandCorridor_Labcorridor">
<ClosedShell>
<PolyLoop>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
... repeat
</ClosedShell>
</ShellGeometry>
<SpaceBoundary isSecondLevelBoundary="false" surfaceIdRef="aim1095">
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>9.981497</Coordinate>
<Coordinate>-31.19363</Coordinate>
<Coordinate>0</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>9.981497</Coordinate>
<Coordinate>-5.193626</Coordinate>
<Coordinate>0</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>9.981497</Coordinate>
<Coordinate>-5.193626</Coordinate>
<Coordinate>100</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>9.981497</Coordinate>
<Coordinate>-31.19363</Coordinate>
<Coordinate>100</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</SpaceBoundary>
<CADObjectId>21E2</CADObjectId>
</Space>
... repeat
</Building>
<Surface id="su1_Floor" surfaceType="UndergroundSlab" constructionIdRef="construction-1">
<Name>Floor</Name>
<AdjacentSpaceId spaceIdRef="sp1_LabandCorridor_Labcorridor"/>
<RectangularGeometry>
<Azimuth>90.00</Azimuth>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<Tilt>180.00</Tilt>
<Height>480.00000</Height>
<Width>240.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>1320.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>480.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>0.00000</Coordinate>
<Coordinate>1200.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Surface>
<Surface id="su44_Surface4" surfaceType="ExteriorWall" constructionIdRef="construction-3">
<Name>Surface 4</Name>
<AdjacentSpaceId spaceIdRef="sp7_Office_Office6"/>
<RectangularGeometry>
<Azimuth>180.00</Azimuth>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<Tilt>90.00</Tilt>
<Height>114.00000</Height>
<Width>480.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1440.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1440.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>114.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>960.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>114.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
<Opening id="su44-op1_Opening1" openingType="OperableWindow" windowTypeIdRef="windowType-1">
<Name>Opening1</Name>
<RectangularGeometry>
<CartesianPoint>
<Coordinate>96.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<Height>72.00000</Height>
<Width>48.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>1056.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1104.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1104.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1056.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Opening>
<Opening id="su44-op2_Opening2" openingType="OperableWindow" windowTypeIdRef="windowType-1">
<Name>Opening2</Name>
<RectangularGeometry>
<CartesianPoint>
<Coordinate>216.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<Height>72.00000</Height>
<Width>48.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>1176.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1224.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1224.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1176.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Opening>
<Opening id="su44-op3_Opening3" openingType="OperableWindow" windowTypeIdRef="windowType-1">
<Name>Opening3</Name>
<RectangularGeometry>
<CartesianPoint>
<Coordinate>336.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<Height>72.00000</Height>
<Width>48.00000</Width>
</RectangularGeometry>
<PlanarGeometry>
<PolyLoop>
<CartesianPoint>
<Coordinate>1296.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1344.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>24.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1344.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
<CartesianPoint>
<Coordinate>1296.00000</Coordinate>
<Coordinate>0.00000</Coordinate>
<Coordinate>96.00000</Coordinate>
</CartesianPoint>
</PolyLoop>
</PlanarGeometry>
</Opening>
</Surface>
... repeat
</Campus>
<Construction id="construction-1">
<Name>Standard</Name>
<Description/>
</Construction>
<Construction id="construction-2">
<Name>Standard</Name>
<Description/>
</Construction>
<Construction id="construction-3">
<Name>Standard</Name>
<Description/>
</Construction>
<WindowType id="windowType-1">
<Name>Standard</Name>
<Description/>
</WindowType>
<Zone id="z1_LabandCorridor">
<Name>Lab and Corridor</Name>
<Description/>
<AirChangesPerHour>0"</AirChangesPerHour>
<FlowPerArea unit="CFMPerSquareFoot">0.00000</FlowPerArea>
<FlowPerPerson unit="CFM">0.00000</FlowPerPerson>
<OAFlowPerArea unit="CFMPerSquareFoot">2.37037</OAFlowPerArea>
<OAFlowPerPerson unit="CFM">812.69841</OAFlowPerPerson>
<DesignHeatT>72.00000</DesignHeatT>
<DesignCoolT>75.00000</DesignCoolT>
</Zone>
<Zone id="z2_Office">
<Name>Office</Name>
<Description/>
<AirChangesPerHour>1"</AirChangesPerHour>
<FlowPerArea unit="CFMPerSquareFoot">0.13333</FlowPerArea>
<FlowPerPerson unit="CFM">20.00000</FlowPerPerson>
<OAFlowPerArea unit="CFMPerSquareFoot">0.05333</OAFlowPerArea>
<OAFlowPerPerson unit="CFM">8.00000</OAFlowPerPerson>
<DesignHeatT>72.00000</DesignHeatT>
<DesignCoolT>75.00000</DesignCoolT>
</Zone>
<Zone id="z3_Warehouse">
<Name>Warehouse</Name>
<Description/>
<AirChangesPerHour>5/32"</AirChangesPerHour>
<FlowPerArea unit="CFMPerSquareFoot">0.05000</FlowPerArea>
<FlowPerPerson unit="CFM">25.71429</FlowPerPerson>
<OAFlowPerArea unit="CFMPerSquareFoot">0.00000</OAFlowPerArea>
<OAFlowPerPerson unit="CFM">0.00000</OAFlowPerPerson>
<DesignHeatT>60.00000</DesignHeatT>
<DesignCoolT>80.00000</DesignCoolT>
</Zone>
<DocumentHistory>
<ProgramInfo id="adesk-rvt-1">
<CompanyName>Autodesk, Inc.</CompanyName>
<ProductName>Autodesk Project Vasari CEA</ProductName>
<Version>TP2.0 20110514_1800</Version>
<Platform>Microsoft Windows XP</Platform>
</ProgramInfo>
</DocumentHistory>
<Results xmlns="" id="sp3_LabandCorridor_Lab1" objectIdRef="sp3_LabandCorridor_Lab1" resultsType="CoolingLoad" unit="BtuPerHour">
<ObjectId>sp3_LabandCorridor_Lab1</ObjectId>
<Value>5534.837890625</Value>
<Description>Space Cooling Roof Cond</Description>
<CADObjectId>21E3</CADObjectId>
</Results>
... repeat
</gbXML>"""
|
utils | numberFormatter | import math
from eos.utils.round import roundDec, roundToPrec
def formatAmount(
    val, prec=3, lowest=0, highest=0, currency=False, forceSign=False, unitName=None
):
    """
    Add suffix to value, transform value to match new suffix and round it.

    Keyword arguments:
    val -- value to process (None yields "", infinities yield the infinity symbol)
    prec -- precision of final number (number of significant positions to show)
    lowest -- lowest order for suffixizing for numbers 0 < |num| < 1
    highest -- highest order for suffixizing for numbers |num| > 1
    currency -- if currency, billion suffix will be B instead of G
    forceSign -- if True, positive numbers are signed too
    unitName -- if specified, will be formatted into a string
    """
    if val is None:
        return ""
    # BUGFIX: the old check (val == math.inf) only matched +inf, so -inf fell
    # through into the suffix logic and was rounded as a non-finite mantissa.
    if math.isinf(val):
        sym = "\u221e" if val > 0 else "-\u221e"
        return sym if unitName is None else "{} {}".format(sym, unitName)
    # Define suffix maps
    posSuffixMap = {3: "k", 6: "M", 9: "B" if currency is True else "G"}
    negSuffixMap = {-6: "\u03bc", -3: "m"}
    # Define tuple of the map keys
    # As we're going to go from the biggest order of abs(key), sort
    # them differently due to one set of values being negative
    # and other positive
    posOrders = tuple(sorted(iter(posSuffixMap.keys()), reverse=True))
    negOrders = tuple(sorted(iter(negSuffixMap.keys()), reverse=False))
    # Find the least abs(key)
    posLowest = min(posOrders)
    negHighest = max(negOrders)
    # By default, mantissa takes just value and no suffix
    mantissa, suffix = val, ""
    # Positive suffixes
    if abs(val) > 1 and highest >= posLowest:
        # Start from highest possible suffix
        for key in posOrders:
            # Find first suitable suffix and check if it's not above highest order
            if abs(val) >= 10**key and key <= highest:
                mantissa, suffix = val / float(10**key), posSuffixMap[key]
                # Do additional step to eliminate results like 999999 => 1000k
                # If we're already using our greatest order, we can't do anything useful
                if posOrders.index(key) == 0:
                    break
                else:
                    # Get order greater than current
                    prevKey = posOrders[posOrders.index(key) - 1]
                    # Check if the key to which we potentially can change is greater
                    # than our highest boundary
                    if prevKey > highest:
                        # If it is, bail - we already have acceptable results
                        break
                    # Find multiplier to get from one order to another
                    orderDiff = 10 ** (prevKey - key)
                    # If rounded mantissa according to our specifications is greater than
                    # or equal to multiplier
                    if roundToPrec(mantissa, prec) >= orderDiff:
                        # Divide mantissa and use suffix of greater order
                        mantissa, suffix = mantissa / orderDiff, posSuffixMap[prevKey]
                    # Otherwise consider current results as acceptable
                    break
    # Take numbers between 0 and 1, and matching/below highest possible negative suffix
    elif abs(val) < 1 and val != 0 and lowest <= negHighest:
        # Start from lowest possible suffix
        for key in negOrders:
            # Get next order
            try:
                nextKey = negOrders[negOrders.index(key) + 1]
            except IndexError:
                nextKey = 0
            # Check if mantissa with next suffix is in range [1, 1000)
            if abs(val) < 10**nextKey and key >= lowest:
                mantissa, suffix = val / float(10**key), negSuffixMap[key]
                # Do additional step to eliminate results like 0.9999 => 1000m
                # Check if the key we're potentially switching to is greater than our
                # upper boundary
                if nextKey > highest:
                    # If it is, leave loop with results we already have
                    break
                # Find the multiplier between current and next order
                orderDiff = 10 ** (nextKey - key)
                # If rounded mantissa according to our specifications is greater than
                # or equal to multiplier
                if roundToPrec(mantissa, prec) >= orderDiff:
                    # Divide mantissa and use suffix of greater order
                    # Use special handling of zero key as it's not on the map
                    mantissa, suffix = (
                        mantissa / orderDiff,
                        posSuffixMap[nextKey] if nextKey != 0 else "",
                    )
                # Otherwise consider current results as acceptable
                break
    # Round mantissa according to our prec variable
    mantissa = roundToPrec(mantissa, prec)
    sign = "+" if forceSign is True and mantissa > 0 else ""
    # Round mantissa and add suffix
    if unitName is None:
        result = "{}{}{}".format(sign, mantissa, suffix)
    else:
        result = "{}{} {}{}".format(sign, mantissa, suffix, unitName)
    return result
|
filter | pfb | #!/usr/bin/env python
#
# Copyright 2009,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import math
from gnuradio import blocks, fft, gr
from . import fft
from . import filter_python as filter
from . import optfir
class channelizer_ccf(gr.hier_block2):
    """
    Make a Polyphase Filter channelizer (complex in, complex out, floating-point taps)

    This simplifies the interface by allowing a single input stream to connect to this block.
    It will then output a stream for each channel.
    """

    def __init__(self, numchans, taps=None, oversample_rate=1, atten=100):
        gr.hier_block2.__init__(
            self,
            "pfb_channelizer_ccf",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(numchans, numchans, gr.sizeof_gr_complex),
        )
        self._nchans = numchans
        self._oversample_rate = oversample_rate
        # Use the caller-supplied taps when given, otherwise design a
        # prototype filter from the channel count and attenuation.
        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            self._taps = self.create_taps(self._nchans, atten)
        # Deinterleave the single input into nchans streams feeding the PFB.
        self.s2ss = blocks.stream_to_streams(gr.sizeof_gr_complex, self._nchans)
        self.pfb = filter.pfb_channelizer_ccf(
            self._nchans, self._taps, self._oversample_rate
        )
        self.connect(self, self.s2ss)
        for i in range(self._nchans):
            self.connect((self.s2ss, i), (self.pfb, i))
            self.connect((self.pfb, i), (self, i))

    def set_channel_map(self, newmap):
        """Remap which input channel appears on which output stream."""
        self.pfb.set_channel_map(newmap)

    def set_taps(self, taps):
        """Replace the prototype filter taps at runtime."""
        self.pfb.set_taps(taps)

    def taps(self):
        """Return the current prototype filter taps."""
        return self.pfb.taps()

    def declare_sample_delay(self, delay):
        """Declare the filter's sample delay for tag propagation."""
        self.pfb.declare_sample_delay(delay)

    @staticmethod
    def create_taps(numchans, atten=100):
        """Design default low-pass prototype taps for *numchans* channels,
        relaxing the passband ripple until optfir converges."""
        # Create a filter that covers the full bandwidth of the input signal
        bw = 0.4
        tb = 0.2
        ripple = 0.1
        while True:
            try:
                taps = optfir.low_pass(1, numchans, bw, bw + tb, ripple, atten)
                return taps
            except ValueError as e:
                # This shouldn't happen, unless numchans is strange.
                # Chain the original ValueError so the real cause is visible.
                raise RuntimeError(
                    "couldn't design filter; this probably constitutes a bug"
                ) from e
            except RuntimeError:
                ripple += 0.01
                print(
                    "Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps."
                    % (ripple)
                )
                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError(
                        "optfir could not generate an appropriate filter."
                    )
class interpolator_ccf(gr.hier_block2):
    """
    Make a Polyphase Filter interpolator (complex in, complex out, floating-point taps)

    The block takes a single complex stream in and outputs a single complex
    stream out. As such, it requires no extra glue to handle the input/output
    streams. This block is provided to be consistent with the interface to the
    other PFB block.
    """

    def __init__(self, interp, taps=None, atten=100):
        """
        Args:
            interp: integer interpolation factor
            taps: prototype filter taps; designed automatically when None/empty
            atten: stop-band attenuation (dB) used when designing taps
        """
        gr.hier_block2.__init__(
            self,
            "pfb_interpolator_ccf",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )

        self._interp = interp
        # Note: the redundant `self._taps = taps` pre-assignment was removed;
        # the if/else below always set it.
        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            self._taps = self.create_taps(self._interp, atten)

        self.pfb = filter.pfb_interpolator_ccf(self._interp, self._taps)

        self.connect(self, self.pfb)
        self.connect(self.pfb, self)

    def set_taps(self, taps):
        self.pfb.set_taps(taps)

    def declare_sample_delay(self, delay):
        self.pfb.declare_sample_delay(delay)

    @staticmethod
    def create_taps(interp, atten):
        """Design low-pass prototype taps; retry with more ripple on failure.

        Raises:
            RuntimeError: if optfir cannot design an acceptable filter.
        """
        # Create a filter that covers the full bandwidth of the input signal
        bw = 0.4
        tb = 0.2
        ripple = 0.99
        while True:
            try:
                taps = optfir.low_pass(interp, interp, bw, bw + tb, ripple, atten)
                return taps
            except RuntimeError:
                ripple += 0.01
                print(
                    "Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps."
                    % (ripple)
                )
                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError(
                        "optfir could not generate an appropriate filter."
                    )
        # We bubble up ValueError – probably caused by user input
class decimator_ccf(gr.hier_block2):
    """
    Make a Polyphase Filter decimator (complex in, complex out, floating-point taps)

    This simplifies the interface by allowing a single input stream to connect to this block.
    It will then output a stream that is the decimated output stream.
    """

    def __init__(
        self,
        decim,
        taps=None,
        channel=0,
        atten=100,
        use_fft_rotators=True,
        use_fft_filters=True,
    ):
        gr.hier_block2.__init__(
            self,
            "pfb_decimator_ccf",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )

        self._decim = decim
        self._channel = channel

        # Prefer caller-supplied taps; otherwise design a default prototype.
        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            self._taps = self.create_taps(self._decim, atten)

        # Deinterleave the input into one stream per PFB arm.
        self.s2ss = blocks.stream_to_streams(gr.sizeof_gr_complex, self._decim)
        self.pfb = filter.pfb_decimator_ccf(
            self._decim, self._taps, self._channel, use_fft_rotators, use_fft_filters
        )

        self.connect(self, self.s2ss)
        for arm in range(self._decim):
            self.connect((self.s2ss, arm), (self.pfb, arm))
        self.connect(self.pfb, self)

    def set_taps(self, taps):
        self.pfb.set_taps(taps)

    def set_channel(self, chan):
        self.pfb.set_channel(chan)

    def declare_sample_delay(self, delay):
        self.pfb.declare_sample_delay(delay)

    @staticmethod
    def create_taps(decim, atten=100):
        # Create a filter that covers the full bandwidth of the input signal
        bw, tb, ripple = 0.4, 0.2, 0.1
        while True:
            try:
                return optfir.low_pass(1, decim, bw, bw + tb, ripple, atten)
            except RuntimeError:
                ripple += 0.01
                print(
                    "Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps."
                    % (ripple)
                )
                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError(
                        "optfir could not generate an appropriate filter."
                    )
        # Not handling ValueError – probably a user input caused this.
class arb_resampler_ccf(gr.hier_block2):
    """
    Convenience wrapper for the polyphase filterbank arbitrary resampler.

    The block takes a single complex stream in and outputs a single complex
    stream out. As such, it requires no extra glue to handle the input/output
    streams. This block is provided to be consistent with the interface to the
    other PFB block.
    """

    def __init__(self, rate, taps=None, flt_size=32, atten=100):
        """
        Args:
            rate: resampling rate (output rate / input rate)
            taps: prototype filter taps; designed automatically when None/empty
            flt_size: number of filters in the internal filterbank
            atten: stop-band attenuation (dB) used when designing taps
        """
        gr.hier_block2.__init__(
            self,
            "pfb_arb_resampler_ccf",
            # Input signature
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )  # Output signature

        self._rate = rate
        self._size = flt_size

        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            self._taps = self.create_taps(self._rate, self._size, atten)

        self.pfb = filter.pfb_arb_resampler_ccf(self._rate, self._taps, self._size)

        self.connect(self, self.pfb)
        self.connect(self.pfb, self)

    # Note -- set_taps not implemented in base class yet
    def set_taps(self, taps):
        self.pfb.set_taps(taps)

    def set_rate(self, rate):
        self.pfb.set_rate(rate)

    def declare_sample_delay(self, delay):
        self.pfb.declare_sample_delay(delay)

    @staticmethod
    def create_taps(rate, flt_size=32, atten=100):
        """Design prototype taps for the arbitrary resampler.

        Raises:
            RuntimeError: if optfir cannot design an acceptable filter.
        """
        # Create a filter that covers the full bandwidth of the output signal
        # If rate >= 1, we need to prevent images in the output,
        # so we have to filter it to less than half the channel
        # width of 0.5. If rate < 1, we need to filter to less
        # than half the output signal's bw to avoid aliasing, so
        # the half-band here is 0.5*rate.
        percent = 0.80
        if rate < 1:
            halfband = 0.5 * rate
            bw = percent * halfband
            tb = (percent / 2.0) * halfband
            # As we drop the bw factor, the optfir filter has a harder time converging;
            # using the firdes method here for better results.
            # (Dead local 'ripple' removed: firdes does not take a ripple arg.)
            return filter.firdes.low_pass_2(
                flt_size, flt_size, bw, tb, atten, fft.window.WIN_BLACKMAN_HARRIS
            )

        halfband = 0.5
        bw = percent * halfband
        tb = (percent / 2.0) * halfband
        ripple = 0.1
        # (Dead pre-assignment 'taps = None' removed.)
        while True:
            try:
                return optfir.low_pass(flt_size, flt_size, bw, bw + tb, ripple, atten)
            except RuntimeError:
                ripple += 0.01
                print(
                    "Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps."
                    % (ripple)
                )
                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError(
                        "optfir could not generate an appropriate filter."
                    )
        # We intentionally don't handle ValueError here, because it's most likely caused by user input
class arb_resampler_fff(gr.hier_block2):
    """
    Convenience wrapper for the polyphase filterbank arbitrary resampler.

    The block takes a single float stream in and outputs a single float
    stream out. As such, it requires no extra glue to handle the input/output
    streams. This block is provided to be consistent with the interface to the
    other PFB block.
    """

    def __init__(self, rate, taps=None, flt_size=32, atten=100):
        """
        Args:
            rate: resampling rate (output rate / input rate)
            taps: prototype filter taps; designed automatically when None/empty
            flt_size: number of filters in the internal filterbank
            atten: stop-band attenuation (dB) used when designing taps
        """
        gr.hier_block2.__init__(
            self,
            "pfb_arb_resampler_fff",
            # Input signature
            gr.io_signature(1, 1, gr.sizeof_float),
            gr.io_signature(1, 1, gr.sizeof_float),
        )  # Output signature

        self._rate = rate
        self._size = flt_size

        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            self._taps = self.create_taps(self._rate, self._size, atten)

        self.pfb = filter.pfb_arb_resampler_fff(self._rate, self._taps, self._size)

        self.connect(self, self.pfb)
        self.connect(self.pfb, self)

    # Note -- set_taps not implemented in base class yet
    def set_taps(self, taps):
        self.pfb.set_taps(taps)

    def set_rate(self, rate):
        self.pfb.set_rate(rate)

    def declare_sample_delay(self, delay):
        self.pfb.declare_sample_delay(delay)

    @staticmethod
    def create_taps(rate, flt_size=32, atten=100):
        """Design prototype taps for the arbitrary resampler.

        Raises:
            RuntimeError: if optfir cannot design an acceptable filter.
        """
        # Create a filter that covers the full bandwidth of the input signal
        # If rate >= 1, we need to prevent images in the output,
        # so we have to filter it to less than half the channel
        # width of 0.5. If rate < 1, we need to filter to less
        # than half the output signal's bw to avoid aliasing, so
        # the half-band here is 0.5*rate.
        percent = 0.80
        if rate < 1:
            halfband = 0.5 * rate
            bw = percent * halfband
            tb = (percent / 2.0) * halfband
            # As we drop the bw factor, the optfir filter has a harder time converging;
            # using the firdes method here for better results.
            # (Dead local 'ripple' removed: firdes does not take a ripple arg.)
            return filter.firdes.low_pass_2(
                flt_size, flt_size, bw, tb, atten, fft.window.WIN_BLACKMAN_HARRIS
            )

        halfband = 0.5
        bw = percent * halfband
        tb = (percent / 2.0) * halfband
        ripple = 0.1
        while True:
            try:
                return optfir.low_pass(flt_size, flt_size, bw, bw + tb, ripple, atten)
            except RuntimeError:
                ripple += 0.01
                print(
                    "Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps."
                    % (ripple)
                )
                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError(
                        "optfir could not generate an appropriate filter."
                    )
        # If a ValueError happens here, it's probably due to specific user input
class arb_resampler_ccc(gr.hier_block2):
    """
    Convenience wrapper for the polyphase filterbank arbitrary resampler.

    The block takes a single complex stream in and outputs a single complex
    stream out. As such, it requires no extra glue to handle the input/output
    streams. This block is provided to be consistent with the interface to the
    other PFB block.
    """

    def __init__(self, rate, taps=None, flt_size=32, atten=100):
        gr.hier_block2.__init__(
            self,
            "pfb_arb_resampler_ccc",
            # Input signature
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )  # Output signature

        self._rate = rate
        self._size = flt_size

        # Prefer user-supplied taps; otherwise design a default prototype filter.
        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            self._taps = self.create_taps(self._rate, self._size, atten)

        self.pfb = filter.pfb_arb_resampler_ccc(self._rate, self._taps, self._size)

        self.connect(self, self.pfb)
        self.connect(self.pfb, self)

    # Note -- set_taps not implemented in base class yet
    def set_taps(self, taps):
        self.pfb.set_taps(taps)

    def set_rate(self, rate):
        self.pfb.set_rate(rate)

    def declare_sample_delay(self, delay):
        self.pfb.declare_sample_delay(delay)

    @staticmethod
    def create_taps(rate, flt_size=32, atten=100):
        # Create a filter that covers the full bandwidth of the input signal
        bw, tb, ripple = 0.4, 0.2, 0.1
        while True:
            try:
                return optfir.low_pass(flt_size, flt_size, bw, bw + tb, ripple, atten)
            except RuntimeError:
                ripple += 0.01
                print(
                    "Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps."
                    % (ripple)
                )
                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError(
                        "optfir could not generate an appropriate filter."
                    )
        # If a ValueError happens here, it's probably due to specific user input
class channelizer_hier_ccf(gr.hier_block2):
    """
    Make a Polyphase Filter channelizer (complex in, complex out, floating-point taps)

    Args:
    n_chans: The number of channels to split into.
    n_filterbanks: The number of filterbank blocks to use (default=2).
    taps: The taps to use.  If this is `None` then taps are generated using optfir.low_pass.
    outchans: Which channels to output streams for (a list of integers) (default is all channels).
    atten: Stop band attenuation.
    bw: The fraction of the channel you want to keep.
    tb: Transition band width as fraction of channel width.
    ripple: Pass band ripple in dB.
    """

    def __init__(
        self,
        n_chans,
        n_filterbanks=1,
        taps=None,
        outchans=None,
        atten=100,
        bw=1.0,
        tb=0.2,
        ripple=0.1,
    ):
        # Cannot use more filterbank blocks than there are channels.
        if n_filterbanks > n_chans:
            n_filterbanks = n_chans
        if outchans is None:
            outchans = list(range(n_chans))
        gr.hier_block2.__init__(
            self,
            "pfb_channelizer_hier_ccf",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(len(outchans), len(outchans), gr.sizeof_gr_complex),
        )
        if taps is None:
            taps = self.create_taps(n_chans, atten, bw, tb, ripple)
        taps = list(taps)
        # Zero-pad the taps so they divide evenly across the n_chans arms.
        extra_taps = int(math.ceil(1.0 * len(taps) / n_chans) * n_chans - len(taps))
        taps = taps + [0] * extra_taps
        # Make taps for each channel
        chantaps = [
            list(reversed(taps[i : len(taps) : n_chans])) for i in range(0, n_chans)
        ]
        # Convert the input stream into a stream of vectors.
        self.s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, n_chans)
        # Create a mapping to separate out each filterbank (a group of channels to be processed together)
        # And a list of sets of taps for each filterbank.
        low_cpp = int(n_chans / n_filterbanks)
        extra = n_chans - low_cpp * n_filterbanks
        # cpps = channels per filterbank; the first 'extra' banks take one more.
        cpps = [low_cpp + 1] * extra + [low_cpp] * (n_filterbanks - extra)
        splitter_mapping = []
        filterbanktaps = []
        total = 0
        for cpp in cpps:
            splitter_mapping.append([(0, i) for i in range(total, total + cpp)])
            filterbanktaps.append(chantaps[total : total + cpp])
            total += cpp
        assert total == n_chans
        # Split the stream of vectors in n_filterbanks streams of vectors.
        self.splitter = blocks.vector_map(
            gr.sizeof_gr_complex, [n_chans], splitter_mapping
        )
        # Create the filterbanks
        self.fbs = [filter.filterbank_vcvcf(taps) for taps in filterbanktaps]
        # Combine the streams of vectors back into a single stream of vectors.
        combiner_mapping = [[]]
        for i, cpp in enumerate(cpps):
            for j in range(cpp):
                combiner_mapping[0].append((i, j))
        self.combiner = blocks.vector_map(gr.sizeof_gr_complex, cpps, combiner_mapping)
        # Add the final FFT to the channelizer.
        self.fft = fft.fft_vcc(n_chans, forward=True, window=[1.0] * n_chans)
        # Select the desired channels
        if outchans != list(range(n_chans)):
            selector_mapping = [[(0, i) for i in outchans]]
            self.selector = blocks.vector_map(
                gr.sizeof_gr_complex, [n_chans], selector_mapping
            )
        # Convert stream of vectors to a normal stream.
        self.v2ss = blocks.vector_to_streams(gr.sizeof_gr_complex, len(outchans))
        # Wire the flowgraph: input -> vectors -> split -> filterbanks ->
        # combine -> FFT -> (optional channel selection) -> output streams.
        self.connect(self, self.s2v, self.splitter)
        for i in range(0, n_filterbanks):
            self.connect((self.splitter, i), self.fbs[i], (self.combiner, i))
        self.connect(self.combiner, self.fft)
        if outchans != list(range(n_chans)):
            self.connect(self.fft, self.selector, self.v2ss)
        else:
            self.connect(self.fft, self.v2ss)
        for i in range(0, len(outchans)):
            self.connect((self.v2ss, i), (self, i))

    @staticmethod
    def create_taps(n_chans, atten=100, bw=1.0, tb=0.2, ripple=0.1):
        """Design the default prototype low-pass filter via optfir."""
        return optfir.low_pass(1, n_chans, bw, bw + tb, ripple, atten)
|
bleachbit | Windows | # vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2023 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Functionality specific to Microsoft Windows
The Windows Registry terminology can be confusing. Take for example
the reference
* HKCU\\Software\\BleachBit
* CurrentVersion
These are the terms:
* 'HKCU' is an abbreviation for the hive HKEY_CURRENT_USER.
* 'HKCU\Software\BleachBit' is the key name.
* 'Software' is a sub-key of HKCU.
* 'BleachBit' is a sub-key of 'Software.'
* 'CurrentVersion' is the value name.
* '0.5.1' is the value data.
"""
import glob
import logging
import os
import shutil
import sys
import xml.dom.minidom
from decimal import Decimal
from threading import Event, Thread
import bleachbit
from bleachbit import Command, FileUtilities, General, _
if "win32" == sys.platform:
import winreg
from ctypes import byref, c_buffer, c_ulong, sizeof, windll
import pywintypes
import win32api
import win32con
import win32file
import win32gui
import win32process
import win32security
from win32com.shell import shell, shellcon
psapi = windll.psapi
kernel = windll.kernel32
logger = logging.getLogger(__name__)
def browse_file(_, title):
    """Ask the user to select a single file. Return full path"""
    try:
        ret = win32gui.GetOpenFileNameW(
            None,
            Flags=win32con.OFN_EXPLORER
            | win32con.OFN_FILEMUSTEXIST
            | win32con.OFN_HIDEREADONLY,
            Title=title,
        )
    except pywintypes.error as e:
        # Use the module-level logger; the previous local re-assignment
        # (logger = logging.getLogger(__name__)) redundantly shadowed it
        # and was inconsistent with browse_files().
        if 0 == e.winerror:
            # winerror 0 = dialog dismissed without a selection
            logger.debug("browse_file(): user cancelled")
        else:
            logger.exception("exception in browse_file()")
        return None
    # GetOpenFileNameW returns a tuple; the first element is the path.
    return ret[0]
def browse_files(_, title):
    """Ask the user to select files. Return full paths"""
    try:
        # The File parameter is a hack to increase the buffer length.
        ret = win32gui.GetOpenFileNameW(
            None,
            File="\x00" * 10240,
            Flags=win32con.OFN_ALLOWMULTISELECT
            | win32con.OFN_EXPLORER
            | win32con.OFN_FILEMUSTEXIST
            | win32con.OFN_HIDEREADONLY,
            Title=title,
        )
    except pywintypes.error as e:
        if 0 == e.winerror:
            logger.debug("browse_files(): user cancelled")
        else:
            logger.exception("exception in browse_files()")
        return None
    # With OFN_ALLOWMULTISELECT the result is NUL-separated: the directory
    # first, then each selected file name.
    pieces = ret[0].split("\x00")
    if len(pieces) == 1:
        # only one filename
        return pieces
    directory = pieces[0]
    return [os.path.join(directory, fname) for fname in pieces[1:]]
def browse_folder(_, title):
    """Ask the user to select a folder. Return full path."""
    flags = 0x0010  # SHBrowseForFolder path input
    pidl = shell.SHBrowseForFolder(None, None, title, flags)[0]
    if pidl is None:
        return None  # user cancelled
    return shell.SHGetPathFromIDListW(pidl)
def check_dll_hijacking(window=None):
    """Check for possible DLL search-order hijacking

    https://bugs.python.org/issue27410

    Returns False when the system is not affected.  When a suspicious
    python3.dll is found, warn the user (dialog if `window` is given)
    and exit the process.
    """
    major = sys.version_info[0]
    minor = sys.version_info[1]
    # BleachBit 4.4.2 uses Python 3.4.4.
    # The branch with Python 3.10 was tested as not vulnerable.
    if major > 3 or (major == 3 and minor >= 10):
        return False
    if not (
        os.path.exists(r"c:\python3.dll") or os.path.exists(r"c:\dlls\python3.dll")
    ):
        return False
    # This workaround will be removed when the Python 3.10 branch is ready.
    # Raw string: '\ ' and '\d' are invalid escape sequences in a normal
    # string literal (SyntaxWarning on Python 3.12+); the text is unchanged.
    msg = _(
        r"The file python3.dll was found in c:\ or c:\dlls, which indicates a possible attempt at DLL search-order hijacking."
    )
    logger.error(msg)
    if window:
        from bleachbit.GuiBasic import message_dialog
        from gi.repository import Gtk

        message_dialog(
            window, msg, Gtk.MessageType.WARNING, Gtk.ButtonsType.OK, title=_("Warning")
        )
    sys.exit(1)
def cleanup_nonce():
    """On exit, clean up GTK junk files"""
    # Raw string: '\g' is an invalid escape sequence in a normal string
    # literal (SyntaxWarning on Python 3.12+); the pattern is unchanged.
    for fn in glob.glob(os.path.expandvars(r"%TEMP%\gdbus-nonce-file-*")):
        logger.debug("cleaning GTK nonce file: %s", fn)
        FileUtilities.delete(fn)
def csidl_to_environ(varname, csidl):
    """Define an environment variable from a CSIDL for use in CleanerML and Winapp2.ini"""
    try:
        sppath = shell.SHGetSpecialFolderPath(None, csidl)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are not swallowed; still best-effort on any API failure.
        logger.info("exception when getting special folder path for %s", varname)
        return
    # there is exception handling in set_environ()
    set_environ(varname, sppath)
def delete_locked_file(pathname):
    """Delete a file that is currently in use"""
    if not os.path.exists(pathname):
        return
    # Ask Windows to remove the file on the next reboot.
    MOVEFILE_DELAY_UNTIL_REBOOT = 4
    rc = windll.kernel32.MoveFileExW(pathname, None, MOVEFILE_DELAY_UNTIL_REBOOT)
    if 0 == rc:
        from ctypes import WinError

        raise WinError()
def delete_registry_value(key, value_name, really_delete):
    """Delete named value under the registry key.

    Return boolean indicating whether reference found and
    successful.  If really_delete is False (meaning preview),
    just check whether the value exists."""
    (hive, sub_key) = split_registry_key(key)
    if really_delete:
        try:
            # Context manager closes the handle even if DeleteValue raises;
            # the original code leaked the open key handle.
            with winreg.OpenKey(hive, sub_key, 0, winreg.KEY_SET_VALUE) as hkey:
                winreg.DeleteValue(hkey, value_name)
        except WindowsError as e:
            if e.winerror == 2:
                # 2 = 'file not found' means value does not exist
                return False
            raise
        else:
            return True
    try:
        with winreg.OpenKey(hive, sub_key) as hkey:
            winreg.QueryValueEx(hkey, value_name)
    except WindowsError as e:
        if e.winerror == 2:
            # key or value does not exist
            return False
        raise
    else:
        return True
def delete_registry_key(parent_key, really_delete):
    """Delete registry key including any values and sub-keys.

    Return boolean whether found and success.  If really
    delete is False (meaning preview), just check whether
    the key exists."""
    parent_key = str(parent_key)  # Unicode to byte string
    (hive, parent_sub_key) = split_registry_key(parent_key)
    hkey = None
    try:
        hkey = winreg.OpenKey(hive, parent_sub_key)
    except WindowsError as e:
        if e.winerror == 2:
            # 2 = 'file not found' happens when key does not exist
            return False
    if not really_delete:
        return True
    if not hkey:
        # key not found
        return False
    # Recursively delete all sub-keys first: the registry refuses to
    # delete a key that still has children.
    keys_size = winreg.QueryInfoKey(hkey)[0]
    child_keys = [parent_key + "\\" + winreg.EnumKey(hkey, i) for i in range(keys_size)]
    for child_key in child_keys:
        delete_registry_key(child_key, True)
    # NOTE(review): hkey is never explicitly closed; it is released only
    # when the PyHKEY object is garbage collected.
    winreg.DeleteKey(hive, parent_sub_key)
    return True
def delete_updates():
    """Returns commands for deleting Windows Updates files

    This is a generator of Command objects.  Service stop/start commands
    are interleaved with the delete commands, so the consumer must
    execute each command as it is yielded: run_wu_service() closes over
    ``args`` and reads its value at execution time, not at yield time.
    """
    windir = os.path.expandvars("%windir%")
    dirs = glob.glob(os.path.join(windir, "$NtUninstallKB*"))
    dirs += [os.path.expandvars(r"%windir%\SoftwareDistribution")]
    dirs += [os.path.expandvars(r"%windir%\SoftwareDistribution.old")]
    dirs += [os.path.expandvars(r"%windir%\SoftwareDistribution.bak")]
    dirs += [os.path.expandvars(r"%windir%\ie7updates")]
    dirs += [os.path.expandvars(r"%windir%\ie8updates")]
    dirs += [os.path.expandvars(r"%windir%\system32\catroot2")]
    dirs += [os.path.expandvars(r"%systemdrive%\windows.old")]
    dirs += [os.path.expandvars(r"%systemdrive%\$windows.~bt")]
    dirs += [os.path.expandvars(r"%systemdrive%\$windows.~ws")]
    if not dirs:
        # if nothing to delete, then also do not restart service
        return
    args = []

    def run_wu_service():
        # Runs 'net stop/start <service>' with whatever 'args' holds when
        # the yielded Command.Function is actually executed.
        General.run_external(args)
        return 0

    services = {}
    all_services = ("wuauserv", "cryptsvc", "bits", "msiserver")
    for service in all_services:
        import win32serviceutil

        # Status field index 1 == 4 — presumably SERVICE_RUNNING; TODO confirm
        services[service] = win32serviceutil.QueryServiceStatus(service)[1] == 4
        logger.debug(
            "Windows service {} has current state: {}".format(
                service, services[service]
            )
        )
        if services[service]:
            args = ["net", "stop", service]
            yield Command.Function(None, run_wu_service, " ".join(args))
    # Delete the contents of each directory, then the directory itself.
    for path1 in dirs:
        for path2 in FileUtilities.children_in_directory(path1, True):
            yield Command.Delete(path2)
        if os.path.exists(path1):
            yield Command.Delete(path1)
    # Restart any service that was running before the cleanup.
    for this_service in all_services:
        if services[this_service]:
            args = ["net", "start", this_service]
            yield Command.Function(None, run_wu_service, " ".join(args))
def detect_registry_key(parent_key):
    """Detect whether registry key exists"""
    try:
        parent_key = str(parent_key)  # Unicode to byte string
    except UnicodeEncodeError:
        return False
    hive, parent_sub_key = split_registry_key(parent_key)
    hkey = None
    try:
        hkey = winreg.OpenKey(hive, parent_sub_key)
    except WindowsError as e:
        if e.winerror == 2:
            # 2 = 'file not found' happens when key does not exist
            return False
    # Any other open failure leaves hkey unset and reports "not found".
    return bool(hkey)
def elevate_privileges(uac):
    """On Windows Vista and later, try to get administrator
    privileges.  If successful, return True (so original process
    can exit).  If failed or not applicable, return False."""
    if shell.IsUserAnAdmin():
        logger.debug("already an admin (UAC not required)")
        # Already elevated: enable the backup/restore privileges on this
        # process token and keep running in this process.
        htoken = win32security.OpenProcessToken(
            win32api.GetCurrentProcess(),
            win32security.TOKEN_ADJUST_PRIVILEGES | win32security.TOKEN_QUERY,
        )
        newPrivileges = [
            (
                win32security.LookupPrivilegeValue(None, "SeBackupPrivilege"),
                win32security.SE_PRIVILEGE_ENABLED,
            ),
            (
                win32security.LookupPrivilegeValue(None, "SeRestorePrivilege"),
                win32security.SE_PRIVILEGE_ENABLED,
            ),
        ]
        win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)
        win32file.CloseHandle(htoken)
        return False
    elif not uac:
        return False
    if hasattr(sys, "frozen"):
        # running frozen in py2exe
        exe = sys.executable
        parameters = "--gui --no-uac"
    else:
        pyfile = os.path.join(bleachbit.bleachbit_exe_path, "bleachbit.py")
        # If the Python file is on a network drive, do not offer the UAC because
        # the administrator may not have privileges and user will not be
        # prompted.
        if len(pyfile) > 0 and path_on_network(pyfile):
            logger.debug("debug: skipping UAC because '%s' is on network", pyfile)
            return False
        parameters = '"%s" --gui --no-uac' % pyfile
        exe = sys.executable
    # Forward the current command-line options to the elevated process.
    parameters = _add_command_line_parameters(parameters)
    logger.debug("elevate_privileges() exe=%s, parameters=%s", exe, parameters)
    rc = None
    try:
        # The 'runas' verb triggers the UAC prompt and relaunches elevated.
        rc = shell.ShellExecuteEx(
            lpVerb="runas", lpFile=exe, lpParameters=parameters, nShow=win32con.SW_SHOW
        )
    except pywintypes.error as e:
        if 1223 == e.winerror:
            # 1223 = ERROR_CANCELLED
            logger.debug("user denied the UAC dialog")
            return False
        raise
    logger.debug("ShellExecuteEx=%s", rc)
    # On success ShellExecuteEx returns a dict; True tells the caller the
    # original (non-elevated) process may exit.
    if isinstance(rc, dict):
        return True
    return False
def _add_command_line_parameters(parameters):
"""
Add any command line parameters such as --debug-log.
"""
if "--context-menu" in sys.argv:
return '{} {} "{}"'.format(parameters, " ".join(sys.argv[1:-1]), sys.argv[-1])
return "{} {}".format(parameters, " ".join(sys.argv[1:]))
def empty_recycle_bin(path, really_delete):
    """Empty the recycle bin or preview its size.

    If the recycle bin is empty, it is not emptied again to avoid an error.

    Keyword arguments:
    path          -- A drive, folder or None. None refers to all recycle bins.
    really_delete -- If True, then delete. If False, then just preview.
    """
    bytes_used, num_files = shell.SHQueryRecycleBin(path)
    if really_delete and num_files > 0:
        # Trying to delete an empty Recycle Bin on Vista/7 causes a
        # 'catastrophic failure'
        quiet = (
            shellcon.SHERB_NOSOUND
            | shellcon.SHERB_NOCONFIRMATION
            | shellcon.SHERB_NOPROGRESSUI
        )
        shell.SHEmptyRecycleBin(None, path, quiet)
    return bytes_used
def get_clipboard_paths():
    """Return a tuple of Unicode pathnames from the clipboard"""
    import win32clipboard

    win32clipboard.OpenClipboard()
    try:
        return win32clipboard.GetClipboardData(win32clipboard.CF_HDROP)
    except TypeError:
        # The clipboard does not hold a file-drop list.
        return ()
    finally:
        win32clipboard.CloseClipboard()
def get_fixed_drives():
    """Yield each fixed drive"""
    for drive in win32api.GetLogicalDriveStrings().split("\x00"):
        if win32file.GetDriveType(drive) != win32file.DRIVE_FIXED:
            continue
        # Microsoft Office 2010 Starter creates a virtual drive that
        # looks much like a fixed disk but isdir() returns false
        # and free_space() returns access denied.
        # https://bugs.launchpad.net/bleachbit/+bug/1474848
        if os.path.isdir(drive):
            yield drive
def get_known_folder_path(folder_name):
    """Return the path of a folder by its Folder ID

    Requires Windows Vista, Server 2008, or later

    Based on the code Michael Kropat (mkropat) from
    <https://gist.github.com/mkropat/7550097>
    licensed under the GNU GPL"""
    import ctypes
    from ctypes import wintypes
    from uuid import UUID

    class GUID(ctypes.Structure):
        # Mirrors the Win32 GUID layout so it can be passed by reference.
        _fields_ = [
            ("Data1", wintypes.DWORD),
            ("Data2", wintypes.WORD),
            ("Data3", wintypes.WORD),
            ("Data4", wintypes.BYTE * 8),
        ]

        def __init__(self, uuid_):
            ctypes.Structure.__init__(self)
            (
                self.Data1,
                self.Data2,
                self.Data3,
                self.Data4[0],
                self.Data4[1],
                rest,
            ) = uuid_.fields
            # Unpack the final UUID field byte-by-byte into Data4[2:8].
            for i in range(2, 8):
                self.Data4[i] = rest >> (8 - i - 1) * 8 & 0xFF

    class FOLDERID:
        # Known Folder IDs accepted by SHGetKnownFolderPath.
        LocalAppDataLow = UUID("{A520A1A4-1780-4FF6-BD18-167343C5AF16}")
        Fonts = UUID("{FD228CB7-AE11-4AE3-864C-16F3910AB8FE}")

    class UserHandle:
        # HANDLE(0) requests folders for the current user.
        current = wintypes.HANDLE(0)

    _CoTaskMemFree = windll.ole32.CoTaskMemFree
    _CoTaskMemFree.restype = None
    _CoTaskMemFree.argtypes = [ctypes.c_void_p]

    try:
        _SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath
    except AttributeError:
        # Not supported on Windows XP
        return None
    _SHGetKnownFolderPath.argtypes = [
        ctypes.POINTER(GUID),
        wintypes.DWORD,
        wintypes.HANDLE,
        ctypes.POINTER(ctypes.c_wchar_p),
    ]

    class PathNotFoundException(Exception):
        pass

    folderid = getattr(FOLDERID, folder_name)
    fid = GUID(folderid)
    pPath = ctypes.c_wchar_p()
    S_OK = 0
    if (
        _SHGetKnownFolderPath(
            ctypes.byref(fid), 0, UserHandle.current, ctypes.byref(pPath)
        )
        != S_OK
    ):
        raise PathNotFoundException(folder_name)
    path = pPath.value
    # The shell allocated the returned string; free it to avoid a leak.
    _CoTaskMemFree(pPath)
    return path
def get_recycle_bin():
    """Yield a list of files in the recycle bin"""
    pidl = shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_BITBUCKET)
    desktop = shell.SHGetDesktopFolder()
    bin_folder = desktop.BindToObject(pidl, None, shell.IID_IShellFolder)
    for item in bin_folder:
        path = bin_folder.GetDisplayNameOf(item, shellcon.SHGDN_FORPARSING)
        if os.path.isdir(path):
            # Return the contents of a normal directory, but do
            # not recurse Windows symlinks in the Recycle Bin.
            yield from FileUtilities.children_in_directory(path, True)
        yield path
def get_windows_version():
    """Get the Windows major and minor version in a decimal like 10.0"""
    version_info = win32api.GetVersionEx(0)
    # Elements 0 and 1 are the major and minor version numbers.
    return Decimal("%d.%d" % (version_info[0], version_info[1]))
def is_junction(path):
    """Check whether the path is a link

    On Python 2.7 the function os.path.islink() always returns False,
    so this is needed
    """
    FILE_ATTRIBUTE_REPARSE_POINT = 0x400
    attr = windll.kernel32.GetFileAttributesW(path)
    if attr in (-1, 0xFFFFFFFF):
        # GetFileAttributesW failed (INVALID_FILE_ATTRIBUTES, e.g. the path
        # does not exist).  The error value has all bits set, so without this
        # check the reparse-point test below would wrongly report a junction.
        return False
    return bool(attr & FILE_ATTRIBUTE_REPARSE_POINT)
def is_process_running(name):
    """Return boolean whether process (like firefox.exe) is running

    Works on Windows Vista or later, but on Windows XP gives an ImportError
    """
    import psutil

    name = name.lower()
    for proc in psutil.process_iter():
        try:
            if proc.name().lower() == name:
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # The process ended, or its name is not readable by this user:
            # skip it instead of aborting the whole scan.
            pass
    return False
def move_to_recycle_bin(path):
    """Move 'path' into recycle bin"""
    # SHFileOperation takes a single tuple describing the operation.
    operation = (
        0,
        shellcon.FO_DELETE,
        path,
        None,
        shellcon.FOF_ALLOWUNDO | shellcon.FOF_NOCONFIRMATION,
    )
    shell.SHFileOperation(operation)
def parse_windows_build(build=None):
    """
    Parse build string like 1.2.3 or 1.2 to numeric,
    ignoring the third part, if present.
    """
    if not build:
        # If not given, default to current system's version
        return get_windows_version()
    major_minor = build.split(".")[:2]
    return Decimal(".".join(major_minor))
def path_on_network(path):
    """Check whether 'path' is on a network drive"""
    drive = os.path.splitdrive(path)[0]
    # A UNC prefix (\\server\share) is always a network path.
    if drive.startswith(r"\\"):
        return True
    return win32file.GetDriveType(drive) == win32file.DRIVE_REMOTE
def shell_change_notify():
    """Notify the Windows shell of update.

    Used in windows_explorer.xml."""
    shell.SHChangeNotify(
        shellcon.SHCNE_ASSOCCHANGED, shellcon.SHCNF_IDLIST, None, None
    )
    return 0
def set_environ(varname, path):
    """Define an environment variable for use in CleanerML and Winapp2.ini

    Keyword arguments:
    varname -- name of the environment variable
    path    -- its value, a path that should exist; when falsy, do nothing
    """
    if not path:
        return
    if varname in os.environ:
        # logger.debug('set_environ(%s, %s): skipping because environment variable is already defined', varname, path)
        if "nt" == os.name:
            os.environ[varname] = os.path.expandvars("%%%s%%" % varname)
        # Do not redefine the environment variable when it already exists
        # But re-encode them with utf-8 instead of mbcs
        return
    try:
        if not os.path.exists(path):
            raise RuntimeError(
                "Variable %s points to a non-existent path %s" % (varname, path)
            )
        os.environ[varname] = path
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # propagate; any other failure is logged and ignored (best effort).
        logger.exception(
            "set_environ(%s, %s): exception when setting environment variable",
            varname,
            path,
        )
def setup_environment():
    """Define any extra environment variables for use in CleanerML and
    Winapp2.ini by mapping well-known CSIDL folders to variable names."""
    csidl_to_environ("commonappdata", shellcon.CSIDL_COMMON_APPDATA)
    csidl_to_environ("documents", shellcon.CSIDL_PERSONAL)
    # Windows XP does not define localappdata, but Windows Vista and 7 do
    csidl_to_environ("localappdata", shellcon.CSIDL_LOCAL_APPDATA)
    csidl_to_environ("music", shellcon.CSIDL_MYMUSIC)
    csidl_to_environ("pictures", shellcon.CSIDL_MYPICTURES)
    csidl_to_environ("video", shellcon.CSIDL_MYVIDEO)
    # LocalLowAppData does not have a CSIDL for use with
    # SHGetSpecialFolderPath. Instead, it is identified using
    # SHGetKnownFolderPath in Windows Vista and later
    try:
        path = get_known_folder_path("LocalAppDataLow")
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        logger.exception("exception identifying LocalAppDataLow")
    else:
        set_environ("LocalAppDataLow", path)
    # %cd% can be helpful for cleaning portable applications when
    # BleachBit is portable. It is the same variable name as defined by
    # cmd.exe .
    set_environ("cd", os.getcwd())
def split_registry_key(full_key):
    r"""Given a key like HKLM\Software split into tuple (hive, key).

    Used internally. Raises RuntimeError for an unrecognized hive
    abbreviation.
    """
    assert len(full_key) >= 6
    hive_abbrev, sub_key = full_key.split("\\", 1)
    hives = {
        "HKCR": winreg.HKEY_CLASSES_ROOT,
        "HKCU": winreg.HKEY_CURRENT_USER,
        "HKLM": winreg.HKEY_LOCAL_MACHINE,
        "HKU": winreg.HKEY_USERS,
    }
    if hive_abbrev not in hives:
        raise RuntimeError("Invalid Windows registry hive '%s'" % hive_abbrev)
    return hives[hive_abbrev], sub_key
def symlink_or_copy(src, dst):
    """Symlink 'src' to 'dst', with fallback to copying.

    A symlink is faster and uses virtually no storage, but creating one
    requires administrator privileges or Windows developer mode. If the
    symlink cannot be created, just copy the file.
    """
    try:
        os.symlink(src, dst)
        logger.debug("linked %s to %s", src, dst)
    except OSError:
        # PermissionError is a subclass of OSError, so the original
        # (PermissionError, OSError) tuple was redundant; one clause
        # covers both the privilege and generic failure cases.
        shutil.copy(src, dst)
        logger.debug("copied %s to %s", src, dst)
def has_fontconfig_cache(font_conf_file):
    """Return whether any cache directory listed in the given fonts.conf
    file contains the known fontconfig cache file."""
    cache_fn = "d031bbba323fd9e5b47e0ee5a0353f11-le32d8.cache-6"
    local_appdata = os.path.expandvars("%LOCALAPPDATA%")
    home = os.path.join(
        os.path.expandvars("%HOMEDRIVE%"), os.path.expandvars("%HOMEPATH%")
    )
    dom = xml.dom.minidom.parse(font_conf_file)
    root = dom.getElementsByTagName("fontconfig")[0]
    for cachedir in root.getElementsByTagName("cachedir"):
        value = cachedir.firstChild.nodeValue
        if value == "LOCAL_APPDATA_FONTCONFIG_CACHE":
            candidate = os.path.join(local_appdata, "fontconfig", "cache")
        elif value == "fontconfig" and cachedir.getAttribute("prefix") == "xdg":
            candidate = os.path.join(home, ".cache", "fontconfig")
        elif value == "~/.fontconfig":
            candidate = os.path.join(home, ".fontconfig")
        else:
            # The user has entered a custom directory.
            candidate = value
        if candidate and os.path.exists(os.path.join(candidate, cache_fn)):
            return True
    return False
def get_font_conf_file():
    """Return the full path to fonts.conf.

    When frozen (py2exe), fonts.conf ships next to the executable;
    otherwise it is located relative to the PyGObject ('gi') package or,
    failing that, relative to the Python installation prefix.
    """
    if hasattr(sys, "frozen"):
        # running inside py2exe
        return os.path.join(bleachbit.bleachbit_exe_path, "etc", "fonts", "fonts.conf")
    import gi
    # The 'gnome' runtime directory is assumed to be a sibling of the
    # directory containing the 'gi' package — TODO confirm for all
    # supported GTK distributions.
    gnome_dir = os.path.join(os.path.dirname(os.path.dirname(gi.__file__)), "gnome")
    if not os.path.isdir(gnome_dir):
        # BleachBit is running from a stand-alone Python installation.
        gnome_dir = os.path.join(sys.exec_prefix, "..", "..")
    return os.path.join(gnome_dir, "etc", "fonts", "fonts.conf")
class SplashThread(Thread):
    """Background thread that shows a native Win32 splash window while the
    GTK application starts.

    The thread creates the window in run() and then pumps Win32 messages
    until join() posts WM_CLOSE.
    """

    def __init__(
        self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None
    ):
        # NOTE(review): 'kwargs={}' is a mutable default shared across
        # instances, and 'target'/'Verbose' are accepted but unused — the
        # thread target is forced to _show_splash_screen. Confirm before
        # changing the signature.
        super().__init__(group, self._show_splash_screen, name, args, kwargs)
        # Set once the splash window exists, so start() can block until then.
        self._splash_screen_started = Event()
        self._splash_screen_handle = None
        self._splash_screen_height = None
        self._splash_screen_width = None
    def start(self):
        """Start the thread and block until the splash window is created."""
        Thread.start(self)
        self._splash_screen_started.wait()
        logger.debug("SplashThread started")
    def run(self):
        """Create the splash window, signal readiness, then pump messages."""
        self._splash_screen_handle = self._target()
        self._splash_screen_started.set()
        # Dispatch messages
        win32gui.PumpMessages()
    def join(self, *args):
        """Close the splash window (ending PumpMessages) and join the thread."""
        import win32con
        import win32gui
        win32gui.PostMessage(self._splash_screen_handle, win32con.WM_CLOSE, 0, 0)
        Thread.join(self, *args)
    def _show_splash_screen(self):
        """Register a window class and create the splash window.

        Returns the window handle; also records the chosen width/height
        on the instance for use by wndProc.
        """
        # get instance handle
        hInstance = win32api.GetModuleHandle()
        # the class name
        className = "SimpleWin32"
        # create and initialize window class
        wndClass = win32gui.WNDCLASS()
        wndClass.style = win32con.CS_HREDRAW | win32con.CS_VREDRAW
        wndClass.lpfnWndProc = self.wndProc
        wndClass.hInstance = hInstance
        wndClass.hIcon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
        wndClass.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
        wndClass.hbrBackground = win32gui.GetStockObject(win32con.WHITE_BRUSH)
        wndClass.lpszClassName = className
        # register window class
        wndClassAtom = None
        try:
            wndClassAtom = win32gui.RegisterClass(wndClass)
        except Exception as e:
            # NOTE(review): re-raising unchanged; kept for parity with
            # the original control flow.
            raise e
        displayWidth = win32api.GetSystemMetrics(0)
        displayHeigh = win32api.GetSystemMetrics(1)
        # Fixed height, quarter-screen width, centered on the display.
        self._splash_screen_height = 100
        self._splash_screen_width = displayWidth // 4
        windowPosX = (displayWidth - self._splash_screen_width) // 2
        windowPosY = (displayHeigh - self._splash_screen_height) // 2
        hWindow = win32gui.CreateWindow(
            wndClassAtom,  # it seems message dispatching only works with the atom, not the class name
            "Bleachbit splash screen",
            win32con.WS_POPUPWINDOW | win32con.WS_VISIBLE,
            windowPosX,
            windowPosY,
            self._splash_screen_width,
            self._splash_screen_height,
            0,
            0,
            hInstance,
            None,
        )
        is_splash_screen_on_top = self._force_set_foreground_window(hWindow)
        logger.debug("Is splash screen on top: {}".format(is_splash_screen_on_top))
        return hWindow
    def _force_set_foreground_window(self, hWindow):
        """Try several workarounds to bring the splash window to the
        foreground; return whether one of them succeeded."""
        # As there are some restrictions about which processes can call SetForegroundWindow as described here:
        # https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setforegroundwindow
        # we try consecutively three different ways to show the splash screen on top of all other windows.
        # Solution 1: Pressing alt key unlocks SetForegroundWindow
        # https://stackoverflow.com/questions/14295337/win32gui-setactivewindow-error-the-specified-procedure-could-not-be-found
        # Not using win32com.client.Dispatch like in the link because there are problems when building with py2exe.
        ALT_KEY = win32con.VK_MENU
        RIGHT_ALT = 0xB8
        win32api.keybd_event(ALT_KEY, RIGHT_ALT, 0, 0)
        win32api.keybd_event(ALT_KEY, RIGHT_ALT, win32con.KEYEVENTF_KEYUP, 0)
        win32gui.ShowWindow(hWindow, win32con.SW_SHOW)
        try:
            win32gui.SetForegroundWindow(hWindow)
        except Exception as e:
            exc_message = str(e)
            logger.debug(
                "Failed attempt to show splash screen with keybd_event: {}".format(
                    exc_message
                )
            )
        if win32gui.GetForegroundWindow() == hWindow:
            return True
        # Solution 2: Attaching current thread to the foreground thread in order to use BringWindowToTop
        # https://shlomio.wordpress.com/2012/09/04/solved-setforegroundwindow-win32-api-not-always-works/
        (
            foreground_thread_id,
            foreground_process_id,
        ) = win32process.GetWindowThreadProcessId(win32gui.GetForegroundWindow())
        appThread = win32api.GetCurrentThreadId()
        if foreground_thread_id != appThread:
            try:
                win32process.AttachThreadInput(foreground_thread_id, appThread, True)
                win32gui.BringWindowToTop(hWindow)
                win32gui.ShowWindow(hWindow, win32con.SW_SHOW)
                win32process.AttachThreadInput(foreground_thread_id, appThread, False)
            except Exception as e:
                exc_message = str(e)
                logger.debug(
                    "Failed attempt to show splash screen with AttachThreadInput: {}".format(
                        exc_message
                    )
                )
        else:
            win32gui.BringWindowToTop(hWindow)
            win32gui.ShowWindow(hWindow, win32con.SW_SHOW)
        if win32gui.GetForegroundWindow() == hWindow:
            return True
        # Solution 3: Working with timers that lock/unlock SetForegroundWindow
        # https://gist.github.com/EBNull/1419093
        try:
            timeout = win32gui.SystemParametersInfo(
                win32con.SPI_GETFOREGROUNDLOCKTIMEOUT
            )
            win32gui.SystemParametersInfo(
                win32con.SPI_SETFOREGROUNDLOCKTIMEOUT, 0, win32con.SPIF_SENDCHANGE
            )
            win32gui.BringWindowToTop(hWindow)
            win32gui.SetForegroundWindow(hWindow)
            win32gui.SystemParametersInfo(
                win32con.SPI_SETFOREGROUNDLOCKTIMEOUT, timeout, win32con.SPIF_SENDCHANGE
            )
        except Exception as e:
            exc_message = str(e)
            logger.debug(
                "Failed attempt to show splash screen with SystemParametersInfo: {}".format(
                    exc_message
                )
            )
        if win32gui.GetForegroundWindow() == hWindow:
            return True
        # Solution 4: If on some machines the splash screen still doesn't come on top, we can try
        # the following solution that combines attaching to a thread and timers:
        # https://www.codeproject.com/Tips/76427/How-to-Bring-Window-to-Top-with-SetForegroundWindo
        return False
    def wndProc(self, hWnd, message, wParam, lParam):
        """Win32 window procedure: paint the icon and startup text, and
        post the quit message on WM_DESTROY."""
        if message == win32con.WM_PAINT:
            hDC, paintStruct = win32gui.BeginPaint(hWnd)
            folder_with_ico_file = "share" if hasattr(sys, "frozen") else "windows"
            filename = os.path.join(
                os.path.dirname(sys.argv[0]), folder_with_ico_file, "bleachbit.ico"
            )
            flags = win32con.LR_LOADFROMFILE
            hIcon = win32gui.LoadImage(0, filename, win32con.IMAGE_ICON, 0, 0, flags)
            # Default icon size seems to be 32 pixels so we center the icon vertically.
            default_icon_size = 32
            icon_top_margin = self._splash_screen_height - 2 * (default_icon_size + 2)
            win32gui.DrawIcon(hDC, 0, icon_top_margin, hIcon)
            # win32gui.DrawIconEx(hDC, 0, 0, hIcon, 64, 64, 0, 0, win32con.DI_NORMAL)
            rect = win32gui.GetClientRect(hWnd)
            textmetrics = win32gui.GetTextMetrics(hDC)
            text_left_margin = 2 * default_icon_size
            text_rect = (
                text_left_margin,
                (rect[3] - textmetrics["Height"]) // 2,
                rect[2],
                rect[3],
            )
            win32gui.DrawText(
                hDC,
                _("BleachBit is starting...\n"),
                -1,
                text_rect,
                win32con.DT_WORDBREAK,
            )
            win32gui.EndPaint(hWnd, paintStruct)
            return 0
        elif message == win32con.WM_DESTROY:
            win32gui.PostQuitMessage(0)
            return 0
        else:
            return win32gui.DefWindowProc(hWnd, message, wParam, lParam)
# Module-level SplashThread instance created at import time; the thread is
# not started here (presumably the application entry point calls
# start()/join() — confirm against the launcher).
splash_thread = SplashThread()
|
design | fir_design | # Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import scipy
import scipy.signal  # explicit: 'import scipy' alone does not guarantee the signal submodule is loaded

from gnuradio import fft, filter
from PyQt5 import QtGui, QtWidgets
# Filter design functions using a window
def design_win_lpf(fs, gain, wintype, mainwin):
    """Design a window-method low-pass FIR filter from the GUI fields.

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    pb, ok_pb = getfloat(mainwin.gui.endofLpfPassBandEdit.text())
    sb, ok_sb = getfloat(mainwin.gui.startofLpfStopBandEdit.text())
    atten, ok_atten = getfloat(mainwin.gui.lpfStopBandAttenEdit.text())
    ret = ok_pb and ok_sb and ok_atten
    if not ret:
        return ([], [], ret)
    tb = sb - pb
    try:
        taps = filter.firdes.low_pass_2(gain, fs, pb, tb, atten, wintype)
    except (RuntimeError, IndexError) as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Runtime Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], ret)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "lpf",
        "pbend": pb,
        "sbstart": sb,
        "atten": atten,
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_win_bpf(fs, gain, wintype, mainwin):
    """Design a window-method band-pass FIR filter from the GUI fields.

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    pb1, ok1 = getfloat(mainwin.gui.startofBpfPassBandEdit.text())
    pb2, ok2 = getfloat(mainwin.gui.endofBpfPassBandEdit.text())
    tb, ok3 = getfloat(mainwin.gui.bpfTransitionEdit.text())
    atten, ok4 = getfloat(mainwin.gui.bpfStopBandAttenEdit.text())
    ret = ok1 and ok2 and ok3 and ok4
    if not ret:
        return ([], [], ret)
    try:
        taps = filter.firdes.band_pass_2(gain, fs, pb1, pb2, tb, atten, wintype)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Runtime Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], ret)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "bpf",
        "pbstart": pb1,
        "pbend": pb2,
        "tb": tb,
        "atten": atten,
        "ntaps": len(taps),
    }
    # Fix: return the aggregate parse flag 'ret', not the last field's
    # flag 'r' as the original did (equivalent here only by accident).
    return (taps, params, ret)
def design_win_cbpf(fs, gain, wintype, mainwin):
    """Design a window-method complex band-pass FIR filter from the GUI
    fields.

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    pb1, ok1 = getfloat(mainwin.gui.startofBpfPassBandEdit.text())
    pb2, ok2 = getfloat(mainwin.gui.endofBpfPassBandEdit.text())
    tb, ok3 = getfloat(mainwin.gui.bpfTransitionEdit.text())
    atten, ok4 = getfloat(mainwin.gui.bpfStopBandAttenEdit.text())
    ret = ok1 and ok2 and ok3 and ok4
    if not ret:
        return ([], [], ret)
    try:
        taps = filter.firdes.complex_band_pass_2(
            gain, fs, pb1, pb2, tb, atten, wintype
        )
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Runtime Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], ret)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "cbpf",
        "pbstart": pb1,
        "pbend": pb2,
        "tb": tb,
        "atten": atten,
        "ntaps": len(taps),
    }
    # Fix: return the aggregate parse flag 'ret', not the last field's
    # flag 'r' as the original did (equivalent here only by accident).
    return (taps, params, ret)
def design_win_bnf(fs, gain, wintype, mainwin):
    """Design a window-method band-notch (band-reject) FIR filter from the
    GUI fields.

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    pb1, ok1 = getfloat(mainwin.gui.startofBnfStopBandEdit.text())
    pb2, ok2 = getfloat(mainwin.gui.endofBnfStopBandEdit.text())
    tb, ok3 = getfloat(mainwin.gui.bnfTransitionEdit.text())
    atten, ok4 = getfloat(mainwin.gui.bnfStopBandAttenEdit.text())
    ret = ok1 and ok2 and ok3 and ok4
    if not ret:
        return ([], [], ret)
    try:
        taps = filter.firdes.band_reject_2(gain, fs, pb1, pb2, tb, atten, wintype)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Runtime Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], ret)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "bnf",
        "sbstart": pb1,
        "sbend": pb2,
        "tb": tb,
        "atten": atten,
        "ntaps": len(taps),
    }
    # Fix: return the aggregate parse flag 'ret', not the last field's
    # flag 'r' as the original did (equivalent here only by accident).
    return (taps, params, ret)
def design_win_hpf(fs, gain, wintype, mainwin):
    """Design a window-method high-pass FIR filter from the GUI fields.

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    sb, ok_sb = getfloat(mainwin.gui.endofHpfStopBandEdit.text())
    pb, ok_pb = getfloat(mainwin.gui.startofHpfPassBandEdit.text())
    atten, ok_atten = getfloat(mainwin.gui.hpfStopBandAttenEdit.text())
    ret = ok_sb and ok_pb and ok_atten
    if not ret:
        return ([], [], ret)
    tb = pb - sb
    try:
        taps = filter.firdes.high_pass_2(gain, fs, pb, tb, atten, wintype)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Runtime Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        # Fix: the original fell through here with no return value, so the
        # caller received None and failed to unpack the result tuple.
        return ([], [], ret)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "hpf",
        "sbend": sb,
        "pbstart": pb,
        "atten": atten,
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_win_hb(fs, gain, wintype, mainwin):
    """Design a windowed half-band FIR filter via scipy.signal.firwin.

    Returns (taps, params, ok); taps and params are empty when parsing
    fails or the order is odd.

    Fix: the parse-success flags are checked *before* int(filtord);
    previously a failed parse left filtord holding getfloat's failure
    value and int() raised instead of returning an error tuple.
    """
    filtord, ok_ord = getfloat(mainwin.gui.firhbordEdit.text())
    trwidth, ok_tr = getfloat(mainwin.gui.firhbtrEdit.text())
    ret = ok_ord and ok_tr
    if not ret:
        return ([], [], ret)
    if int(filtord) & 1:
        QtWidgets.QMessageBox.information(
            mainwin,
            "Filter order should be even",
            "Filter order should be even",
            QtWidgets.QMessageBox.Ok,
        )
        return ([], [], False)
    # Map GNU Radio window constants to scipy window names.
    # (WIN_BLACKMAN_hARRIS is GNU Radio's actual spelling.)
    filtwin = {
        fft.window.WIN_HAMMING: "hamming",
        fft.window.WIN_HANN: "hanning",
        fft.window.WIN_BLACKMAN: "blackman",
        fft.window.WIN_RECTANGULAR: "boxcar",
        fft.window.WIN_KAISER: ("kaiser", 4.0),
        fft.window.WIN_BLACKMAN_hARRIS: "blackmanharris",
    }
    taps = scipy.signal.firwin(int(filtord) + 1, 0.5, window=filtwin[wintype])
    # Suppress numerically-insignificant taps.
    taps[abs(taps) <= 1e-6] = 0.0
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "hb",
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_win_rrc(fs, gain, wintype, mainwin):
    """Design a root-raised-cosine FIR filter from the GUI fields.

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    sr, ok_sr = getfloat(mainwin.gui.rrcSymbolRateEdit.text())
    alpha, ok_alpha = getfloat(mainwin.gui.rrcAlphaEdit.text())
    ntaps, ok_ntaps = getint(mainwin.gui.rrcNumTapsEdit.text())
    ret = ok_sr and ok_alpha and ok_ntaps
    if not ret:
        return ([], [], ret)
    try:
        taps = filter.firdes.root_raised_cosine(gain, fs, sr, alpha, ntaps)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Runtime Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        # Fix: the original fell through here with no return value, so the
        # caller received None and failed to unpack the result tuple.
        return ([], [], ret)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "rrc",
        "srate": sr,
        "alpha": alpha,
        "ntaps": ntaps,
    }
    return (taps, params, ret)
def design_win_gaus(fs, gain, wintype, mainwin):
    """Design a Gaussian FIR filter from the GUI fields.

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    sr, ok_sr = getfloat(mainwin.gui.gausSymbolRateEdit.text())
    bt, ok_bt = getfloat(mainwin.gui.gausBTEdit.text())
    ntaps, ok_ntaps = getint(mainwin.gui.gausNumTapsEdit.text())
    ret = ok_sr and ok_bt and ok_ntaps
    if not ret:
        return ([], [], ret)
    # Samples per symbol.
    spb = fs / sr
    try:
        taps = filter.firdes.gaussian(gain, spb, bt, ntaps)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Runtime Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        # Fix: the original fell through here with no return value, so the
        # caller received None and failed to unpack the result tuple.
        return ([], [], ret)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": wintype,
        "filttype": "gaus",
        "srate": sr,
        "bt": bt,
        "ntaps": ntaps,
    }
    return (taps, params, ret)
# Design Functions for Equiripple Filters
def design_opt_lpf(fs, gain, mainwin):
    """Design an equiripple low-pass FIR filter via optfir.low_pass().

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    pb, ok1 = getfloat(mainwin.gui.endofLpfPassBandEdit.text())
    sb, ok2 = getfloat(mainwin.gui.startofLpfStopBandEdit.text())
    atten, ok3 = getfloat(mainwin.gui.lpfStopBandAttenEdit.text())
    ripple, ok4 = getfloat(mainwin.gui.lpfPassBandRippleEdit.text())
    ret = ok1 and ok2 and ok3 and ok4
    if not ret:
        return ([], [], ret)
    try:
        taps = filter.optfir.low_pass(gain, fs, pb, sb, ripple, atten)
    except ValueError as e:
        QtWidgets.QMessageBox.information(
            mainwin,
            "Invalid filter parameters",
            e.args[0],
            QtWidgets.QMessageBox.Ok,
        )
        return ([], [], False)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Filter did not converge", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], False)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": mainwin.EQUIRIPPLE_FILT,
        "filttype": "lpf",
        "pbend": pb,
        "sbstart": sb,
        "atten": atten,
        "ripple": ripple,
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_opt_bpf(fs, gain, mainwin):
    """Design an equiripple band-pass FIR filter via optfir.band_pass().

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.

    Fix: the original guarded the body with 'if r:' — only the *last*
    field's parse flag — so earlier parse failures were ignored and the
    design proceeded with invalid values. Now the aggregate flag is used.
    """
    pb1, ok1 = getfloat(mainwin.gui.startofBpfPassBandEdit.text())
    pb2, ok2 = getfloat(mainwin.gui.endofBpfPassBandEdit.text())
    tb, ok3 = getfloat(mainwin.gui.bpfTransitionEdit.text())
    atten, ok4 = getfloat(mainwin.gui.bpfStopBandAttenEdit.text())
    ripple, ok5 = getfloat(mainwin.gui.bpfPassBandRippleEdit.text())
    ret = ok1 and ok2 and ok3 and ok4 and ok5
    if not ret:
        return ([], [], ret)
    # Stopband edges sit one transition width outside the passband.
    sb1 = pb1 - tb
    sb2 = pb2 + tb
    try:
        taps = filter.optfir.band_pass(gain, fs, sb1, pb1, pb2, sb2, ripple, atten)
    except ValueError as e:
        QtWidgets.QMessageBox.information(
            mainwin,
            "Invalid filter parameters",
            e.args[0],
            QtWidgets.QMessageBox.Ok,
        )
        return ([], [], False)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Filter did not converge", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], False)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": mainwin.EQUIRIPPLE_FILT,
        "filttype": "bpf",
        "pbstart": pb1,
        "pbend": pb2,
        "tb": tb,
        "atten": atten,
        "ripple": ripple,
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_opt_cbpf(fs, gain, mainwin):
    """Design an equiripple complex band-pass FIR filter via
    optfir.complex_band_pass().

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.

    Fix: the original guarded the body with 'if r:' — only the *last*
    field's parse flag — so earlier parse failures were ignored and the
    design proceeded with invalid values. Now the aggregate flag is used.
    """
    pb1, ok1 = getfloat(mainwin.gui.startofBpfPassBandEdit.text())
    pb2, ok2 = getfloat(mainwin.gui.endofBpfPassBandEdit.text())
    tb, ok3 = getfloat(mainwin.gui.bpfTransitionEdit.text())
    atten, ok4 = getfloat(mainwin.gui.bpfStopBandAttenEdit.text())
    ripple, ok5 = getfloat(mainwin.gui.bpfPassBandRippleEdit.text())
    ret = ok1 and ok2 and ok3 and ok4 and ok5
    if not ret:
        return ([], [], ret)
    # Stopband edges sit one transition width outside the passband.
    sb1 = pb1 - tb
    sb2 = pb2 + tb
    try:
        taps = filter.optfir.complex_band_pass(
            gain, fs, sb1, pb1, pb2, sb2, ripple, atten
        )
    except ValueError as e:
        QtWidgets.QMessageBox.information(
            mainwin,
            "Invalid filter parameters",
            e.args[0],
            QtWidgets.QMessageBox.Ok,
        )
        return ([], [], False)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Filter did not converge", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], False)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": mainwin.EQUIRIPPLE_FILT,
        "filttype": "cbpf",
        "pbstart": pb1,
        "pbend": pb2,
        "tb": tb,
        "atten": atten,
        "ripple": ripple,
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_opt_bnf(fs, gain, mainwin):
    """Design an equiripple band-notch (band-reject) FIR filter via
    optfir.band_reject().

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    sb1, ok1 = getfloat(mainwin.gui.startofBnfStopBandEdit.text())
    sb2, ok2 = getfloat(mainwin.gui.endofBnfStopBandEdit.text())
    tb, ok3 = getfloat(mainwin.gui.bnfTransitionEdit.text())
    atten, ok4 = getfloat(mainwin.gui.bnfStopBandAttenEdit.text())
    ripple, ok5 = getfloat(mainwin.gui.bnfPassBandRippleEdit.text())
    ret = ok1 and ok2 and ok3 and ok4 and ok5
    if not ret:
        return ([], [], ret)
    # Passband edges sit one transition width outside the stopband.
    pb1 = sb1 - tb
    pb2 = sb2 + tb
    try:
        taps = filter.optfir.band_reject(
            gain, fs, pb1, sb1, sb2, pb2, ripple, atten
        )
    except ValueError as e:
        QtWidgets.QMessageBox.information(
            mainwin,
            "Invalid filter parameters",
            e.args[0],
            QtWidgets.QMessageBox.Ok,
        )
        return ([], [], False)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Filter did not converge", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], False)
    # NOTE(review): 'sbstart'/'sbend' store the computed *passband* edges
    # (pb1/pb2), not the entered stopband edges — preserved as-is; confirm
    # against whatever consumes these params.
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": mainwin.EQUIRIPPLE_FILT,
        "filttype": "bnf",
        "sbstart": pb1,
        "sbend": pb2,
        "tb": tb,
        "atten": atten,
        "ripple": ripple,
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_opt_hb(fs, gain, mainwin):
    """Design an equiripple half-band FIR filter via scipy.signal.remez.

    Returns (taps, params, ok); taps and params are empty when parsing
    fails, the order is odd, or the design fails.

    Fix: the parse-success flags are checked *before* int(filtord);
    previously a failed parse left filtord holding getfloat's failure
    value and int() raised instead of returning an error tuple.
    """
    filtord, ok_ord = getfloat(mainwin.gui.firhbordEdit.text())
    trwidth, ok_tr = getfloat(mainwin.gui.firhbtrEdit.text())
    ret = ok_ord and ok_tr
    if not ret:
        return ([], [], ret)
    if int(filtord) & 1:
        QtWidgets.QMessageBox.information(
            mainwin,
            "Filter order should be even",
            "Filter order should be even",
            QtWidgets.QMessageBox.Ok,
        )
        return ([], [], False)
    try:
        # Band edges (normalized): transition centered at 0.25.
        bands = [0, 0.25 - (trwidth / fs), 0.25 + (trwidth / fs), 0.5]
        taps = scipy.signal.remez(int(filtord) + 1, bands, [1, 0], [1, 1])
        # Suppress numerically-insignificant taps.
        taps[abs(taps) <= 1e-6] = 0.0
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Filter Design Error", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], False)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": mainwin.EQUIRIPPLE_FILT,
        "filttype": "hb",
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def design_opt_hpf(fs, gain, mainwin):
    """Design an equiripple high-pass FIR filter via optfir.high_pass().

    Returns (taps, params, ok); taps and params are empty when parsing
    or filter design fails.
    """
    sb, ok1 = getfloat(mainwin.gui.endofHpfStopBandEdit.text())
    pb, ok2 = getfloat(mainwin.gui.startofHpfPassBandEdit.text())
    atten, ok3 = getfloat(mainwin.gui.hpfStopBandAttenEdit.text())
    ripple, ok4 = getfloat(mainwin.gui.hpfPassBandRippleEdit.text())
    ret = ok1 and ok2 and ok3 and ok4
    if not ret:
        return ([], [], ret)
    try:
        taps = filter.optfir.high_pass(gain, fs, sb, pb, atten, ripple)
    except ValueError as e:
        QtWidgets.QMessageBox.information(
            mainwin,
            "Invalid filter parameters",
            e.args[0],
            QtWidgets.QMessageBox.Ok,
        )
        return ([], [], False)
    except RuntimeError as e:
        QtWidgets.QMessageBox.information(
            mainwin, "Filter did not converge", e.args[0], QtWidgets.QMessageBox.Ok
        )
        return ([], [], False)
    params = {
        "fs": fs,
        "gain": gain,
        "wintype": mainwin.EQUIRIPPLE_FILT,
        "filttype": "hpf",
        "sbend": sb,
        "pbstart": pb,
        "atten": atten,
        "ripple": ripple,
        "ntaps": len(taps),
    }
    return (taps, params, ret)
def getint(value):
    """Parse 'value' as an int; return (number, ok).

    On failure returns (0, False).
    """
    try:
        number = int(value)
    except ValueError:
        return (0, False)
    return (number, True)
def getfloat(value):
    """Parse 'value' as a float; return (number, ok).

    Fix: on failure return (0.0, False) — the original returned the
    *string* "NaN", which is inconsistent with getint's (0, False) and
    made downstream int(value) calls raise instead of failing cleanly.
    Callers must check the ok flag before using the number.
    """
    try:
        return (float(value), True)
    except ValueError:
        return (0.0, False)
|
writer2 | indexer | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
# from future_builtins import filter, map
__license__ = "GPL v3"
__copyright__ = "2011, Kovid Goyal <kovid@kovidgoyal.net>"
__docformat__ = "restructuredtext en"
from collections import OrderedDict, defaultdict
from struct import pack
from calibre.ebooks.mobi.utils import CNCX as CNCX_
from calibre.ebooks.mobi.utils import (
RECORD_SIZE,
align_block,
encint,
encode_number_as_hex,
encode_tbs,
)
from cStringIO import StringIO
class CNCX(CNCX_):  # {{{
    """Collect the CNCX strings from every TOC node, breadth-first: the
    title always, and for periodicals also the klass plus (when present)
    author and description."""

    def __init__(self, toc, is_periodical):
        strings = []
        for node in toc.iterdescendants(breadth_first=True):
            strings.append(node.title)
            if is_periodical:
                strings.append(node.klass)
                # Author/description are optional; keep insertion order.
                strings.extend(
                    s for s in (node.author, node.description) if s
                )
        CNCX_.__init__(self, strings)
# }}}
class TAGX(object):  # {{{
    """Build the TAGX section of a MOBI INDX header.

    BITMASKS maps a tag number to its flag bit; the 69-73 tags reuse the
    low bits because their flags live in a second control byte (see
    IndexEntry.bytestring). NUM_VALUES gives the per-tag value count
    (default 1). Each add_tag() call appends one 4-byte table entry, so a
    TAGX instance is single-use: the periodical/secondary/flat_book
    properties accumulate into the same buffer.
    """

    BITMASKS = {11: 0b1}
    BITMASKS.update({x: (1 << i) for i, x in enumerate([1, 2, 3, 4, 5, 21, 22, 23])})
    BITMASKS.update({x: (1 << i) for i, x in enumerate([69, 70, 71, 72, 73])})

    NUM_VALUES = defaultdict(lambda: 1)
    NUM_VALUES[11] = 3
    NUM_VALUES[0] = 0

    def __init__(self):
        self.byts = bytearray()

    def add_tag(self, tag):
        """Append the 4-byte entry (tag, num values, bitmask, eof) for
        'tag'. Tag 0 is the end-of-entry marker."""
        buf = self.byts
        buf.append(tag)
        buf.append(self.NUM_VALUES[tag])
        # bitmask
        buf.append(self.BITMASKS[tag] if tag else 0)
        # eof
        buf.append(0 if tag else 1)

    def _add_tags(self, tags):
        # Fix: the original used map(self.add_tag, ...) purely for its
        # side effects. Under Python 3 map() is lazy, so nothing would be
        # appended and the TAGX block would be silently empty.
        for tag in tags:
            self.add_tag(tag)

    def header(self, control_byte_count):
        """Return the 12-byte TAGX header for the current table."""
        header = b"TAGX"
        # table length, control byte count
        header += pack(b">II", 12 + len(self.byts), control_byte_count)
        return header

    @property
    def periodical(self):
        """
        TAGX block for the Primary index header of a periodical
        """
        self._add_tags((1, 2, 3, 4, 5, 21, 22, 23, 0, 69, 70, 71, 72, 73, 0))
        return self.header(2) + bytes(self.byts)

    @property
    def secondary(self):
        """
        TAGX block for the secondary index header of a periodical
        """
        self._add_tags((11, 0))
        return self.header(1) + bytes(self.byts)

    @property
    def flat_book(self):
        """
        TAGX block for the primary index header of a flat book
        """
        self._add_tags((1, 2, 3, 4, 0))
        return self.header(1) + bytes(self.byts)
# }}}
# Index Entries {{{
class IndexEntry(object):
    """One entry in the primary MOBI index.

    TAG_VALUES maps attribute names to their MOBI tag numbers; RTAG_MAP
    is the inverse. bytestring serializes the entry per the TAGX layout
    defined in the TAGX class. (Python 2 code: iteritems, cStringIO.)
    """

    TAG_VALUES = {
        "offset": 1,
        "size": 2,
        "label_offset": 3,
        "depth": 4,
        "class_offset": 5,
        "secondary": 11,
        "parent_index": 21,
        "first_child_index": 22,
        "last_child_index": 23,
        "image_index": 69,
        "desc_offset": 70,
        "author_offset": 71,
    }
    # Inverse mapping: tag number -> attribute name.
    RTAG_MAP = {v: k for k, v in TAG_VALUES.iteritems()}
    def __init__(self, offset, label_offset):
        self.offset, self.label_offset = offset, label_offset
        self.depth, self.class_offset = 0, None
        # One control byte by default; periodicals use two (see
        # PeriodicalIndexEntry) to carry the 69-71 tags.
        self.control_byte_count = 1
        self.length = 0
        self.index = 0
        self.parent_index = None
        self.first_child_index = None
        self.last_child_index = None
        self.image_index = None
        self.author_offset = None
        self.desc_offset = None
    def __repr__(self):
        return (
            "IndexEntry(offset=%r, depth=%r, length=%r, index=%r," " parent_index=%r)"
        ) % (self.offset, self.depth, self.length, self.index, self.parent_index)
    @dynamic_property
    def size(self):
        # dynamic_property is a calibre helper that builds a property
        # from the returned fget/fset pair.
        def fget(self):
            return self.length
        def fset(self, val):
            self.length = val
        return property(fget=fget, fset=fset, doc="Alias for length")
    @property
    def next_offset(self):
        # Offset of the byte immediately after this entry's span.
        return self.offset + self.length
    @property
    def tag_nums(self):
        """Yield the tag numbers present on this entry: 1-4 always, then
        any of the optional child/parent/class tags that are set."""
        for i in range(1, 5):
            yield i
        for attr in (
            "class_offset",
            "parent_index",
            "first_child_index",
            "last_child_index",
        ):
            if getattr(self, attr) is not None:
                yield self.TAG_VALUES[attr]
    @property
    def entry_type(self):
        # OR together the TAGX flag bits of every present tag.
        ans = 0
        for tag in self.tag_nums:
            ans |= TAGX.BITMASKS[tag]
        return ans
    def attr_for_tag(self, tag):
        return self.RTAG_MAP[tag]
    @property
    def bytestring(self):
        """Serialize this entry: index (hex or length-prefixed ascii),
        control byte(s), then the encint-encoded tag values."""
        buf = StringIO()
        if isinstance(self.index, int):
            buf.write(encode_number_as_hex(self.index))
        else:
            # Non-integer index: length-prefixed ascii bytes.
            raw = bytearray(self.index.encode("ascii"))
            raw.insert(0, len(raw))
            buf.write(bytes(raw))
        et = self.entry_type
        buf.write(bytes(bytearray([et])))
        if self.control_byte_count == 2:
            # Second control byte carries the flags for the 69-71 tags
            # (their BITMASKS values reuse the low bits).
            flags = 0
            for attr in ("image_index", "desc_offset", "author_offset"):
                val = getattr(self, attr)
                if val is not None:
                    tag = self.TAG_VALUES[attr]
                    bm = TAGX.BITMASKS[tag]
                    flags |= bm
            buf.write(bytes(bytearray([flags])))
        for tag in self.tag_nums:
            attr = self.attr_for_tag(tag)
            val = getattr(self, attr)
            if isinstance(val, int):
                val = [val]
            for x in val:
                buf.write(encint(x))
        if self.control_byte_count == 2:
            # Values for the second-control-byte tags come last.
            for attr in ("image_index", "desc_offset", "author_offset"):
                val = getattr(self, attr)
                if val is not None:
                    buf.write(encint(val))
        ans = buf.getvalue()
        return ans
class PeriodicalIndexEntry(IndexEntry):
    """An IndexEntry for periodicals: carries a class offset and depth,
    and uses two control bytes for the extra periodical tags."""

    def __init__(self, offset, label_offset, class_offset, depth):
        super(PeriodicalIndexEntry, self).__init__(offset, label_offset)
        self.control_byte_count = 2
        self.class_offset = class_offset
        self.depth = depth
class SecondaryIndexEntry(IndexEntry):
    """An entry in the secondary index of a periodical, keyed by one of
    the fixed INDEX_MAP names."""

    INDEX_MAP = {
        "author": 73,
        "caption": 72,
        "credit": 71,
        "description": 70,
        "mastheadImage": 69,
    }

    def __init__(self, index):
        IndexEntry.__init__(self, 0, 0)
        self.index = index
        tag = self.INDEX_MAP[index]
        # The values for this index entry. The meaning of the leading 5 is
        # unknown (it is not the number of entries); it is emitted only
        # for the smallest tag value.
        first_value = 5 if tag == min(self.INDEX_MAP.itervalues()) else 0
        self.secondary = [first_value, 0, tag]

    @property
    def tag_nums(self):
        # Secondary entries carry only tag 11.
        yield 11

    @property
    def entry_type(self):
        return 1

    @classmethod
    def entries(cls):
        """Yield one entry per INDEX_MAP name, in descending tag order."""
        by_tag = dict((v, k) for k, v in cls.INDEX_MAP.iteritems())
        for tag in sorted(by_tag, reverse=True):
            yield cls(by_tag[tag])
# }}}
class TBS(object):  # {{{
    """
    Take the list of index nodes starting/ending on a record and calculate the
    trailing byte sequence for the record.

    The encoding was reverse-engineered from Kindlegen output; several
    comments below record open questions from that process.
    """
    def __init__(
        self, data, is_periodical, first=False, section_map={}, after_first=False
    ):
        # NOTE(review): section_map={} is a mutable default; it is only
        # read here, never mutated, so it is harmless — but confirm before
        # relying on that.
        self.section_map = section_map
        if is_periodical:
            # The starting bytes.
            # The value is zero which I think indicates the periodical
            # index entry. The values for the various flags seem to be
            # unused. If the 0b100 is present, it means that the record
            # deals with section 1 (or is the final record with section
            # transitions).
            self.type_010 = encode_tbs(0, {0b010: 0}, flag_size=3)
            self.type_011 = encode_tbs(0, {0b010: 0, 0b001: 0}, flag_size=3)
            self.type_110 = encode_tbs(0, {0b100: 2, 0b010: 0}, flag_size=3)
            self.type_111 = encode_tbs(0, {0b100: 2, 0b010: 0, 0b001: 0}, flag_size=3)
            if not data:
                byts = b""
                if after_first:
                    # This can happen if a record contains only text between
                    # the periodical start and the first section
                    byts = self.type_011
                self.bytestring = byts
            else:
                # Group the record's nodes by TOC depth, each group sorted
                # by offset (depth 0 = periodical, 1 = section, 2 = article).
                depth_map = defaultdict(list)
                for x in ("starts", "ends", "completes"):
                    for idx in data[x]:
                        depth_map[idx.depth].append(idx)
                for l in depth_map.itervalues():
                    l.sort(key=lambda x: x.offset)
                self.periodical_tbs(data, first, depth_map)
        else:
            if not data:
                self.bytestring = b""
            else:
                self.book_tbs(data, first)
    def periodical_tbs(self, data, first, depth_map):
        """Compute self.bytestring for one record of a periodical."""
        buf = StringIO()
        has_section_start = depth_map[1] and set(depth_map[1]).intersection(
            set(data["starts"])
        )
        spanner = data["spans"]
        parent_section_index = -1
        if depth_map[0]:
            # We have a terminal record
            # Find the first non periodical node
            first_node = None
            for nodes in (depth_map[1], depth_map[2]):
                for node in nodes:
                    if first_node is None or (node.offset, node.depth) < (
                        first_node.offset,
                        first_node.depth,
                    ):
                        first_node = node
            typ = self.type_110 if has_section_start else self.type_010
            # parent_section_index is needed for the last record
            if first_node is not None and first_node.depth > 0:
                parent_section_index = (
                    first_node.index
                    if first_node.depth == 1
                    else first_node.parent_index
                )
            else:
                parent_section_index = max(self.section_map.iterkeys())
        else:
            # Non terminal record
            if spanner is not None:
                # record is spanned by a single article
                parent_section_index = spanner.parent_index
                typ = self.type_110 if parent_section_index == 1 else self.type_010
            elif not depth_map[1]:
                # has only article nodes, i.e. spanned by a section
                parent_section_index = depth_map[2][0].parent_index
                typ = self.type_111 if parent_section_index == 1 else self.type_010
            else:
                # has section transitions
                if depth_map[2]:
                    parent_section_index = depth_map[2][0].parent_index
                else:
                    parent_section_index = depth_map[1][0].index
                typ = self.type_011
        buf.write(typ)
        if typ not in (self.type_110, self.type_111) and parent_section_index > 0:
            extra = {}
            # Write starting section information
            if spanner is None:
                num_articles = len(
                    [a for a in depth_map[1] if a.parent_index == parent_section_index]
                )
                if not depth_map[1]:
                    extra = {0b0001: 0}
                if num_articles > 1:
                    extra = {0b0100: num_articles}
            buf.write(encode_tbs(parent_section_index, extra))
        if spanner is None:
            # Walk the sections present in this record, in offset order,
            # emitting per-section article information.
            articles = depth_map[2]
            sections = set([self.section_map[a.parent_index] for a in articles])
            sections = sorted(sections, key=lambda x: x.offset)
            section_map = {
                s: [a for a in articles if a.parent_index == s.index] for s in sections
            }
            for i, section in enumerate(sections):
                # All the articles in this record that belong to section
                articles = section_map[section]
                first_article = articles[0]
                last_article = articles[-1]
                num = len(articles)
                last_article_ends = (
                    last_article in data["ends"] or last_article in data["completes"]
                )
                try:
                    next_sec = sections[i + 1]
                except:
                    next_sec = None
                extra = {}
                if num > 1:
                    extra[0b0100] = num
                if False and i == 0 and next_sec is not None:
                    # Write offset to next section from start of record
                    # I can't figure out exactly when Kindlegen decides to
                    # write this so I have disabled it for now.
                    extra[0b0001] = next_sec.offset - data["offset"]
                buf.write(encode_tbs(first_article.index - section.index, extra))
                if next_sec is not None:
                    buf.write(
                        encode_tbs(last_article.index - next_sec.index, {0b1000: 0})
                    )
                # If a section TOC starts and extends into the next record add
                # a trailing vwi. We detect this by TBS type==3, processing last
                # section present in the record, and the last article in that
                # section either ends or completes and doesn't finish
                # on the last byte of the record.
                elif (
                    typ == self.type_011
                    and last_article_ends
                    and ((last_article.offset + last_article.size) % RECORD_SIZE > 0)
                ):
                    buf.write(
                        encode_tbs(last_article.index - section.index - 1, {0b1000: 0})
                    )
        else:
            # Spanning article: emit its index relative to the section.
            buf.write(encode_tbs(spanner.index - parent_section_index, {0b0001: 0}))
        self.bytestring = buf.getvalue()
    def book_tbs(self, data, first):
        """Compute self.bytestring for one record of a flat book."""
        spanner = data["spans"]
        if spanner is not None:
            self.bytestring = encode_tbs(
                spanner.index, {0b010: 0, 0b001: 0}, flag_size=3
            )
        else:
            starts, completes, ends = (data["starts"], data["completes"], data["ends"])
            if not completes and (
                (len(starts) == 1 and not ends) or (len(ends) == 1 and not starts)
            ):
                # Exactly one node starts or ends here and nothing completes.
                node = starts[0] if starts else ends[0]
                self.bytestring = encode_tbs(node.index, {0b010: 0}, flag_size=3)
            else:
                # Multiple nodes: encode the first index plus a count.
                nodes = []
                for x in (starts, completes, ends):
                    nodes.extend(x)
                nodes.sort(key=lambda x: x.index)
                self.bytestring = encode_tbs(
                    nodes[0].index, {0b010: 0, 0b100: len(nodes)}, flag_size=3
                )
# }}}
class Indexer(object):  # {{{
    """Builds the MOBI index records (INDX header, index entries, IDXT and
    TAGX blocks, CNCX strings) for a book or periodical, and the per-text-
    record trailing byte sequences (TBS).

    NOTE(review): this code uses Python 2 idioms (``xrange``,
    ``iter(...).next()``, a byte-writing ``StringIO``) — presumably it
    targets Python 2 only; confirm before running under Python 3.
    """

    def __init__(
        self,
        serializer,
        number_of_text_records,
        size_of_last_text_record,
        masthead_offset,
        is_periodical,
        opts,
        oeb,
    ):
        self.serializer = serializer
        self.number_of_text_records = number_of_text_records
        # Total text size: every record is RECORD_SIZE except the last one.
        self.text_size = (
            RECORD_SIZE * (self.number_of_text_records - 1) + size_of_last_text_record
        )
        self.masthead_offset = masthead_offset
        self.secondary_record_offset = None
        self.oeb = oeb
        self.log = oeb.log
        self.opts = opts
        self.is_periodical = is_periodical
        if self.is_periodical and self.masthead_offset is None:
            raise ValueError("Periodicals must have a masthead")
        self.log.info(
            "Generating MOBI index for a %s"
            % ("periodical" if self.is_periodical else "book")
        )
        self.is_flat_periodical = False
        if self.is_periodical:
            # A periodical is "flat" when it contains exactly one section.
            # NOTE(review): ``iter(...).next()`` is the Python 2 spelling of
            # ``next(iter(...))``.
            periodical_node = iter(oeb.toc).next()
            sections = tuple(periodical_node)
            self.is_flat_periodical = len(sections) == 1
        self.records = []
        if self.is_periodical:
            # Ensure all articles have an author and description before
            # creating the CNCX
            for node in oeb.toc.iterdescendants():
                if node.klass == "article":
                    aut, desc = node.author, node.description
                    if not aut:
                        aut = _("Unknown")
                    if not desc:
                        desc = _("No details available")
                    node.author, node.description = aut, desc
        # CNCX holds the shared string table referenced by index entries.
        self.cncx = CNCX(oeb.toc, self.is_periodical)
        if self.is_periodical:
            self.indices = self.create_periodical_index()
        else:
            self.indices = self.create_book_index()
        if not self.indices:
            raise ValueError("No valid entries in TOC, cannot generate index")
        # Record layout: header first, then the index record, then CNCX.
        self.records.append(self.create_index_record())
        self.records.insert(0, self.create_header())
        self.records.extend(self.cncx.records)
        if is_periodical:
            # Periodicals additionally carry a secondary index.
            self.secondary_record_offset = len(self.records)
            self.records.append(self.create_header(secondary=True))
            self.records.append(self.create_index_record(secondary=True))
        self.calculate_trailing_byte_sequences()

    def create_index_record(self, secondary=False):  # {{{
        """Serialize the index entries into one INDX record.

        Layout: a 192-byte header, the concatenated (aligned) index entry
        block, then an IDXT block listing the offset of every entry.
        Raises ValueError when the record would exceed 0x10000 bytes.
        """
        header_length = 192
        buf = StringIO()
        indices = list(SecondaryIndexEntry.entries()) if secondary else self.indices

        # Write index entries
        offsets = []
        for i in indices:
            offsets.append(buf.tell())
            buf.write(i.bytestring)
        index_block = align_block(buf.getvalue())

        # Write offsets to index entries as an IDXT block
        idxt_block = b"IDXT"
        # Rewind and clear the scratch buffer for reuse.
        buf.seek(0), buf.truncate(0)
        for offset in offsets:
            buf.write(pack(b">H", header_length + offset))
        idxt_block = align_block(idxt_block + buf.getvalue())
        body = index_block + idxt_block

        header = b"INDX"
        buf.seek(0), buf.truncate(0)
        buf.write(pack(b">I", header_length))
        buf.write(b"\0" * 4)  # Unknown
        buf.write(pack(b">I", 1))  # Header type? Or index record number?
        buf.write(b"\0" * 4)  # Unknown
        # IDXT block offset
        buf.write(pack(b">I", header_length + len(index_block)))
        # Number of index entries
        buf.write(pack(b">I", len(offsets)))
        # Unknown
        buf.write(b"\xff" * 8)
        # Unknown
        buf.write(b"\0" * 156)
        header += buf.getvalue()

        ans = header + body
        if len(ans) > 0x10000:
            raise ValueError("Too many entries (%d) in the TOC" % len(offsets))
        return ans

    # }}}

    def create_header(self, secondary=False):  # {{{
        """Build the INDX header record, including the TAGX block and the
        trailing IDXT marker. Field offsets are noted inline."""
        buf = StringIO()
        if secondary:
            tagx_block = TAGX().secondary
        else:
            tagx_block = TAGX().periodical if self.is_periodical else TAGX().flat_book
        header_length = 192

        # Ident 0 - 4
        buf.write(b"INDX")
        # Header length 4 - 8
        buf.write(pack(b">I", header_length))
        # Unknown 8-16
        buf.write(b"\0" * 8)
        # Index type: 0 - normal, 2 - inflection 16 - 20
        buf.write(pack(b">I", 2))
        # IDXT offset 20-24
        buf.write(pack(b">I", 0))  # Filled in later
        # Number of index records 24-28
        buf.write(pack(b">I", 1 if secondary else len(self.records)))
        # Index Encoding 28-32
        buf.write(pack(b">I", 65001))  # utf-8
        # Unknown 32-36
        buf.write(b"\xff" * 4)
        # Number of index entries 36-40
        indices = list(SecondaryIndexEntry.entries()) if secondary else self.indices
        buf.write(pack(b">I", len(indices)))
        # ORDT offset 40-44
        buf.write(pack(b">I", 0))
        # LIGT offset 44-48
        buf.write(pack(b">I", 0))
        # Number of LIGT entries 48-52
        buf.write(pack(b">I", 0))
        # Number of CNCX records 52-56
        buf.write(pack(b">I", 0 if secondary else len(self.cncx.records)))
        # Unknown 56-180
        buf.write(b"\0" * 124)
        # TAGX offset 180-184
        buf.write(pack(b">I", header_length))
        # Unknown 184-192
        buf.write(b"\0" * 8)

        # TAGX block
        buf.write(tagx_block)

        num = len(indices)

        # The index of the last entry in the NCX
        idx = indices[-1].index
        if isinstance(idx, int):
            idx = encode_number_as_hex(idx)
        else:
            idx = idx.encode("ascii")
        # Length-prefixed: one byte of length followed by the bytes.
        idx = (bytes(bytearray([len(idx)]))) + idx
        buf.write(idx)

        # The number of entries in the NCX
        buf.write(pack(b">H", num))
        # Padding
        pad = (4 - (buf.tell() % 4)) % 4
        if pad:
            buf.write(b"\0" * pad)

        idxt_offset = buf.tell()

        buf.write(b"IDXT")
        buf.write(pack(b">H", header_length + len(tagx_block)))
        buf.write(b"\0")
        # Backpatch the IDXT offset field at byte 20 (see above).
        buf.seek(20)
        buf.write(pack(b">I", idxt_offset))

        return align_block(buf.getvalue())

    # }}}

    def create_book_index(self):  # {{{
        """Build the flat list of IndexEntry objects for a plain book.

        TOC nodes whose href is missing from the serializer's id_offsets
        are skipped with a warning; duplicate offsets are dropped; each
        entry's length runs to the next entry (or to body_end_offset).
        """
        indices = []
        seen = set()
        id_offsets = self.serializer.id_offsets

        # Flatten toc so that chapter to chapter jumps work with all sub
        # chapter levels as well
        for node in self.oeb.toc.iterdescendants():
            try:
                offset = id_offsets[node.href]
                label = self.cncx[node.title]
            # NOTE(review): bare except — presumably guarding the KeyError
            # from the two lookups above, but it hides everything.
            except:
                self.log.warn(
                    "TOC item %s [%s] not found in document" % (node.title, node.href)
                )
                continue
            if offset in seen:
                continue
            seen.add(offset)
            indices.append(IndexEntry(offset, label))

        indices.sort(key=lambda x: x.offset)

        # Set lengths
        for i, index in enumerate(indices):
            try:
                next_offset = indices[i + 1].offset
            except:
                # Last entry runs to the end of the body.
                next_offset = self.serializer.body_end_offset
            index.length = next_offset - index.offset

        # Remove empty indices
        indices = [x for x in indices if x.length > 0]

        # Reset lengths in case any were removed
        for i, index in enumerate(indices):
            try:
                next_offset = indices[i + 1].offset
            except:
                next_offset = self.serializer.body_end_offset
            index.length = next_offset - index.offset

        # Set index values
        for index, x in enumerate(indices):
            x.index = index

        return indices

    # }}}

    def create_periodical_index(self):  # {{{
        """Build the hierarchical index for a periodical:
        one periodical entry, then sections (depth 1), then articles
        (depth 2), with parent/child links, lengths and sanity checks.
        """
        # NOTE(review): Python 2 ``.next()`` — see class docstring.
        periodical_node = iter(self.oeb.toc).next()
        periodical_node_offset = self.serializer.body_start_offset
        periodical_node_size = self.serializer.body_end_offset - periodical_node_offset

        normalized_sections = []

        id_offsets = self.serializer.id_offsets

        periodical = PeriodicalIndexEntry(
            periodical_node_offset,
            self.cncx[periodical_node.title],
            self.cncx[periodical_node.klass],
            0,
        )
        periodical.length = periodical_node_size
        periodical.first_child_index = 1
        periodical.image_index = self.masthead_offset

        seen_sec_offsets = set()
        seen_art_offsets = set()

        for sec in periodical_node:
            normalized_articles = []
            try:
                offset = id_offsets[sec.href]
                label = self.cncx[sec.title]
                klass = self.cncx[sec.klass]
            except:
                # Section not present in the document: skip it.
                continue
            if offset in seen_sec_offsets:
                continue

            seen_sec_offsets.add(offset)
            section = PeriodicalIndexEntry(offset, label, klass, 1)
            section.parent_index = 0

            for art in sec:
                try:
                    offset = id_offsets[art.href]
                    label = self.cncx[art.title]
                    klass = self.cncx[art.klass]
                except:
                    continue
                if offset in seen_art_offsets:
                    continue
                seen_art_offsets.add(offset)
                article = PeriodicalIndexEntry(offset, label, klass, 2)
                normalized_articles.append(article)
                article.author_offset = self.cncx[art.author]
                article.desc_offset = self.cncx[art.description]
                if getattr(art, "toc_thumbnail", None) is not None:
                    try:
                        ii = self.serializer.images[art.toc_thumbnail] - 1
                        if ii > -1:
                            article.image_index = ii
                    except KeyError:
                        pass  # Image not found in serializer

            if normalized_articles:
                normalized_articles.sort(key=lambda x: x.offset)
                normalized_sections.append((section, normalized_articles))

        normalized_sections.sort(key=lambda x: x[0].offset)

        # Set lengths
        for s, x in enumerate(normalized_sections):
            sec, normalized_articles = x
            try:
                sec.length = normalized_sections[s + 1][0].offset - sec.offset
            except:
                sec.length = self.serializer.body_end_offset - sec.offset
            for i, art in enumerate(normalized_articles):
                try:
                    art.length = normalized_articles[i + 1].offset - art.offset
                except:
                    art.length = sec.offset + sec.length - art.offset
                    if art.length < 0:
                        self.log.warn("len of article invalid, set to zero.")
                        art.length = 0

        # Filter
        for i, x in list(enumerate(normalized_sections)):
            sec, normalized_articles = x
            # NOTE(review): under Python 3 ``filter`` returns an iterator,
            # which would break the indexing below; fine on Python 2.
            normalized_articles = filter(lambda x: x.length > 0, normalized_articles)
            normalized_sections[i] = (sec, normalized_articles)

        normalized_sections = filter(
            lambda x: x[0].length > 0 and x[1], normalized_sections
        )

        # Set indices
        i = 0
        # Sections are numbered first (1..N), then all articles follow.
        for sec, articles in normalized_sections:
            i += 1
            sec.index = i
            sec.parent_index = 0

        for sec, articles in normalized_sections:
            for art in articles:
                i += 1
                art.index = i
                art.parent_index = sec.index

        for sec, normalized_articles in normalized_sections:
            sec.first_child_index = normalized_articles[0].index
            sec.last_child_index = normalized_articles[-1].index

        # Set lengths again to close up any gaps left by filtering
        for s, x in enumerate(normalized_sections):
            sec, articles = x
            try:
                next_offset = normalized_sections[s + 1][0].offset
            except:
                next_offset = self.serializer.body_end_offset
            sec.length = next_offset - sec.offset

            for a, art in enumerate(articles):
                try:
                    next_offset = articles[a + 1].offset
                except:
                    next_offset = sec.next_offset
                art.length = next_offset - art.offset

        # Sanity check
        for s, x in enumerate(normalized_sections):
            sec, articles = x
            try:
                next_sec = normalized_sections[s + 1][0]
            except:
                if (
                    sec.length == 0
                    or sec.next_offset != self.serializer.body_end_offset
                ):
                    raise ValueError("Invalid section layout")
            else:
                if next_sec.offset != sec.next_offset or sec.length == 0:
                    raise ValueError("Invalid section layout")
            for a, art in enumerate(articles):
                try:
                    next_art = articles[a + 1]
                except:
                    if art.length == 0 or art.next_offset != sec.next_offset:
                        raise ValueError("Invalid article layout")
                else:
                    if art.length == 0 or art.next_offset != next_art.offset:
                        raise ValueError("Invalid article layout")

        # Flatten
        indices = [periodical]
        for sec, articles in normalized_sections:
            indices.append(sec)
            periodical.last_child_index = sec.index

        for sec, articles in normalized_sections:
            for a in articles:
                indices.append(a)

        return indices

    # }}}

    # TBS {{{
    def calculate_trailing_byte_sequences(self):
        """Classify every index node against every text record (starts in,
        ends in, completes within, or spans it) and build a TBS object per
        record in ``self.tbs_map`` (keyed by 1-based record number)."""
        self.tbs_map = {}
        found_node = False
        sections = [i for i in self.indices if i.depth == 1]
        section_map = OrderedDict(
            (i.index, i) for i in sorted(sections, key=lambda x: x.offset)
        )

        deepest = max(i.depth for i in self.indices)

        # NOTE(review): ``xrange`` — Python 2 only.
        for i in xrange(self.number_of_text_records):
            offset = i * RECORD_SIZE
            next_offset = offset + RECORD_SIZE
            data = {
                "ends": [],
                "completes": [],
                "starts": [],
                "spans": None,
                "offset": offset,
                "record_number": i + 1,
            }

            for index in self.indices:
                if index.offset >= next_offset:
                    # Node starts after current record
                    if index.depth == deepest:
                        break
                    else:
                        continue
                if index.next_offset <= offset:
                    # Node ends before current record
                    continue
                if index.offset >= offset:
                    # Node starts in current record
                    if index.next_offset <= next_offset:
                        # Node ends in current record
                        data["completes"].append(index)
                    else:
                        data["starts"].append(index)
                else:
                    # Node starts before current records
                    if index.next_offset <= next_offset:
                        # Node ends in current record
                        data["ends"].append(index)
                    elif index.depth == deepest:
                        data["spans"] = index

            if (
                data["ends"]
                or data["completes"]
                or data["starts"]
                or data["spans"] is not None
            ):
                self.tbs_map[i + 1] = TBS(
                    data,
                    self.is_periodical,
                    first=not found_node,
                    section_map=section_map,
                )
                found_node = True
            else:
                # No index node touches this record.
                self.tbs_map[i + 1] = TBS(
                    {},
                    self.is_periodical,
                    first=False,
                    after_first=found_node,
                    section_map=section_map,
                )

    def get_trailing_byte_sequence(self, num):
        """Return the encoded TBS bytes for 1-based text record *num*."""
        return self.tbs_map[num].bytestring

    # }}}


# }}}
|
chardet | codingstatemachine | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import logging
from .enums import MachineState
class CodingStateMachine(object):
    """
    A state machine that verifies a byte sequence against one particular
    encoding. Every byte the detector receives is fed to each active state
    machine, one byte at a time; the machine transitions based on its
    previous state and the incoming byte. Three states matter to an
    auto-detector:

    START state: the initial state, also reached whenever a legal byte
                 sequence (a complete, valid code point) has been seen.
    ME state:    the machine recognised a byte sequence unique to its
                 charset — no other encoding can contain it — which leads
                 to an immediate positive answer from the detector.
    ERROR state: the machine saw a byte sequence that is illegal for this
                 encoding, giving an immediate negative answer; the
                 detector drops this encoding from consideration.
    """

    def __init__(self, sm):
        self.logger = logging.getLogger(__name__)
        self._model = sm
        self._curr_byte_pos = 0
        self._curr_char_len = 0
        self._curr_state = None
        self.reset()

    def reset(self):
        """Return the machine to its START state."""
        self._curr_state = MachineState.START

    def next_state(self, c):
        """Feed one byte *c* and return the resulting machine state."""
        # Classify the byte via the model's class table.
        byte_cls = self._model["class_table"][c]
        if self._curr_state == MachineState.START:
            # First byte of a new character: record its expected length.
            self._curr_byte_pos = 0
            self._curr_char_len = self._model["char_len_table"][byte_cls]
        # The transition table is flattened: row = state, column = class.
        transition = self._curr_state * self._model["class_factor"] + byte_cls
        self._curr_state = self._model["state_table"][transition]
        self._curr_byte_pos += 1
        return self._curr_state

    def get_current_charlen(self):
        """Length (in bytes) of the character currently being decoded."""
        return self._curr_char_len

    def get_coding_state_machine(self):
        """Name of the encoding this machine models."""
        return self._model["name"]

    @property
    def language(self):
        return self._model["language"]
|
server | workflow | """Workflow decoder and validator.
The main function to start working with this module is ``load``. It decodes the
JSON-encoded bytes and validates the document against the schema.
>>> import workflow
>>> with open("workflow.json") as file_object:
...     wf = workflow.load(file_object)
If the document cannot be validated, ``jsonschema.ValidationError`` is raised.
Otherwise, ``load`` will return an instance of ``Workflow`` which is used in
MCPServer to read workflow links that can be instances of three different
classes ``Chain``, ``Link`` and ``WatchedDir``. They have different method
sets.
"""
import json
import os
from django.conf import settings as django_settings
from jsonschema import FormatChecker, validate
from jsonschema.exceptions import ValidationError
from server.jobs import Job
from server.translation import FALLBACK_LANG, TranslationLabel
_LATEST_SCHEMA = "workflow-schema-v1.json"
ASSETS_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(__file__)))), "assets"
)
DEFAULT_WORKFLOW = os.path.join(ASSETS_DIR, "workflow.json")
def _invert_job_statuses():
    """Return an inverted dict of job statuses, i.e. indexed by labels."""
    # Each status is a (value, label) pair; index by the stringified label.
    return {str(label): value for value, label in Job.STATUSES}
# Job statuses (from ``Job.STATUSES``) indexed by the English labels.
# This is useful when decoding the values used in the JSON-encoded workflow
# where we're using labels instead of IDs. Computed once at import time.
_STATUSES = _invert_job_statuses()
class Workflow:
    """In-memory representation of a parsed workflow document.

    Wraps the raw parsed JSON object and exposes its chains, links and
    watched directories as ``Chain``, ``Link`` and ``WatchedDir`` objects.
    """

    def __init__(self, parsed_obj):
        self._src = parsed_obj
        self._decode_chains()
        self._decode_links()
        self._decode_wdirs()

    def __str__(self):
        return (
            f"Chains {len(self.chains)}, links {len(self.links)},"
            f" watched directories: {len(self.wdirs)}"
        )

    def _decode_chains(self):
        # Wrap every chain object in a ``Chain``, keyed by its ID.
        self.chains = {
            chain_id: Chain(chain_id, chain_obj, self)
            for chain_id, chain_obj in self._src["chains"].items()
        }

    def _decode_links(self):
        self.links = {
            link_id: Link(link_id, link_obj, self)
            for link_id, link_obj in self._src["links"].items()
        }

    def _decode_wdirs(self):
        self.wdirs = [
            WatchedDir(wdir_obj, self) for wdir_obj in self._src["watched_directories"]
        ]

    def get_chains(self):
        return self.chains

    def get_links(self):
        return self.links

    def get_wdirs(self):
        return self.wdirs

    def get_chain(self, chain_id):
        return self.chains[chain_id]

    def get_link(self, link_id):
        return self.links[link_id]
class BaseLink:
    """Shared behaviour for workflow items (chains, links, watched dirs)."""

    # Sentinel distinguishing "key absent" from a stored ``None`` value.
    _MISSING = object()

    def __str__(self):
        return self.id

    def get_label(self, key, lang=FALLBACK_LANG, fallback_label=None):
        """Proxy to find translated attributes."""
        instance = self._src.get(key, self._MISSING)
        if instance is self._MISSING:
            return None
        return instance.get_label(lang, fallback_label)

    def _decode_translation(self, translation_dict):
        return TranslationLabel(translation_dict)

    @property
    def workflow(self):
        return self._workflow
class Chain(BaseLink):
    """A chain: a named group pointing at an entry-point link."""

    def __init__(self, id_, attrs, workflow):
        self.id = id_
        self._src = attrs
        self._workflow = workflow
        self._decode_translations()

    def __repr__(self):
        return "Chain <{}>".format(self.id)

    def __getitem__(self, key):
        return self._src[key]

    def _decode_translations(self):
        # Descriptions come in as translation dictionaries.
        src = self._src
        src["description"] = self._decode_translation(src["description"])

    @property
    def link(self):
        """Entry-point link of this chain."""
        return self._workflow.get_link(self._src["link_id"])
class Link(BaseLink):
    """A single workflow link (task), as described in the JSON document."""

    def __init__(self, id_, attrs, workflow):
        self.id = id_
        self._src = attrs
        self._workflow = workflow
        self._decode_job_statuses()
        self._decode_translations()

    def __repr__(self):
        return "Link <{}>".format(self.id)

    def __getitem__(self, key):
        return self._src[key]

    def _decode_job_statuses(self):
        """Replace status labels with their IDs.

        In JSON, a job status is encoded using its English label, e.g.
        "Failed" instead of the corresponding value in ``JOB.STATUS_FAILED``.
        This method decodes the statuses so it becomes easier to work with
        them internally.
        """
        src = self._src
        src["fallback_job_status"] = _STATUSES[src["fallback_job_status"]]
        for exit_code_obj in src["exit_codes"].values():
            exit_code_obj["job_status"] = _STATUSES[exit_code_obj["job_status"]]

    def _decode_translations(self):
        src = self._src
        for attr in ("description", "group"):
            src[attr] = self._decode_translation(src[attr])
        config = src["config"]
        if config["@manager"] == "linkTaskManagerReplacementDicFromChoice":
            for item in config["replacements"]:
                item["description"] = self._decode_translation(item["description"])

    @property
    def config(self):
        return self._src["config"]

    @property
    def is_terminal(self):
        """Check if the link is indicated as a terminal link."""
        return self._src.get("end", False)

    def _exit_code_value(self, code, field, fallback_key):
        # Look up *field* for the given exit code, falling back to the
        # link-level default when the code (or field) is not declared.
        try:
            return self._src["exit_codes"][str(code)][field]
        except KeyError:
            return self._src[fallback_key]

    def get_next_link(self, code):
        """Return the link that follows this one for the given exit code."""
        return self._workflow.get_link(
            self._exit_code_value(code, "link_id", "fallback_link_id")
        )

    def get_status_id(self, code):
        """Return the expected Job status ID given an exit code."""
        return self._exit_code_value(code, "job_status", "fallback_job_status")
class WatchedDir(BaseLink):
    """A watched directory entry: a filesystem path tied to a chain."""

    def __init__(self, attrs, workflow):
        self._src = attrs
        self._workflow = workflow
        self.path = attrs["path"]

    def __str__(self):
        return self.path

    def __repr__(self):
        return "Watched directory <{}>".format(self.path)

    def __getitem__(self, key):
        return self._src[key]

    @property
    def only_dirs(self):
        # Coerce the JSON value to a real boolean.
        return bool(self._src["only_dirs"])

    @property
    def unit_type(self):
        return self._src["unit_type"]

    @property
    def chain(self):
        """Chain triggered for new content in this directory."""
        return self._workflow.get_chain(self._src["chain_id"])
class WorkflowJSONDecoder(json.JSONDecoder):
    """JSON decoder that yields ``Workflow`` instances instead of dicts."""

    def decode(self, foo, **kwargs):
        # Decode with the stock decoder first, then wrap the result.
        return Workflow(super().decode(foo, **kwargs))
def load(fp):
    """Read a JSON workflow from *fp*, validate it and decode it.

    Raises ``SchemaValidationError`` when the document does not conform
    to the workflow schema.
    """
    document = fp.read()  # read once; validated and decoded from the same text
    _validate(document)
    return json.loads(document, cls=WorkflowJSONDecoder)
def load_workflow():
    """Load the workflow file configured in settings, or the bundled default."""
    override = django_settings.WORKFLOW_FILE
    workflow_path = override if override != "" else DEFAULT_WORKFLOW
    with open(workflow_path) as workflow_file:
        return load(workflow_file)
class SchemaValidationError(ValidationError):
    """Raised when a workflow document fails schema validation.

    It wraps ``jsonschema.exceptions.ValidationError`` so callers can catch
    a type defined by this module.
    """
def _validate(blob):
    """Decode *blob* and validate it against the workflow schema.

    Raises ``SchemaValidationError`` on failure.
    """
    document = json.loads(blob)
    schema = _get_schema()
    try:
        validate(document, schema, format_checker=FormatChecker())
    except ValidationError as err:
        # Re-raise under the project-specific type, keeping the details.
        raise SchemaValidationError(**err._contents())
def _get_schema():
    """Load and return the default workflow schema as a dict."""
    schema_path = os.path.join(ASSETS_DIR, _LATEST_SCHEMA)
    with open(schema_path) as schema_file:
        return json.load(schema_file)
|
versions | 0366ba6575ca_add_table_for_comments | """Add table for comments
Revision ID: 0366ba6575ca
Revises: 1093835a1051
Create Date: 2020-08-14 00:46:54.161120
"""
import sqlalchemy as sa
from alembic import op # noqa: I001
# revision identifiers, used by Alembic.
revision = "0366ba6575ca"  # this migration's ID
down_revision = "1093835a1051"  # the migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``comments`` table.

    A comment may be attached to a challenge, user, team or page, and
    records its author; all foreign keys cascade on delete.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "comments",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("type", sa.String(length=80), nullable=True),
        sa.Column("content", sa.Text(), nullable=True),
        sa.Column("date", sa.DateTime(), nullable=True),
        sa.Column("author_id", sa.Integer(), nullable=True),
        sa.Column("challenge_id", sa.Integer(), nullable=True),
        sa.Column("user_id", sa.Integer(), nullable=True),
        sa.Column("team_id", sa.Integer(), nullable=True),
        sa.Column("page_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["author_id"], ["users.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["challenge_id"], ["challenges.id"], ondelete="CASCADE"
        ),
        sa.ForeignKeyConstraint(["page_id"], ["pages.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["team_id"], ["teams.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``comments`` table, reverting :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("comments")
    # ### end Alembic commands ###
|
example-iauthfunctions | plugin_v4 | # encoding: utf-8
from typing import Optional, cast
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
from ckan.types import AuthResult, Context, ContextValidator, DataDict
def group_create(context: Context, data_dict: Optional[DataDict] = None) -> AuthResult:
    """Authorize group creation: only members of the 'curators' group may
    create new groups.

    Returns an ``AuthResult`` dict with a ``success`` flag and, on refusal,
    a human-readable ``msg`` explaining why.
    """
    # Get the user name of the logged-in user.
    user_name = context["user"]

    # Get a list of the members of the 'curators' group.
    try:
        members = toolkit.get_action("member_list")(
            {}, {"id": "curators", "object_type": "user"}
        )
    except toolkit.ObjectNotFound:
        # The curators group doesn't exist.
        # (Fixed grammar in the user-facing message: "groups" -> "group".)
        return {
            "success": False,
            "msg": "The curators group doesn't exist, so only sysadmins "
            "are authorized to create groups.",
        }

    # 'members' is a list of (user_id, object_type, capacity) tuples, we're
    # only interested in the user_ids.
    member_ids = [member_tuple[0] for member_tuple in members]

    # We have the logged-in user's user name, get their user id.
    convert_user_name_or_id_to_id = cast(
        ContextValidator, toolkit.get_converter("convert_user_name_or_id_to_id")
    )
    try:
        user_id = convert_user_name_or_id_to_id(user_name, context)
    except toolkit.Invalid:
        # The user doesn't exist (e.g. they're not logged-in).
        return {
            "success": False,
            "msg": "You must be logged-in as a member of the curators "
            "group to create new groups.",
        }

    # Finally, we can test whether the user is a member of the curators group.
    if user_id in member_ids:
        return {"success": True}
    else:
        return {"success": False, "msg": "Only curators are allowed to create groups"}
class ExampleIAuthFunctionsPlugin(plugins.SingletonPlugin):
    """CKAN plugin that overrides the ``group_create`` auth function."""

    plugins.implements(plugins.IAuthFunctions)

    def get_auth_functions(self):
        # Map auth function names to the custom implementations above.
        return {"group_create": group_create}
|
aliceVision | StructureFromMotion | __version__ = "3.3"
from meshroom.core import desc
class StructureFromMotion(desc.AVCommandLineNode):
    # Meshroom node wrapping the AliceVision incremental SfM command line.
    commandLine = "aliceVision_incrementalSfM {allParams}"
    size = desc.DynamicNodeSize("input")

    category = "Sparse Reconstruction"
    documentation = """
This node will analyze feature matches to understand the geometric relationship behind all the 2D observations,
and infer the rigid scene structure (3D points) with the pose (position and orientation) and internal calibration of all cameras.
The pipeline is a growing reconstruction process (called incremental SfM): it first computes an initial two-view reconstruction that is iteratively extended by adding new views.
1/ Fuse 2-View Matches into Tracks
It fuses all feature matches between image pairs into tracks. Each track represents a candidate point in space, visible from multiple cameras.
However, at this step of the pipeline, it still contains many outliers.
2/ Initial Image Pair
It chooses the best initial image pair. This choice is critical for the quality of the final reconstruction.
It should indeed provide robust matches and contain reliable geometric information.
So, this image pair should maximize the number of matches and the repartition of the corresponding features in each image.
But at the same time, the angle between the cameras should also be large enough to provide reliable geometric information.
3/ Initial 2-View Geometry
It computes the fundamental matrix between the 2 images selected and consider that the first one is the origin of the coordinate system.
4/ Triangulate
Now with the pose of the 2 first cameras, it triangulates the corresponding 2D features into 3D points.
5/ Next Best View Selection
After that, it selects all the images that have enough associations with the features that are already reconstructed in 3D.
6/ Estimate New Cameras
Based on these 2D-3D associations it performs the resectioning of each of these new cameras.
The resectioning is a Perspective-n-Point algorithm (PnP) in a RANSAC framework to find the pose of the camera that validates most of the features associations.
On each camera, a non-linear minimization is performed to refine the pose.
7/ Triangulate
From these new cameras poses, some tracks become visible by 2 or more resected cameras and it triangulates them.
8/ Optimize
It performs a Bundle Adjustment to refine everything: extrinsics and intrinsics parameters of all cameras as well as the position of all 3D points.
It filters the results of the Bundle Adjustment by removing all observations that have high reprojection error or insufficient angles between observations.
9/ Loop from 5 to 9
As we have triangulated new points, we get more image candidates for next best views selection and we can iterate from 5 to 9.
It iterates like that, adding cameras and triangulating new 2D features into 3D points and removing 3D points that became invalidated, until we cannot localize new views.
## Online
[https://alicevision.org/#photogrammetry/sfm](https://alicevision.org/#photogrammetry/sfm)
"""

    # Node input attributes; uid=[0] marks values that invalidate the
    # node's cache when changed.
    inputs = [
        desc.File(
            name="input",
            label="SfMData",
            description="SfMData file.",
            value="",
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="featuresFolder",
                label="Features Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="featuresFolders",
            label="Features Folders",
            description="Folder(s) containing the extracted features and descriptors.",
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="matchesFolder",
                label="Matches Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="matchesFolders",
            label="Matches Folders",
            description="Folder(s) in which the computed matches are stored.",
        ),
        desc.ChoiceParam(
            name="describerTypes",
            label="Describer Types",
            description="Describer types used to describe an image.",
            value=["dspsift"],
            values=[
                "sift",
                "sift_float",
                "sift_upright",
                "dspsift",
                "akaze",
                "akaze_liop",
                "akaze_mldb",
                "cctag3",
                "cctag4",
                "sift_ocv",
                "akaze_ocv",
                "tag16h5",
            ],
            exclusive=False,
            uid=[0],
            joinChar=",",
        ),
        desc.ChoiceParam(
            name="localizerEstimator",
            label="Localizer Estimator",
            description="Estimator type used to localize cameras (acransac, ransac, lsmeds, loransac, maxconsensus).",
            value="acransac",
            values=["acransac", "ransac", "lsmeds", "loransac", "maxconsensus"],
            exclusive=True,
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name="observationConstraint",
            label="Observation Constraint",
            description="Observation constraint mode used in the optimization:\n"
            " - Basic: Use standard reprojection error in pixel coordinates.\n"
            " - Scale: Use reprojection error in pixel coordinates but relative to the feature scale.",
            value="Scale",
            values=["Basic", "Scale"],
            exclusive=True,
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="localizerEstimatorMaxIterations",
            label="Localizer Max Ransac Iterations",
            description="Maximum number of iterations allowed in the Ransac step.",
            value=4096,
            range=(1, 20000, 1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name="localizerEstimatorError",
            label="Localizer Max Ransac Error",
            description="Maximum error (in pixels) allowed for camera localization (resectioning).\n"
            "If set to 0, it will select a threshold according to the localizer estimator used\n"
            "(if ACRansac, it will analyze the input data to select the optimal value).",
            value=0.0,
            range=(0.0, 100.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name="lockScenePreviouslyReconstructed",
            label="Lock Previously Reconstructed Scene",
            description="Lock previously reconstructed poses and intrinsics.\n"
            "This option is useful for SfM augmentation.",
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name="useLocalBA",
            label="Local Bundle Adjustment",
            description="It reduces the reconstruction time, especially for large datasets (500+ images),\n"
            "by avoiding computation of the Bundle Adjustment on areas that are not changing.",
            value=True,
            uid=[0],
        ),
        # NOTE(review): default value 1 lies outside the declared range
        # (2, 10, 1) — confirm upstream intent before changing either.
        desc.IntParam(
            name="localBAGraphDistance",
            label="LocalBA Graph Distance",
            description="Graph-distance limit to define the active region in the Local Bundle Adjustment strategy.",
            value=1,
            range=(2, 10, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="nbFirstUnstableCameras",
            label="First Unstable Cameras Nb",
            description="Number of cameras for which the bundle adjustment is performed every single time a camera is added.\n"
            "This leads to more stable results while computations are not too expensive, as there is little data.\n"
            "Past this number, the bundle adjustment will only be performed once for N added cameras.",
            value=30,
            range=(0, 100, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="maxImagesPerGroup",
            label="Max Images Per Group",
            description="Maximum number of cameras that can be added before the bundle adjustment has to be performed again.\n"
            "This prevents adding too much data at once without performing the bundle adjustment.",
            value=30,
            range=(0, 100, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="bundleAdjustmentMaxOutliers",
            label="Max Nb Of Outliers After BA",
            description="Threshold for the maximum number of outliers allowed at the end of a bundle adjustment iteration.\n"
            "Using a negative value for this threshold will disable BA iterations.",
            value=50,
            range=(-1, 1000, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="maxNumberOfMatches",
            label="Maximum Number Of Matches",
            description="Maximum number of matches per image pair (and per feature type).\n"
            "This can be useful to have a quick reconstruction overview.\n"
            "0 means no limit.",
            value=0,
            range=(0, 50000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="minNumberOfMatches",
            label="Minimum Number Of Matches",
            description="Minimum number of matches per image pair (and per feature type).\n"
            "This can be useful to have a meaningful reconstruction with accurate keypoints.\n"
            "0 means no limit.",
            value=0,
            range=(0, 50000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="minInputTrackLength",
            label="Min Input Track Length",
            description="Minimum track length in input of SfM.",
            value=2,
            range=(2, 10, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="minNumberOfObservationsForTriangulation",
            label="Min Observations For Triangulation",
            description="Minimum number of observations to triangulate a point.\n"
            "Setting it to 3 (or more) reduces drastically the noise in the point cloud,\n"
            "but the number of final poses is a little bit reduced\n"
            "(from 1.5% to 11% on the tested datasets).",
            value=2,
            range=(2, 10, 1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name="minAngleForTriangulation",
            label="Min Angle For Triangulation",
            description="Minimum angle for triangulation.",
            value=3.0,
            range=(0.1, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name="minAngleForLandmark",
            label="Min Angle For Landmark",
            description="Minimum angle for landmark.",
            value=2.0,
            range=(0.1, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name="maxReprojectionError",
            label="Max Reprojection Error",
            description="Maximum reprojection error.",
            value=4.0,
            range=(0.1, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name="minAngleInitialPair",
            label="Min Angle Initial Pair",
            description="Minimum angle for the initial pair.",
            value=5.0,
            range=(0.1, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name="maxAngleInitialPair",
            label="Max Angle Initial Pair",
            description="Maximum angle for the initial pair.",
            value=40.0,
            range=(0.1, 60.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name="useOnlyMatchesFromInputFolder",
            label="Use Only Matches From Input Folder",
            description="Use only matches from the input matchesFolder parameter.\n"
            "Matches folders previously added to the SfMData file will be ignored.",
            value=False,
            uid=[],
            advanced=True,
        ),
        desc.BoolParam(
            name="useRigConstraint",
            label="Use Rig Constraint",
            description="Enable/Disable rig constraint.",
            value=True,
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="rigMinNbCamerasForCalibration",
            label="Min Nb Cameras For Rig Calibration",
            description="Minimum number of cameras to start the calibration of the rig.",
            value=20,
            range=(1, 50, 1),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name="lockAllIntrinsics",
            label="Lock All Intrinsic Camera Parameters",
            description="Force to keep all the intrinsic parameters of the cameras (focal length, \n"
            "principal point, distortion if any) constant during the reconstruction.\n"
            "This may be helpful if the input cameras are already fully calibrated.",
            value=False,
            uid=[0],
        ),
        desc.IntParam(
            name="minNbCamerasToRefinePrincipalPoint",
            label="Min Nb Cameras To Refine Principal Point",
            # BUGFIX: the third line of this description was missing its
            # trailing "\n", so two sentences ran together in the tooltip.
            description="Minimum number of cameras to refine the principal point of the cameras (one of the intrinsic parameters of the camera).\n"
            "If we do not have enough cameras, the principal point is considered to be in the center of the image.\n"
            "If minNbCamerasToRefinePrincipalPoint <= 0, the principal point is never refined.\n"
            "If minNbCamerasToRefinePrincipalPoint is set to 1, the principal point is always refined.",
            value=3,
            range=(0, 20, 1),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name="filterTrackForks",
            label="Filter Track Forks",
            description="Enable/Disable the track forks removal. A track contains a fork when incoherent matches \n"
            "lead to multiple features in the same image for a single track.",
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name="computeStructureColor",
            label="Compute Structure Color",
            description="Enable/Disable color computation of every 3D point.",
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name="useAutoTransform",
            label="Automatic Alignment",
            description="Enable/Disable automatic alignment of the 3D reconstruction.\n"
            "Determines scene orientation from the cameras' X axis,\n"
            "determines north and scale from GPS information if available,\n"
            "and defines ground level from the point cloud.",
            value=True,
            uid=[0],
        ),
        desc.File(
            name="initialPairA",
            label="Initial Pair A",
            description="View ID or filename of the first image (either with or without the full path).",
            value="",
            uid=[0],
        ),
        desc.File(
            name="initialPairB",
            label="Initial Pair B",
            description="View ID or filename of the second image (either with or without the full path).",
            value="",
            uid=[0],
        ),
        desc.ChoiceParam(
            name="interFileExtension",
            label="Inter File Extension",
            description="Extension of the intermediate file export.",
            value=".abc",
            values=(".abc", ".ply"),
            exclusive=True,
            uid=[],
            advanced=True,
        ),
        desc.BoolParam(
            name="logIntermediateSteps",
            label="Log Intermediate Steps",
            description="Dump the current state of the scene as an SfMData file every 3 resections.",
            value=False,
            uid=[],
            advanced=True,
        ),
        desc.ChoiceParam(
            name="verboseLevel",
            label="Verbose Level",
            description="Verbosity level (fatal, error, warning, info, debug, trace).",
            value="info",
            values=["fatal", "error", "warning", "info", "debug", "trace"],
            exclusive=True,
            uid=[],
        ),
    ]

    # Node output attributes, all written inside the node's internal folder.
    outputs = [
        desc.File(
            name="output",
            label="SfMData",
            description="Path to the output SfM point cloud file (in SfMData format).",
            value=desc.Node.internalFolder + "sfm.abc",
            uid=[],
        ),
        desc.File(
            name="outputViewsAndPoses",
            label="Views And Poses",
            description="Path to the output SfMData file with cameras (views and poses).",
            value=desc.Node.internalFolder + "cameras.sfm",
            uid=[],
        ),
        desc.File(
            name="extraInfoFolder",
            label="Folder",
            description="Folder for intermediate reconstruction files and additional reconstruction information files.",
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
|
PyObjCTest | test_cfsocket | import socket
import struct
import sys
import time
import CoreFoundation
from CoreFoundation import *
from PyObjCTools.TestSupport import *
# Python 2/3 compatibility shims: on Python 3 the Python 2 builtins
# `unicode`, `long` and `buffer` no longer exist, so alias each missing
# name to its closest Python 3 equivalent and let the tests below use a
# single set of names on both versions.
try:
    unicode
except NameError:
    unicode = str

try:
    long
except NameError:
    long = int

try:
    buffer
except NameError:
    buffer = memoryview
def onTheNetwork():
    """Return True when DNS resolution succeeds, i.e. we appear to be online."""
    try:
        socket.gethostbyname("www.apple.com")
        return True
    except socket.gaierror:
        return False
class TestSocket(TestCase):
    """Exercise the PyObjC bindings for the CoreFoundation CFSocket API."""

    def testTypes(self):
        """CFSocketRef is wrapped as a CoreFoundation object type."""
        self.assertIsCFType(CFSocketRef)

    def testTypeID(self):
        """CFSocketGetTypeID returns an integer type identifier."""
        self.assertIsInstance(CFSocketGetTypeID(), (int, long))

    def testConstants(self):
        """The CFSocket constants are exposed with their documented values."""
        self.assertEqual(kCFSocketSuccess, 0)
        self.assertEqual(kCFSocketError, -1)
        self.assertEqual(kCFSocketTimeout, -2)

        self.assertEqual(kCFSocketNoCallBack, 0)
        self.assertEqual(kCFSocketReadCallBack, 1)
        self.assertEqual(kCFSocketAcceptCallBack, 2)
        self.assertEqual(kCFSocketDataCallBack, 3)
        self.assertEqual(kCFSocketConnectCallBack, 4)
        self.assertEqual(kCFSocketWriteCallBack, 8)

        self.assertEqual(kCFSocketAutomaticallyReenableReadCallBack, 1)
        self.assertEqual(kCFSocketAutomaticallyReenableAcceptCallBack, 2)
        self.assertEqual(kCFSocketAutomaticallyReenableDataCallBack, 3)
        self.assertEqual(kCFSocketAutomaticallyReenableWriteCallBack, 8)
        self.assertEqual(kCFSocketCloseOnInvalidate, 128)

        # The name-registry keys and commands are CFString constants.
        self.assertIsInstance(kCFSocketCommandKey, unicode)
        self.assertIsInstance(kCFSocketNameKey, unicode)
        self.assertIsInstance(kCFSocketValueKey, unicode)
        self.assertIsInstance(kCFSocketResultKey, unicode)
        self.assertIsInstance(kCFSocketErrorKey, unicode)
        self.assertIsInstance(kCFSocketRegisterCommand, unicode)
        self.assertIsInstance(kCFSocketRetrieveCommand, unicode)
        self.assertEqual(kCFSocketLeaveErrors, 64)

    def testStructs(self):
        """CFSocketSignature is exposed as a struct with its four fields."""
        o = CFSocketSignature()
        self.assertHasAttr(o, "protocolFamily")
        self.assertHasAttr(o, "socketType")
        self.assertHasAttr(o, "protocol")
        self.assertHasAttr(o, "address")

    def testNameRegistry(self):
        """Setting the name-registry port is reflected by the getter.

        Restores the original port afterwards so other tests are unaffected.
        """
        p1 = CFSocketGetDefaultNameRegistryPortNumber()
        self.assertIsInstance(p1, (int, long))
        CFSocketSetDefaultNameRegistryPortNumber(p1 + 1)
        p2 = CFSocketGetDefaultNameRegistryPortNumber()
        self.assertIsInstance(p2, (int, long))
        self.assertEqual(p2, p1 + 1)
        CFSocketSetDefaultNameRegistryPortNumber(p1)

    @onlyIf(onTheNetwork(), "cannot test without internet connection")
    def testSocketFunctions(self):
        """End-to-end exercise of socket creation, connection and teardown.

        Requires network access: it resolves and connects to www.apple.com.
        """
        data = {}
        state = []

        def callback(sock, kind, address, data, info):
            # Record every callback invocation for later inspection.
            state.append((sock, kind, address, data, info))

        sock = CFSocketCreate(
            None,
            socket.AF_INET,
            socket.SOCK_STREAM,
            0,
            kCFSocketReadCallBack | kCFSocketWriteCallBack,
            callback,
            data,
        )
        self.assertIsInstance(sock, CFSocketRef)

        # Hand-packed struct sockaddr_in: length, family, port, 127.0.0.1,
        # padded to 16 bytes with zeros.
        localaddr = struct.pack(">BBHBBBB", 16, socket.AF_INET, 9425, 127, 0, 0, 1)
        localaddr += b"\0" * 8
        if sys.version_info[0] == 2:
            localaddr = buffer(localaddr)
        err = CFSocketSetAddress(sock, localaddr)
        self.assertEqual(err, kCFSocketSuccess)

        sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        try:
            # Wrap an existing native socket descriptor in a CFSocket.
            sock = CFSocketCreateWithNative(
                None,
                sd.fileno(),
                kCFSocketReadCallBack | kCFSocketWriteCallBack,
                callback,
                data,
            )
            self.assertIsInstance(sock, CFSocketRef)
            n = CFSocketGetNative(sock)
            self.assertIsInstance(n, (int, long))
            self.assertEqual(n, sd.fileno())

            # The context object passed at creation is returned unchanged.
            ctx = CFSocketGetContext(sock, None)
            self.assertIs(ctx, data)
            flags = CFSocketGetSocketFlags(sock)
            self.assertIsInstance(flags, (int, long))
            CFSocketSetSocketFlags(
                sock,
                kCFSocketAutomaticallyReenableReadCallBack
                | kCFSocketAutomaticallyReenableAcceptCallBack,
            )
            flags2 = CFSocketGetSocketFlags(sock)
            self.assertIsInstance(flags2, (int, long))
            self.assertEqual(
                flags2,
                kCFSocketAutomaticallyReenableReadCallBack
                | kCFSocketAutomaticallyReenableAcceptCallBack,
            )

            # Note: I don't expect anyone to actually use this api, building
            # struct sockaddr buffers by hand is madness in python.
            ip = socket.gethostbyname("www.apple.com")
            ip = map(int, ip.split("."))

            sockaddr = struct.pack(">BBHBBBB", 16, socket.AF_INET, 80, *ip)
            sockaddr += b"\0" * 8
            if sys.version_info[0] == 2:
                sockaddr = buffer(sockaddr)

            e = CFSocketConnectToAddress(sock, sockaddr, 1.0)
            self.assertIsInstance(e, (int, long))
            self.assertEqual(e, kCFSocketSuccess)

            self.assertResultIsCFRetained(CFSocketCopyPeerAddress)
            addr = CFSocketCopyPeerAddress(sock)
            self.assertIsInstance(addr, CFDataRef)
            self.assertResultIsCFRetained(CFSocketCopyAddress)
            addr = CFSocketCopyAddress(sock)
            self.assertIsInstance(addr, CFDataRef)

            CFSocketDisableCallBacks(
                sock, kCFSocketReadCallBack | kCFSocketAcceptCallBack
            )
            CFSocketEnableCallBacks(
                sock, kCFSocketReadCallBack | kCFSocketAcceptCallBack
            )

            if sys.version_info[0] == 2:
                err = CFSocketSendData(sock, None, buffer("GET / HTTP/1.0"), 1.0)
            else:
                err = CFSocketSendData(sock, None, b"GET / HTTP/1.0", 1.0)
            self.assertEqual(err, kCFSocketSuccess)

            # Invalidation flips the socket's validity flag.
            ok = CFSocketIsValid(sock)
            self.assertIs(ok, True)
            CFSocketInvalidate(sock)
            self.assertResultIsBOOL(CFSocketIsValid)
            ok = CFSocketIsValid(sock)
            self.assertIs(ok, False)

            # Create sockets from signatures (local bind, then connect).
            localaddr = struct.pack(">BBHBBBB", 16, socket.AF_INET, 9424, 127, 0, 0, 1)
            localaddr += b"\0" * 8
            signature = CFSocketSignature(
                socket.AF_INET, socket.SOCK_STREAM, 0, buffer(localaddr)
            )

            sock = CFSocketCreateWithSocketSignature(
                None,
                signature,
                kCFSocketReadCallBack | kCFSocketWriteCallBack,
                callback,
                data,
            )
            self.assertIsInstance(sock, CFSocketRef)

            signature = CFSocketSignature(
                socket.AF_INET, socket.SOCK_STREAM, 0, buffer(sockaddr)
            )
            sock = CFSocketCreateConnectedToSocketSignature(
                None,
                signature,
                kCFSocketReadCallBack | kCFSocketWriteCallBack,
                callback,
                data,
                1.0,
            )
            self.assertIsInstance(sock, CFSocketRef)

            self.assertResultIsCFRetained(CFSocketCreateRunLoopSource)
            src = CFSocketCreateRunLoopSource(None, sock, 0)
            self.assertIsInstance(src, CFRunLoopSourceRef)

        finally:
            sd.close()

    def testSocketNameServer(self):
        # The documentation says:
        #   Name server functionality is currently inoperable in Mac OS X.
        #
        # Therefore these functions are not available from Python
        self.assertNotHasAttr(CoreFoundation, "CFSocketCopyRegisteredSocketSignature")
        self.assertNotHasAttr(CoreFoundation, "CFSocketCopyRegisteredValue")
        self.assertNotHasAttr(CoreFoundation, "CFSocketRegisterSocketSignature")
        self.assertNotHasAttr(CoreFoundation, "CFSocketRegisterValue")
        self.assertNotHasAttr(CoreFoundation, "CFSocketUnregister")
if __name__ == "__main__":
    # main() comes from the `PyObjCTools.TestSupport` star-import above.
    main()
|
gui | scratchwindow | # This file is part of MyPaint.
# Copyright (C) 2011-2018 by the MyPaint Development Team.
# Copyright (C) 2011 by Ben O'Steen <bosteen@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Scratchpad panel"""
## Imports
from __future__ import division, print_function
import logging
from lib.gettext import gettext as _
from lib.gibindings import Gtk
from .toolstack import TOOL_WIDGET_NATURAL_HEIGHT_SHORT, SizedVBoxToolWidget
from .widgets import inline_toolbar
logger = logging.getLogger(__name__)
## Class defs
class ScratchpadTool(SizedVBoxToolWidget):
    """Dockable panel embedding the scratchpad canvas with its toolbar."""

    __gtype_name__ = "MyPaintScratchpadTool"

    SIZED_VBOX_NATURAL_HEIGHT = TOOL_WIDGET_NATURAL_HEIGHT_SHORT

    tool_widget_title = _("Scratchpad")
    tool_widget_icon_name = "mypaint-scratchpad-symbolic"
    tool_widget_description = _(
        "Mix colors and make sketches on " "separate scrap pages"
    )

    def __init__(self):
        # BUGFIX: the original called super(SizedVBoxToolWidget, self),
        # which skips SizedVBoxToolWidget's own __init__ in the MRO.
        super(ScratchpadTool, self).__init__()
        from gui.application import get_app

        app = get_app()
        self.app = app
        toolbar = inline_toolbar(
            app,
            [
                ("ScratchNew", "mypaint-add-symbolic"),
                ("ScratchLoad", None),
                ("ScratchSaveAs", "mypaint-document-save-symbolic"),
                ("ScratchRevert", "mypaint-document-revert-symbolic"),
            ],
        )
        scratchpad_view = app.scratchpad_doc.tdw
        scratchpad_view.set_size_request(64, 64)
        # Save the scratchpad when the panel goes away.  GTK delivers
        # (widget, event) to these handlers, hence *args on _save_cb.
        self.connect("destroy-event", self._save_cb)
        self.connect("delete-event", self._save_cb)
        scratchpad_box = Gtk.EventBox()
        scratchpad_box.add(scratchpad_view)
        self.pack_start(scratchpad_box, True, True, 0)
        self.pack_start(toolbar, False, True, 0)

    def _save_cb(self, *args):
        """Save the scratchpad document to its current file.

        BUGFIX: the previous signature (self, action) raised TypeError when
        GTK invoked it with the (widget, event) arguments of delete-event.
        """
        filename = self.app.scratchpad_filename
        logger.info("Saving the scratchpad to %r", filename)
        self.app.filehandler.save_scratchpad(filename)
|
soundconverter | setup | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# SoundConverter - GNOME application for converting between audio formats.
# Copyright 2004 Lars Wirzenius
# Copyright 2005-2020 Gautier Portet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import sys
# Fail fast with a helpful hint when the build dependency is missing.
try:
    import DistUtilsExtra.auto
except ImportError as e:
    sys.stderr.write("You need python-distutils-extra\n")
    # BUGFIX: file.write() requires a str; passing the exception object
    # raised a TypeError on Python 3 and masked the real error message.
    sys.stderr.write(str(e) + "\n")
    sys.exit(1)
import os
import DistUtilsExtra.auto
# This will automatically, assuming that the prefix is /usr
# - Compile and install po files to /usr/share/locale*.mo,
# - Install .desktop files to /usr/share/applications
# - Install all the py files to /usr/lib/python3.8/site-packages/soundconverter
# - Copy bin to /usr/bin
# - Copy the rest to /usr/share/soundconverter, like the .glade file
# Thanks to DistUtilsExtra (https://salsa.debian.org/python-team/modules/python-distutils-extra/-/tree/master/doc) # noqa
class Install(DistUtilsExtra.auto.install_auto):
    """Install step that additionally compiles the installed GSettings schemas."""

    def run(self):
        DistUtilsExtra.auto.install_auto.run(self)
        # DistUtilsExtra copies data/org.soundconverter.gschema.xml into
        # share/glib-2.0/schemas/ but never compiles it, so do that here.
        schemas_dir = os.path.join(self.install_data, "share/glib-2.0/schemas/")
        command = "glib-compile-schemas {}".format(schemas_dir)
        print("running {}".format(command))
        os.system(command)
# Package metadata; DistUtilsExtra.auto discovers po files, .desktop files,
# Python sources and data files automatically (see the comment block above).
DistUtilsExtra.auto.setup(
    name="soundconverter",
    version="4.0.4",
    description=(
        "A simple sound converter application for the GNOME environment. "
        "It writes WAV, FLAC, MP3, and Ogg Vorbis files."
    ),
    license="GPL-3.0",
    # Files that DistUtilsExtra does not place automatically.
    data_files=[
        ("share/metainfo/", ["data/soundconverter.appdata.xml"]),
        ("share/pixmaps/", ["data/soundconverter.png"]),
        ("share/icons/hicolor/scalable/apps/", ["data/soundconverter.svg"]),
    ],
    # Use the custom install step above so schemas get compiled.
    cmdclass={"install": Install},
    scripts=["bin/soundconverter"],
)
|
network | request | from __future__ import annotations
import json
import logging
from time import time
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union
from urllib.parse import urlencode
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtNetwork import QNetworkReply, QNetworkRequest
from tribler.gui.utilities import connect
REQUEST_ID = "_request_id"
if TYPE_CHECKING:
from tribler.gui.network.request_manager import RequestManager
DATA_TYPE = Optional[Union[bytes, str, Dict, List]]
def make_reply_errors_map() -> Dict[int, str]:
    """Build a mapping from QNetworkReply error codes to their attribute names.

    Attribute names like ``SomeError`` start with an uppercase letter and end
    with "Error", which excludes methods such as ``setError``; the isinstance
    check is an additional safety net keeping only integer codes.
    """
    return {
        code: name
        for name, code in (
            (attr, getattr(QNetworkReply, attr)) for attr in dir(QNetworkReply)
        )
        if name[0].isupper() and name.endswith("Error") and isinstance(code, int)
    }
reply_errors = make_reply_errors_map()
class Request(QObject):
    """A single HTTP request to the core, wrapping a QNetworkReply lifecycle."""

    GET = "GET"
    POST = "POST"
    PUT = "PUT"
    PATCH = "PATCH"
    DELETE = "DELETE"

    # This signal is called if we receive some real reply from the request
    # and if the user defined a callback to call on the received data.
    # We implement the callback as a signal call and not as a direct callback
    # because we want the request object be deleted independent of what happens
    # during the callback call.
    on_finished_signal = pyqtSignal(object)

    def __init__(
        self,
        endpoint: str,
        on_success: Callable = lambda _: None,
        url_params: Optional[Dict] = None,
        data: DATA_TYPE = None,
        method: str = GET,
        capture_errors: bool = True,
        priority=QNetworkRequest.NormalPriority,
        raw_response: bool = False,
    ):
        """Prepare (but do not send) a request for *endpoint*.

        :param on_success: called with the decoded reply body on success.
        :param raw_response: when True, deliver (bytes, content-type header)
            instead of decoded JSON.
        """
        super().__init__()
        self.logger = logging.getLogger(self.__class__.__name__)

        self.endpoint = endpoint
        self.url_params = url_params
        self.priority = priority
        self.method = method
        self.capture_errors = capture_errors
        self.raw_response = raw_response
        self.data = data
        # Serialize the payload to bytes once, up front.
        # BUGFIX: test against the builtin types; using typing.Dict/typing.List
        # in isinstance() is deprecated.
        if isinstance(data, (dict, list)):
            raw_data = json.dumps(data).encode("utf8")
        elif isinstance(data, str):
            raw_data = data.encode("utf8")
        else:
            raw_data = data
        self.raw_data: Optional[bytes] = raw_data

        connect(self.on_finished_signal, on_success)

        self.reply: Optional[
            QNetworkReply
        ] = None  # to hold the associated QNetworkReply object
        self.manager: Optional[RequestManager] = None
        self.url: str = ""
        self.time = time()
        self.status_code = 0
        self.status_text = "unknown"
        self.cancellable = True
        self.id = 0

    def set_manager(self, manager: RequestManager):
        """Attach the request to *manager* and derive its full URL."""
        self.manager = manager
        self._set_url(manager.get_base_url())

    def _set_url(self, base_url: str):
        """Compose the final URL from the base URL, endpoint and query params."""
        self.url = base_url + self.endpoint
        if self.url_params:
            # Encode True and False as "1" and "0" and not as "True" and "False"
            url_params = {
                key: int(value) if isinstance(value, bool) else value
                for key, value in self.url_params.items()
            }
            self.url += "?" + urlencode(url_params, doseq=True)

    def on_finished(self):
        """Handle reply completion: dispatch to HTTP or network-error handling."""
        if not self.reply or not self.manager:
            return

        self.logger.info(f"Finished: {self}")
        try:
            # If HTTP status code is available on the reply, we process that first.
            # This is because self.reply.error() is not always QNetworkReply.NoError even if there is HTTP response.
            # One example case is for HTTP Status Code 413 (HTTPRequestEntityTooLarge) for which QNetworkReply
            # error code is QNetworkReply.UnknownContentError
            if status_code := self.reply.attribute(
                QNetworkRequest.HttpStatusCodeAttribute
            ):
                self._handle_http_response(status_code)
                return

            # Process any other NetworkReply Error response.
            error_code = self.reply.error()
            self._handle_network_reply_errors(error_code)
        except Exception as e:  # pylint: disable=broad-except
            self.logger.exception(e)
            self.cancel()
        finally:
            self._delete()

    def _handle_network_reply_errors(self, error_code):
        """Record a QNetworkReply-level failure in status_code/status_text."""
        error_name = reply_errors.get(error_code, "<unknown error>")
        self.status_code = -error_code  # QNetworkReply errors are set negative to distinguish from HTTP response codes.
        # BUGFIX: status_text previously repeated the numeric code
        # (e.g. "-5: 5") instead of using the resolved error name.
        self.status_text = f"{self.status_code}: {error_name}"
        self.logger.warning(
            f"Request {self} finished with error: {self.status_code} ({error_name})"
        )

    def _handle_http_response(self, status_code):
        """Decode an HTTP response and emit the result to the success callback."""
        self.logger.debug(f"Update {self}: {status_code}")

        self.status_code = status_code
        self.status_text = str(status_code)

        data = bytes(self.reply.readAll())
        if self.raw_response:
            self.logger.debug("Create a raw response")
            header = self.reply.header(QNetworkRequest.ContentTypeHeader)
            self.on_finished_signal.emit((data, header))
            return

        if not data:
            self.logger.error(f"No data received in the reply for {self}")
            return

        self.logger.debug("Create a json response")
        result = json.loads(data)
        if isinstance(result, dict):
            # Tag the result so callers can correlate it with this request.
            result[REQUEST_ID] = self.id
        is_error = "error" in result
        if is_error and self.capture_errors:
            text = self.manager.show_error(self, result)
            raise Warning(text)

        self.on_finished_signal.emit(result)

    def cancel(self):
        """
        Cancel the request by aborting the reply handle
        """
        try:
            self.logger.warning(f"Request was canceled: {self}")
            if self.reply:
                self.reply.abort()
        finally:
            self._delete()

    def _delete(self):
        """
        Call Qt deletion procedure for the object and its member objects
        and remove the object from the request_manager's list of requests in flight
        """
        self.logger.debug(f"Delete for {self}")

        if self.manager:
            self.manager.remove(self)
            self.manager = None
        if self.reply:
            self.reply.deleteLater()
            self.reply = None

    def __str__(self):
        return f"{self.method} {self.url}"
|
extractor | atttechchannel | from __future__ import unicode_literals
from ..utils import unified_strdate
from .common import InfoExtractor
class ATTTechChannelIE(InfoExtractor):
    """Extractor for videos hosted on techchannel.att.com (RTMP streams)."""

    # The last path component after /play-video.cfm/ is the display id.
    _VALID_URL = r"https?://techchannel\.att\.com/play-video\.cfm/([^/]+/)*(?P<id>.+)"
    _TEST = {
        "url": "http://techchannel.att.com/play-video.cfm/2014/1/27/ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use",
        "info_dict": {
            "id": "11316",
            "display_id": "ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use",
            "ext": "flv",
            "title": "AT&T Archives : The UNIX System: Making Computers Easier to Use",
            "description": "A 1982 film about UNIX is the foundation for software in use around Bell Labs and AT&T.",
            "thumbnail": r"re:^https?://.*\.jpg$",
            "upload_date": "20140127",
        },
        "params": {
            # rtmp download
            "skip_download": True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # The player config embeds the stream location as: url : 'rtmp://...'
        video_url = self._search_regex(
            r"url\s*:\s*'(rtmp://[^']+)'", webpage, "video URL"
        )

        # Numeric media id; non-fatal because the display id still identifies
        # the video if this is absent.
        video_id = self._search_regex(
            r"mediaid\s*=\s*(\d+)", webpage, "video id", fatal=False
        )

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        # Release date appears as M/D/YYYY; the second argument disables
        # day-first parsing so it is read as month first.
        upload_date = unified_strdate(
            self._search_regex(
                r"[Rr]elease\s+date:\s*(\d{1,2}/\d{1,2}/\d{4})",
                webpage,
                "upload date",
                fatal=False,
            ),
            False,
        )

        return {
            "id": video_id,
            "display_id": display_id,
            "url": video_url,
            "ext": "flv",
            "title": title,
            "description": description,
            "thumbnail": thumbnail,
            "upload_date": upload_date,
        }
|
utils | cleantext | __license__ = "GPL 3"
__copyright__ = "2010, sengian <sengian1@gmail.com>"
__docformat__ = "restructuredtext en"
import re
import htmlentitydefs
from future_builtins import map
_ascii_pat = None
def clean_ascii_chars(txt, charlist=None):
    r"""
    Remove ASCII control chars.

    This is all control chars except \t, \n and \r

    :param txt: the string to clean; falsy input returns "".
    :param charlist: optional iterable of code points to remove instead of
        the default control-character set.
    """
    if not txt:
        return ""

    global _ascii_pat
    if _ascii_pat is None:
        # Lazily build and cache the default pattern: code points 0-31 plus
        # 127 (DEL), minus tab/newline/carriage-return.  (Python 2 code:
        # xrange/unichr.)
        chars = set(xrange(32))
        chars.add(127)
        for x in (9, 10, 13):
            chars.remove(x)
        _ascii_pat = re.compile("|".join(map(unichr, chars)))

    if charlist is None:
        pat = _ascii_pat
    else:
        # Custom character lists are compiled fresh each call (not cached).
        pat = re.compile("|".join(map(unichr, charlist)))
    return pat.sub("", txt)
def allowed(x):
    """Return True when the single character *x* may appear in XML text.

    Accepts tab/newline/carriage-return, the printable range above 31
    excluding DEL (127), and the two higher Unicode ranges used below;
    the boundary values themselves (0xD7FF, 0xE000, 0xFFFD, 0x10000,
    0x10FFFF) are excluded by the strict comparisons.
    """
    cp = ord(x)
    if cp in (9, 10, 13):
        return True
    if cp == 127:
        return False
    return 31 < cp < 0xD7FF or 0xE000 < cp < 0xFFFD or 0x10000 < cp < 0x10FFFF
def clean_xml_chars(unicode_string):
    """Strip characters not permitted in XML (as judged by allowed())."""
    return "".join(c for c in unicode_string if allowed(c))
# Fredrik Lundh: http://effbot.org/zone/re-sub.htm#unescape-html
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text, rm=False, rchar=""):
    """Replace HTML/XML character references and named entities in *text*
    with the characters they denote (Python 2 code: unichr/htmlentitydefs).

    :param rm: when True, replace unresolvable references with *rchar*
        instead of leaving them untouched.
    :param rchar: the replacement used when ``rm`` is True.
    """

    def fixup(m, rm=rm, rchar=rchar):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    # Hexadecimal form, e.g. &#x2019;
                    return unichr(int(text[3:-1], 16))
                else:
                    # Decimal form, e.g. &#8217;
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        if rm:
            return rchar  # replace by char
        return text  # leave as is

    # NOTE(review): the pattern is a non-raw string; "\w" passes through
    # unchanged here, but r"&#?\w+;" would be the conventional spelling.
    return re.sub("&#?\w+;", fixup, text)
|
migrations | 0005_auto_20220704_1947 | # Generated by Django 3.2.5 on 2022-07-04 19:47
import django.db.models.deletion
import django_migration_linter as linter
from django.db import migrations, models
class Migration(migrations.Migration):
    # Introduces the web-based schedule model (OnCallScheduleWeb, a
    # multi-table-inheritance child of OnCallSchedule), links custom shifts
    # to a schedule, and extends the shift type choices with "Rolling users"
    # and "Override". linter.IgnoreMigration() suppresses migration-linter
    # checks for this migration.

    dependencies = [
        ("schedules", "0004_customoncallshift_until"),
    ]

    operations = [
        linter.IgnoreMigration(),
        migrations.CreateModel(
            name="OnCallScheduleWeb",
            fields=[
                (
                    "oncallschedule_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="schedules.oncallschedule",
                    ),
                ),
                ("time_zone", models.CharField(default="UTC", max_length=100)),
            ],
            options={
                "abstract": False,
                "base_manager_name": "objects",
            },
            bases=("schedules.oncallschedule",),
        ),
        migrations.AddField(
            model_name="customoncallshift",
            name="schedule",
            field=models.ForeignKey(
                default=None,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="custom_shifts",
                to="schedules.oncallschedule",
            ),
        ),
        migrations.AlterField(
            model_name="customoncallshift",
            name="type",
            field=models.IntegerField(
                choices=[
                    (0, "Single event"),
                    (1, "Recurrent event"),
                    (2, "Rolling users"),
                    (3, "Override"),
                ]
            ),
        ),
    ]
|
scripts | example_post | # ***************************************************************************
# * Copyright (c) 2014 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import datetime
TOOLTIP = """
This is an example postprocessor file for the Path workbench. It is used
to save a list of FreeCAD Path objects to a file.
Read the Path Workbench documentation to know how to convert Path objects
to GCode.
"""

# Timestamp written into the generated G-code header by parse().
now = datetime.datetime.now()

# to distinguish python built-in open function from the one declared below
# (post-processor scripts may shadow `open`, so keep a reference to the
# real built-in under a different name).
if open.__module__ in ["__builtin__", "io"]:
    pythonopen = open
def export(objectslist, filename, argstring):
    """Called when FreeCAD exports a list of objects.

    Post-processes the single Path object in *objectslist* and writes the
    resulting G-code to *filename*. Prints an error and returns without
    writing anything if the input is unusable.
    """
    if len(objectslist) > 1:
        print("This script is unable to write more than one Path object")
        return
    obj = objectslist[0]
    if not hasattr(obj, "Path"):
        print("the given object is not a path")
        # Bug fix: previously execution fell through and crashed with an
        # AttributeError on obj.Path below.
        return
    gcode = parse(obj.Path.toGCode())
    # Context manager guarantees the file is closed even if write() fails.
    with pythonopen(filename, "w") as gfile:
        gfile.write(gcode)
def parse(inputstring):
    "parse(inputstring): returns a parsed output string"
    print("postprocessing...")
    # Accumulate output pieces in a list and join once at the end.
    pieces = [
        "N10 ;time:" + str(now) + "\n",
        "N20 G17 G20 G80 G40 G90\n",
        "N30 (Exported by FreeCAD)\n",
    ]
    linenr = 100
    lastcommand = None
    # Process the input one line at a time.
    for line in inputstring.split("\n"):
        # Separate the G/M command from its arguments; when the line has no
        # space, partition() yields the whole line as the command and "".
        command, _, args = line.partition(" ")
        # Prefix with a line number.
        pieces.append("N" + str(linenr) + " ")
        # Emit the command only when it differs from the previous one.
        if command != lastcommand:
            pieces.append(command + " ")
        pieces.append(args + "\n")
        linenr += 10
        lastcommand = command
    # Trailer: spindle stop, coolant off, park, reset modes, end program.
    pieces.append("N" + str(linenr) + " M05\n")
    pieces.append("N" + str(linenr + 10) + " M25\n")
    pieces.append("N" + str(linenr + 20) + " G00 X-1.0 Y1.0\n")
    pieces.append("N" + str(linenr + 30) + " G17 G80 G40 G90\n")
    pieces.append("N" + str(linenr + 40) + " M99\n")
    print("done postprocessing.")
    return "".join(pieces)
# print(__name__ + " gcode postprocessor loaded.")
|
plugin | preset | try:
from sg_py_vendor.pymarshal import type_assert, type_assert_dict, type_assert_iter
except ImportError:
from pymarshal import type_assert, type_assert_dict, type_assert_iter
__all__ = [
"Preset",
"PresetBank",
]
class Preset:
    """A single plugin preset: a named snapshot of control values."""

    def __init__(
        self,
        name,
        controls,
        custom,
    ):
        # Display name, limited to fewer than 24 characters.
        self.name = type_assert(
            name,
            str,
            check=lambda x: len(x) < 24,
            desc="The display name of the preset",
        )
        self.controls = type_assert_dict(
            controls,
            kcls=int,  # control number
            vcls=int,  # value
            desc="The control values for the plugin",
        )
        self.custom = type_assert_dict(
            custom,
            kcls=str,  # name
            vcls=str,  # value
            desc="The custom control values for the plugin",
        )
class PresetBank:
    """A collection of presets belonging to one plugin."""

    def __init__(
        self,
        plugin_uid,
        presets,
    ):
        self.plugin_uid = type_assert(
            plugin_uid,
            int,
            desc="The uid of the plugin this preset bank is for",
        )
        self.presets = type_assert_iter(
            presets,
            Preset,
            desc="The presets in this bank",
        )
|
hogql | placeholders | from typing import Dict, List, Optional
from posthog.hogql import ast
from posthog.hogql.errors import HogQLException
from posthog.hogql.visitor import CloningVisitor, TraversingVisitor
def replace_placeholders(
    node: ast.Expr, placeholders: Optional[Dict[str, ast.Expr]]
) -> ast.Expr:
    """Return a clone of *node* with each placeholder substituted."""
    visitor = ReplacePlaceholders(placeholders)
    return visitor.visit(node)
def find_placeholders(node: ast.Expr) -> List[str]:
    """Collect the field names of every placeholder reachable from *node*."""
    visitor = FindPlaceholders()
    visitor.visit(node)
    return list(visitor.found)
class FindPlaceholders(TraversingVisitor):
    """AST visitor that records the field names of all placeholders."""

    def __init__(self):
        super().__init__()
        # Placeholder field names seen during traversal.
        self.found: set[str] = set()

    def visit_placeholder(self, node: ast.Placeholder):
        self.found.add(node.field)
class ReplacePlaceholders(CloningVisitor):
    """Clones the AST while swapping each placeholder for its expression."""

    def __init__(self, placeholders: Optional[Dict[str, ast.Expr]]):
        super().__init__()
        self.placeholders = placeholders

    def visit_placeholder(self, node):
        if not self.placeholders:
            raise HogQLException(
                f"Placeholders, such as {{{node.field}}}, are not supported in this context"
            )
        replacement = self.placeholders.get(node.field)
        if replacement is not None:
            # Carry over the source span of the placeholder being replaced.
            replacement.start = node.start
            replacement.end = node.end
            return replacement
        available = ", ".join(f"{placeholder}" for placeholder in self.placeholders)
        raise HogQLException(
            f"Placeholder {{{node.field}}} is not available in this context. You can use the following: "
            + available
        )
|
network | assemble | """
Create bitmessage protocol command packets
"""
import struct
import addresses
from network.constants import MAX_ADDR_COUNT
from network.node import Peer
from protocol import CreatePacket, encodeHost
def assemble_addr(peerList):
    """Create address command"""
    if isinstance(peerList, Peer):
        peerList = [peerList]
    if not peerList:
        return b""
    packets = []
    # Emit one "addr" packet per chunk of at most MAX_ADDR_COUNT peers.
    for offset in range(0, len(peerList), MAX_ADDR_COUNT):
        chunk = peerList[offset : offset + MAX_ADDR_COUNT]
        payload = addresses.encodeVarint(len(chunk))
        for stream, peer, timestamp in chunk:
            payload += struct.pack(">Q", timestamp)  # 64-bit time
            payload += struct.pack(">I", stream)
            # service bit flags offered by this node
            payload += struct.pack(">q", 1)
            payload += encodeHost(peer.host)
            payload += struct.pack(">H", peer.port)  # remote port
        packets.append(CreatePacket("addr", payload))
    return b"".join(packets)
|
process-manager | sql_scripts | CREATE_TABLES = """
CREATE TABLE IF NOT EXISTS processes (
rowid INTEGER PRIMARY KEY AUTOINCREMENT,
row_version INTEGER NOT NULL DEFAULT 0, -- incremented every time the row is updated
pid INTEGER NOT NULL, -- process ID
kind TEXT NOT NULL, -- process type, 'core' or 'gui'
"primary" INT NOT NULL, -- 1 means the process is considered to be the "main" process of the specified kind
canceled INT NOT NULL, -- 1 means that another process is already working as primary, so this process is stopped
app_version TEXT NOT NULL, -- the Tribler version
started_at INT NOT NULL, -- unix timestamp of the time when the process was started
creator_pid INT, -- for a Core process this is the pid of the corresponding GUI process
api_port INT, -- Core API port, for GUI process this is a suggested port that Core can use
finished_at INT, -- unix timestamp of the time when the process was finished
exit_code INT, -- for completed process this is the exit code, 0 means successful run without termination
error_msg TEXT -- a description of an exception that possibly led to the process termination
)
"""
DELETE_OLD_RECORDS = """
DELETE FROM processes -- delete all non-primary records that are older than 30 days or not in the 100 last records
WHERE "primary" = 0 -- never delete current primary processes
AND (
finished_at < strftime('%s') - (60 * 60 * 24) * 30 -- delete record if a process finished more than 30 days ago
OR rowid NOT IN (
SELECT rowid FROM processes ORDER BY rowid DESC LIMIT 100 -- only keep last 100 processes
)
)
"""
SELECT_COLUMNS = (
'rowid, row_version, pid, kind, "primary", canceled, app_version, '
"started_at, creator_pid, api_port, finished_at, exit_code, error_msg"
)
|
installer | middleware | # This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from re import compile as re_compile
import components.helpers as helpers
from django.conf import settings
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
# Compiled regexes for paths that may be visited without being logged in.
# Populated at import time by _load_exempt_urls() below.
EXEMPT_URLS = None


def _load_exempt_urls():
    """Build the login-exempt URL pattern list from Django settings."""
    global EXEMPT_URLS
    # The login page itself is always exempt.
    EXEMPT_URLS = [re_compile("{}$".format(settings.LOGIN_URL.lstrip("/")))]
    if hasattr(settings, "LOGIN_EXEMPT_URLS"):
        EXEMPT_URLS += [re_compile(expr) for expr in settings.LOGIN_EXEMPT_URLS]


_load_exempt_urls()
class ConfigurationCheckMiddleware(MiddlewareMixin):
    """Redirect users to the installer page or the login page.

    The presence of the pipeline UUID in the database is an indicator of
    whether the application has already been set up.
    """

    def process_request(self, request):
        dashboard_uuid = helpers.get_setting("dashboard_uuid")

        if not dashboard_uuid:
            # No pipeline UUID yet: the application is not set up, so force
            # the installer unless the user is already there.
            if request.path_info == reverse("installer:welcome"):
                return None
            return redirect("installer:welcome")

        if not request.user.is_authenticated:
            # Anonymous users may only visit the explicitly exempt URLs.
            path = request.path_info.lstrip("/")
            is_exempt = any(pattern.match(path) for pattern in EXEMPT_URLS)
            if not is_exempt:
                return redirect(settings.LOGIN_URL)

        # Share the ID of the pipeline with the application views.
        request.dashboard_uuid = dashboard_uuid
|
utils | jinja | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Utilities related to jinja2."""
import contextlib
import functools
import html
import os
import os.path
import posixpath
from typing import Any, Callable, FrozenSet, Iterator, List, Set, Tuple
import jinja2
import jinja2.nodes
from qutebrowser.misc import debugcachestats
from qutebrowser.qt.core import QUrl
from qutebrowser.utils import log, qtutils, resources, urlutils, utils
html_fallback = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Error while loading template</title>
</head>
<body>
<p><span style="font-size:120%;color:red">
The %FILE% template could not be found!<br>
Please check your qutebrowser installation
</span><br>
%ERROR%
</p>
</body>
</html>
"""
class Loader(jinja2.BaseLoader):
    """Jinja loader which uses resources.read_file to load templates.

    Attributes:
        _subdir: The subdirectory to find templates in.
    """

    def __init__(self, subdir: str) -> None:
        self._subdir = subdir

    def get_source(
        self, _env: jinja2.Environment, template: str
    ) -> Tuple[str, str, Callable[[], bool]]:
        path = os.path.join(self._subdir, template)
        try:
            source = resources.read_file(path)
        except OSError as err:
            # Substitute the fallback page so a broken template shows an
            # error in the browser rather than failing silently.
            source = html_fallback.replace("%FILE%", html.escape(template))
            source = source.replace("%ERROR%", html.escape(str(err)))
            log.misc.exception(
                "The {} template could not be loaded from {}".format(template, path)
            )
        # Currently we don't implement auto-reloading, so we always return
        # True for up-to-date.
        return source, path, lambda: True
class Environment(jinja2.Environment):
    """Our own jinja environment which is more strict."""

    def __init__(self) -> None:
        super().__init__(
            loader=Loader("html"),
            # Evaluated per render, so no_autoescape() can toggle it.
            autoescape=lambda _name: self._autoescape,
            undefined=jinja2.StrictUndefined,
        )
        self.globals["resource_url"] = self._resource_url
        self.globals["file_url"] = urlutils.file_url
        self.globals["data_url"] = self._data_url
        self.globals["qcolor_to_qsscolor"] = qtutils.qcolor_to_qsscolor
        self._autoescape = True

    @contextlib.contextmanager
    def no_autoescape(self) -> Iterator[None]:
        """Context manager to temporarily turn off autoescaping."""
        self._autoescape = False
        try:
            yield
        finally:
            # Bug fix: restore even when the body raises, so one failing
            # render cannot leave autoescaping permanently disabled.
            self._autoescape = True

    def _resource_url(self, path: str) -> str:
        """Load qutebrowser resource files.

        Arguments:
            path: The relative path to the resource.
        """
        assert not posixpath.isabs(path), path
        url = QUrl("qute://resource")
        url.setPath("/" + path)
        urlutils.ensure_valid(url)
        urlstr = url.toString(urlutils.FormatOption.ENCODED)
        return urlstr

    def _data_url(self, path: str) -> str:
        """Get a data: url for the broken qutebrowser logo."""
        data = resources.read_file_binary(path)
        mimetype = utils.guess_mimetype(path)
        return urlutils.data_url(mimetype, data).toString()

    def getattr(self, obj: Any, attribute: str) -> Any:
        """Override jinja's getattr() to be less clever.

        This means it doesn't fall back to __getitem__, and it doesn't hide
        AttributeError.
        """
        return getattr(obj, attribute)
def render(template: str, **kwargs: Any) -> str:
    """Render the given template and pass the given arguments to it."""
    tmpl = environment.get_template(template)
    return tmpl.render(**kwargs)
# Module-level singleton environments: one strict, autoescaping environment
# for HTML templates and a separate plain jinja2 environment for the
# JavaScript templates.
environment = Environment()
js_environment = jinja2.Environment(loader=Loader("javascript"))
@debugcachestats.register()
@functools.lru_cache
def template_config_variables(template: str) -> FrozenSet[str]:
    """Return the config variables used in the template."""
    pending: List[jinja2.nodes.Node] = [environment.parse(template)]
    found: Set[str] = set()
    while pending:
        node = pending.pop()
        if not isinstance(node, jinja2.nodes.Getattr):
            pending.extend(node.iter_child_nodes())
            continue
        # Walk down the Getattr chain, collecting attribute names in
        # reverse order: 'conf.d.c.ab' yields ['ab', 'c', 'd'].
        attrs: List[str] = []
        while isinstance(node, jinja2.nodes.Getattr):
            attrs.append(node.attr)
            node = node.node
        if isinstance(node, jinja2.nodes.Name):
            if node.name == "conf":
                found.add(".".join(reversed(attrs)))
            # A Name node has no children, so nothing more to queue here.
        else:
            pending.append(node)

    from qutebrowser.config import config

    for option in found:
        config.instance.ensure_has_opt(option)
    return frozenset(found)
|
qtui | common | # Copyright (C) 2018 Thomas Hess <thomas.hess@udo.edu>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import enum
import functools
import os.path
import pathlib
import re
import autokey.configmanager.configmanager_constants as cm_constants
from autokey.logger import get_logger
from PyQt5 import uic
from PyQt5.QtCore import QFile, QSize
from PyQt5.QtGui import QColor, QFont, QFontDatabase, QIcon, QPainter, QPixmap
from PyQt5.QtSvg import QSvgRenderer
from PyQt5.QtWidgets import QLabel, QMessageBox
try:
    import autokey.qtui.compiled_resources
except ModuleNotFoundError:
    import warnings

    # No compiled resource module found. Load bare files from disk instead.
    warn_msg = (
        "Compiled Qt resources file not found. If autokey is launched directly from the source directory, "
        "this is expected and harmless. If not, this indicates a failure in the resource compilation."
    )
    warnings.warn(warn_msg)
    RESOURCE_PATH_PREFIX = str(pathlib.Path(__file__).resolve().parent / "resources")
    local_path = pathlib.Path(__file__).resolve().parent.parent.parent.parent / "config"
    if local_path.exists():
        # This is running from the source directory, thus icons are in <root>/config
        ICON_PATH_PREFIX = str(local_path)
    else:
        # This is an installation. Icons reside in autokey/qtui/resources/icons, where they were copied by setup.py
        ICON_PATH_PREFIX = str(
            pathlib.Path(__file__).resolve().parent / "resources" / "icons"
        )
    del local_path
else:
    import atexit

    # Compiled resources found, so use it. The ":" prefix addresses the Qt
    # resource system instead of the filesystem.
    RESOURCE_PATH_PREFIX = ":"
    ICON_PATH_PREFIX = ":/icons"
    # Release the compiled Qt resources when the interpreter exits.
    atexit.register(autokey.qtui.compiled_resources.qCleanupResources)

logger = get_logger(__name__)
# Drop the factory so module consumers use `logger` directly.
del get_logger

# Matches strings that are empty or contain only spaces.
EMPTY_FIELD_REGEX = re.compile(r"^ *$", re.UNICODE)
def monospace_font() -> QFont:
    """
    Returns a monospace font used in the code editor widgets.

    :return: QFont instance having a monospace font.
    """
    # Ask the font database for the platform's fixed-width system font
    # instead of hard-coding a family name.
    return QFontDatabase.systemFont(QFontDatabase.FixedFont)
def set_url_label(label: QLabel, path: str):
    """Show *path* in *label* as a clickable file:// hyperlink."""
    if not label.openExternalLinks():
        # The openExternalLinks property is not set in the UI file, so fail
        # fast instead of doing workarounds.
        raise ValueError(
            "QLabel with disabled openExternalLinks property used to display an external URL. "
            "This won’t work, so fail now. Label: {}, Text: {}".format(
                label, label.text()
            )
        )
    # In both cases, only replace the first occurrence: a path such as
    # '/home/bob/some/folder/home/bobbie/foo/' must become
    # '~/some/folder/home/bobbie/foo/', NOT '~/some/folder~bie/foo/'.
    if path.startswith(cm_constants.CONFIG_DEFAULT_FOLDER):
        text = path.replace(cm_constants.CONFIG_DEFAULT_FOLDER, "(Default folder)", 1)
    else:
        text = path.replace(os.path.expanduser("~"), "~", 1)
    url = "file://" + path
    # TODO elide text?
    label.setText("""<a href="{url}">{text}</a>""".format(url=url, text=text))
def validate(expression, message, widget, parent):
    """Show *message* in a critical dialog when *expression* is falsy.

    Optionally focuses *widget* so the user can correct the input.
    Returns *expression* unchanged so callers can chain checks.
    """
    if expression:
        return expression
    QMessageBox.critical(parent, message, message)
    if widget is not None:
        widget.setFocus()
    return expression
class AutoKeyIcon(enum.Enum):
    """File names of the bundled application icons, relative to ICON_PATH_PREFIX."""

    AUTOKEY = "autokey.png"
    AUTOKEY_SCALABLE = "autokey.svg"
    SYSTEM_TRAY = "autokey-status.svg"
    SYSTEM_TRAY_DARK = "autokey-status-dark.svg"
    SYSTEM_TRAY_ERROR = "autokey-status-error.svg"
@functools.lru_cache()
def load_icon(name: AutoKeyIcon) -> QIcon:
    """Load (and cache) the given application icon.

    :param name: Which bundled icon to load.
    :return: QIcon for the requested icon file.
    """
    file_path = ICON_PATH_PREFIX + "/" + name.value
    icon = QIcon(file_path)
    if not icon.availableSizes() and file_path.endswith(".svg"):
        # FIXME: Work around Qt Bug: https://bugreports.qt.io/browse/QTBUG-63187
        # Manually render the SVG to some common icon sizes.
        icon = QIcon()  # Discard the bugged QIcon
        renderer = QSvgRenderer(file_path)
        for size in (16, 22, 24, 32, 64, 128):
            pixmap = QPixmap(QSize(size, size))
            # Fill with a fully transparent background before rendering.
            pixmap.fill(QColor(255, 255, 255, 0))
            renderer.render(QPainter(pixmap))
            icon.addPixmap(pixmap)
    return icon
def _get_ui_qfile(name: str):
    """
    Returns an opened, read-only QFile for the given QtDesigner UI file name.

    Expects a plain name like "centralwidget"; the file ending and resource
    path are added automatically.
    Raises FileNotFoundError, if the given ui file does not exist.

    :param name:
    :return:
    """
    path = "{}/ui/{}.ui".format(RESOURCE_PATH_PREFIX, name)
    ui_file = QFile(path)
    if not ui_file.exists():
        raise FileNotFoundError("UI file not found: " + path)
    ui_file.open(QFile.ReadOnly)
    return ui_file
def load_ui_from_file(name: str):
    """
    Returns a tuple from uic.loadUiType(), loading the ui file with the given name.

    :param name:
    :return:
    """
    qfile = _get_ui_qfile(name)
    try:
        return uic.loadUiType(qfile, from_imports=True)
    finally:
        qfile.close()
# This renamed function is supposed to be used during class definition to make
# the intention clear. (Previously this explanation was a free-floating
# triple-quoted string: a no-op statement that is NOT attached to the alias
# below as a docstring; a comment is the correct form.)
#
# Usage example:
#
#     class SomeWidget(*inherits_from_ui_file_with_name("SomeWidgetUiFileName")):
#         def __init__(self, parent):
#             super(SomeWidget, self).__init__(parent)
#             self.setupUi(self)
inherits_from_ui_file_with_name = load_ui_from_file
|
workflows | batch_exports | import collections.abc
import csv
import dataclasses
import datetime as dt
import gzip
import json
import logging
import logging.handlers
import queue
import tempfile
import typing
import uuid
from string import Template
import brotli
from asgiref.sync import sync_to_async
from posthog.batch_exports.service import (
BatchExportsInputsProtocol,
create_batch_export_run,
update_batch_export_run_status,
)
from posthog.kafka_client.client import KafkaProducer
from posthog.kafka_client.topics import KAFKA_LOG_ENTRIES
from temporalio import activity, workflow
SELECT_QUERY_TEMPLATE = Template(
"""
SELECT $fields
FROM events
WHERE
-- These 'timestamp' checks are a heuristic to exploit the sort key.
-- Ideally, we need a schema that serves our needs, i.e. with a sort key on the _timestamp field used for batch exports.
-- As a side-effect, this heuristic will discard historical loads older than 2 days.
timestamp >= toDateTime64({data_interval_start}, 6, 'UTC') - INTERVAL 2 DAY
AND timestamp < toDateTime64({data_interval_end}, 6, 'UTC') + INTERVAL 1 DAY
AND COALESCE(inserted_at, _timestamp) >= toDateTime64({data_interval_start}, 6, 'UTC')
AND COALESCE(inserted_at, _timestamp) < toDateTime64({data_interval_end}, 6, 'UTC')
AND team_id = {team_id}
$exclude_events
$order_by
$format
"""
)
async def get_rows_count(
    client,
    team_id: int,
    interval_start: str,
    interval_end: str,
    exclude_events: collections.abc.Iterable[str] | None = None,
) -> int:
    """Return the number of distinct export rows in the given interval."""
    start_ch = dt.datetime.fromisoformat(interval_start).strftime("%Y-%m-%d %H:%M:%S")
    end_ch = dt.datetime.fromisoformat(interval_end).strftime("%Y-%m-%d %H:%M:%S")

    if exclude_events:
        exclude_clause = "AND event NOT IN {exclude_events}"
        excluded = tuple(exclude_events)
    else:
        exclude_clause = ""
        excluded = ()

    query = SELECT_QUERY_TEMPLATE.substitute(
        fields="count(DISTINCT event, cityHash64(distinct_id), cityHash64(uuid)) as count",
        order_by="",
        format="",
        exclude_events=exclude_clause,
    )

    count = await client.read_query(
        query,
        query_parameters={
            "team_id": team_id,
            "data_interval_start": start_ch,
            "data_interval_end": end_ch,
            "exclude_events": excluded,
        },
    )

    if count is None or len(count) == 0:
        raise ValueError(
            "Unexpected result from ClickHouse: `None` returned for count query"
        )
    return int(count)
# Column list used by get_results_iterator; DISTINCT ON deduplicates events
# by (event, distinct_id hash, uuid hash).
FIELDS = """
DISTINCT ON (event, cityHash64(distinct_id), cityHash64(uuid))
toString(uuid) as uuid,
team_id,
timestamp,
inserted_at,
created_at,
event,
properties,
-- Point in time identity fields
toString(distinct_id) as distinct_id,
toString(person_id) as person_id,
person_properties,
-- Autocapture fields
elements_chain
"""
def get_results_iterator(
    client,
    team_id: int,
    interval_start: str,
    interval_end: str,
    exclude_events: collections.abc.Iterable[str] | None = None,
) -> typing.Generator[dict[str, typing.Any], None, None]:
    """Stream export rows for the given interval as dictionaries."""
    start_ch = dt.datetime.fromisoformat(interval_start).strftime("%Y-%m-%d %H:%M:%S")
    end_ch = dt.datetime.fromisoformat(interval_end).strftime("%Y-%m-%d %H:%M:%S")

    if exclude_events:
        exclude_clause = "AND event NOT IN {exclude_events}"
        excluded = tuple(exclude_events)
    else:
        exclude_clause = ""
        excluded = ()

    query = SELECT_QUERY_TEMPLATE.substitute(
        fields=FIELDS,
        order_by="ORDER BY inserted_at",
        format="FORMAT ArrowStream",
        exclude_events=exclude_clause,
    )

    batches = client.stream_query_as_arrow(
        query,
        query_parameters={
            "team_id": team_id,
            "data_interval_start": start_ch,
            "data_interval_end": end_ch,
            "exclude_events": excluded,
        },
    )
    for batch in batches:
        yield from iter_batch_records(batch)
def iter_batch_records(batch) -> typing.Generator[dict[str, typing.Any], None, None]:
    """Iterate over records of a batch.

    During iteration, we yield dictionaries with all fields used by PostHog BatchExports.

    Args:
        batch: A record batch of rows.
    """
    for row in batch.to_pylist():
        raw_properties = row.get("properties")
        raw_person_properties = row.get("person_properties")
        properties = json.loads(raw_properties) if raw_properties else None
        person_properties = (
            json.loads(raw_person_properties) if raw_person_properties else None
        )
        elements_chain = row.get("elements_chain").decode()
        inserted_at = row.get("inserted_at")
        yield {
            "created_at": row.get("created_at").isoformat(),
            "distinct_id": row.get("distinct_id").decode(),
            # This is not backwards compatible, as elements should contain a
            # parsed array. However, parsing elements_chain is a mess, so we
            # json.dumps to at least be compatible with schemas that use
            # JSON-like types.
            "elements": json.dumps(elements_chain),
            "elements_chain": elements_chain,
            "event": row.get("event").decode(),
            "inserted_at": inserted_at.isoformat() if inserted_at else None,
            "ip": properties.get("$ip", None) if properties else None,
            "person_id": row.get("person_id").decode(),
            "person_properties": person_properties,
            "set": properties.get("$set", None) if properties else None,
            "set_once": properties.get("$set_once", None) if properties else None,
            "properties": properties,
            # Kept for backwards compatibility, but not exported anymore.
            "site_url": "",
            "team_id": row.get("team_id"),
            "timestamp": row.get("timestamp").isoformat(),
            "uuid": row.get("uuid").decode(),
        }
def get_data_interval(
interval: str, data_interval_end: str | None
) -> tuple[dt.datetime, dt.datetime]:
"""Return the start and end of an export's data interval.
Args:
interval: The interval of the BatchExport associated with this Workflow.
data_interval_end: The optional end of the BatchExport period. If not included, we will
attempt to extract it from Temporal SearchAttributes.
Raises:
TypeError: If when trying to obtain the data interval end we run into non-str types.
ValueError: If passing an unsupported interval value.
Returns:
A tuple of two dt.datetime indicating start and end of the data_interval.
"""
data_interval_end_str = data_interval_end
if not data_interval_end_str:
data_interval_end_search_attr = workflow.info().search_attributes.get(
"TemporalScheduledStartTime"
)
# These two if-checks are a bit pedantic, but Temporal SDK is heavily typed.
# So, they exist to make mypy happy.
if data_interval_end_search_attr is None:
msg = (
"Expected 'TemporalScheduledStartTime' of type 'list[str]' or 'list[datetime], found 'NoneType'."
"This should be set by the Temporal Schedule unless triggering workflow manually."
"In the latter case, ensure 'S3BatchExportInputs.data_interval_end' is set."
)
raise TypeError(msg)
# Failing here would perhaps be a bug in Temporal.
if isinstance(data_interval_end_search_attr[0], str):
data_interval_end_str = data_interval_end_search_attr[0]
data_interval_end_dt = dt.datetime.fromisoformat(data_interval_end_str)
elif isinstance(data_interval_end_search_attr[0], dt.datetime):
data_interval_end_dt = data_interval_end_search_attr[0]
else:
msg = (
f"Expected search attribute to be of type 'str' or 'datetime' found '{data_interval_end_search_attr[0]}' "
f"of type '{type(data_interval_end_search_attr[0])}'."
)
raise TypeError(msg)
else:
data_interval_end_dt = dt.datetime.fromisoformat(data_interval_end_str)
if interval == "hour":
data_interval_start_dt = data_interval_end_dt - dt.timedelta(hours=1)
elif interval == "day":
data_interval_start_dt = data_interval_end_dt - dt.timedelta(days=1)
elif interval == "every-5-minutes":
data_interval_start_dt = data_interval_end_dt - dt.timedelta(minutes=5)
elif interval == "every-10-minutes":
data_interval_start_dt = data_interval_end_dt - dt.timedelta(minutes=10)
else:
raise ValueError(f"Unsupported interval: '{interval}'")
return (data_interval_start_dt, data_interval_end_dt)
def json_dumps_bytes(d, encoding="utf-8") -> bytes:
    """Serialize *d* to JSON and encode the result with *encoding*."""
    serialized = json.dumps(d)
    return serialized.encode(encoding)
class BatchExportTemporaryFile:
    """A TemporaryFile used to as an intermediate step while exporting data.

    This class does not implement the file-like interface but rather passes any calls
    to the underlying tempfile.NamedTemporaryFile. We do override 'write' methods
    to allow tracking bytes and records.
    """

    def __init__(
        self,
        mode: str = "w+b",
        buffering=-1,
        compression: str | None = None,
        encoding: str | None = None,
        newline: str | None = None,
        suffix: str | None = None,
        prefix: str | None = None,
        dir: str | None = None,
        *,
        errors: str | None = None,
    ):
        """Create the wrapped NamedTemporaryFile and reset all counters.

        All parameters except `compression` are forwarded to
        tempfile.NamedTemporaryFile. `compression` selects how `write`
        transforms content: "gzip", "brotli", or None for no compression.
        """
        self._file = tempfile.NamedTemporaryFile(
            mode=mode,
            encoding=encoding,
            newline=newline,
            buffering=buffering,
            suffix=suffix,
            prefix=prefix,
            dir=dir,
            errors=errors,
        )
        self.compression = compression
        # Running totals across the file's whole lifetime.
        self.bytes_total = 0
        self.records_total = 0
        # Totals since the last reset(); used for chunked uploads.
        self.bytes_since_last_reset = 0
        self.records_since_last_reset = 0
        # Lazily created by the brotli_compressor property.
        self._brotli_compressor = None

    def __getattr__(self, name):
        """Pass get attr to underlying tempfile.NamedTemporaryFile."""
        # NOTE(review): this delegates via the wrapper's own __getattr__;
        # presumably equivalent to getattr(self._file, name) — confirm.
        return self._file.__getattr__(name)

    def __enter__(self):
        """Context-manager protocol enter method."""
        self._file.__enter__()
        return self

    def __exit__(self, exc, value, tb):
        """Context-manager protocol exit method."""
        return self._file.__exit__(exc, value, tb)

    def __iter__(self):
        # Iterate the underlying file (line by line in text mode).
        yield from self._file

    @property
    def brotli_compressor(self):
        """Lazily create and cache a brotli.Compressor instance."""
        if self._brotli_compressor is None:
            self._brotli_compressor = brotli.Compressor()
        return self._brotli_compressor

    def compress(self, content: bytes | str) -> bytes:
        """Return *content* encoded to bytes and compressed per self.compression."""
        if isinstance(content, str):
            encoded = content.encode("utf-8")
        else:
            encoded = content

        match self.compression:
            case "gzip":
                return gzip.compress(encoded)
            case "brotli":
                # Streaming compression: feed the chunk, then flush whatever
                # output is currently available. Final output is produced by
                # rewind() via compressor.finish().
                self.brotli_compressor.process(encoded)
                return self.brotli_compressor.flush()
            case None:
                return encoded
            case _:
                raise ValueError(f"Unsupported compression: '{self.compression}'")

    def write(self, content: bytes | str):
        """Write bytes to underlying file keeping track of how many bytes were written."""
        compressed_content = self.compress(content)
        if "b" in self.mode:
            result = self._file.write(compressed_content)
        else:
            result = self._file.write(compressed_content.decode("utf-8"))
        self.bytes_total += result
        self.bytes_since_last_reset += result
        return result

    def write_records_to_jsonl(self, records):
        """Write records to a temporary file as JSONL."""
        jsonl_dump = b"\n".join(map(json_dumps_bytes, records))
        # NOTE(review): a trailing newline is only appended for a single
        # record, so multi-record writes do not end with "\n" — confirm
        # this asymmetry is intentional for the downstream consumers.
        if len(records) == 1:
            jsonl_dump += b"\n"
        result = self.write(jsonl_dump)
        self.records_total += len(records)
        self.records_since_last_reset += len(records)
        return result

    def write_records_to_csv(
        self,
        records,
        fieldnames: None | collections.abc.Sequence[str] = None,
        extrasaction: typing.Literal["raise", "ignore"] = "ignore",
        delimiter: str = ",",
        quotechar: str = '"',
        escapechar: str = "\\",
        quoting=csv.QUOTE_NONE,
    ):
        """Write records to a temporary file as CSV."""
        if len(records) == 0:
            return
        if fieldnames is None:
            # Default to the keys of the first record.
            fieldnames = list(records[0].keys())

        # `self` is passed as the writer's file object so writes go through
        # our tracking/compressing write() above.
        writer = csv.DictWriter(
            self,
            fieldnames=fieldnames,
            extrasaction=extrasaction,
            delimiter=delimiter,
            quotechar=quotechar,
            escapechar=escapechar,
            quoting=quoting,
        )
        writer.writerows(records)

        self.records_total += len(records)
        self.records_since_last_reset += len(records)

    def write_records_to_tsv(
        self,
        records,
        fieldnames: None | list[str] = None,
        extrasaction: typing.Literal["raise", "ignore"] = "ignore",
        quotechar: str = '"',
        escapechar: str = "\\",
        quoting=csv.QUOTE_NONE,
    ):
        """Write records to a temporary file as TSV."""
        return self.write_records_to_csv(
            records,
            fieldnames=fieldnames,
            extrasaction=extrasaction,
            delimiter="\t",
            quotechar=quotechar,
            escapechar=escapechar,
            quoting=quoting,
        )

    def rewind(self):
        """Rewind the file before reading it."""
        if self.compression == "brotli":
            # Finalize the brotli stream and append its closing bytes.
            result = self._file.write(self.brotli_compressor.finish())
            self.bytes_total += result
            self.bytes_since_last_reset += result
            self._brotli_compressor = None
        self._file.seek(0)

    def reset(self):
        """Reset underlying file by truncating it.

        Also resets the tracker attributes for bytes and records since last reset.
        """
        self._file.seek(0)
        self._file.truncate()
        self.bytes_since_last_reset = 0
        self.records_since_last_reset = 0
class BatchExportLoggerAdapter(logging.LoggerAdapter):
    """Adapter that adds batch export details to log records."""

    def __init__(
        self,
        logger: logging.Logger,
        extra=None,
    ) -> None:
        """Create the logger adapter."""
        super().__init__(logger, extra or {})

    def process(
        self, msg: str, kwargs
    ) -> tuple[typing.Any, collections.abc.MutableMapping[str, typing.Any]]:
        """Override to add batch exports details."""
        workflow_id = workflow_run_id = attempt = None

        # Check activity context first; workflow context, when present, wins.
        # Either call raises RuntimeError outside its respective context.
        for get_info, run_id_attr in (
            (activity.info, "workflow_run_id"),
            (workflow.info, "run_id"),
        ):
            try:
                info = get_info()
            except RuntimeError:
                continue
            workflow_run_id = getattr(info, run_id_attr)
            workflow_id = info.workflow_id
            attempt = info.attempt

        if workflow_id is None or workflow_run_id is None or attempt is None:
            return (None, {})

        # This works because the WorkflowID is made up like f"{batch_export_id}-{data_interval_end}"
        # Since {data_interval_date} is an iso formatted datetime string, it has two '-' to separate the
        # date. Plus one more leaves us at the end of {batch_export_id}.
        batch_export_id = workflow_id.rsplit("-", maxsplit=3)[0]

        extra = kwargs.get("extra", None) or {}
        extra["workflow_id"] = workflow_id
        extra["batch_export_id"] = batch_export_id
        extra["workflow_run_id"] = workflow_run_id
        extra["attempt"] = attempt

        # Adapter-level extras take precedence over per-call extras.
        if isinstance(self.extra, dict):
            extra = extra | self.extra
        kwargs["extra"] = extra

        return (msg, kwargs)

    @property
    def base_logger(self) -> logging.Logger:
        """Underlying logger usable for actions such as adding handlers/formatters."""
        return self.logger
class BatchExportsLogRecord(logging.LogRecord):
    """LogRecord annotated with the extra attributes batch exports logging adds.

    Used purely for typing: KafkaLoggingHandler casts incoming records to this
    type and assumes the attributes below were injected via the adapter's
    'extra' mapping.
    """

    # Injected through the logger adapter's 'extra' dict.
    team_id: int
    batch_export_id: str
    workflow_run_id: str
    attempt: int
class KafkaLoggingHandler(logging.Handler):
    """Logging handler that produces each log record to a Kafka topic."""

    def __init__(self, topic, key=None):
        """Initialize the handler with its own producer, target topic and optional key."""
        super().__init__()
        self.producer = KafkaProducer()
        self.topic = topic
        self.key = key

    def emit(self, record):
        """Format a record and produce it to Kafka.

        Records from the 'kafka' logger itself are dropped to avoid a
        feedback loop. Produce failures are logged, never raised.
        """
        if record.name == "kafka":
            return

        # This is a lie, but as long as this handler is used together
        # with BatchExportLoggerAdapter we should be fine.
        # This is definitely cheaper than a bunch of checks for attributes.
        record = typing.cast(BatchExportsLogRecord, record)

        msg = self.format(record)
        data = {
            "instance_id": record.workflow_run_id,
            "level": record.levelname,
            "log_source": "batch_exports",
            "log_source_id": record.batch_export_id,
            "message": msg,
            "team_id": record.team_id,
            # datetime.utcnow() is deprecated since Python 3.12; an aware UTC
            # datetime produces the exact same strftime output here.
            "timestamp": dt.datetime.now(dt.timezone.utc).strftime("%Y-%m-%d %H:%M:%S.%f"),
        }

        try:
            future = self.producer.produce(topic=self.topic, data=data, key=self.key)
            # Block briefly so produce errors surface and get logged below.
            future.get(timeout=1)
        except Exception as e:
            logging.exception(
                "Failed to produce log to Kafka topic %s", self.topic, exc_info=e
            )

    def close(self):
        """Close the Kafka producer before closing the handler itself."""
        self.producer.close()
        super().close()
# Module-level logging wiring: records are put on an in-process queue and
# drained by a listener thread into the Kafka handler, so emitting a log
# never blocks the caller on Kafka I/O.
LOG_QUEUE: queue.Queue = queue.Queue(-1)  # -1 means unbounded
QUEUE_HANDLER = logging.handlers.QueueHandler(LOG_QUEUE)
QUEUE_HANDLER.setLevel(logging.DEBUG)

KAFKA_HANDLER = KafkaLoggingHandler(topic=KAFKA_LOG_ENTRIES)
KAFKA_HANDLER.setLevel(logging.DEBUG)
# The listener is created here but only started lazily in get_batch_exports_logger.
QUEUE_LISTENER = logging.handlers.QueueListener(LOG_QUEUE, KAFKA_HANDLER)

logger = logging.getLogger(__name__)
logger.addHandler(QUEUE_HANDLER)
logger.setLevel(logging.DEBUG)
def get_batch_exports_logger(
    inputs: BatchExportsInputsProtocol
) -> BatchExportLoggerAdapter:
    """Return a logger adapter for BatchExports, starting the queue listener if needed."""
    # QueueListener exposes no public "is running" check; its _thread attribute
    # stays None until start() is called. Need a type comment as _thread is private.
    if QUEUE_LISTENER._thread is None:  # type: ignore
        QUEUE_LISTENER.start()

    return BatchExportLoggerAdapter(logger, {"team_id": inputs.team_id})
@dataclasses.dataclass
class CreateBatchExportRunInputs:
    """Inputs to the create_export_run activity.

    Attributes:
        team_id: The id of the team the BatchExportRun belongs to.
        batch_export_id: The id of the BatchExport this BatchExportRun belongs to.
        data_interval_start: Start of this BatchExportRun's data interval.
        data_interval_end: End of this BatchExportRun's data interval.
        status: Initial status for the new run; defaults to "Starting".
    """

    team_id: int
    batch_export_id: str
    data_interval_start: str
    data_interval_end: str
    status: str = "Starting"
@activity.defn
async def create_export_run(inputs: CreateBatchExportRunInputs) -> str:
    """Activity that creates a BatchExportRun.

    Intended to be used in all export workflows, usually at the start, to create a model
    instance to represent them in our database.

    Returns:
        The id of the created BatchExportRun, as a string.
    """
    logger = get_batch_exports_logger(inputs=inputs)
    # Lazy %-style args avoid formatting work when the level is disabled.
    logger.info("Creating BatchExportRun model instance in team %s.", inputs.team_id)

    # 'sync_to_async' type hints are fixed in asgiref>=3.4.1
    # But one of our dependencies is pinned to asgiref==3.3.2.
    # Remove these comments once we upgrade.
    run = await sync_to_async(create_batch_export_run)(  # type: ignore
        batch_export_id=uuid.UUID(inputs.batch_export_id),
        data_interval_start=inputs.data_interval_start,
        data_interval_end=inputs.data_interval_end,
        status=inputs.status,
    )

    logger.info("Created BatchExportRun %s in team %s.", run.id, inputs.team_id)

    return str(run.id)
@dataclasses.dataclass
class UpdateBatchExportRunStatusInputs:
    """Inputs to the update_export_run_status activity.

    Attributes:
        id: The id of the BatchExportRun to update.
        status: The new status to set on the run.
        latest_error: Optional description of the latest error, if any.
    """

    id: str
    status: str
    latest_error: str | None = None
@activity.defn
async def update_export_run_status(inputs: UpdateBatchExportRunStatusInputs):
    """Activity that updates the status of a BatchExportRun."""
    # sync_to_async wraps the synchronous ORM update; the type: ignore mirrors
    # the asgiref<3.4.1 pin noted elsewhere in this module.
    await sync_to_async(update_batch_export_run_status)(
        run_id=uuid.UUID(inputs.id),
        status=inputs.status,
        latest_error=inputs.latest_error,
    )  # type: ignore
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.