section stringlengths 2 30 | filename stringlengths 1 82 | text stringlengths 783 28M |
|---|---|---|
controllers | buttons | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
from pylons import request, response
from pylons import tmpl_context as c
from pylons.i18n import _
from r2.lib.pages import BoringPage, ButtonDemoPanel, ButtonLite, WidgetDemoPanel
from r2.lib.pages.things import wrap_links
from r2.lib.validator import *
from r2.models import *
from reddit_base import RedditController, UnloggedUser
class ButtonsController(RedditController):
    """Controller serving the embeddable reddit buttons and demo pages."""

    def get_wrapped_link(self, url, link=None, wrapper=None):
        """Return the highest-scoring wrapped link for `url`.

        If `link` is given it is used directly; otherwise links are looked
        up by URL, scoped to the current subreddit when there is one.
        Returns `wrapper(None)` when nothing usable is found and a wrapper
        was supplied, otherwise the wrapped link (or None).
        """
        try:
            if link:
                links = [link]
            else:
                links = []
                # scope the URL lookup to the current subreddit, if any
                sr = None if isinstance(c.site, FakeSubreddit) else c.site
                try:
                    links = Link._by_url(url, sr)
                except NotFound:
                    pass
            if links:
                if wrapper:
                    links = wrap_links(links, wrapper=wrapper)
                else:
                    links = wrap_links(links)
                # note: even if _by_url succeeded or a link was passed in,
                # it is possible the wrapped listing is empty if the
                # link(s) is/are members of a private reddit
                links = list(links)
                # return the link with the highest score (if more than 1)
                links = max(links, key=lambda x: x._score) if links else None
            if not links and wrapper:
                return wrapper(None)
            return links
        # deliberately broad (but not bare: a bare except would also swallow
        # SystemExit/KeyboardInterrupt) --
        # we don't want to return 500s in other people's pages.
        except Exception:
            import traceback

            g.log.debug("FULLPATH: get_link error in buttons code")
            g.log.debug(traceback.format_exc())
            if wrapper:
                return wrapper(None)

    @validate(buttontype=VInt("t", 1, 5))
    def GET_button_embed(self, buttontype):
        """Permanently redirect the legacy embed URL to the static JS file."""
        if not buttontype:
            abort(404)
        return self.redirect("/static/button/button%s.js" % buttontype, code=301)

    @validate(
        buttonimage=VInt("i", 0, 14),
        title=nop("title"),
        url=VSanitizedUrl("url"),
        newwindow=VBoolean("newwindow", default=False),
        styled=VBoolean("styled", default=True),
    )
    def GET_button_lite(self, buttonimage, title, url, styled, newwindow):
        """Render the lightweight button as a javascript snippet."""
        # buttons are rendered into anonymous third-party pages
        c.user = UnloggedUser([c.lang])
        c.user_is_loggedin = False
        c.render_style = "js"
        if not url:
            url = request.referer

        def builder_wrapper(thing=None):
            kw = {}
            if not thing:
                # no matching link found: fall back to the raw url/title
                kw["url"] = url
                kw["title"] = title
            return ButtonLite(
                thing,
                image=1 if buttonimage is None else buttonimage,
                target="_new" if newwindow else "_parent",
                styled=styled,
                **kw
            )

        bjs = self.get_wrapped_link(url, wrapper=builder_wrapper)
        response.content_type = "text/javascript"
        return bjs.render()

    def GET_button_demo_page(self):
        """Show the interactive button demo page."""
        # no buttons for domain listings -> redirect to top level
        if isinstance(c.site, DomainSR):
            return self.redirect("/buttons")
        return BoringPage(
            _("reddit buttons"), show_sidebar=False, content=ButtonDemoPanel()
        ).render()

    def GET_widget_demo_page(self):
        """Show the widget demo page."""
        return BoringPage(
            _("reddit widget"), show_sidebar=False, content=WidgetDemoPanel()
        ).render()
|
PeerDb | PeerDbPlugin | import atexit
import random
import sqlite3
import time
import gevent
from Plugin import PluginManager
@PluginManager.registerTo("ContentDb")
class ContentDbPlugin(object):
    """ContentDb mixin that persists each site's peer list in a `peer` table.

    Peers are loaded shortly after a site is initialized, saved back
    roughly once an hour, and flushed once more at interpreter shutdown.
    """

    def __init__(self, *args, **kwargs):
        # Make sure all known peers reach the database when the process exits.
        atexit.register(self.saveAllPeers)
        super(ContentDbPlugin, self).__init__(*args, **kwargs)

    def getSchema(self):
        """Extend the base database schema with the `peer` table."""
        schema = super(ContentDbPlugin, self).getSchema()
        schema["tables"]["peer"] = {
            "cols": [
                ["site_id", "INTEGER REFERENCES site (site_id) ON DELETE CASCADE"],
                ["address", "TEXT NOT NULL"],
                ["port", "INTEGER NOT NULL"],
                ["hashfield", "BLOB"],
                ["reputation", "INTEGER NOT NULL"],
                ["time_added", "INTEGER NOT NULL"],
                ["time_found", "INTEGER NOT NULL"],
            ],
            "indexes": [
                "CREATE UNIQUE INDEX peer_key ON peer (site_id, address, port)"
            ],
            "schema_changed": 2,
        }
        return schema

    def loadPeers(self, site):
        """Add the peers previously stored for `site` back to it."""
        s = time.time()
        site_id = self.site_ids.get(site.address)
        res = self.execute(
            "SELECT * FROM peer WHERE site_id = :site_id", {"site_id": site_id}
        )
        num = 0
        num_hashfield = 0
        for row in res:
            peer = site.addPeer(str(row["address"]), row["port"])
            if not peer:  # Already exist
                continue
            if row["hashfield"]:
                peer.hashfield.replaceFromBytes(row["hashfield"])
                num_hashfield += 1
            peer.time_added = row["time_added"]
            peer.time_found = row["time_found"]
            peer.reputation = row["reputation"]
            if row["address"].endswith(".onion"):
                # Onion peers less likely working
                peer.reputation = peer.reputation / 2 - 1
            num += 1
        if num_hashfield:
            site.content_manager.has_optional_files = True
        site.log.debug(
            "%s peers (%s with hashfield) loaded in %.3fs"
            % (num, num_hashfield, time.time() - s)
        )

    def iteratePeers(self, site):
        """Yield one `peer` table row tuple for every known peer of `site`."""
        site_id = self.site_ids.get(site.address)
        for key, peer in list(site.peers.items()):
            # Peer keys are "address:port"; rsplit keeps IPv6-style
            # addresses containing ":" intact.
            address, port = key.rsplit(":", 1)
            if peer.has_hashfield:
                hashfield = sqlite3.Binary(peer.hashfield.tobytes())
            else:
                hashfield = ""
            yield (
                site_id,
                address,
                port,
                hashfield,
                peer.reputation,
                int(peer.time_added),
                int(peer.time_found),
            )

    def savePeers(self, site, spawn=False):
        """Replace the stored peer rows of `site` with its current peers.

        When `spawn` is true the save re-schedules itself to run again in
        about an hour.
        """
        if spawn:
            # Save peers every hour (+random some secs to not update very site at same time)
            site.greenlet_manager.spawnLater(
                60 * 60 + random.randint(0, 60), self.savePeers, site, spawn=True
            )
        if not site.peers:
            site.log.debug("Peers not saved: No peers found")
            return
        s = time.time()
        site_id = self.site_ids.get(site.address)
        cur = self.getCursor()
        try:
            cur.execute(
                "DELETE FROM peer WHERE site_id = :site_id", {"site_id": site_id}
            )
            cur.executemany(
                "INSERT INTO peer (site_id, address, port, hashfield, reputation, time_added, time_found) VALUES (?, ?, ?, ?, ?, ?, ?)",
                self.iteratePeers(site),
            )
        except Exception as err:
            site.log.error("Save peer error: %s" % err)
        else:
            # Only report success when both statements actually completed;
            # previously this line also ran after a failed save.
            site.log.debug("Peers saved in %.3fs" % (time.time() - s))

    def initSite(self, site):
        """Hook site initialization: schedule initial load and periodic save."""
        super(ContentDbPlugin, self).initSite(site)
        site.greenlet_manager.spawnLater(0.5, self.loadPeers, site)
        site.greenlet_manager.spawnLater(60 * 60, self.savePeers, site, spawn=True)

    def saveAllPeers(self):
        """Save the peers of every known site; registered with atexit."""
        for site in list(self.sites.values()):
            try:
                self.savePeers(site)
            except Exception as err:
                site.log.error("Save peer error: %s" % err)
|
extractor | libraryofcongress | # coding: utf-8
from __future__ import unicode_literals
import re
from ..utils import determine_ext, float_or_none, int_or_none, parse_filesize
from .common import InfoExtractor
class LibraryOfCongressIE(InfoExtractor):
    """Extractor for audio/video hosted on loc.gov (US Library of Congress)."""

    IE_NAME = "loc"
    IE_DESC = "Library of Congress"
    # Matches both /item/<id>/ pages and webcast pages (?rec=<id>).
    _VALID_URL = r"https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9a-z_.]+)"
    _TESTS = [
        {
            # embedded via <div class="media-player"
            "url": "http://loc.gov/item/90716351/",
            "md5": "6ec0ae8f07f86731b1b2ff70f046210a",
            "info_dict": {
                "id": "90716351",
                "ext": "mp4",
                "title": "Pa's trip to Mars",
                "duration": 0,
                "view_count": int,
            },
        },
        {
            # webcast embedded via mediaObjectId
            "url": "https://www.loc.gov/today/cyberlc/feature_wdesc.php?rec=5578",
            "info_dict": {
                "id": "5578",
                "ext": "mp4",
                "title": "Help! Preservation Training Needs Here, There & Everywhere",
                "duration": 3765,
                "view_count": int,
                "subtitles": "mincount:1",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            # with direct download links
            "url": "https://www.loc.gov/item/78710669/",
            "info_dict": {
                "id": "78710669",
                "ext": "mp4",
                "title": "La vie et la passion de Jesus-Christ",
                "duration": 0,
                "view_count": int,
                "formats": "mincount:4",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "https://www.loc.gov/item/ihas.200197114/",
            "only_matching": True,
        },
        {
            "url": "https://www.loc.gov/item/afc1981005_afs20503/",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        """Extract formats, subtitles and metadata for a loc.gov media page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The internal media id is embedded in one of several known markup
        # variants; the regexes are tried in order.
        media_id = self._search_regex(
            (
                r'id=(["\'])media-player-(?P<id>.+?)\1',
                r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1',
                r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1',
                r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1',
                r'data-tab="share-media-(?P<id>[0-9A-F]{32})"',
            ),
            webpage,
            "media id",
            group="id",
        )
        data = self._download_json(
            "https://media.loc.gov/services/v1/media?id=%s&context=json" % media_id,
            media_id,
        )["mediaObject"]
        derivative = data["derivatives"][0]
        media_url = derivative["derivativeUrl"]
        title = (
            derivative.get("shortName")
            or data.get("shortName")
            or self._og_search_title(webpage)
        )
        # Following algorithm was extracted from setAVSource js function
        # found in webpage
        media_url = media_url.replace("rtmp", "https")
        is_video = data.get("mediaType", "v").lower() == "v"
        ext = determine_ext(media_url)
        if ext not in ("mp4", "mp3"):
            media_url += ".mp4" if is_video else ".mp3"
        formats = []
        if "/vod/mp4:" in media_url:
            # HLS variant served from the hls-vod endpoint
            formats.append(
                {
                    "url": media_url.replace("/vod/mp4:", "/hls-vod/media/") + ".m3u8",
                    "format_id": "hls",
                    "ext": "mp4",
                    "protocol": "m3u8_native",
                    "quality": 1,
                }
            )
        # Plain progressive-download variant of the same media URL.
        http_format = {
            "url": re.sub(r"(://[^/]+/)(?:[^/]+/)*(?:mp4|mp3):", r"\1", media_url),
            "format_id": "http",
            "quality": 1,
        }
        if not is_video:
            http_format["vcodec"] = "none"
        formats.append(http_format)
        # Direct download links offered in the page's download dropdown;
        # still-image entries (gif/jpeg) are skipped, duplicates deduped.
        download_urls = set()
        for m in re.finditer(
            r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?:&nbsp;|\s+)\((?P<size>.+?)\))?\s*<',
            webpage,
        ):
            format_id = m.group("id").lower()
            if format_id in ("gif", "jpeg"):
                continue
            download_url = m.group("url")
            if download_url in download_urls:
                continue
            download_urls.add(download_url)
            formats.append(
                {
                    "url": download_url,
                    "format_id": format_id,
                    "filesize_approx": parse_filesize(m.group("size")),
                }
            )
        self._sort_formats(formats)
        duration = float_or_none(data.get("duration"))
        view_count = int_or_none(data.get("viewCount"))
        subtitles = {}
        cc_url = data.get("ccUrl")
        if cc_url:
            # Closed captions are provided as a single TTML document.
            subtitles.setdefault("en", []).append(
                {
                    "url": cc_url,
                    "ext": "ttml",
                }
            )
        return {
            "id": video_id,
            "title": title,
            "thumbnail": self._og_search_thumbnail(webpage, default=None),
            "duration": duration,
            "view_count": view_count,
            "formats": formats,
            "subtitles": subtitles,
        }
|
model | group_extra | # encoding: utf-8
from typing import Any
import ckan.model.core as core
import ckan.model.domain_object as domain_object
import ckan.model.group as group
import ckan.model.meta as meta
import ckan.model.types as _types
from sqlalchemy import Column, ForeignKey, Table, orm, types
from sqlalchemy.ext.associationproxy import association_proxy
__all__ = ["GroupExtra", "group_extra_table"]
# Table storing arbitrary key/value metadata ("extras") for groups.
group_extra_table = Table(
    "group_extra",
    meta.metadata,
    Column("id", types.UnicodeText, primary_key=True, default=_types.make_uuid),
    Column("group_id", types.UnicodeText, ForeignKey("group.id")),
    Column("key", types.UnicodeText),
    Column("value", types.UnicodeText),
    Column("state", types.UnicodeText, default=core.State.ACTIVE),
)
class GroupExtra(core.StatefulObjectMixin, domain_object.DomainObject):
    """An arbitrary key/value pair attached to a Group."""

    id: str  # uuid primary key
    group_id: str  # foreign key to group.id
    key: str
    value: str
    state: str  # lifecycle state, defaults to core.State.ACTIVE
    group: group.Group  # relation populated by the orm mapper
# Map GroupExtra onto group_extra_table. The backref exposes the extras on
# Group as `_extras`, a dict keyed by the extra's `key`; deleting a group
# (or removing an entry from the dict) cascades to the extra rows.
# type_ignore_reason: incomplete SQLAlchemy types
meta.mapper(
    GroupExtra,
    group_extra_table,
    properties={
        "group": orm.relation(
            group.Group,
            backref=orm.backref(
                "_extras",
                collection_class=orm.collections.attribute_mapped_collection("key"),  # type: ignore
                cascade="all, delete, delete-orphan",
            ),
        )
    },
)
def _create_extra(key: str, value: Any):
    """Creator for the association proxy: build a GroupExtra from a key/value."""
    return GroupExtra(key=str(key), value=value)


# Expose Group.extras as a dict-like view over the `_extras` relation,
# mapping extra keys directly to their values.
group.Group.extras = association_proxy("_extras", "value", creator=_create_extra)
|
migrations | 0012_attachment | # Generated by Django 3.0.7 on 2020-11-24 19:39
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Attachment model (media attached to a Status)."""

    dependencies = [
        ("bookwyrm", "0011_auto_20201113_1727"),
    ]

    operations = [
        migrations.CreateModel(
            name="Attachment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("updated_date", models.DateTimeField(auto_now=True)),
                ("remote_id", models.CharField(max_length=255, null=True)),
                (
                    "image",
                    models.ImageField(blank=True, null=True, upload_to="status/"),
                ),
                ("caption", models.TextField(blank=True, null=True)),
                (
                    # deleting a status removes its attachments
                    "status",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="attachments",
                        to="bookwyrm.Status",
                    ),
                ),
            ],
            options={
                # inherited from an abstract base model
                "abstract": False,
            },
        ),
    ]
|
draftutils | utils | # -*- coding: utf-8 -*-
# ***************************************************************************
# * (c) 2009, 2010 *
# * Yorik van Havre <yorik@uncreated.net>, Ken Cline <cline@frii.com> *
# * (c) 2019 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides general utility functions used throughout the workbench.
This module contains auxiliary functions which can be used
in other modules of the workbench, and which don't require
the graphical user interface (GUI).
"""
## @package utils
# \ingroup draftutils
# \brief Provides general utility functions used throughout the workbench.
## \addtogroup draftutils
# @{
import os
import FreeCAD as App
import PySide.QtCore as QtCore
from draftutils.messages import _err, _log, _msg, _wrn
from draftutils.translate import translate
# TODO: move the functions that require the graphical interface
# This module should not import any graphical commands; those should be
# in gui_utils
if App.GuiUp:
    # Draft_rc registers the Qt resources (icons, patterns) as an import
    # side effect; Gui is only needed when the GUI is available.
    import Draft_rc
    import FreeCADGui as Gui

    # The module is used to prevent complaints from code checkers (flake8)
    True if Draft_rc else False

# Supported arrow/dimension-symbol names, indexed by the "dimsymbol" pref.
ARROW_TYPES = ["Dot", "Circle", "Arrow", "Tick", "Tick-2"]
arrowtypes = ARROW_TYPES  # backwards-compatibility alias

# Parameter groups for Draft and global View preferences.
param_draft = App.ParamGet("User parameter:BaseApp/Preferences/Mod/Draft")
param_view = App.ParamGet("User parameter:BaseApp/Preferences/View")

# Default annotation style: property name -> (value type, default value),
# with defaults pulled from the current user preferences.
ANNOTATION_STYLE = {
    "ArrowSize": ("float", param_draft.GetFloat("arrowsize", 20)),
    "ArrowType": ("index", param_draft.GetInt("dimsymbol", 0)),
    "Decimals": ("int", param_draft.GetInt("dimPrecision", 2)),
    "DimOvershoot": ("float", param_draft.GetFloat("dimovershoot", 20)),
    "ExtLines": ("float", param_draft.GetFloat("extlines", 300)),
    "ExtOvershoot": ("float", param_draft.GetFloat("extovershoot", 20)),
    "FontName": ("font", param_draft.GetString("textfont", "Sans")),
    "FontSize": ("float", param_draft.GetFloat("textheight", 100)),
    "LineColor": ("color", param_view.GetUnsigned("DefaultShapeLineColor", 255)),
    "LineSpacing": ("float", param_draft.GetFloat("LineSpacing", 1)),
    "LineWidth": ("int", param_view.GetInt("DefaultShapeLineWidth", 1)),
    "ScaleMultiplier": ("float", 1),
    "ShowLine": ("bool", True),
    "ShowUnit": ("bool", param_draft.GetBool("showUnit", True)),
    "TextColor": ("color", param_draft.GetUnsigned("DefaultTextColor", 255)),
    "TextSpacing": ("float", param_draft.GetFloat("dimspacing", 20)),
    "UnitOverride": ("str", param_draft.GetString("overrideUnit", "")),
}
def string_encode_coin(ustr):
    """Encode a string for use with coin (pivy).

    Coin version 4 and later accepts UTF-8; older versions expect
    Latin-1.  When pivy cannot be imported, or its version constant is
    missing, Latin-1 is assumed.

    Parameters
    ----------
    ustr : str
        The string to encode.

    Returns
    -------
    bytes
        `ustr` encoded as `'utf-8'` for coin >= 4, else `'latin1'`.
    """
    encoding = "latin1"
    try:
        from pivy import coin

        if coin.COIN_MAJOR_VERSION >= 4:
            encoding = "utf-8"
    except (ImportError, AttributeError):
        pass
    return ustr.encode(encoding)


stringencodecoin = string_encode_coin
def type_check(args_and_types, name="?"):
    """Verify that each value is an instance of its paired type.

    Parameters
    ----------
    args_and_types : list of tuple
        Pairs `(value, Type)`; `Type` may itself be a tuple of types,
        in which case matching any of them is enough.
    name : str, optional
        Label used in the warning and exception text.  Defaults to `'?'`.

    Raises
    ------
    TypeError
        Raised as `Draft.<name>` for the first value that is not an
        instance of its expected type; a warning is logged first.
    """
    for value, expected in args_and_types:
        if isinstance(value, expected):
            continue
        _wrn("typecheck[{}]: '{}' is not {}".format(name, value, expected))
        raise TypeError("Draft." + str(name))


typecheck = type_check
def get_param_type(param):
    """Return the value type of a Draft preference parameter.

    Parameters
    ----------
    param : str
        The name of a parameter in the parameter database.

    Returns
    -------
    str or None
        One of `'int'`, `'string'`, `'float'`, `'bool'` or `'unsigned'`,
        depending on the parameter; `None` for unhandled names.
    """
    int_names = {
        "dimsymbol", "dimPrecision", "dimorientation", "precision",
        "defaultWP", "snapRange", "gridEvery", "linewidth",
        "modconstrain", "modsnap", "maxSnapEdges", "modalt",
        "HatchPatternResolution", "snapStyle", "dimstyle",
        "gridSize", "gridTransparency",
    }
    string_names = {
        "constructiongroupname", "textfont", "patternFile", "snapModes",
        "FontFile", "ClonePrefix", "overrideUnit", "labeltype",
        "gridSpacing",
    }
    float_names = {
        "textheight", "tolerance", "arrowsize", "extlines", "dimspacing",
        "dimovershoot", "extovershoot", "HatchPatternSize",
    }
    bool_names = {
        "selectBaseObjects", "alwaysSnap", "grid", "fillmode",
        "saveonexit", "maxSnap", "SvgLinesBlack", "dxfStdSize",
        "showSnapBar", "hideSnapBar", "alwaysShowGrid",
        "renderPolylineWidth", "showPlaneTracker", "UsePartPrimitives",
        "DiscretizeEllipses", "showUnit", "coloredGridAxes",
        "Draft_array_fuse", "Draft_array_Link", "gridBorder",
    }
    if param in int_names:
        return "int"
    # any parameter containing "inCommandShortcut" is also a string
    if param in string_names or "inCommandShortcut" in param:
        return "string"
    if param in float_names:
        return "float"
    if param in bool_names:
        return "bool"
    if param in ("color", "constructioncolor", "snapcolor", "gridColor"):
        return "unsigned"
    return None


getParamType = get_param_type
def get_param(param, default=None):
    """Return a parameter value from the current parameter database.

    Parameters are read from
    `'User parameter:BaseApp/Preferences/Mod/Draft'`, except for
    `'linewidth'` and `'color'`, which are read from the corresponding
    View preferences (`DefaultShapeLineWidth`/`DefaultShapeLineColor`).

    Parameters
    ----------
    param : str
        The name of a parameter in the parameter database.
    default : optional
        The value returned if the parameter is unset.  When `None`,
        a type-appropriate zero value is used.

    Returns
    -------
    int, str, float, bool, or None
        The stored value, or `None` if the parameter type is unknown.
    """
    p = App.ParamGet("User parameter:BaseApp/Preferences/Mod/Draft")
    v = App.ParamGet("User parameter:BaseApp/Preferences/View")
    t = getParamType(param)
    # type-appropriate fallbacks when no default was supplied
    zero_values = {"int": 0, "string": "", "float": 0, "bool": False, "unsigned": 0}
    if t not in zero_values:
        return None
    if default is None:
        default = zero_values[t]
    if t == "int":
        if param == "linewidth":
            return v.GetInt("DefaultShapeLineWidth", default)
        return p.GetInt(param, default)
    if t == "string":
        return p.GetString(param, default)
    if t == "float":
        return p.GetFloat(param, default)
    if t == "bool":
        return p.GetBool(param, default)
    # t == "unsigned"
    if param == "color":
        return v.GetUnsigned("DefaultShapeLineColor", default)
    return p.GetUnsigned(param, default)


getParam = get_param
def set_param(param, value):
    """Set a Draft parameter to the given value.

    Parameters are written to
    `'User parameter:BaseApp/Preferences/Mod/Draft'`, except for
    `'linewidth'` and `'color'`, which go to the corresponding View
    preferences (`DefaultShapeLineWidth`/`DefaultShapeLineColor`).

    Parameters
    ----------
    param : str
        The name of a parameter in the parameter database.
    value : int, str, float, or bool
        The value to store; its expected type is determined with
        `get_param_type`.  Unknown parameters are silently ignored.
    """
    p = App.ParamGet("User parameter:BaseApp/Preferences/Mod/Draft")
    v = App.ParamGet("User parameter:BaseApp/Preferences/View")
    t = getParamType(param)
    # the two View-backed parameters are handled specially
    if t == "int" and param == "linewidth":
        v.SetInt("DefaultShapeLineWidth", value)
        return
    if t == "unsigned" and param == "color":
        v.SetUnsigned("DefaultShapeLineColor", value)
        return
    setters = {
        "int": p.SetInt,
        "string": p.SetString,
        "float": p.SetFloat,
        "bool": p.SetBool,
        "unsigned": p.SetUnsigned,
    }
    if t in setters:
        setters[t](param, value)


setParam = set_param
def precision():
    """Return the precision value from the parameter database.

    It is the number of decimal places that a float will have.
    Example
    ::
        precision=6, 0.123456
        precision=5, 0.12345
        precision=4, 0.1234

    Due to floating point operations there may be rounding errors.
    Therefore, this precision number is used to round up values
    so that all operations are consistent.
    By default the precision is 6 decimal places.

    Returns
    -------
    int
        get_param("precision", 6)
    """
    return getParam("precision", 6)
def tolerance():
    """Return the tolerance value from the parameter database.

    This specifies a tolerance around a quantity.
    ::
        value + tolerance
        value - tolerance

    By default the tolerance is 0.05.

    Returns
    -------
    float
        get_param("tolerance", 0.05)
    """
    return getParam("tolerance", 0.05)
def epsilon():
    """Return a small number based on the tolerance for use in comparisons.

    The epsilon value is used in floating point comparisons. Use with caution.
    ::
        denom = 10**tolerance
        num = 1
        epsilon = num/denom

    Returns
    -------
    float
        1/(10**tolerance)
    """
    return 1.0 / (10.0 ** tolerance())
def get_real_name(name):
    """Strip the trailing digits from a string.

    Parameters
    ----------
    name : str
        A string that may end in digits, for example `'Line001'`.

    Returns
    -------
    str
        The string without its trailing digits, `'Line'`.
        If the string consists entirely of digits (or is empty)
        it is returned unchanged, so the result is never emptied
        by the stripping.
    """
    stripped = name.rstrip("0123456789")
    return stripped if stripped else name


getRealName = get_real_name
def get_type(obj):
    """Return a string describing the type of the given object.

    Parameters
    ----------
    obj : App::DocumentObject
        Any type of scripted object created with Draft
        or another workbench.

    Returns
    -------
    str or None
        `None` for a falsy `obj`; `'Shape'` for a `Part.Shape`;
        the value of `obj.Proxy.Type` when present; otherwise
        `obj.TypeId` when present, and `'Unknown'` as a last resort.
    """
    import Part

    if not obj:
        return None
    if isinstance(obj, Part.Shape):
        return "Shape"
    proxy = getattr(obj, "Proxy", None)
    if proxy is not None and hasattr(proxy, "Type"):
        return proxy.Type
    return getattr(obj, "TypeId", "Unknown")


getType = get_type
def get_objects_of_type(objects, typ):
    """Filter a list of objects down to those matching a given type.

    Parameters
    ----------
    objects : list of App::DocumentObject
        The objects to test.
    typ : str
        A type string as returned by `get_type`.

    Returns
    -------
    list of objects
        A new list containing only the objects whose type equals `typ`.
    """
    return [candidate for candidate in objects if getType(candidate) == typ]


getObjectsOfType = get_objects_of_type
def is_clone(obj, objtype=None, recursive=False):
    """Return True if the given object is a clone of a certain type.

    A clone is of type `'Clone'`, and has a reference
    to the original object inside its `Objects` attribute,
    which is an `'App::PropertyLinkListGlobal'`.

    The `Objects` attribute can point to another `'Clone'` object.
    If `recursive` is `True`, the function will be called recursively
    to further test this clone, until the type of the original object
    can be compared to `objtype`.

    Parameters
    ----------
    obj : App::DocumentObject
        The clone object that will be tested for a certain type.
    objtype : str or list of str
        A type string such as one obtained from `get_type`.
        Or a list of such types.
    recursive : bool, optional
        It defaults to `False`.
        If it is `True`, this same function will be called recursively
        with `obj.Object[0]` as input.
        This option only works if `obj.Object[0]` is of type `'Clone'`,
        that is, if `obj` is a clone of a clone.

    Returns
    -------
    bool
        Returns `True` if `obj` is of type `'Clone'`,
        and `obj.Object[0]` is of type `objtype`.
        If `objtype` is a list, then `obj.Objects[0]`
        will be tested against each of the elements in the list,
        and it will return `True` if at least one element matches the type.
        If `obj` isn't of type `'Clone'` but has the `CloneOf` attribute,
        it will also return `True`.
        It returns `False` otherwise, for example,
        if `obj` is not even a clone.
    """
    if isinstance(objtype, list):
        # accept a list of candidate types; True if any one matches
        return any([is_clone(obj, t, recursive) for t in objtype])
    if getType(obj) == "Clone":
        if len(obj.Objects) == 1:
            if objtype:
                if getType(obj.Objects[0]) == objtype:
                    return True
        elif recursive and (getType(obj.Objects[0]) == "Clone"):
            # clone of a clone: recurse until the base type can be compared
            return is_clone(obj.Objects[0], objtype, recursive)
    elif hasattr(obj, "CloneOf"):
        # non-Draft clones (e.g. objects carrying a CloneOf link)
        if obj.CloneOf:
            if objtype:
                if getType(obj.CloneOf) == objtype:
                    return True
            else:
                # no specific type requested: any CloneOf link counts
                return True
    return False


isClone = is_clone
def get_clone_base(obj, strict=False, recursive=True):
    """Return the object cloned by this object, if any.

    Parameters
    ----------
    obj: App::DocumentObject
        Any type of object.
    strict: bool, optional
        It defaults to `False`.
        If it is `True`, and this object is not a clone,
        this function will return `False`.
    recursive: bool, optional
        It defaults to `True`.
        If it is `True`, the function calls itself recursively to find
        the ultimate base object; if `False`, only the immediate base
        object is returned.

    Returns
    -------
    App::DocumentObject
        If `obj` is a `Draft Clone`, the first object in its `Objects`
        property; if `obj` has a truthy `CloneOf` property, the object
        it points to (followed recursively by default).
    obj
        If `obj` is not a `Draft Clone`, nor has a `CloneOf` property,
        the same `obj` is returned, as long as `strict` is `False`.
    False
        If `obj` is not a clone and `strict` is `True`.
    """
    # A CloneOf link takes precedence over a Draft Clone's Objects list.
    if hasattr(obj, "CloneOf") and obj.CloneOf:
        if recursive:
            # NOTE(review): the recursive calls drop `strict`, so nested
            # lookups always fall back to returning the object itself --
            # presumably intended; confirm before changing.
            return get_clone_base(obj.CloneOf)
        return obj.CloneOf
    if get_type(obj) == "Clone" and obj.Objects:
        if recursive:
            return get_clone_base(obj.Objects[0])
        return obj.Objects[0]
    if strict:
        return False
    return obj


getCloneBase = get_clone_base
def shapify(obj):
    """Transform a parametric object into a static, non-parametric shape.

    Parameters
    ----------
    obj : App::DocumentObject
        Any type of scripted object.
        This object will be removed, and a non-parametric object
        with the same topological shape (`Part::TopoShape`)
        will be created.

    Returns
    -------
    Part::Feature
        The new object that takes `obj.Shape` as its own.
        Depending on the contents of the Shape, the resulting object
        will be named `'Face'`, `'Solid'`, `'Compound'`,
        `'Shell'`, `'Wire'`, `'Line'`, `'Circle'`,
        or the name returned by `get_real_name(obj.Name)`.
        If there is a problem with `obj.Shape`, it will return `None`,
        and the original object will not be modified.
    """
    try:
        shape = obj.Shape
    except Exception:
        # object has no usable shape; leave the document untouched
        return None
    # Pick a name from the dominant topology.  The order of these tests
    # is significant: e.g. a single-face shape is "Face" even though it
    # also has wires and edges.
    if len(shape.Faces) == 1:
        name = "Face"
    elif len(shape.Solids) == 1:
        name = "Solid"
    elif len(shape.Solids) > 1:
        name = "Compound"
    elif len(shape.Faces) > 1:
        name = "Shell"
    elif len(shape.Wires) == 1:
        name = "Wire"
    elif len(shape.Edges) == 1:
        import DraftGeomUtils

        if DraftGeomUtils.geomType(shape.Edges[0]) == "Line":
            name = "Line"
        else:
            name = "Circle"
    else:
        # fall back to the original name minus its trailing digits
        name = getRealName(obj.Name)
    App.ActiveDocument.removeObject(obj.Name)
    newobj = App.ActiveDocument.addObject("Part::Feature", name)
    newobj.Shape = shape
    return newobj
def print_shape(shape):
    """Print a summary of a topological shape's contents.

    Counts of solids, faces, wires, edges and vertices are printed
    first, then the vertex points of each face (or, failing that, of
    each wire, or of the shape itself).

    Parameters
    ----------
    shape : Part::TopoShape
        Any topological shape in an object, usually obtained
        from `obj.Shape`.
    """
    _msg(translate("draft", "Solids:") + " {}".format(len(shape.Solids)))
    _msg(translate("draft", "Faces:") + " {}".format(len(shape.Faces)))
    _msg(translate("draft", "Wires:") + " {}".format(len(shape.Wires)))
    _msg(translate("draft", "Edges:") + " {}".format(len(shape.Edges)))
    _msg(translate("draft", "Vertices:") + " {}".format(len(shape.Vertexes)))
    if shape.Faces:
        for index, face in enumerate(shape.Faces):
            _msg(translate("draft", "Face") + " {}:".format(index))
            for vertex in face.Vertexes:
                _msg(" {}".format(vertex.Point))
    elif shape.Wires:
        for index, wire in enumerate(shape.Wires):
            _msg(translate("draft", "Wire") + " {}:".format(index))
            for vertex in wire.Vertexes:
                _msg(" {}".format(vertex.Point))
    else:
        for vertex in shape.Vertexes:
            _msg(" {}".format(vertex.Point))


printShape = print_shape
def compare_objects(obj1, obj2):
    """Print the differences between 2 objects.

    The two objects are compared through their `TypeId` attribute,
    or by using the `get_type` function.
    If they are the same type their properties are compared
    looking for differences.

    Neither `Shape` nor `Label` attributes are compared.

    Parameters
    ----------
    obj1 : App::DocumentObject
        Any type of scripted object.
    obj2 : App::DocumentObject
        Any type of scripted object.
    """
    if obj1.TypeId != obj2.TypeId:
        # different C++ types: nothing further to compare
        _msg(
            "'{0}' ({1}), '{2}' ({3}): ".format(
                obj1.Name, obj1.TypeId, obj2.Name, obj2.TypeId
            )
            + translate("draft", "different types")
            + " (TypeId)"
        )
    elif getType(obj1) != getType(obj2):
        # same C++ type but different scripted (Proxy) types
        _msg(
            "'{0}' ({1}), '{2}' ({3}): ".format(
                obj1.Name, get_type(obj1), obj2.Name, get_type(obj2)
            )
            + translate("draft", "different types")
            + " (Proxy.Type)"
        )
    else:
        # same type: compare properties one by one
        for p in obj1.PropertiesList:
            if p in obj2.PropertiesList:
                if p in ("Shape", "Label"):
                    # deliberately ignored
                    pass
                elif p == "Placement":
                    # report the distance between the two base points
                    delta = obj1.Placement.Base.sub(obj2.Placement.Base)
                    text = translate(
                        "draft",
                        "Objects have different placements. "
                        "Distance between the two base points: ",
                    )
                    _msg(text + str(delta.Length))
                else:
                    if getattr(obj1, p) != getattr(obj2, p):
                        _msg(
                            "'{}' ".format(p)
                            + translate("draft", "has a different value")
                        )
            else:
                _msg(
                    "{} ".format(p)
                    + translate("draft", "doesn't exist in one of the objects")
                )


compareObjects = compare_objects
def load_svg_patterns():
    """Load the default Draft SVG patterns and user defined patterns.

    The SVG patterns are collected into a dictionary stored as the
    `App.svgpatterns` attribute. Each entry maps a pattern name to
    `[pattern_contents, source_file]`.
    """
    import importSVG

    App.svgpatterns = {}

    # Built-in patterns shipped inside the Qt resource file.
    for entry in QtCore.QDir(":/patterns").entryList():
        res_path = ":/patterns/" + str(entry)
        qfile = QtCore.QFile(res_path)
        qfile.open(QtCore.QIODevice.ReadOnly)
        found = importSVG.getContents(str(qfile.readAll()), "pattern", True)
        if found:
            for key in found:
                found[key] = [found[key], res_path]
            App.svgpatterns.update(found)

    # Patterns from a user defined file/directory preference.
    user_dir = getParam("patternFile", "")
    if os.path.isdir(user_dir):
        for name in os.listdir(user_dir):
            if name[-4:].upper() != ".SVG":
                continue
            full_path = os.path.join(user_dir, name)
            found = importSVG.getContents(full_path, "pattern")
            if found:
                for key in found:
                    found[key] = [found[key], full_path]
                App.svgpatterns.update(found)

    # Patterns bundled with the TechDraw workbench.
    td_dir = os.path.join(App.getResourceDir(), "Mod", "TechDraw", "Patterns")
    if os.path.isdir(td_dir):
        for name in os.listdir(td_dir):
            if name[-4:].upper() != ".SVG":
                continue
            full_path = os.path.join(td_dir, name)
            found = importSVG.getContents(full_path, "pattern")
            if found:
                for key in found:
                    found[key] = [found[key], full_path]
            else:
                # some TD pattern files have no <pattern> definition but can still be used by Draft
                found = {name[:-4]: ["<pattern></pattern>", full_path]}
            App.svgpatterns.update(found)


loadSvgPatterns = load_svg_patterns
def svg_patterns():
    """Return a dictionary with installed SVG patterns.

    Returns
    -------
    dict
        Returns `App.svgpatterns` if it exists.
        Otherwise it calls `load_svg_patterns` to create it
        before returning it. An empty dictionary is returned if the
        attribute still could not be created.
    """
    if not hasattr(App, "svgpatterns"):
        # Call the snake_case implementation directly instead of the
        # old `loadSvgPatterns` alias, for consistency.
        load_svg_patterns()
    # getattr with a default collapses the former duplicated hasattr check.
    return getattr(App, "svgpatterns", {})


svgpatterns = svg_patterns
def get_rgb(color, testbw=True):
    """Return an '#RRGGBB' hexadecimal string from a FreeCAD color.

    Parameters
    ----------
    color : list or tuple with RGB values
        The values must be in the 0.0-1.0 range.
        Only the first three components are used (an alpha component,
        if present, is ignored).
    testbw : bool (default = True)
        Pure white will be converted into pure black when the
        'SvgLinesBlack' parameter is set.
        (Docstring previously misspelled this parameter as "testwb".)

    Returns
    -------
    str
        A '#RRGGBB' string, e.g. '#ff0000' for pure red.
    """
    # Scale each channel from 0.0-1.0 to 0-255 and format as two hex digits.
    col = "#" + "".join("{:02x}".format(int(ch * 255)) for ch in color[:3])
    if testbw and col == "#ffffff":
        # print(getParam('SvgLinesBlack'))
        if getParam("SvgLinesBlack", True):
            col = "#000000"
    return col


getrgb = get_rgb
def argb_to_rgba(color):
    """Change byte order of a 4 byte color int from ARGB (Qt) to RGBA (FreeCAD).

    Alpha in both integers is always 255.
    Alpha in color properties, although ignored, is always zero however.

    Usage:
        qt_int = self.form.ShapeColor.property("color").rgba() # Note: returns ARGB int
        qt_int = self.form.ShapeColor.property("color").rgb()  # Note: returns ARGB int
        fc_int = argb_to_rgba(qt_int)
        FreeCAD.ParamGet("User parameter:BaseApp/Preferences/View")\
            .SetUnsigned("DefaultShapeColor", fc_int)
        obj.ViewObject.ShapeColor = fc_int & 0xFFFFFF00

    Related:
        getRgbF() returns an RGBA tuple. 4 floats in the range 0.0 - 1.0. Alpha is always 1.
        Alpha should be set to zero or removed before using the tuple to change a color property:
        obj.ViewObject.ShapeColor = self.form.ShapeColor.property("color").getRgbF()[:3]
    """
    # Move the RGB payload up one byte and the alpha byte to the bottom.
    rgb_part = (color & 0xFFFFFF) << 8
    alpha_part = (color & 0xFF000000) >> 24
    return rgb_part + alpha_part
def rgba_to_argb(color):
    """Change byte order of a 4 byte color int from RGBA (FreeCAD) to ARGB (Qt)."""
    # Inverse of argb_to_rgba: alpha byte to the top, RGB payload down one byte.
    alpha_part = (color & 0xFF) << 24
    rgb_part = (color & 0xFFFFFF00) >> 8
    return alpha_part + rgb_part
def filter_objects_for_modifiers(objects, isCopied=False):
    """Return the objects a modifier tool should actually operate on.

    Objects flagged with `MoveBase` contribute their `Base` instead of
    themselves (warning when that base is shared); objects with a
    read-only `Placement` are rejected unless they are being copied.
    """
    selected = []
    for candidate in objects:
        if getattr(candidate, "MoveBase", None) and candidate.Base:
            shared_with = [
                parent.Name
                for parent in candidate.Base.InList
                if parent.isDerivedFrom("Part::Feature")
            ]
            if len(shared_with) > 1:
                warningMessage = translate(
                    "draft",
                    "%s shares a base with %d other objects. Please check if you want to modify this.",
                ) % (candidate.Name, len(shared_with) - 1)
                App.Console.PrintError(warningMessage)
                if App.GuiUp:
                    Gui.getMainWindow().showMessage(warningMessage, 0)
            selected.append(candidate.Base)
        elif (
            not isCopied
            and hasattr(candidate, "Placement")
            and candidate.getEditorMode("Placement") == ["ReadOnly"]
        ):
            # Read-only placement: report and drop the object entirely.
            App.Console.PrintError(
                translate(
                    "draft", "%s cannot be modified because its placement is readonly."
                )
                % candidate.Name
            )
        else:
            selected.append(candidate)
    return selected


filterObjectsForModifiers = filter_objects_for_modifiers
def is_closed_edge(edge_index, object):
    """Return whether `edge_index` is past the last point-to-point segment.

    NOTE(review): presumably this identifies the closing edge of a
    closed wire (the edge joining the last point back to the first) —
    confirm against the callers.
    """
    last_segment = len(object.Points) - 1
    return edge_index >= last_segment


isClosedEdge = is_closed_edge
def utf8_decode(text):
    r"""Decode the input string and return a unicode string.

    Python 2::

        str -> unicode
        unicode -> unicode

    Python 3::

        str -> str
        bytes -> str

    Parameters
    ----------
    text : str, unicode or bytes
        A string-like object that may contain unicode characters
        like accented characters.
        In Python 2, a `bytes` object can include accented characters,
        but in Python 3 it must only contain ASCII literal characters.

    Returns
    -------
    unicode or str
        When the input has a `decode` method (Python 2 `str`,
        Python 3 `bytes`) the `'utf-8'` decoded value is returned:

        >>> b"A\xc3\xa1".decode("utf-8")
        'Aá'

        Otherwise the input is already unicode (Python 2 `unicode`,
        Python 3 `str`) and is returned unchanged. In Python 3 the
        result is therefore always a `str` object.
    """
    try:
        decoded = text.decode("utf-8")
    except AttributeError:
        # Already a unicode (Py2) / str (Py3) object with no decode method.
        decoded = text
    return decoded
def print_header(name, description, debug=True):
    """Print a line to the console when something is called, and log it.

    Parameters
    ----------
    name: str
        The name of the function or class that is being called.
        It is always written to the log file, so the log can be
        investigated for clues if there are problems.
    description: str
        Arbitrary text printed to the console when the function
        or class is called.
    debug: bool, optional
        Defaults to `True`. If `False`, the `description` is not
        printed to the console; the `name` is logged regardless.
    """
    _log(name)
    if not debug:
        return
    _msg("-" * 16)
    _msg(description)
def find_doc(doc=None):
    """Return the active document or find a document by name.

    Parameters
    ----------
    doc: App::Document or str, optional
        The document that will be searched in the session.
        It defaults to `None`, in which case it tries to find
        the active document.
        If `doc` is a string, it will try to get the document by `Name`.

    Returns
    -------
    bool, App::Document
        `(True, document)` when the search was successful.
    False, None
        `(False, None)` if there is no active document, or the string
        in `doc` doesn't correspond to an open document in the session.
    """
    if not doc:
        doc = App.activeDocument()
        if not doc:
            return False, None
    if isinstance(doc, str):
        try:
            doc = App.getDocument(doc)
        except NameError:
            _msg("document: {}".format(doc))
            _err(translate("draft", "Wrong input: unknown document."))
            return False, None
    return True, doc
def find_object(obj, doc=None):
    """Find object in the document, inclusive by Label.

    Parameters
    ----------
    obj: App::DocumentObject or str
        The object to search in `doc`.
        A string is interpreted as a `Label`; since labels are not
        guaranteed to be unique, the first object carrying that label
        in the document is used.
    doc: App::Document or str, optional
        The document in which the object will be searched.
        Defaults to `None` (search the active document).
        A string is interpreted as a document `Name`.

    Returns
    -------
    bool, App::DocumentObject
        `(True, object)` when the search was successful.
    False, None
        If the document cannot be resolved or the object doesn't
        exist in it.
    """
    found, doc = find_doc(doc)
    if not found:
        _err(translate("draft", "No active document. Aborting."))
        return False, None
    if isinstance(obj, str):
        labeled = doc.getObjectsByLabel(obj)
        if not labeled:
            return False, None
        obj = labeled[0]
    if obj not in doc.Objects:
        return False, None
    return True, obj
def use_instead(function, version=""):
    """Print a deprecation message and suggest another function.

    This function must be used inside the definition of a function
    that has been considered for deprecation, so we must provide
    an alternative.
    ::
        def old_function():
            use_instead('new_function', 1.0)

        def someFunction():
            use_instead('some_function')

    Parameters
    ----------
    function: str
        The name of the function to use instead of the current one.
    version: float or str, optional
        It defaults to the empty string `''`.
        The version where this command is to be deprecated, if it is known.
        If we don't know when this command will be deprecated
        then we should not give a version.
    """
    if version:
        head = (
            translate("draft", "This function will be deprecated in ")
            + "{}. ".format(version)
        )
    else:
        head = translate("draft", "This function will be deprecated. ")
    _wrn(head + translate("draft", "Please use ") + "'{}'.".format(function))
## @}
|
qltk | showfiles | # Copyright 2012-2020 Nick Boultbee
# 2012,2014,2018 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Show directories and files in the default system file browser"""
import os
import subprocess
from gi.repository import Gio, GLib, Gtk
from quodlibet import print_d
from quodlibet.util import is_osx, is_windows
from senf import fsn2uri, fsnative
def show_files(dirname, entries=None):
    """Shows the directory in the default file browser and if passed
    a list of directory entries will highlight those.

    Depending on the system/platform this might highlight all files passed,
    or only one of them, or none at all.

    Args:
        dirname (fsnative): Path to the directory
        entries (List[fsnative]): List of (relative) filenames in the directory
    Returns:
        bool: if the action was successful or not
    """
    # A mutable default argument ([]) would be shared between calls;
    # use the None sentinel instead (callers passing [] are unaffected).
    if entries is None:
        entries = []

    assert isinstance(dirname, fsnative)
    assert all(isinstance(e, fsnative) and os.path.basename(e) == e for e in entries)

    dirname = os.path.abspath(dirname)

    if is_windows():
        implementations = [_show_files_win32]
    elif is_osx():
        implementations = [_show_files_finder]
    else:
        implementations = [
            _show_files_fdo,
            _show_files_thunar,
            _show_files_xdg_open,
            _show_files_gnome_open,
        ]

    # Try each platform implementation in order until one succeeds.
    for impl in implementations:
        try:
            impl(dirname, entries)
        except BrowseError as e:
            print_d("Couldn't show files with %s (%s), ignoring." % (impl, e))
            continue
        else:
            return True
    return False
def show_songs(songs):
    """Show the directories containing `songs` in the file browser.

    Returns False if showing any of them failed.
    """
    # Group basenames by directory so each directory is opened once.
    by_dir = {}
    for song in songs:
        by_dir.setdefault(song("~dirname"), []).append(song("~basename"))
    for dirname, entries in sorted(by_dir.items()):
        if not show_files(dirname, entries):
            return False
    return True
class BrowseError(Exception):
    """Raised by the _show_files_* helpers when a file browser
    implementation could not be used; show_files() catches it and
    falls through to the next implementation."""

    pass
def _get_startup_id():
    """Build a startup-notification id from the app window type and event time."""
    from quodlibet import app

    window_type_name = type(app.window).__name__
    return "%s_TIME%d" % (window_type_name, Gtk.get_current_event_time())
def _get_dbus_proxy(name, path, iface):
    """Return a synchronous D-Bus proxy for `iface` on the session bus."""
    session_bus = Gio.bus_get_sync(Gio.BusType.SESSION, None)
    proxy = Gio.DBusProxy.new_sync(
        session_bus, Gio.DBusProxyFlags.NONE, None, name, path, iface, None
    )
    return proxy
def _show_files_fdo(dirname, entries):
    """Show files via the freedesktop.org FileManager1 D-Bus interface."""
    # https://www.freedesktop.org/wiki/Specifications/file-manager-interface/
    FDO_PATH = "/org/freedesktop/FileManager1"
    FDO_NAME = "org.freedesktop.FileManager1"
    FDO_IFACE = "org.freedesktop.FileManager1"

    try:
        proxy = _get_dbus_proxy(FDO_NAME, FDO_PATH, FDO_IFACE)
        if entries:
            # Only the first entry is highlighted.
            item_uri = fsn2uri(os.path.join(dirname, entries[0]))
            proxy.ShowItems("(ass)", [item_uri], _get_startup_id())
        else:
            proxy.ShowFolders("(ass)", [fsn2uri(dirname)], _get_startup_id())
    except GLib.Error as e:
        raise BrowseError(e)
def _show_files_thunar(dirname, entries):
    """Show files via Thunar's org.xfce.FileManager D-Bus interface."""
    # https://git.xfce.org/xfce/thunar/tree/thunar/thunar-dbus-service-infos.xml
    XFCE_PATH = "/org/xfce/FileManager"
    XFCE_NAME = "org.xfce.FileManager"
    XFCE_IFACE = "org.xfce.FileManager"

    try:
        proxy = _get_dbus_proxy(XFCE_NAME, XFCE_PATH, XFCE_IFACE)
        if entries:
            # Only the first entry is selected.
            proxy.DisplayFolderAndSelect(
                "(ssss)", fsn2uri(dirname), entries[0], "", _get_startup_id()
            )
        else:
            proxy.DisplayFolder("(sss)", fsn2uri(dirname), "", _get_startup_id())
    except GLib.Error as e:
        raise BrowseError(e)
def _show_files_gnome_open(dirname, *args):
    """Open the directory with the legacy gnome-open tool (no highlighting)."""
    try:
        status = subprocess.call(["gnome-open", dirname])
        if status != 0:
            raise EnvironmentError("gnome-open error return status")
    except EnvironmentError as e:
        raise BrowseError(e)
def _show_files_xdg_open(dirname, *args):
    """Open the directory with xdg-open (no highlighting)."""
    try:
        status = subprocess.call(["xdg-open", dirname])
        if status != 0:
            raise EnvironmentError("xdg-open error return status")
    except EnvironmentError as e:
        raise BrowseError(e)
def _show_files_win32(dirname, entries):
    """Show files with Windows Explorer, highlighting `entries` if given."""
    if not is_windows():
        raise BrowseError("windows only")

    if entries:
        from quodlibet.util.windows import open_folder_and_select_items

        try:
            open_folder_and_select_items(dirname, entries)
        except WindowsError as e:
            raise BrowseError(e)
    else:
        # open_folder_and_select_items will open the parent if no items
        # are passed, so execute explorer directly for that case
        try:
            if subprocess.call(["explorer", dirname]) != 0:
                raise EnvironmentError("explorer error return status")
        except EnvironmentError as e:
            raise BrowseError(e)
def _show_files_finder(dirname, *args):
    """Reveal the directory in the macOS Finder (no per-file highlighting)."""
    if not is_osx():
        raise BrowseError("OS X only")
    try:
        status = subprocess.call(["open", "-R", dirname])
        if status != 0:
            raise EnvironmentError("open error return status")
    except EnvironmentError as e:
        raise BrowseError(e)
|
Gui | Waterline | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2020 sliptonic <shopinthewoods@gmail.com> *
# * Copyright (c) 2020 russ4262 <russ4262@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import FreeCADGui
import Path
import Path.Base.Gui.Util as PathGuiUtil
import Path.Op.Gui.Base as PathOpGui
import Path.Op.Waterline as PathWaterline
from PySide import QtCore
from PySide.QtCore import QT_TRANSLATE_NOOP
__title__ = "Path Waterline Operation UI"
__author__ = "sliptonic (Brad Collette), russ4262 (Russell Johnson)"
__url__ = "http://www.freecad.org"
__doc__ = "Waterline operation page controller and command implementation."
translate = FreeCAD.Qt.translate
# Developer switch: flip the literal to True for verbose module logging.
if False:
    Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule())
    Path.Log.trackModule(Path.Log.thisModule())
else:
    Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
class TaskPanelOpPage(PathOpGui.TaskPanelPage):
    """Page controller class for the Waterline operation.

    Mediates between the PageOpWaterlineEdit.ui form widgets and the
    properties of a Waterline operation object.
    """

    def initPage(self, obj):
        # One-time page setup: the title shows which operation is edited.
        self.setTitle("Waterline - " + obj.Label)
        self.updateVisibility()

    def getForm(self):
        """getForm() ... returns UI"""
        form = FreeCADGui.PySideUic.loadUi(":/panels/PageOpWaterlineEdit.ui")
        # (combo box widget name, enumeration property name) pairs used to
        # populate each combo box from the operation's property enumerations.
        comboToPropertyMap = [
            ("algorithmSelect", "Algorithm"),
            ("boundBoxSelect", "BoundBox"),
            ("layerMode", "LayerMode"),
            ("cutPattern", "CutPattern"),
        ]
        enumTups = PathWaterline.ObjectWaterline.propertyEnumerations(dataType="raw")
        PathGuiUtil.populateCombobox(form, enumTups, comboToPropertyMap)
        return form

    def getFields(self, obj):
        """getFields(obj) ... transfers values from UI to obj's properties"""
        self.updateToolController(obj, self.form.toolController)
        self.updateCoolant(obj, self.form.coolantController)
        # Assign only when the value actually changed, to avoid needless
        # property writes (and the recomputes they trigger).
        if obj.Algorithm != str(self.form.algorithmSelect.currentData()):
            obj.Algorithm = str(self.form.algorithmSelect.currentData())
        if obj.BoundBox != str(self.form.boundBoxSelect.currentData()):
            obj.BoundBox = str(self.form.boundBoxSelect.currentData())
        if obj.LayerMode != str(self.form.layerMode.currentData()):
            obj.LayerMode = str(self.form.layerMode.currentData())
        if obj.CutPattern != str(self.form.cutPattern.currentData()):
            obj.CutPattern = str(self.form.cutPattern.currentData())
        PathGuiUtil.updateInputField(
            obj, "BoundaryAdjustment", self.form.boundaryAdjustment
        )
        if obj.StepOver != self.form.stepOver.value():
            obj.StepOver = self.form.stepOver.value()
        PathGuiUtil.updateInputField(obj, "SampleInterval", self.form.sampleInterval)
        if obj.OptimizeLinearPaths != self.form.optimizeEnabled.isChecked():
            obj.OptimizeLinearPaths = self.form.optimizeEnabled.isChecked()

    def setFields(self, obj):
        """setFields(obj) ... transfers obj's property values to UI"""
        self.setupToolController(obj, self.form.toolController)
        self.setupCoolant(obj, self.form.coolantController)
        self.selectInComboBox(obj.Algorithm, self.form.algorithmSelect)
        self.selectInComboBox(obj.BoundBox, self.form.boundBoxSelect)
        self.selectInComboBox(obj.LayerMode, self.form.layerMode)
        self.selectInComboBox(obj.CutPattern, self.form.cutPattern)
        # Length values go through Quantity so the displayed text respects
        # the user's unit schema.
        self.form.boundaryAdjustment.setText(
            FreeCAD.Units.Quantity(
                obj.BoundaryAdjustment.Value, FreeCAD.Units.Length
            ).UserString
        )
        self.form.stepOver.setValue(obj.StepOver)
        self.form.sampleInterval.setText(
            FreeCAD.Units.Quantity(
                obj.SampleInterval.Value, FreeCAD.Units.Length
            ).UserString
        )
        if obj.OptimizeLinearPaths:
            self.form.optimizeEnabled.setCheckState(QtCore.Qt.Checked)
        else:
            self.form.optimizeEnabled.setCheckState(QtCore.Qt.Unchecked)
        self.updateVisibility()

    def getSignalsForUpdate(self, obj):
        """getSignalsForUpdate(obj) ... return list of signals for updating obj"""
        signals = []
        signals.append(self.form.toolController.currentIndexChanged)
        signals.append(self.form.coolantController.currentIndexChanged)
        signals.append(self.form.algorithmSelect.currentIndexChanged)
        signals.append(self.form.boundBoxSelect.currentIndexChanged)
        signals.append(self.form.layerMode.currentIndexChanged)
        signals.append(self.form.cutPattern.currentIndexChanged)
        signals.append(self.form.boundaryAdjustment.editingFinished)
        signals.append(self.form.stepOver.editingFinished)
        signals.append(self.form.sampleInterval.editingFinished)
        signals.append(self.form.optimizeEnabled.stateChanged)
        return signals

    def updateVisibility(self, sentObj=None):
        """updateVisibility(sentObj=None)... Updates visibility of Tasks panel objects."""
        # Show only the widgets relevant to the selected algorithm.
        Algorithm = self.form.algorithmSelect.currentData()
        self.form.optimizeEnabled.hide()  # Has no independent QLabel object
        if Algorithm == "OCL Dropcutter":
            self.form.cutPattern.hide()
            self.form.cutPattern_label.hide()
            self.form.boundaryAdjustment.hide()
            self.form.boundaryAdjustment_label.hide()
            self.form.stepOver.hide()
            self.form.stepOver_label.hide()
            self.form.sampleInterval.show()
            self.form.sampleInterval_label.show()
        elif Algorithm == "Experimental":
            self.form.cutPattern.show()
            self.form.boundaryAdjustment.show()
            self.form.cutPattern_label.show()
            self.form.boundaryAdjustment_label.show()
            # A "None" cut pattern needs no step over value.
            if self.form.cutPattern.currentData() == "None":
                self.form.stepOver.hide()
                self.form.stepOver_label.hide()
            else:
                self.form.stepOver.show()
                self.form.stepOver_label.show()
            self.form.sampleInterval.hide()
            self.form.sampleInterval_label.hide()

    def registerSignalHandlers(self, obj):
        # Re-evaluate widget visibility whenever these selections change.
        self.form.algorithmSelect.currentIndexChanged.connect(self.updateVisibility)
        self.form.cutPattern.currentIndexChanged.connect(self.updateVisibility)
# Register the Waterline operation with the Path GUI framework, creating
# the "Path_Waterline" command (toolbar/menu entry) bound to the task panel
# page controller above.
Command = PathOpGui.SetupOperation(
    "Waterline",
    PathWaterline.Create,
    TaskPanelOpPage,
    "Path_Waterline",
    QT_TRANSLATE_NOOP("Path_Waterline", "Waterline"),
    QT_TRANSLATE_NOOP("Path_Waterline", "Create a Waterline Operation from a model"),
    PathWaterline.SetupProperties,
)

FreeCAD.Console.PrintLog("Loading PathWaterlineGui... done\n")
|
builtinContextMenus | shipJump | # noinspection PyPackageRequirements
import gui.mainFrame
import wx
from gui.builtinShipBrowser.events import Stage3Selected
from gui.contextMenu import ContextMenuUnconditional
from service.fit import Fit
_t = wx.GetTranslation
class JumpToShip(ContextMenuUnconditional):
    """Context-menu entry that opens the active fit's ship in the
    fitting (ship) browser."""

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext):
        # NOTE(review): the return value appears to control whether the
        # menu entry is shown — confirm against ContextMenuUnconditional.
        if srcContext != "fittingShip":
            return False
        # Notebook selection index 1 is the fitting browser tab.
        fitTabSelected = self.mainFrame.notebookBrowsers.GetSelection() == 1
        if not fitTabSelected:
            return True
        browsingStage = self.mainFrame.shipBrowser.GetActiveStage()
        if browsingStage != 3:
            return True
        fitID = self.mainFrame.getActiveFit()
        ship = Fit.getInstance().getFit(fitID).ship
        browsingShipID = self.mainFrame.shipBrowser.GetStageData(browsingStage)
        if browsingShipID != ship.item.ID:
            return True
        # The browser is already showing this ship's stage-3 page.
        return False

    def getText(self, callingWindow, itmContext):
        return _t("Open in Fitting Browser")

    def activate(self, callingWindow, fullContext, i):
        # Switch to the fitting browser tab and post an event selecting
        # the current ship's stage-3 page.
        fitID = self.mainFrame.getActiveFit()
        ship = Fit.getInstance().getFit(fitID).ship
        self.mainFrame.notebookBrowsers.SetSelection(1)
        wx.PostEvent(
            self.mainFrame.shipBrowser, Stage3Selected(shipID=ship.item.ID, back=True)
        )


JumpToShip.register()
|
extractor | cwtv | # coding: utf-8
from __future__ import unicode_literals
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
smuggle_url,
str_or_none,
)
from .common import InfoExtractor
class CWTVIE(InfoExtractor):
    """Extractor for cwtv.com / cwseed.com / cwtvpr.com video pages.

    Resolves the page's video GUID against the CW mobile-app metadata
    feed, then delegates the actual media extraction to the ThePlatform
    extractor via a smuggled SMIL URL.
    """

    # The video id is the GUID in the `play=` or `watch=` query parameter.
    _VALID_URL = r"https?://(?:www\.)?cw(?:tv(?:pr)?|seed)\.com/(?:shows/)?(?:[^/]+/)+[^?]*\?.*\b(?:play|watch)=(?P<id>[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12})"
    _TESTS = [
        {
            "url": "http://cwtv.com/shows/arrow/legends-of-yesterday/?play=6b15e985-9345-4f60-baf8-56e96be57c63",
            "info_dict": {
                "id": "6b15e985-9345-4f60-baf8-56e96be57c63",
                "ext": "mp4",
                "title": "Legends of Yesterday",
                "description": "Oliver and Barry Allen take Kendra Saunders and Carter Hall to a remote location to keep them hidden from Vandal Savage while they figure out how to defeat him.",
                "duration": 2665,
                "series": "Arrow",
                "season_number": 4,
                "season": "4",
                "episode_number": 8,
                "upload_date": "20151203",
                "timestamp": 1449122100,
            },
            "params": {
                # m3u8 download
                "skip_download": True,
            },
            "skip": "redirect to http://cwtv.com/shows/arrow/",
        },
        {
            "url": "http://www.cwseed.com/shows/whose-line-is-it-anyway/jeff-davis-4/?play=24282b12-ead2-42f2-95ad-26770c2c6088",
            "info_dict": {
                "id": "24282b12-ead2-42f2-95ad-26770c2c6088",
                "ext": "mp4",
                "title": "Jeff Davis 4",
                "description": "Jeff Davis is back to make you laugh.",
                "duration": 1263,
                "series": "Whose Line Is It Anyway?",
                "season_number": 11,
                "episode_number": 20,
                "upload_date": "20151006",
                "timestamp": 1444107300,
                "age_limit": 14,
                "uploader": "CWTV",
            },
            "params": {
                # m3u8 download
                "skip_download": True,
            },
        },
        {
            "url": "http://cwtv.com/thecw/chroniclesofcisco/?play=8adebe35-f447-465f-ab52-e863506ff6d6",
            "only_matching": True,
        },
        {
            "url": "http://cwtvpr.com/the-cw/video?watch=9eee3f60-ef4e-440b-b3b2-49428ac9c54e",
            "only_matching": True,
        },
        {
            "url": "http://cwtv.com/shows/arrow/legends-of-yesterday/?watch=6b15e985-9345-4f60-baf8-56e96be57c63",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # CW's mobile-app metadata feed, keyed by the video GUID.
        data = self._download_json(
            "http://images.cwtv.com/feed/mobileapp/video-meta/apiversion_8/guid_"
            + video_id,
            video_id,
        )
        if data.get("result") != "ok":
            raise ExtractorError(data["msg"], expected=True)
        video_data = data["video"]
        title = video_data["title"]
        # Fall back to a constructed ThePlatform media URL when the feed
        # does not provide one directly.
        mpx_url = (
            video_data.get("mpx_url")
            or "http://link.theplatform.com/s/cwtv/media/guid/2703454149/%s?formats=M3U"
            % video_id
        )
        season = str_or_none(video_data.get("season"))
        episode = str_or_none(video_data.get("episode"))
        if episode and season:
            # The feed's "episode" field is season + episode concatenated
            # (e.g. "408"); strip the leading season digits.
            episode = episode[len(season) :]

        return {
            "_type": "url_transparent",
            "id": video_id,
            "title": title,
            "url": smuggle_url(mpx_url, {"force_smil_url": True}),
            "description": video_data.get("description_long"),
            "duration": int_or_none(video_data.get("duration_secs")),
            "series": video_data.get("series_name"),
            "season_number": int_or_none(season),
            "episode_number": int_or_none(episode),
            "timestamp": parse_iso8601(video_data.get("start_time")),
            "age_limit": parse_age_limit(video_data.get("rating")),
            "ie_key": "ThePlatform",
        }
|
accounts | FileboomMe | # -*- coding: utf-8 -*-
import json
import re
from pyload.core.datatypes.pyfile import PyFile
from pyload.core.network.http.exceptions import BadHeader
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.account import BaseAccount
from ..base.captcha import BaseCaptcha
class FileboomMe(BaseAccount):
    """Fileboom.me account plugin (Keep2Share-compatible JSON API)."""

    __name__ = "FileboomMe"
    __type__ = "account"
    __version__ = "0.05"
    __status__ = "testing"

    __description__ = """Fileboom.me account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    RECAPTCHA_KEY = "6LcYcN0SAAAAABtMlxKj7X0hRxOY8_2U86kI1vbb"

    API_URL = "https://fileboom.me/api/v2/"

    #: Actually this is Keep2ShareCc API, see https://keep2share.github.io/api/ https://github.com/keep2share/api
    def api_request(self, method, **kwargs):
        """POST `kwargs` as JSON to an API endpoint and decode the JSON reply."""
        html = self.load(self.API_URL + method, post=json.dumps(kwargs))
        return json.loads(html)

    def grab_info(self, user, password, data):
        """Return account expiry, remaining traffic and premium status."""
        json_data = self.api_request("AccountInfo", auth_token=data["token"])

        return {
            "validuntil": json_data["account_expires"],
            "trafficleft": json_data["available_traffic"],
            "premium": True if json_data["account_expires"] else False,
        }

    def signin(self, user, password, data):
        """Log in, solving a reCAPTCHA or classic captcha when the API asks for one.

        On success the session token is stored in ``data["token"]``.
        """
        if "token" in data:
            try:
                json_data = self.api_request("test", auth_token=data["token"])
            except BadHeader as exc:
                if exc.code == 403:  #: Session expired
                    pass
                else:
                    raise
            else:
                self.skip_login()

        try:
            json_data = self.api_request("login", username=user, password=password)
        except BadHeader as exc:
            if exc.code == 406:  #: Captcha needed
                # dummy pyfile
                pyfile = PyFile(
                    self.pyload.files,
                    -1,
                    "https://fileboom.me",
                    "https://fileboom.me",
                    0,
                    0,
                    "",
                    self.classname,
                    -1,
                    -1,
                )
                pyfile.plugin = self

                # The error body can contain several JSON objects; collect
                # all of their errorCode values.
                errors = [
                    json.loads(m.group(0)).get("errorCode", 0)
                    for m in re.finditer(r"{[^}]+}", exc.content)
                ]
                if 33 in errors:  #: ERROR_RE_CAPTCHA_REQUIRED
                    #: Recaptcha
                    self.captcha = ReCaptcha(pyfile)
                    for i in range(10):
                        json_data = self.api_request("RequestReCaptcha")
                        if json_data["code"] != 200:
                            # Fixed: use self._(...) like the rest of this
                            # class; a bare _(...) is undefined here.
                            self.log_error(self._("Request reCAPTCHA API failed"))
                            self.fail_login(self._("Request reCAPTCHA API failed"))

                        re_captcha_response = self.captcha.challenge(
                            self.RECAPTCHA_KEY, version="2js", secure_token=False
                        )

                        try:
                            json_data = self.api_request(
                                "login",
                                username=user,
                                password=password,
                                re_captcha_challenge=json_data["challenge"],
                                re_captcha_response=re_captcha_response,
                            )
                        except BadHeader as exc:
                            if exc.code == 406:
                                errors = [
                                    json.loads(m.group(0)).get("errorCode", 0)
                                    for m in re.finditer(r"{[^}]+}", exc.content)
                                ]
                                if 31 in errors:  #: ERROR_CAPTCHA_INVALID
                                    self.captcha.invalid()
                                    continue
                                else:
                                    self.log_error(exc.content)
                                    self.fail_login(exc.content)
                            else:
                                self.log_error(exc.content)
                                self.fail_login(exc.content)
                        else:
                            self.captcha.correct()
                            data["token"] = json_data["auth_token"]
                            break
                    else:
                        self.log_error(self._("Max captcha retries reached"))
                        self.fail_login(self._("Max captcha retries reached"))

                elif 30 in errors:  #: ERROR_CAPTCHA_REQUIRED
                    #: Normal captcha
                    self.captcha = BaseCaptcha(pyfile)
                    for i in range(10):
                        json_data = self.api_request("RequestCaptcha")
                        if json_data["code"] != 200:
                            self.log_error(self._("Request captcha API failed"))
                            self.fail_login(self._("Request captcha API failed"))

                        captcha_response = self.captcha.decrypt(
                            json_data["captcha_url"]
                        )

                        try:
                            json_data = self.api_request(
                                "login",
                                username=user,
                                password=password,
                                captcha_challenge=json_data["challenge"],
                                captcha_response=captcha_response,
                            )
                        except BadHeader as exc:
                            if exc.code == 406:
                                errors = [
                                    json.loads(m.group(0)).get("errorCode", 0)
                                    for m in re.finditer(r"{[^}]+}", exc.content)
                                ]
                                if 31 in errors:  #: ERROR_CAPTCHA_INVALID
                                    self.captcha.invalid()
                                    continue
                                else:
                                    self.log_error(exc.content)
                                    self.fail_login(exc.content)
                            else:
                                self.log_error(exc.content)
                                self.fail_login(exc.content)
                        else:
                            self.captcha.correct()
                            data["token"] = json_data["auth_token"]
                            break
                    else:
                        self.log_error(self._("Max captcha retries reached"))
                        self.fail_login(self._("Max captcha retries reached"))

                else:
                    self.log_error(exc.content)
                    self.fail_login(exc.content)
            else:
                self.log_error(exc.content)
                self.fail_login(exc.content)
        else:
            #: No captcha
            data["token"] = json_data["auth_token"]

    """
    @NOTE: below are methods
    necessary for captcha to work with account plugins
    """

    def check_status(self):
        pass

    def retry_captcha(self, attempts=10, wait=1, msg="Max captcha retries reached"):
        self.captcha.invalid()
        self.fail_login(msg=self._("Invalid captcha"))
|
extractor | beeg | from __future__ import unicode_literals
from ..compat import compat_str, compat_urlparse
from ..utils import int_or_none, unified_timestamp
from .common import InfoExtractor
class BeegIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?beeg\.(?:com|porn(?:/video)?)/(?P<id>\d+)"
_TESTS = [
{
# api/v6 v1
"url": "http://beeg.com/5416503",
"md5": "a1a1b1a8bc70a89e49ccfd113aed0820",
"info_dict": {
"id": "5416503",
"ext": "mp4",
"title": "Sultry Striptease",
"description": "md5:d22219c09da287c14bed3d6c37ce4bc2",
"timestamp": 1391813355,
"upload_date": "20140207",
"duration": 383,
"tags": list,
"age_limit": 18,
},
},
{
# api/v6 v2
"url": "https://beeg.com/1941093077?t=911-1391",
"only_matching": True,
},
{
# api/v6 v2 w/o t
"url": "https://beeg.com/1277207756",
"only_matching": True,
},
{
"url": "https://beeg.porn/video/5416503",
"only_matching": True,
},
{
"url": "https://beeg.porn/5416503",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
beeg_version = self._search_regex(
r"beeg_version\s*=\s*([\da-zA-Z_-]+)",
webpage,
"beeg version",
default="1546225636701",
)
if len(video_id) >= 10:
query = {
"v": 2,
}
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
t = qs.get("t", [""])[0].split("-")
if len(t) > 1:
query.update(
{
"s": t[0],
"e": t[1],
}
)
else:
query = {"v": 1}
for api_path in ("", "api."):
video = self._download_json(
"https://%sbeeg.com/api/v6/%s/video/%s"
% (api_path, beeg_version, video_id),
video_id,
fatal=api_path == "api.",
query=query,
)
if video:
break
formats = []
for format_id, video_url in video.items():
if not video_url:
continue
height = self._search_regex(
r"^(\d+)[pP]$", format_id, "height", default=None
)
if not height:
continue
formats.append(
{
"url": self._proto_relative_url(
video_url.replace(
"{DATA_MARKERS}", "data=pc_XX__%s_0" % beeg_version
),
"https:",
),
"format_id": format_id,
"height": int(height),
}
)
self._sort_formats(formats)
title = video["title"]
video_id = compat_str(video.get("id") or video_id)
display_id = video.get("code")
description = video.get("desc")
series = video.get("ps_name")
timestamp = unified_timestamp(video.get("date"))
duration = int_or_none(video.get("duration"))
tags = (
[tag.strip() for tag in video["tags"].split(",")]
if video.get("tags")
else None
)
return {
"id": video_id,
"display_id": display_id,
"title": title,
"description": description,
"series": series,
"timestamp": timestamp,
"duration": duration,
"tags": tags,
"formats": formats,
"age_limit": self._rta_search(webpage),
}
|
blocks | qa_stream_demux | #!/usr/bin/env python
#
# Copyright 2020 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
#
import os
import pmt
from gnuradio import blocks, gr, gr_unittest
class qa_stream_demux(gr_unittest.TestCase):
    """QA tests for blocks.stream_demux: alternating fixed-size output chunks."""

    def setUp(self):
        # ControlPort would open RPC services during the run; keep it off.
        os.environ["GR_CONF_CONTROLPORT_ON"] = "False"
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def help_stream_2ff(self, N, stream_sizes):
        """Run N ones followed by N twos through a two-output demux."""
        source = blocks.vector_source_f([1] * N + [2] * N, False)
        demux = blocks.stream_demux(gr.sizeof_float, stream_sizes)
        sink0 = blocks.vector_sink_f()
        sink1 = blocks.vector_sink_f()
        self.tb.connect(source, demux)
        self.tb.connect((demux, 0), sink0)
        self.tb.connect((demux, 1), sink1)
        self.tb.run()
        return (sink0.data(), sink1.data())

    def help_stream_ramp_2ff(self, N, stream_sizes):
        """Run a 2*N-sample up-then-down ramp through a two-output demux."""
        ramp = list(range(N)) + list(reversed(range(N)))
        source = blocks.vector_source_f(ramp, False)
        demux = blocks.stream_demux(gr.sizeof_float, stream_sizes)
        sink0 = blocks.vector_sink_f()
        sink1 = blocks.vector_sink_f()
        self.tb.connect(source, demux)
        self.tb.connect((demux, 0), sink0)
        self.tb.connect((demux, 1), sink1)
        self.tb.run()
        return (sink0.data(), sink1.data())

    def help_stream_tag_propagation(self, N, stream_sizes):
        """Tag the input at three packet lengths, then demux into three sinks."""
        pattern = (
            [1] * stream_sizes[0]
            + [2] * stream_sizes[1]
            + [3] * stream_sizes[2]
        )
        src = blocks.vector_source_f(pattern * N, False)
        tagger1 = blocks.stream_to_tagged_stream(
            gr.sizeof_float, 1, stream_sizes[0], "src1"
        )
        tagger2 = blocks.stream_to_tagged_stream(
            gr.sizeof_float, 1, stream_sizes[1], "src2"
        )
        tagger3 = blocks.stream_to_tagged_stream(
            gr.sizeof_float, 1, stream_sizes[2], "src3"
        )
        demux = blocks.stream_demux(gr.sizeof_float, stream_sizes)
        sinks = [blocks.vector_sink_f() for _ in range(3)]
        self.tb.connect(src, tagger1)
        self.tb.connect(tagger1, tagger2)
        self.tb.connect(tagger2, tagger3)
        self.tb.connect(tagger3, demux)
        for port, sink in enumerate(sinks):
            self.tb.connect((demux, port), sink)
        self.tb.run()
        return tuple(sinks)

    def test_stream_2NN_ff(self):
        N = 40
        stream_sizes = [10, 10]
        out0, out1 = self.help_stream_2ff(N, stream_sizes)
        # Chunks of 10 alternate between outputs: each sees 20 ones, 20 twos.
        self.assertEqual([1.0] * 20 + [2.0] * 20, out0)
        self.assertEqual([1.0] * 20 + [2.0] * 20, out1)

    def test_stream_ramp_2NN_ff(self):
        N = 40
        stream_sizes = [10, 10]
        out0, out1 = self.help_stream_ramp_2ff(N, stream_sizes)
        # Alternating 10-sample chunks of the 0..39,39..0 ramp.
        expected0 = [
            float(v)
            for chunk in (
                range(0, 10),
                range(20, 30),
                range(39, 29, -1),
                range(19, 9, -1),
            )
            for v in chunk
        ]
        expected1 = [
            float(v)
            for chunk in (
                range(10, 20),
                range(30, 40),
                range(29, 19, -1),
                range(9, -1, -1),
            )
            for v in chunk
        ]
        self.assertEqual(expected0, out0)
        self.assertEqual(expected1, out1)

    def test_stream_2NM_ff(self):
        N = 40
        stream_sizes = [7, 9]
        # The original test ran the graph twice; keep both runs.
        self.help_stream_2ff(N, stream_sizes)
        out0, out1 = self.help_stream_2ff(N, stream_sizes)
        self.assertEqual([1.0] * 21 + [2.0] * 14, out0)
        self.assertEqual([1.0] * 19 + [2.0] * 26, out1)

    def test_stream_2MN_ff(self):
        N = 37
        stream_sizes = [7, 9]
        # The original test ran the graph twice; keep both runs.
        self.help_stream_2ff(N, stream_sizes)
        out0, out1 = self.help_stream_2ff(N, stream_sizes)
        self.assertEqual([1.0] * 19 + [2.0] * 16, out0)
        self.assertEqual([1.0] * 18 + [2.0] * 21, out1)

    def test_stream_2N0_ff(self):
        N = 30
        stream_sizes = [7, 0]
        # Zero-length second output: everything lands on output 0.
        self.help_stream_2ff(N, stream_sizes)
        out0, out1 = self.help_stream_2ff(N, stream_sizes)
        self.assertEqual([1.0] * 30 + [2.0] * 30, out0)
        self.assertEqual([], out1)

    def test_stream_20N_ff(self):
        N = 30
        stream_sizes = [0, 9]
        # Zero-length first output: everything lands on output 1.
        self.help_stream_2ff(N, stream_sizes)
        out0, out1 = self.help_stream_2ff(N, stream_sizes)
        self.assertEqual([], out0)
        self.assertEqual([1.0] * 30 + [2.0] * 30, out1)

    def test_largeN_ff(self):
        # One chunk per output, second chunk close to the max buffer size.
        stream_sizes = [3, 8191]
        head = [1.0] * stream_sizes[0]
        tail = [2.0] * stream_sizes[1]
        source = blocks.vector_source_f(head + tail, repeat=False)
        demux = blocks.stream_demux(gr.sizeof_float, stream_sizes)
        sink0 = blocks.vector_sink_f()
        sink1 = blocks.vector_sink_f()
        self.tb.connect(source, demux)
        self.tb.connect((demux, 0), sink0)
        self.tb.connect((demux, 1), sink1)
        self.tb.run()
        self.assertEqual(head, sink0.data())
        self.assertEqual(tail, sink1.data())

    def test_tag_propagation(self):
        N = 10  # Block length
        stream_sizes = [1, 2, 3]
        result0, result1, result2 = self.help_stream_tag_propagation(N, stream_sizes)
        # check the data
        for port, result in enumerate((result0, result1, result2)):
            expected = N * stream_sizes[port] * [port + 1]
            self.assertFloatTuplesAlmostEqual(expected, result.data(), places=6)

        def check_offsets(result, expected_by_key):
            # Compare expected offsets against the tags carrying each key,
            # in order; indexing (not zip) preserves the original IndexError
            # behavior when fewer tags than expected arrive.
            tags = result.tags()
            for key, expected_offsets in expected_by_key.items():
                matching = [
                    tag for tag in tags if pmt.eq(tag.key, pmt.intern(key))
                ]
                for i in range(len(expected_offsets)):
                    self.assertEqual(expected_offsets[i], matching[i].offset)

        # check the tags - result0
        check_offsets(
            result0,
            {
                "src1": list(range(0, stream_sizes[0] * N, stream_sizes[0])),
                "src2": list(range(0, stream_sizes[0] * N, stream_sizes[0])),
                "src3": list(range(0, stream_sizes[0] * N, stream_sizes[0])),
            },
        )
        # check the tags - result1
        check_offsets(
            result1,
            {
                "src1": list(range(0, stream_sizes[1] * N, stream_sizes[0])),
                "src2": list(range(1, stream_sizes[1] * N, stream_sizes[1])),
                "src3": [],
            },
        )
        # check the tags - result2
        check_offsets(
            result2,
            {
                "src1": list(range(0, stream_sizes[2] * N, stream_sizes[0])),
                "src2": list(range(1, stream_sizes[2] * N, stream_sizes[2])),
                "src3": list(range(0, stream_sizes[2] * N, stream_sizes[2])),
            },
        )
# Run the stream_demux QA suite when this file is executed directly.
if __name__ == "__main__":
    gr_unittest.run(qa_stream_demux)
|
migrations | 0145_eventdefinition_propertydefinition | # Generated by Django 3.1.8 on 2021-04-22 01:45
import django.db.models.deletion
import posthog.models.utils
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.8 (2021-04-22); adds the PropertyDefinition
    # and EventDefinition models.  Do not hand-edit the schema operations.
    dependencies = [
        ("posthog", "0144_update_django_3_1_8"),
    ]
    operations = [
        migrations.CreateModel(
            name="PropertyDefinition",
            fields=[
                (
                    "id",
                    # Application-generated UUID primary key (posthog UUIDT).
                    models.UUIDField(
                        default=posthog.models.utils.UUIDT,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("name", models.CharField(max_length=400)),
                # True when the property's values are numerical.
                ("is_numerical", models.BooleanField(default=False)),
                # Nullable counters, default None until populated.
                # NOTE(review): presumably 30-day rolling usage stats filled
                # in asynchronously - confirm against the aggregation code.
                ("volume_30_day", models.IntegerField(default=None, null=True)),
                ("query_usage_30_day", models.IntegerField(default=None, null=True)),
                (
                    "team",
                    # Definitions are scoped to a team and removed with it.
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="property_definitions",
                        related_query_name="team",
                        to="posthog.team",
                    ),
                ),
            ],
            options={
                # A property name may only be defined once per team.
                "unique_together": {("team", "name")},
            },
        ),
        migrations.CreateModel(
            name="EventDefinition",
            fields=[
                (
                    "id",
                    # Application-generated UUID primary key (posthog UUIDT).
                    models.UUIDField(
                        default=posthog.models.utils.UUIDT,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("name", models.CharField(max_length=400)),
                # Nullable counters, default None until populated (see above
                # hedged note on PropertyDefinition).
                ("volume_30_day", models.IntegerField(default=None, null=True)),
                ("query_usage_30_day", models.IntegerField(default=None, null=True)),
                (
                    "team",
                    # Definitions are scoped to a team and removed with it.
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="event_definitions",
                        related_query_name="team",
                        to="posthog.team",
                    ),
                ),
            ],
            options={
                # An event name may only be defined once per team.
                "unique_together": {("team", "name")},
            },
        ),
    ]
|
stream | ffmpegmux | import concurrent.futures
import logging
import re
import subprocess
import sys
import threading
from contextlib import suppress
from functools import lru_cache
from pathlib import Path
from shutil import which
from typing import Any, Dict, Generic, List, Optional, Sequence, TextIO, TypeVar, Union
from streamlink import StreamError
from streamlink.stream.stream import Stream, StreamIO
from streamlink.utils.named_pipe import NamedPipe, NamedPipeBase
from streamlink.utils.processoutput import ProcessOutput
log = logging.getLogger(__name__)
_lock_resolve_command = threading.Lock()
TSubstreams = TypeVar("TSubstreams", bound=Stream)
class MuxedStream(Stream, Generic[TSubstreams]):
    """
    Muxes multiple streams into one output stream.
    """

    __shortname__ = "muxed-stream"

    def __init__(
        self,
        session,
        *substreams: TSubstreams,
        **options,
    ):
        """
        :param streamlink.Streamlink session: Streamlink session instance
        :param substreams: Video and/or audio streams
        :param options: Additional keyword arguments passed to :class:`ffmpegmux.FFMPEGMuxer`.
                        Subtitle streams need to be set via the ``subtitles`` keyword.
        """
        super().__init__(session)
        self.substreams: Sequence[TSubstreams] = substreams
        self.subtitles: Dict[str, Stream] = options.pop("subtitles", {})
        self.options: Dict[str, Any] = options

    def open(self):
        """Open every substream/subtitle and hand the pipes to the muxer."""
        fds = []
        metadata = self.options.get("metadata", {})
        maps = self.options.get("maps", [])
        # only auto-assign input indices if the caller gave no explicit maps
        autoassign_maps = not maps

        def register(stream):
            # record the ffmpeg input index before opening the stream
            if autoassign_maps:
                maps.append(len(fds))
            fds.append(stream and stream.open())

        for substream in self.substreams:
            log.debug("Opening {0} substream".format(substream.shortname()))
            register(substream)
        for index, (language, substream) in enumerate(self.subtitles.items()):
            log.debug("Opening {0} subtitle stream".format(substream.shortname()))
            register(substream)
            # tag each subtitle input with its language code
            metadata["s:s:{0}".format(index)] = ["language={0}".format(language)]
        self.options["metadata"] = metadata
        self.options["maps"] = maps
        return FFMPEGMuxer(self.session, *fds, **self.options).open()

    @classmethod
    def is_usable(cls, session):
        """True when a working FFmpeg binary is available for muxing."""
        return FFMPEGMuxer.is_usable(session)
class FFMPEGMuxer(StreamIO):
    """Feeds one or more input streams to an ffmpeg subprocess via named pipes
    and exposes ffmpeg's stdout as the muxed output stream."""

    __commands__ = ["ffmpeg"]

    DEFAULT_OUTPUT_FORMAT = "matroska"
    DEFAULT_VIDEO_CODEC = "copy"
    DEFAULT_AUDIO_CODEC = "copy"

    # version string of the resolved binary, cached class-wide after validation
    FFMPEG_VERSION: Optional[str] = None
    FFMPEG_VERSION_TIMEOUT = 4.0

    # file descriptor target for ffmpeg's stderr (DEVNULL, sys.stderr, or a file)
    errorlog: Union[int, TextIO]

    @classmethod
    def is_usable(cls, session):
        """True when an FFmpeg executable could be resolved (and validated)."""
        return cls.command(session) is not None

    @classmethod
    def command(cls, session):
        """Resolve the FFmpeg executable path, serialized across threads."""
        with _lock_resolve_command:
            return cls._resolve_command(
                session.options.get("ffmpeg-ffmpeg"),
                not session.options.get("ffmpeg-no-validation"),
            )

    @classmethod
    @lru_cache(maxsize=128)
    def _resolve_command(cls, command: Optional[str] = None, validate: bool = True) -> Optional[str]:
        """Locate the FFmpeg binary (explicit path or PATH lookup) and
        optionally validate it by parsing ``ffmpeg -version`` output."""
        if command:
            resolved = which(command)
        else:
            resolved = None
            # fall back to searching the default command names on PATH
            for cmd in cls.__commands__:
                resolved = which(cmd)
                if resolved:
                    break
        if resolved and validate:
            log.trace(f"Querying FFmpeg version: {[resolved, '-version']}")  # type: ignore[attr-defined]
            versionoutput = FFmpegVersionOutput([resolved, "-version"], timeout=cls.FFMPEG_VERSION_TIMEOUT)
            if not versionoutput.run():
                # unparsable/failed version output: treat the binary as unusable
                log.error("Could not validate FFmpeg!")
                log.error(f"Unexpected FFmpeg version output while running {[resolved, '-version']}")
                resolved = None
            else:
                cls.FFMPEG_VERSION = versionoutput.version
                for i, line in enumerate(versionoutput.output):
                    log.debug(f" {line}" if i > 0 else line)
        if not resolved:
            log.warning("No valid FFmpeg binary was found. See the --ffmpeg-ffmpeg option.")
            log.warning("Muxing streams is unsupported! Only a subset of the available streams can be returned!")
        return resolved

    @staticmethod
    def copy_to_pipe(stream: StreamIO, pipe: NamedPipeBase):
        """Pump a substream into its named pipe until EOF or an I/O error.
        Runs in a dedicated daemon thread per substream."""
        log.debug(f"Starting copy to pipe: {pipe.path}")
        # TODO: catch OSError when creating/opening pipe fails and close entire output stream
        pipe.open()
        while True:
            try:
                data = stream.read(8192)
            except (OSError, ValueError) as err:
                log.error(f"Error while reading from substream: {err}")
                break
            if data == b"":
                # empty read means the substream reached EOF
                log.debug(f"Pipe copy complete: {pipe.path}")
                break
            try:
                pipe.write(data)
            except OSError as err:
                log.error(f"Error while writing to pipe {pipe.path}: {err}")
                break
        with suppress(OSError):
            pipe.close()

    def __init__(self, session, *streams, **options):
        """Build the ffmpeg command line and one named pipe + copy thread per
        input stream.  Session options override per-call keyword options."""
        if not self.is_usable(session):
            raise StreamError("cannot use FFMPEG")
        self.session = session
        self.process = None
        self.streams = streams
        self.pipes = [NamedPipe() for _ in self.streams]
        self.pipe_threads = [
            threading.Thread(target=self.copy_to_pipe, args=(stream, np)) for stream, np in zip(self.streams, self.pipes)
        ]
        # session-level options win over keyword options; keywords win over defaults
        ofmt = session.options.get("ffmpeg-fout") or options.pop("format", self.DEFAULT_OUTPUT_FORMAT)
        outpath = options.pop("outpath", "pipe:1")
        videocodec = session.options.get("ffmpeg-video-transcode") or options.pop("vcodec", self.DEFAULT_VIDEO_CODEC)
        audiocodec = session.options.get("ffmpeg-audio-transcode") or options.pop("acodec", self.DEFAULT_AUDIO_CODEC)
        metadata = options.pop("metadata", {})
        maps = options.pop("maps", [])
        copyts = session.options.get("ffmpeg-copyts") or options.pop("copyts", False)
        start_at_zero = session.options.get("ffmpeg-start-at-zero") or options.pop("start_at_zero", False)
        self._cmd = [self.command(session), "-nostats", "-y"]
        # each named pipe becomes one ffmpeg input
        for np in self.pipes:
            self._cmd.extend(["-i", str(np.path)])
        self._cmd.extend(["-c:v", videocodec])
        self._cmd.extend(["-c:a", audiocodec])
        for m in maps:
            self._cmd.extend(["-map", str(m)])
        if copyts:
            self._cmd.extend(["-copyts"])
        if start_at_zero:
            self._cmd.extend(["-start_at_zero"])
        # metadata: {stream-specifier-or-"": [key=value, ...]}
        for stream, data in metadata.items():
            for datum in data:
                stream_id = ":{0}".format(stream) if stream else ""
                self._cmd.extend(["-metadata{0}".format(stream_id), datum])
        self._cmd.extend(["-f", ofmt, outpath])
        log.debug("ffmpeg command: {0}".format(" ".join(self._cmd)))
        # route ffmpeg's stderr to a file, the console, or nowhere
        if session.options.get("ffmpeg-verbose-path"):
            self.errorlog = Path(session.options.get("ffmpeg-verbose-path")).expanduser().open("w")
        elif session.options.get("ffmpeg-verbose"):
            self.errorlog = sys.stderr
        else:
            self.errorlog = subprocess.DEVNULL

    def open(self):
        """Start the pipe copy threads and the ffmpeg subprocess."""
        for t in self.pipe_threads:
            t.daemon = True
            t.start()
        self.process = subprocess.Popen(self._cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=self.errorlog)
        return self

    def read(self, size=-1):
        """Read muxed output bytes from ffmpeg's stdout."""
        return self.process.stdout.read(size)

    def close(self):
        """Kill ffmpeg, close all substreams and pipes, then close self.
        Idempotent: returns immediately if already closed."""
        if self.closed:
            return
        log.debug("Closing ffmpeg thread")
        if self.process:
            # kill ffmpeg
            self.process.kill()
            self.process.stdout.close()
            executor = concurrent.futures.ThreadPoolExecutor()
            # close the substreams
            futures = [
                executor.submit(stream.close) for stream in self.streams if hasattr(stream, "close") and callable(stream.close)
            ]
            concurrent.futures.wait(futures, return_when=concurrent.futures.ALL_COMPLETED)
            log.debug("Closed all the substreams")
            # wait for substream copy-to-pipe threads to terminate and clean up the opened pipes
            timeout = self.session.options.get("stream-timeout")
            futures = [executor.submit(thread.join, timeout=timeout) for thread in self.pipe_threads]
            concurrent.futures.wait(futures, return_when=concurrent.futures.ALL_COMPLETED)
        if self.errorlog is not sys.stderr and self.errorlog is not subprocess.DEVNULL:
            with suppress(OSError):
                self.errorlog.close()
        super().close()
class FFmpegVersionOutput(ProcessOutput):
    """Runs ``ffmpeg -version`` and captures/validates its output."""

    # The version output format of the fftools hasn't been changed since n0.7.1 (2011-04-23):
    # https://github.com/FFmpeg/FFmpeg/blame/n5.1.1/fftools/ffmpeg.c#L110
    # https://github.com/FFmpeg/FFmpeg/blame/n5.1.1/fftools/opt_common.c#L201
    # https://github.com/FFmpeg/FFmpeg/blame/c99b93c5d53d8f4a4f1fafc90f3dfc51467ee02e/fftools/cmdutils.c#L1156
    # https://github.com/FFmpeg/FFmpeg/commit/89b503b55f2b2713f1c3cc8981102c1a7b663281
    _re_version = re.compile(r"ffmpeg version (?P<version>\S+)")

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # parsed version string, set once the first stdout line matches
        self.version: Optional[str] = None
        # all captured stdout lines, in order
        self.output: List[str] = []

    def onexit(self, code: int) -> bool:
        """Success only on a zero exit code AND a parsed version string."""
        return code == 0 and self.version is not None

    def onstdout(self, idx: int, line: str) -> Optional[bool]:
        """Collect each stdout line; abort if line 0 lacks the version banner."""
        if idx == 0:
            banner = self._re_version.match(line)
            if banner is None:
                # first line doesn't look like "ffmpeg version ..." - bail out
                return False
            self.version = banner["version"]
        self.output.append(line)
|
TemplatePyMod | DocumentObject | # FreeCAD module providing base classes for document objects and view provider
# (c) 2011 Werner Mayer LGPL
import FreeCAD
class DocumentObject(object):
    """Base class for Python-implemented FreeCAD document objects.

    Wraps the underlying C++ object (held in ``__object__``) and forwards
    attribute access to it, so subclasses behave like native objects."""

    def __init__(self):
        # __object__ is attached by FreeCAD after creation; until then,
        # attribute delegation falls back to this instance.
        self.__object__ = None
        self.initialised = False

    # ------------------------------Methods for the user to override :
    def execute(self):
        """Executed on object creation and whenever the document is recomputed."""
        raise NotImplementedError("Not yet implemented")

    def init(self):
        # Called once, just after object creation; use this e.g. to create properties.
        pass

    def propertyChanged(self, prop):
        # Called each time a property is changed.
        pass

    # --------------------------------
    def __getattr__(self, attr):
        # Delegate unknown attributes to the wrapped C++ object; guard on
        # "__object__" itself to avoid infinite recursion during __init__.
        if attr != "__object__" and hasattr(self.__object__, attr):
            return getattr(self.__object__, attr)
        else:
            return object.__getattribute__(self, attr)

    def __setattr__(self, attr, value):
        # Mirror of __getattr__: write through to the C++ object when it
        # already owns the attribute, otherwise set it on this proxy.
        if attr != "__object__" and hasattr(self.__object__, attr):
            setattr(self.__object__, attr, value)
        else:
            object.__setattr__(self, attr, value)

    def onChanged(self, prop):
        """FreeCAD change hook: on Proxy attachment, exports this class's
        public methods onto the wrapped object and runs one-time init();
        always forwards to propertyChanged()."""
        if prop == "Proxy":
            # recreate the functions in the __object__
            d = self.__class__.__dict__
            for key in d:
                item = d[key]
                # check if the function is valid: callable, public, and not a
                # lifecycle hook (onChanged/execute/init)
                if (
                    hasattr(item, "__call__")
                    and key != "onChanged"
                    and key != "execute"
                    and key != "init"
                    and key[0] != "_"
                ):
                    # check if the function doesn't already exist in the object:
                    if not (hasattr(self.__object__, key)):
                        # add a link to the Proxy function in the __object__ :
                        # (attr=2 - NOTE(review): presumably marks the property
                        # hidden/transient; confirm against the C++ API)
                        self.addProperty("App::PropertyPythonObject", key, "", "", 2)
                        setattr(self.__object__, key, getattr(self, key))
                    else:
                        FreeCAD.Console.PrintWarning(
                            '!!! The function : "'
                            + key
                            + '" already exist in the object, cannot override. !!!\n'
                        )
            # call the init function exactly once
            if hasattr(self, "initialised"):
                if self.initialised == False:
                    self.init()
                    self.initialised = True
        self.propertyChanged(prop)

    def addProperty(
        self, typ, name="", group="", doc="", attr=0, readonly=False, hidden=False
    ):
        """Adds a new property to this object."""
        return self.__object__.addProperty(
            typ, name, group, doc, attr, readonly, hidden
        )

    def supportedProperties(self):
        """Lists the property types supported by this object."""
        return self.__object__.supportedProperties()

    def isDerivedFrom(self, obj):
        """Returns True if this object is derived from the given C++ class, for
        example Part::Feature."""
        return self.__object__.isDerivedFrom(obj)

    def getAllDerivedFrom(self):
        """Returns all parent C++ classes of this object."""
        return self.__object__.getAllDerivedFrom()

    def getProperty(self, attr):
        """Returns the value of a given property."""
        return self.__object__.getPropertyByName(attr)

    def getTypeOfProperty(self, attr):
        """Returns the type of a given property."""
        return self.__object__.getTypeOfProperty(attr)

    def getGroupOfProperty(self, attr):
        """Returns the group of a given property."""
        return self.__object__.getGroupOfProperty(attr)

    def getDocumentationOfProperty(self, attr):
        """Returns the documentation string of a given property."""
        return self.__object__.getDocumentationOfProperty(attr)

    def getEnumerationsOfProperty(self, attr):
        """Returns the enumeration values of a given property."""
        return self.__object__.getEnumerationsOfProperty(attr)

    def touch(self):
        """Marks this object to be recomputed."""
        return self.__object__.touch()

    def purgeTouched(self):
        """Removes the to-be-recomputed flag of this object."""
        return self.__object__.purgeTouched()

    def __setstate__(self, value):
        """Restores custom attributes previously saved via __getstate__()
        when the FreeCAD document is loaded."""
        return None

    def __getstate__(self):
        """Returns custom attributes of this object as strings, so they can
        be saved when saving the FreeCAD document.  Returning None saves
        no custom state."""
        return None

    @property
    def PropertiesList(self):
        """Lists the current properties of this object."""
        return self.__object__.PropertiesList

    @property
    def Type(self):
        """Shows the C++ class of this object."""
        return self.__object__.Type

    @property
    def Module(self):
        """Gives the module this object is defined in."""
        return self.__object__.Module

    @property
    def Content(self):
        """Shows the contents of the properties of this object as an xml string.
        This is the content that is saved when the file is saved by FreeCAD."""
        return self.__object__.Content

    @property
    def MemSize(self):
        """Shows the amount of memory this object uses."""
        return self.__object__.MemSize

    @property
    def Name(self):
        """The name of this object, unique in the FreeCAD document."""
        return self.__object__.Name

    @property
    def Document(self):
        """The document this object is part of."""
        return self.__object__.Document

    @property
    def State(self):
        """Shows if this object is valid (presents no errors)."""
        return self.__object__.State

    @property
    def ViewObject(self):
        """Returns the ViewObject associated with this object, or None if
        FreeCAD is running in console mode."""
        return self.__object__.ViewObject

    @ViewObject.setter
    def ViewObject(self, value):
        """Sets the ViewObject associated with this object."""
        self.__object__.ViewObject = value

    @property
    def InList(self):
        """Lists the parents of this object."""
        return self.__object__.InList

    @property
    def OutList(self):
        """Lists the children of this object."""
        return self.__object__.OutList
class ViewProvider(object):
    """The ViewProvider is the counterpart of the DocumentObject in
    the GUI space. It is only present when FreeCAD runs in GUI mode.
    It contains all that is needed to represent the DocumentObject in
    the 3D view and the FreeCAD interface."""

    def __init__(self):
        # __vobject__ is attached by FreeCAD; all methods delegate to it.
        self.__vobject__ = None

    # Optional hooks a subclass may implement (kept here as reference):
    # def getIcon(self):
    #    return ""
    # def claimChildren(self):
    #    return self.__vobject__.Object.OutList
    # def setEdit(self,mode):
    #    return False
    # def unsetEdit(self,mode):
    #    return False
    # def attach(self):
    #    return None
    # def updateData(self, prop):
    #    return None
    # def onChanged(self, prop):
    #    return None
    def addDisplayMode(self, node, mode):
        """Adds a coin node as a display mode to this object."""
        self.__vobject__.addDisplayMode(node, mode)

    # def getDefaultDisplayMode(self):
    #    return ""
    # def getDisplayModes(self):
    #    return []
    # def setDisplayMode(self,mode):
    #    return mode
    def addProperty(
        self, type, name="", group="", doc="", attr=0, readonly=False, hidden=False
    ):
        """Adds a new property to this object."""
        self.__vobject__.addProperty(type, name, group, doc, attr, readonly, hidden)

    def update(self):
        """Executed whenever any of the properties of this ViewProvider changes."""
        self.__vobject__.update()

    def show(self):
        """Switches this object to visible."""
        self.__vobject__.show()

    def hide(self):
        """Switches this object to invisible."""
        self.__vobject__.hide()

    def isVisible(self):
        """Shows whether this object is visible or invisible."""
        return self.__vobject__.isVisible()

    def toString(self):
        """Returns a string representation of the coin node of this object."""
        return self.__vobject__.toString()

    def setTransformation(self, trsf):
        """Defines a transformation for this object."""
        return self.__vobject__.setTransformation(trsf)

    def supportedProperties(self):
        """Lists the property types this ViewProvider supports."""
        return self.__vobject__.supportedProperties()

    def isDerivedFrom(self, obj):
        """Returns True if this object is derived from the given C++ class, for
        example Part::Feature."""
        return self.__vobject__.isDerivedFrom(obj)

    def getAllDerivedFrom(self):
        """Returns all parent C++ classes of this object."""
        return self.__vobject__.getAllDerivedFrom()

    def getProperty(self, attr):
        """Returns the value of a given property."""
        return self.__vobject__.getPropertyByName(attr)

    def getTypeOfProperty(self, attr):
        """Returns the type of a given property."""
        return self.__vobject__.getTypeOfProperty(attr)

    def getGroupOfProperty(self, attr):
        """Returns the group of a given property."""
        return self.__vobject__.getGroupOfProperty(attr)

    def getDocumentationOfProperty(self, attr):
        """Returns the documentation string of a given property."""
        return self.__vobject__.getDocumentationOfProperty(attr)

    def __setstate__(self, value):
        """Restores custom attributes previously saved via __getstate__()
        when the FreeCAD document is loaded."""
        return None

    def __getstate__(self):
        """Returns custom attributes of this object as strings, so they can
        be saved when saving the FreeCAD document.  Returning None saves
        no custom state."""
        return None

    @property
    def Annotation(self):
        """Returns the Annotation coin node of this object."""
        return self.__vobject__.Annotation

    @property
    def RootNode(self):
        """Returns the Root coin node of this object."""
        return self.__vobject__.RootNode

    @property
    def DisplayModes(self):
        """Lists the display modes of this object."""
        return self.__vobject__.listDisplayModes()

    @property
    def PropertiesList(self):
        """Lists the current properties of this object."""
        return self.__vobject__.PropertiesList

    @property
    def Type(self):
        """Shows the C++ class of this object."""
        return self.__vobject__.Type

    @property
    def Module(self):
        """Gives the module this object is defined in."""
        return self.__vobject__.Module

    @property
    def Content(self):
        """Shows the contents of the properties of this object as an xml string.
        This is the content that is saved when the file is saved by FreeCAD."""
        return self.__vobject__.Content

    @property
    def MemSize(self):
        """Shows the amount of memory this object uses."""
        return self.__vobject__.MemSize

    @property
    def Object(self):
        """Returns the DocumentObject this ViewProvider is associated to."""
        return self.__vobject__.Object
# Example :
import Part
class Box(DocumentObject):
    """Example parametric box implemented on top of DocumentObject."""

    # C++ feature type used when adding this object to a document:
    type = "Part::FeaturePython"

    # -----------------------------INIT----------------------------------------
    def init(self):
        """Create the three dimension properties, each defaulting to 1.0."""
        for prop_name, prop_doc in (
            ("Length", "Length of the box"),
            ("Width", "Width of the box"),
            ("Height", "Height of the box"),
        ):
            holder = self.addProperty("App::PropertyLength", prop_name, "Box", prop_doc)
            setattr(holder, prop_name, 1.0)

    # -----------------------------BEHAVIOR------------------------------------
    def propertyChanged(self, prop):
        FreeCAD.Console.PrintMessage("Box property changed : " + prop + "\n")
        # any dimension change invalidates the shape
        if prop in ("Length", "Width", "Height"):
            self._recomputeShape()

    def execute(self):
        FreeCAD.Console.PrintMessage("Recompute Python Box feature\n")
        self._recomputeShape()

    # ---------------------------PUBLIC FUNCTIONS-------------------------------
    # These functions will be present in the object
    def customFunctionSetLength(self, attr):
        self.Length = attr
        self._privateFunctionExample(attr)

    # ---------------------------PRIVATE FUNCTIONS------------------------------
    # These function won't be present in the object (begin with '_')
    def _privateFunctionExample(self, attr):
        FreeCAD.Console.PrintMessage("The length : " + str(attr) + "\n")

    def _recomputeShape(self):
        # only rebuild once all three dimension properties exist
        if all(hasattr(self, name) for name in ("Length", "Width", "Height")):
            self.Shape = Part.makeBox(self.Length, self.Width, self.Height)
def makeBox():
    """Create a fresh document holding one example Box and resize it."""
    FreeCAD.newDocument()
    document = FreeCAD.ActiveDocument
    box = document.addObject(Box.type, "MyBox", Box(), None)
    box.customFunctionSetLength(4)
|
migrate | regenerate_query_cache | #!/usr/bin/python
from org.apache.pig.scripting import Pig

# NOTE: this script runs under Jython (embedded Pig scripting), which is a
# Python 2 dialect — hence dict.iteritems() below.

# Location of the compiled reddit Pig UDF jar.
SCRIPT_ROOT = "udfs/dist/lib/"
# Directory containing the table dumps consumed as input, and the root
# under which regenerated query-cache rows are written.
INPUT_ROOT = "input/"
OUTPUT_ROOT = "output"

# relation dump name -> (destination column family, type of thing2 in the rel)
relations = {
    "savehide": ("UserQueryCache", "link"),
    "inbox_account_comment": ("UserQueryCache", "comment"),
    "inbox_account_message": ("UserQueryCache", "message"),
    "moderatorinbox": ("SubredditQueryCache", "message"),
    "vote_account_link": ("UserQueryCache", "link"),
}

####### Pig script fragments

# Load a dump of a "thing" table (links / comments / messages).
load_things = """
things =
LOAD '$THINGS'
USING PigStorage()
AS (id:long,
ups:int,
downs:int,
deleted:chararray,
spam:chararray,
timestamp:double);
"""

# Alias the things relation as "items" (thing-based queries reuse the
# same downstream fragments as rel-based ones).
make_things_items = """
items =
FOREACH things GENERATE *;
"""

# Load a dump of a relation table into "items".
load_rels = """
items =
LOAD '$RELS'
USING PigStorage()
AS (id:long,
thing1_id:long,
thing2_id:long,
name:chararray,
timestamp:double);
"""

# Join the key/value data table onto items, folding the k/v pairs for each
# item into a single map field named "data".
load_and_map_data = """
data =
LOAD '$DATA'
USING PigStorage()
AS (id:long,
key:chararray,
value);
grouped_with_data =
COGROUP items BY id, data BY id;
items_with_data =
FOREACH grouped_with_data
GENERATE FLATTEN(items),
com.reddit.pig.MAKE_MAP(data.(key, value)) AS data;
"""

# Inbox-style relations also maintain an "<relation>:unread" query for
# items whose data map has new == 't'.
add_unread = """
SPLIT items_with_data
INTO inbox IF 1 == 1,
unread IF (chararray)data#'new' == 't';
inbox_with_relname =
FOREACH inbox GENERATE '$RELATION' AS relation, *;
unread_with_relname =
FOREACH unread GENERATE '$RELATION:unread' AS relation, *;
rels_with_relname =
UNION ONSCHEMA inbox_with_relname,
unread_with_relname;
"""

# Non-inbox relations just get tagged with their relation name.
add_relname = """
rels_with_relname =
FOREACH items GENERATE '$RELATION' AS relation, *;
"""

# Join rels against the referenced things, drop rels pointing at missing
# or deleted things, and emit (rowkey, colkey, timestamp) triples.
generate_rel_items = """
minimal_things =
FOREACH things GENERATE id, deleted;
joined =
JOIN rels_with_relname BY thing2_id LEFT OUTER,
minimal_things BY id;
only_valid =
FILTER joined BY minimal_things::id IS NOT NULL AND
deleted == 'f';
potential_columns =
FOREACH only_valid
GENERATE com.reddit.pig.MAKE_ROWKEY(relation, name, thing1_id) AS rowkey,
com.reddit.pig.MAKE_THING2_FULLNAME(relation, thing2_id) AS colkey,
timestamp AS value;
"""

# Keep only the 1000 newest columns per rowkey and store them with the
# timestamp JSON-encoded (matching the query-cache storage format).
store_top_1000_per_rowkey = """
non_null =
FILTER potential_columns BY rowkey IS NOT NULL AND colkey IS NOT NULL;
grouped =
GROUP non_null BY rowkey;
limited =
FOREACH grouped {
sorted = ORDER non_null BY value DESC;
limited = LIMIT sorted 1000;
GENERATE group AS rowkey, FLATTEN(limited.(colkey, value));
};
jsonified =
FOREACH limited GENERATE rowkey,
colkey,
com.reddit.pig.TO_JSON(value);
STORE jsonified INTO '$OUTPUT' USING PigStorage();
"""

###### run the jobs
# register the reddit udfs
Pig.registerJar(SCRIPT_ROOT + "reddit-pig-udfs.jar")

# process rels
for rel, (cf, thing2_type) in relations.iteritems():
    # build source for a script
    script = "SET default_parallel 10;"
    script += load_rels
    # inbox relations need the data map to split out unread items
    if "inbox" in rel:
        script += load_and_map_data
        script += add_unread
    else:
        script += add_relname
    script += load_things
    script += generate_rel_items
    script += store_top_1000_per_rowkey
    # run it
    bound = Pig.compile(script).bind(
        {
            "RELS": INPUT_ROOT + rel + ".dump",
            "DATA": INPUT_ROOT + rel + "-data.dump",
            "THINGS": INPUT_ROOT + thing2_type + ".dump",
            "RELATION": rel,
            "OUTPUT": "/".join((OUTPUT_ROOT, cf, rel)),
        }
    )
    bound.runSingle()

# rebuild message-based queries (just get_sent right now)
# NOTE(review): deliberately disabled — flip to True to run this one-off job.
if False:
    script = "SET default_parallel 10;"
    script += load_things
    script += make_things_items
    script += load_and_map_data
    # sent queries key on the sending account rather than a relation
    script += """
non_null =
FILTER items_with_data BY data#'author_id' IS NOT NULL;
potential_columns =
FOREACH non_null
GENERATE
CONCAT('sent.', com.reddit.pig.TO_36(data#'author_id')) AS rowkey,
com.reddit.pig.MAKE_FULLNAME('message', id) AS colkey,
timestamp AS value;
"""
    script += store_top_1000_per_rowkey
    compiled = Pig.compile(script)
    bound = compiled.bind(
        {
            "THINGS": INPUT_ROOT + "message.dump",
            "DATA": INPUT_ROOT + "message-data.dump",
            "OUTPUT": "/".join((OUTPUT_ROOT, "UserQueryCache", "sent")),
        }
    )
    result = bound.runSingle()

# rebuild comment-based queries
if True:
    script = "SET default_parallel 10;"
    script += load_things
    script += make_things_items
    script += load_and_map_data
    # per-subreddit listings: all comments, reported comments, spam comments
    script += """
SPLIT items_with_data INTO
spam_comments IF spam == 't',
ham_comments IF spam == 'f';
ham_comments_with_name =
FOREACH ham_comments GENERATE 'sr_comments' AS name, *;
reported_comments =
FILTER ham_comments BY (int)data#'reported' > 0;
reported_comments_with_name =
FOREACH reported_comments GENERATE 'reported_comments' AS name, *;
spam_comments_with_name =
FOREACH spam_comments GENERATE 'spam_comments' AS name, *;
comments_with_name =
UNION ONSCHEMA ham_comments_with_name,
reported_comments_with_name,
spam_comments_with_name;
potential_columns =
FOREACH comments_with_name GENERATE
CONCAT(name, CONCAT('.', com.reddit.pig.TO_36(data#'sr_id'))) AS rowkey,
com.reddit.pig.MAKE_FULLNAME('comment', id) AS colkey,
timestamp AS value;
"""
    script += store_top_1000_per_rowkey
    compiled = Pig.compile(script)
    bound = compiled.bind(
        {
            "THINGS": INPUT_ROOT + "comment.dump",
            "DATA": INPUT_ROOT + "comment-data.dump",
            "OUTPUT": "/".join((OUTPUT_ROOT, "SubredditQueryCache", "comment")),
        }
    )
    result = bound.runSingle()
|
mako | runtime | # mako/runtime.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context,
Namespace, and various helper functions."""
import sys
from mako import compat, exceptions, util
from mako.compat import compat_builtins
class Context(object):
    """Provides runtime namespace, output buffer, and various
    callstacks for templates.
    See :ref:`runtime_toplevel` for detail on the usage of
    :class:`.Context`.
    """

    def __init__(self, buffer, **data):
        # ``buffer`` is any object with a write() method; ``data`` is the
        # template's variable namespace.
        self._buffer_stack = [buffer]
        self._data = data
        # snapshot of the original render() arguments, exposed via .kwargs
        self._kwargs = data.copy()
        self._with_template = None
        self._outputting_as_unicode = None
        self.namespaces = {}
        # "capture" function which proxies to the
        # generic "capture" function
        self._data["capture"] = compat.partial(capture, self)
        # "caller" stack used by def calls with content
        self.caller_stack = self._data["caller"] = CallerStack()

    def _set_with_template(self, t):
        # Associate this context with a Template, rejecting render()
        # arguments that collide with the template's reserved names.
        self._with_template = t
        illegal_names = t.reserved_names.intersection(self._data)
        if illegal_names:
            raise exceptions.NameConflictError(
                "Reserved words passed to render(): %s" % ", ".join(illegal_names)
            )

    @property
    def lookup(self):
        """Return the :class:`.TemplateLookup` associated
        with this :class:`.Context`.
        """
        return self._with_template.lookup

    @property
    def kwargs(self):
        """Return the dictionary of top level keyword arguments associated
        with this :class:`.Context`.
        This dictionary only includes the top-level arguments passed to
        :meth:`.Template.render`. It does not include names produced within
        the template execution such as local variable names or special names
        such as ``self``, ``next``, etc.
        The purpose of this dictionary is primarily for the case that
        a :class:`.Template` accepts arguments via its ``<%page>`` tag,
        which are normally expected to be passed via :meth:`.Template.render`,
        except the template is being called in an inheritance context,
        using the ``body()`` method. :attr:`.Context.kwargs` can then be
        used to propagate these arguments to the inheriting template::
            ${next.body(**context.kwargs)}
        """
        # return a copy so callers cannot mutate the stored snapshot
        return self._kwargs.copy()

    def push_caller(self, caller):
        """Push a ``caller`` callable onto the callstack for
        this :class:`.Context`."""
        self.caller_stack.append(caller)

    def pop_caller(self):
        """Pop a ``caller`` callable onto the callstack for this
        :class:`.Context`."""
        del self.caller_stack[-1]

    def keys(self):
        """Return a list of all names established in this :class:`.Context`."""
        return list(self._data.keys())

    def __getitem__(self, key):
        # names not in the context fall back to Python builtins
        if key in self._data:
            return self._data[key]
        else:
            return compat_builtins.__dict__[key]

    def _push_writer(self):
        """push a capturing buffer onto this Context and return
        the new writer function."""
        buf = util.FastEncodingBuffer()
        self._buffer_stack.append(buf)
        return buf.write

    def _pop_buffer_and_writer(self):
        """pop the most recent capturing buffer from this Context
        and return the current writer after the pop.
        """
        buf = self._buffer_stack.pop()
        return buf, self._buffer_stack[-1].write

    def _push_buffer(self):
        """push a capturing buffer onto this Context."""
        self._push_writer()

    def _pop_buffer(self):
        """pop the most recent capturing buffer from this Context."""
        return self._buffer_stack.pop()

    def get(self, key, default=None):
        """Return a value from this :class:`.Context`."""
        # context data first, then builtins, then the caller's default
        return self._data.get(key, compat_builtins.__dict__.get(key, default))

    def write(self, string):
        """Write a string to this :class:`.Context` object's
        underlying output buffer."""
        self._buffer_stack[-1].write(string)

    def writer(self):
        """Return the current writer function."""
        return self._buffer_stack[-1].write

    def _copy(self):
        # Shallow copy: shares the buffer stack, caller stack and namespaces
        # with the original; only the data dictionary is copied.
        c = Context.__new__(Context)
        c._buffer_stack = self._buffer_stack
        c._data = self._data.copy()
        c._kwargs = self._kwargs
        c._with_template = self._with_template
        c._outputting_as_unicode = self._outputting_as_unicode
        c.namespaces = self.namespaces
        c.caller_stack = self.caller_stack
        return c

    def _locals(self, d):
        """Create a new :class:`.Context` with a copy of this
        :class:`.Context`'s current state,
        updated with the given dictionary.
        The :attr:`.Context.kwargs` collection remains
        unaffected.
        """
        if not d:
            return self
        c = self._copy()
        c._data.update(d)
        return c

    def _clean_inheritance_tokens(self):
        """create a new copy of this :class:`.Context`. with
        tokens related to inheritance state removed."""
        c = self._copy()
        x = c._data
        x.pop("self", None)
        x.pop("parent", None)
        x.pop("next", None)
        return c
class CallerStack(list):
    """Stack of ``caller`` callables maintained around def calls that are
    invoked with content; unknown attributes proxy to the top caller."""

    def __init__(self):
        self.nextcaller = None

    def __nonzero__(self):
        # Python 2 truth protocol delegates to __bool__.
        return self.__bool__()

    def __bool__(self):
        # Truthy only when a frame exists and the current caller is truthy.
        return bool(len(self) and self._get_caller())

    def _get_caller(self):
        # this method can be removed once
        # codegen MAGIC_NUMBER moves past 7
        return self[-1]

    def __getattr__(self, key):
        # proxy unknown attribute access to the caller on top of the stack
        return getattr(self._get_caller(), key)

    def _push_frame(self):
        frame, self.nextcaller = self.nextcaller or None, None
        self.append(frame)
        return frame

    def _pop_frame(self):
        self.nextcaller = self.pop()
class Undefined(object):
    """Sentinel type for names that are not defined in a template.

    Every template module exposes a single shared instance of this class
    as the constant ``UNDEFINED``.
    """

    def __bool__(self):
        # an undefined value is always falsy
        return False

    def __nonzero__(self):
        # Python 2 truth protocol delegates to __bool__
        return self.__bool__()

    def __str__(self):
        # rendering an undefined value is always an error
        raise NameError("Undefined")


UNDEFINED = Undefined()
STOP_RENDERING = ""
class LoopStack(object):
    """Stack of :class:`.LoopContext` instances; implements the context
    manager protocol so the top context is popped automatically when a
    ``% for`` block exits."""

    def __init__(self):
        self.stack = []

    def _enter(self, iterable):
        self._push(iterable)
        return self._top

    def _exit(self):
        self._pop()
        return self._top

    @property
    def _top(self):
        # Fall back to the stack itself when no loop is active so that
        # attribute access raises the "no loop context" error below.
        return self.stack[-1] if self.stack else self

    def _pop(self):
        return self.stack.pop()

    def _push(self, iterable):
        ctx = LoopContext(iterable)
        if self.stack:
            # nested loops link to their enclosing loop
            ctx.parent = self.stack[-1]
        return self.stack.append(ctx)

    def __getattr__(self, key):
        raise exceptions.RuntimeException("No loop context is established")

    def __iter__(self):
        return iter(self._top)
class LoopContext(object):
    """A magic loop variable.
    Automatically accessible in any ``% for`` block.
    See the section :ref:`loop_context` for usage
    notes.
    :attr:`parent` -> :class:`.LoopContext` or ``None``
        The parent loop, if one exists.
    :attr:`index` -> `int`
        The 0-based iteration count.
    :attr:`reverse_index` -> `int`
        The number of iterations remaining.
    :attr:`first` -> `bool`
        ``True`` on the first iteration, ``False`` otherwise.
    :attr:`last` -> `bool`
        ``True`` on the last iteration, ``False`` otherwise.
    :attr:`even` -> `bool`
        ``True`` when ``index`` is even.
    :attr:`odd` -> `bool`
        ``True`` when ``index`` is odd.
    """

    def __init__(self, iterable):
        self._iterable = iterable
        self.index = 0
        self.parent = None

    def __iter__(self):
        # index advances only after each item has been yielded (i.e. after
        # the loop body for that item has run)
        for i in self._iterable:
            yield i
            self.index += 1

    @util.memoized_instancemethod
    def __len__(self):
        # NOTE(review): relies on the wrapped iterable supporting len();
        # presumably memoized so it is computed once per loop — confirm
        # against mako.util.memoized_instancemethod.
        return len(self._iterable)

    @property
    def reverse_index(self):
        return len(self) - self.index - 1

    @property
    def first(self):
        return self.index == 0

    @property
    def last(self):
        return self.index == len(self) - 1

    @property
    def even(self):
        return not self.odd

    @property
    def odd(self):
        return bool(self.index % 2)

    def cycle(self, *values):
        """Cycle through values as the loop progresses."""
        if not values:
            raise ValueError("You must provide values to cycle through")
        return values[self.index % len(values)]
class _NSAttr(object):
    """Proxy behind ``namespace.attr``: resolves module-level attributes
    by walking up the namespace inheritance chain."""

    def __init__(self, parent):
        self.__parent = parent

    def __getattr__(self, key):
        ns = self.__parent
        while ns:
            module = ns.module
            if hasattr(module, key):
                return getattr(module, key)
            # not found here; try the inherited namespace, if any
            ns = ns.inherits
        raise AttributeError(key)
class Namespace(object):
    """Provides access to collections of rendering methods, which
    can be local, from other templates, or from imported modules.
    To access a particular rendering method referenced by a
    :class:`.Namespace`, use plain attribute access:
    .. sourcecode:: mako
        ${some_namespace.foo(x, y, z)}
    :class:`.Namespace` also contains several built-in attributes
    described here.
    """

    def __init__(
        self,
        name,
        context,
        callables=None,
        inherits=None,
        populate_self=True,
        calling_uri=None,
    ):
        self.name = name
        self.context = context
        self.inherits = inherits
        if callables is not None:
            # index the supplied callables by function name
            self.callables = dict([(c.__name__, c) for c in callables])
        # release the argument reference; the local is not used further
        callables = ()

    # class-level default; instances populated from a Template override this
    module = None
    """The Python module referenced by this :class:`.Namespace`.
    If the namespace references a :class:`.Template`, then
    this module is the equivalent of ``template.module``,
    i.e. the generated module for the template.
    """

    template = None
    """The :class:`.Template` object referenced by this
    :class:`.Namespace`, if any.
    """

    context = None
    """The :class:`.Context` object for this :class:`.Namespace`.
    Namespaces are often created with copies of contexts that
    contain slightly different data, particularly in inheritance
    scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
    can traverse an entire chain of templates that inherit from
    one-another.
    """

    filename = None
    """The path of the filesystem file used for this
    :class:`.Namespace`'s module or template.
    If this is a pure module-based
    :class:`.Namespace`, this evaluates to ``module.__file__``. If a
    template-based namespace, it evaluates to the original
    template file location.
    """

    uri = None
    """The URI for this :class:`.Namespace`'s template.
    I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
    This is the equivalent of :attr:`.Template.uri`.
    """

    _templateuri = None

    @util.memoized_property
    def attr(self):
        """Access module level attributes by name.
        This accessor allows templates to supply "scalar"
        attributes which are particularly handy in inheritance
        relationships.
        .. seealso::
            :ref:`inheritance_attr`
            :ref:`namespace_attr_for_includes`
        """
        return _NSAttr(self)

    def get_namespace(self, uri):
        """Return a :class:`.Namespace` corresponding to the given ``uri``.
        If the given ``uri`` is a relative URI (i.e. it does not
        contain a leading slash ``/``), the ``uri`` is adjusted to
        be relative to the ``uri`` of the namespace itself. This
        method is therefore mostly useful off of the built-in
        ``local`` namespace, described in :ref:`namespace_local`.
        In
        most cases, a template wouldn't need this function, and
        should instead use the ``<%namespace>`` tag to load
        namespaces. However, since all ``<%namespace>`` tags are
        evaluated before the body of a template ever runs,
        this method can be used to locate namespaces using
        expressions that were generated within the body code of
        the template, or to conditionally use a particular
        namespace.
        """
        # namespaces are memoized per (namespace, uri) on the context
        key = (self, uri)
        if key in self.context.namespaces:
            return self.context.namespaces[key]
        else:
            ns = TemplateNamespace(
                uri,
                self.context._copy(),
                templateuri=uri,
                calling_uri=self._templateuri,
            )
            self.context.namespaces[key] = ns
            return ns

    def get_template(self, uri):
        """Return a :class:`.Template` from the given ``uri``.
        The ``uri`` resolution is relative to the ``uri`` of this
        :class:`.Namespace` object's :class:`.Template`.
        """
        return _lookup_template(self.context, uri, self._templateuri)

    def get_cached(self, key, **kwargs):
        """Return a value from the :class:`.Cache` referenced by this
        :class:`.Namespace` object's :class:`.Template`.
        The advantage to this method versus direct access to the
        :class:`.Cache` is that the configuration parameters
        declared in ``<%page>`` take effect here, thereby calling
        up the same configured backend as that configured
        by ``<%page>``.
        """
        return self.cache.get(key, **kwargs)

    @property
    def cache(self):
        """Return the :class:`.Cache` object referenced
        by this :class:`.Namespace` object's
        :class:`.Template`.
        """
        return self.template.cache

    def include_file(self, uri, **kwargs):
        """Include a file at the given ``uri``."""
        _include_file(self.context, uri, self._templateuri, **kwargs)

    def _populate(self, d, l):
        # copy the requested names (or everything, for "*") into dict ``d``
        for ident in l:
            if ident == "*":
                for k, v in self._get_star():
                    d[k] = v
            else:
                d[ident] = getattr(self, ident)

    def _get_star(self):
        # yield all (name, callable) pairs exported by this namespace
        if self.callables:
            for key in self.callables:
                yield (key, self.callables[key])

    def __getattr__(self, key):
        # resolve from local callables first, then the inherited namespace
        if key in self.callables:
            val = self.callables[key]
        elif self.inherits:
            val = getattr(self.inherits, key)
        else:
            raise AttributeError("Namespace '%s' has no member '%s'" % (self.name, key))
        # memoize on the instance so __getattr__ runs at most once per name
        setattr(self, key, val)
        return val
class TemplateNamespace(Namespace):
    """A :class:`.Namespace` specific to a :class:`.Template` instance."""

    def __init__(
        self,
        name,
        context,
        template=None,
        templateuri=None,
        callables=None,
        inherits=None,
        populate_self=True,
        calling_uri=None,
    ):
        self.name = name
        self.context = context
        self.inherits = inherits
        if callables is not None:
            self.callables = dict([(c.__name__, c) for c in callables])
        # the template may be given directly or located by uri
        if templateuri is not None:
            self.template = _lookup_template(context, templateuri, calling_uri)
            self._templateuri = self.template.module._template_uri
        elif template is not None:
            self.template = template
            self._templateuri = template.module._template_uri
        else:
            raise TypeError("'template' argument is required.")
        if populate_self:
            # establishes "self"/"local" in the context for this template
            lclcallable, lclcontext = _populate_self_namespace(
                context, self.template, self_ns=self
            )

    @property
    def module(self):
        """The Python module referenced by this :class:`.Namespace`.
        If the namespace references a :class:`.Template`, then
        this module is the equivalent of ``template.module``,
        i.e. the generated module for the template.
        """
        return self.template.module

    @property
    def filename(self):
        """The path of the filesystem file used for this
        :class:`.Namespace`'s module or template.
        """
        return self.template.filename

    @property
    def uri(self):
        """The URI for this :class:`.Namespace`'s template.
        I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
        This is the equivalent of :attr:`.Template.uri`.
        """
        return self.template.uri

    def _get_star(self):
        # yield local callables plus every def exported by the template
        if self.callables:
            for key in self.callables:
                yield (key, self.callables[key])

        def get(key):
            # bind the def's callable to this namespace's context
            callable_ = self.template._get_def_callable(key)
            return compat.partial(callable_, self.context)

        for k in self.template.module._exports:
            yield (k, get(k))

    def __getattr__(self, key):
        # resolution order: local callables, template defs, inherited namespace
        if key in self.callables:
            val = self.callables[key]
        elif self.template.has_def(key):
            callable_ = self.template._get_def_callable(key)
            val = compat.partial(callable_, self.context)
        elif self.inherits:
            val = getattr(self.inherits, key)
        else:
            raise AttributeError("Namespace '%s' has no member '%s'" % (self.name, key))
        # memoize on the instance so __getattr__ runs at most once per name
        setattr(self, key, val)
        return val
class ModuleNamespace(Namespace):
    """A :class:`.Namespace` specific to a Python module instance."""

    def __init__(
        self,
        name,
        context,
        module,
        callables=None,
        inherits=None,
        populate_self=True,
        calling_uri=None,
    ):
        self.name = name
        self.context = context
        self.inherits = inherits
        if callables is not None:
            self.callables = dict([(c.__name__, c) for c in callables])
        # __import__ returns the top-level package; walk the dotted path
        # down to the actual submodule
        mod = __import__(module)
        for token in module.split(".")[1:]:
            mod = getattr(mod, token)
        self.module = mod

    @property
    def filename(self):
        """The path of the filesystem file used for this
        :class:`.Namespace`'s module or template.
        """
        return self.module.__file__

    def _get_star(self):
        # yield local callables plus every public callable of the module
        if self.callables:
            for key in self.callables:
                yield (key, self.callables[key])
        for key in dir(self.module):
            if key[0] != "_":
                callable_ = getattr(self.module, key)
                if compat.callable(callable_):
                    # bind the module function to this namespace's context
                    yield key, compat.partial(callable_, self.context)

    def __getattr__(self, key):
        # resolution order: local callables, module attributes, inherited ns
        if key in self.callables:
            val = self.callables[key]
        elif hasattr(self.module, key):
            callable_ = getattr(self.module, key)
            val = compat.partial(callable_, self.context)
        elif self.inherits:
            val = getattr(self.inherits, key)
        else:
            raise AttributeError("Namespace '%s' has no member '%s'" % (self.name, key))
        # memoize on the instance so __getattr__ runs at most once per name
        setattr(self, key, val)
        return val
def supports_caller(func):
    """Apply a caller_stack compatibility decorator to a plain
    Python function.
    See the example in :ref:`namespaces_python_modules`.
    """

    def wrap_stackframe(context, *args, **kwargs):
        # maintain the caller stack around the call so ``caller`` is
        # available inside the wrapped function, popping even on error
        stack = context.caller_stack
        stack._push_frame()
        try:
            return func(context, *args, **kwargs)
        finally:
            stack._pop_frame()

    return wrap_stackframe
def capture(context, callable_, *args, **kwargs):
    """Execute the given template def, capturing the output into
    a buffer.
    See the example in :ref:`namespaces_python_modules`.
    """
    if not compat.callable(callable_):
        raise exceptions.RuntimeException(
            "capture() function expects a callable as "
            "its argument (i.e. capture(func, *args, **kwargs))"
        )
    # redirect writes into a fresh buffer for the duration of the call;
    # the buffer is always popped, even if the callable raises
    context._push_buffer()
    try:
        callable_(*args, **kwargs)
    finally:
        captured = context._pop_buffer()
    return captured.getvalue()
def _decorate_toplevel(fn):
    """Adapt a user decorator ``fn`` so it can wrap a module-level render
    function, hiding the leading ``context`` argument from it."""

    def decorate_render(render_fn):
        def go(context, *args, **kw):
            # present the render function to the decorator without the
            # context parameter
            def y(*args, **kw):
                return render_fn(context, *args, **kw)

            try:
                # strip the "render_" prefix so the decorator sees the
                # def's original name
                y.__name__ = render_fn.__name__[7:]
            except TypeError:
                # < Python 2.4
                pass
            return fn(y)(context, *args, **kw)

        return go

    return decorate_render
def _decorate_inline(context, fn):
    """Adapt a user decorator ``fn`` for a def rendered inline: the
    decorated callable is invoked with the enclosing ``context``."""

    def decorate_render(render_fn):
        decorated = fn(render_fn)

        def go(*args, **kw):
            return decorated(context, *args, **kw)

        return go

    return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
    """locate the template from the given uri and include it in
    the current output."""
    template = _lookup_template(context, uri, calling_uri)
    # render with inheritance tokens (self/parent/next) stripped so the
    # included template renders standalone
    (callable_, ctx) = _populate_self_namespace(
        context._clean_inheritance_tokens(), template
    )
    # arguments declared by the include's render body are pulled from the
    # current context's data unless passed explicitly
    kwargs = _kwargs_for_include(callable_, context._data, **kwargs)
    if template.include_error_handler:
        try:
            callable_(ctx, **kwargs)
        except Exception:
            # a truthy handler result swallows the error; otherwise re-raise
            # with the original traceback
            result = template.include_error_handler(ctx, compat.exception_as())
            if not result:
                compat.reraise(*sys.exc_info())
    else:
        callable_(ctx, **kwargs)
def _inherit_from(context, uri, calling_uri):
    """called by the _inherit method in template modules to set
    up the inheritance chain at the start of a template's
    execution."""
    if uri is None:
        return None
    template = _lookup_template(context, uri, calling_uri)
    self_ns = context["self"]
    # walk to the topmost namespace in the chain built so far
    ih = self_ns
    while ih.inherits is not None:
        ih = ih.inherits
    # the inheriting (child) template becomes "next" for the parent
    lclcontext = context._locals({"next": ih})
    ih.inherits = TemplateNamespace(
        "self:%s" % template.uri, lclcontext, template=template, populate_self=False
    )
    context._data["parent"] = lclcontext._data["local"] = ih.inherits
    # a parent template may define _mako_inherit to intercept setup
    callable_ = getattr(template.module, "_mako_inherit", None)
    if callable_ is not None:
        ret = callable_(template, lclcontext)
        if ret:
            return ret
    gen_ns = getattr(template.module, "_mako_generate_namespaces", None)
    if gen_ns is not None:
        gen_ns(context)
    return (template.callable_, lclcontext)
def _lookup_template(context, uri, relativeto):
    """Resolve ``uri`` (relative to ``relativeto``) through the context's
    TemplateLookup and return the Template, normalizing lookup errors."""
    lookup = context._with_template.lookup
    if lookup is None:
        raise exceptions.TemplateLookupException(
            "Template '%s' has no TemplateLookup associated"
            % context._with_template.uri
        )
    adjusted = lookup.adjust_uri(uri, relativeto)
    try:
        return lookup.get_template(adjusted)
    except exceptions.TopLevelLookupException:
        raise exceptions.TemplateLookupException(str(compat.exception_as()))
def _populate_self_namespace(context, template, self_ns=None):
    """Install ``self``/``local`` namespaces for ``template`` into the
    context and return ``(render callable, context)``."""
    if self_ns is None:
        self_ns = TemplateNamespace(
            "self:%s" % template.uri, context, template=template, populate_self=False
        )
    context._data["self"] = context._data["local"] = self_ns
    # templates participating in inheritance define _mako_inherit, which
    # may substitute its own (callable, context) pair
    module = template.module
    if hasattr(module, "_mako_inherit"):
        ret = module._mako_inherit(template, context)
        if ret:
            return ret
    return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
    """create a Context and return the string
    output of the given template and template callable."""
    # choose the output buffer: unicode buffer, raw StringIO passthrough,
    # or an encoding buffer honoring the template's output encoding
    if as_unicode:
        buf = util.FastEncodingBuffer(as_unicode=True)
    elif template.bytestring_passthrough:
        buf = compat.StringIO()
    else:
        buf = util.FastEncodingBuffer(
            as_unicode=as_unicode,
            encoding=template.output_encoding,
            errors=template.encoding_errors,
        )
    context = Context(buf, **data)
    context._outputting_as_unicode = as_unicode
    context._set_with_template(template)
    _render_context(
        template, callable_, context, *args, **_kwargs_for_callable(callable_, data)
    )
    return context._pop_buffer().getvalue()
def _kwargs_for_callable(callable_, data):
    """Select the subset of ``data`` that the render callable's signature
    accepts; pass everything when it takes ``**pageargs``."""
    argspec = compat.inspect_func_args(callable_)
    # for normal pages, **pageargs is usually present
    if argspec[2]:
        return data
    # for rendering defs from the top level, figure out the args
    namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
    kwargs = {}
    for arg in namedargs:
        if arg == "context" or arg not in data:
            continue
        kwargs.setdefault(arg, data[arg])
    return kwargs
def _kwargs_for_include(callable_, data, **kwargs):
    """Fill ``kwargs`` with values from ``data`` for every argument the
    include's render callable declares; explicit kwargs take precedence."""
    argspec = compat.inspect_func_args(callable_)
    namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
    for arg in namedargs:
        if arg == "context" or arg not in data:
            continue
        kwargs.setdefault(arg, data[arg])
    return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
    # local import avoids a circular dependency at module import time
    import mako.template as template

    # create polymorphic 'self' namespace for this
    # template with possibly updated context
    if not isinstance(tmpl, template.DefTemplate):
        # if main render method, call from the base of the inheritance stack
        (inherit, lclcontext) = _populate_self_namespace(context, tmpl)
        _exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
    else:
        # otherwise, call the actual rendering method specified
        (inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
        _exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
    """execute a rendering callable given the callable, a
    Context, and optional explicit arguments
    the contextual Template will be located if it exists, and
    the error handling options specified on that Template will
    be interpreted here.
    """
    template = context._with_template
    if template is not None and (template.format_exceptions or template.error_handler):
        try:
            callable_(context, *args, **kwargs)
        except Exception:
            _render_error(template, context, compat.exception_as())
        except:
            # legacy catch-all for exceptions not derived from Exception
            # (e.g. old-style/string exceptions on Python 2); note this
            # branch passes the exception *class* from exc_info, not the
            # instance, to the error renderer
            e = sys.exc_info()[0]
            _render_error(template, context, e)
    else:
        # no error handling requested: let exceptions propagate
        callable_(context, *args, **kwargs)
def _render_error(template, context, error):
    # Either delegate to the template's error_handler, or replace the
    # output produced so far with the standard HTML error page.
    if template.error_handler:
        # a truthy handler result swallows the error; otherwise re-raise
        result = template.error_handler(context, error)
        if not result:
            compat.reraise(*sys.exc_info())
    else:
        error_template = exceptions.html_error_template()
        # reset the buffer stack, discarding partial output, with a buffer
        # matching the current output mode
        if context._outputting_as_unicode:
            context._buffer_stack[:] = [util.FastEncodingBuffer(as_unicode=True)]
        else:
            context._buffer_stack[:] = [
                util.FastEncodingBuffer(
                    error_template.output_encoding, error_template.encoding_errors
                )
            ]
        context._set_with_template(error_template)
        error_template.render_context(context, error=error)
|
meta-architectures | rfcn_meta_arch_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.rfcn_meta_arch."""
import tensorflow as tf
from app.object_detection.meta_architectures import (
faster_rcnn_meta_arch_test_lib,
rfcn_meta_arch,
)
class RFCNMetaArchTest(faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase):
    """R-FCN meta-architecture tests.

    The shared Faster R-CNN test base drives the actual test cases; this
    subclass only supplies the R-FCN-specific configuration hooks.
    """

    def _get_second_stage_box_predictor_text_proto(self):
        # Text-format proto configuring the R-FCN box predictor's conv
        # hyperparameters (L2 regularization, variance-scaling init).
        box_predictor_text_proto = """
      rfcn_box_predictor {
        conv_hyperparams {
          op: CONV
          activation: NONE
          regularizer {
            l2_regularizer {
              weight: 0.0005
            }
          }
          initializer {
            variance_scaling_initializer {
              factor: 1.0
              uniform: true
              mode: FAN_AVG
            }
          }
        }
      }
    """
        return box_predictor_text_proto

    def _get_model(self, box_predictor, **common_kwargs):
        # Build the model under test from the kwargs shared with the
        # Faster R-CNN tests, swapping in the R-FCN box predictor.
        return rfcn_meta_arch.RFCNMetaArch(
            second_stage_rfcn_box_predictor=box_predictor, **common_kwargs
        )

    def _get_box_classifier_features_shape(
        self,
        image_size,
        batch_size,
        max_num_proposals,
        initial_crop_size,
        maxpool_stride,
        num_features,
    ):
        # R-FCN classifies on the full feature map rather than per-proposal
        # crops, so max_num_proposals / initial_crop_size / maxpool_stride
        # do not influence the expected shape here.
        return (batch_size, image_size, image_size, num_features)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    tf.test.main()
|
PySide | ui_mainwindow | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Fri Nov 20 18:03:04 2015
# by: pyside-uic 0.2.13 running on PySide 1.1.0
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
    # NOTE: this class is machine-generated by pyside-uic from
    # mainwindow.ui — regenerate from the .ui file instead of editing.

    def setupUi(self, MainWindow):
        # Build the widget tree: central widget, menu bar with a single
        # "FreeCAD" menu, status bar, and three menu actions.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
        self.menubar.setObjectName("menubar")
        self.menuFreeCAD = QtGui.QMenu(self.menubar)
        self.menuFreeCAD.setObjectName("menuFreeCAD")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionEmbed = QtGui.QAction(MainWindow)
        self.actionEmbed.setObjectName("actionEmbed")
        self.actionDocument = QtGui.QAction(MainWindow)
        self.actionDocument.setObjectName("actionDocument")
        self.actionCube = QtGui.QAction(MainWindow)
        self.actionCube.setObjectName("actionCube")
        self.menuFreeCAD.addAction(self.actionEmbed)
        self.menuFreeCAD.addAction(self.actionDocument)
        self.menuFreeCAD.addAction(self.actionCube)
        self.menubar.addAction(self.menuFreeCAD.menuAction())
        # apply translated texts, then hook up autoconnected slots
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Set all user-visible strings through Qt's translation machinery.
        MainWindow.setWindowTitle(
            QtGui.QApplication.translate(
                "MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8
            )
        )
        self.menuFreeCAD.setTitle(
            QtGui.QApplication.translate(
                "MainWindow", "FreeCAD", None, QtGui.QApplication.UnicodeUTF8
            )
        )
        self.actionEmbed.setText(
            QtGui.QApplication.translate(
                "MainWindow", "Embed", None, QtGui.QApplication.UnicodeUTF8
            )
        )
        self.actionDocument.setText(
            QtGui.QApplication.translate(
                "MainWindow", "Document", None, QtGui.QApplication.UnicodeUTF8
            )
        )
        self.actionCube.setText(
            QtGui.QApplication.translate(
                "MainWindow", "Cube", None, QtGui.QApplication.UnicodeUTF8
            )
        )
|
transform-plugin | transforms | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2021 by Ihor E. Novikov
# Copyright (C) 2020 fixed by Michael Schorcht
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math
import os
import wal
from sk1 import _
from sk1.pwidgets import AngleSpin, UnitLabel, UnitSpin
from sk1.resources import get_bmp
from uc2 import libgeom, sk2const
# Filesystem locations of this plugin and its bundled toolbar images.
PLG_DIR = os.path.dirname(__file__)
IMG_DIR = os.path.join(PLG_DIR, "images")
def make_artid(name):
    """Return the full path of the plugin image file called *name*."""
    return os.path.join(IMG_DIR, "{}.png".format(name))
class AbstractTransform(wal.VPanel):
    """Base panel for the Transform plugin tabs.

    Subclasses override build()/update()/get_trafo() to implement one
    concrete affine transformation (position, resizing, scaling, ...).
    """

    name = "Transform"  # tab caption, overridden by subclasses
    app = None  # application reference, set in __init__
    orientation = (0.0, 0.0)  # base-point selector per axis (presumably -1..1)
    callback = None  # optional callable invoked on user-driven resets
    user_changes = False  # True once the user edited a field manually
    # NOTE(review): mutable class-level default; subclasses rebind it in
    # build(), so instances do not actually share this list.
    active_widgets = []

    def __init__(self, parent, app, onreset=None):
        self.app = app
        self.callback = onreset
        wal.VPanel.__init__(self, parent)
        self.caption = wal.Label(self, self.name, fontbold=True)
        self.pack(self.caption, padding_all=5)
        self.build()

    def on_reset(self):
        """Reset the base point and remember that the user changed something."""
        self.orientation = (0.0, 0.0)
        self.user_changes = True
        if self.callback:
            self.callback()

    def build(self):
        """Create the panel widgets (overridden by subclasses)."""
        pass

    def update(self):
        """Refresh widget values from the selection (overridden)."""
        pass

    def set_enable(self, state):
        """Enable/disable all active widgets; refresh when enabling."""
        for widget in self.active_widgets:
            widget.set_enable(state)
        if state:
            self.update()

    def set_orientation(self, orientation=(0.0, 0.0)):
        """Set the transformation base point and refresh the fields."""
        self.orientation = orientation
        self.user_changes = False
        self.update()

    def get_trafo(self):
        """Return the transform as a fresh copy of the sk2 identity matrix."""
        return [] + sk2const.NORMAL_TRAFO

    def get_selection_bbox(self):
        """Return a copy of the current selection's bounding box."""
        doc = self.app.current_doc
        return [] + doc.selection.bbox

    def get_selection_size(self):
        """Return (width, height) of the current selection."""
        bbox = self.get_selection_bbox()
        return bbox[2] - bbox[0], bbox[3] - bbox[1]

    def is_ll_coords(self):
        """True if the document origin is the lower-left page corner."""
        doc = self.app.current_doc
        return doc.methods.get_doc_origin() == sk2const.DOC_ORIGIN_LL

    def is_lu_coords(self):
        """True if the document origin is the upper-left page corner."""
        doc = self.app.current_doc
        return doc.methods.get_doc_origin() == sk2const.DOC_ORIGIN_LU

    def is_center_coords(self):
        """True if the document origin is the page center."""
        doc = self.app.current_doc
        return doc.methods.get_doc_origin() == sk2const.DOC_ORIGIN_CENTER

    def doc_to_coords(self, point):
        """Convert a point from internal (page-centered) to user coordinates."""
        x, y = point
        pw, ph = self.app.current_doc.get_page_size()
        if self.is_ll_coords():
            x += pw / 2.0
            y += ph / 2.0
        elif self.is_lu_coords():
            x += pw / 2.0
            y -= ph / 2.0
            # Flip the vertical axis, but keep 0.0 from becoming -0.0.
            y = -y if y else y
        return [x, y]

    def coords_to_doc(self, point):
        """Convert a point from user coordinates back to internal coordinates."""
        x, y = point
        pw, ph = self.app.current_doc.get_page_size()
        if self.is_ll_coords():
            x -= pw / 2.0
            y -= ph / 2.0
        elif self.is_lu_coords():
            x -= pw / 2.0
            y = -y
            y += ph / 2.0
        return [x, y]
class PositionTransform(AbstractTransform):
    """Transform tab: move the selection, relatively or to an absolute spot."""

    name = _("Position")
    dx = 0.0
    dy = 0.0
    h_spin = None  # horizontal offset/position field
    v_spin = None  # vertical offset/position field
    abs_pos = None  # "Absolute position" checkbox

    def build(self):
        """Create the two unit fields and the absolute-position checkbox."""
        grid = wal.GridPanel(self, 2, 3, 2, 2)
        grid.pack(get_bmp(grid, make_artid("h-sign")))
        self.h_spin = UnitSpin(
            self.app, grid, can_be_negative=True, onchange=self.on_reset
        )
        grid.pack(self.h_spin)
        grid.pack(UnitLabel(self.app, grid))
        grid.pack(get_bmp(grid, make_artid("v-sign")))
        self.v_spin = UnitSpin(
            self.app, grid, can_be_negative=True, onchange=self.on_reset
        )
        grid.pack(self.v_spin)
        grid.pack(UnitLabel(self.app, grid))
        self.pack(grid, align_center=False, padding=5)
        self.abs_pos = wal.Checkbox(self, _("Absolute position"), onclick=self.update)
        self.pack(self.abs_pos, align_center=False, padding=5)
        self.active_widgets = [self.h_spin, self.v_spin, self.abs_pos]

    def on_reset(self):
        """In relative mode a manual edit also resets the base point."""
        self.user_changes = True
        if not self.abs_pos.get_value():
            AbstractTransform.on_reset(self)

    def update(self):
        """Fill the fields from the selection bbox and the base point."""
        if self.app.insp.is_selection() and not self.user_changes:
            bbox = self.get_selection_bbox()
            w, h = self.get_selection_size()
            if self.abs_pos.get_value():
                # Base point in doc coords: bbox center shifted by the
                # orientation offsets (halved below).
                new_x = bbox[2] + bbox[0] + self.orientation[0] * w
                new_y = bbox[3] + bbox[1] + self.orientation[1] * h
                dx, dy = self.doc_to_coords([new_x / 2.0, new_y / 2.0])
            else:
                dx = self.orientation[0] * w
                dy = self.orientation[1] * h
                # In upper-left-origin mode the vertical axis points down.
                dy *= -1.0 if self.is_lu_coords() else 1.0
            self.h_spin.set_point_value(dx)
            self.v_spin.set_point_value(dy)

    def get_trafo(self):
        """Return a translation matrix for the requested move."""
        trafo = [] + sk2const.NORMAL_TRAFO
        if self.abs_pos.get_value():
            bbox = self.get_selection_bbox()
            w, h = self.get_selection_size()
            new_x = self.h_spin.get_point_value()
            new_y = self.v_spin.get_point_value()
            new_x, new_y = self.coords_to_doc([new_x, new_y])
            old_x = (bbox[2] + bbox[0] + self.orientation[0] * w) / 2.0
            old_y = (bbox[3] + bbox[1] + self.orientation[1] * h) / 2.0
            trafo[4] = new_x - old_x
            trafo[5] = new_y - old_y
        else:
            trafo[4] = self.h_spin.get_point_value()
            trafo[5] = self.v_spin.get_point_value()
            trafo[5] *= -1.0 if self.is_lu_coords() else 1.0
        return trafo
class ResizeTransform(AbstractTransform):
    """Transform tab: resize the selection to absolute dimensions."""

    name = _("Resizing")
    h_spin = None  # new width field
    v_spin = None  # new height field
    proportion = None  # "Keep ratio" checkbox

    def build(self):
        """Create the width/height fields and the keep-ratio checkbox."""
        grid = wal.GridPanel(self, 2, 3, 2, 2)
        grid.pack(get_bmp(grid, make_artid("h-sign")))
        self.h_spin = UnitSpin(self.app, grid, onchange=self.on_reset)
        grid.pack(self.h_spin)
        grid.pack(UnitLabel(self.app, grid))
        grid.pack(get_bmp(grid, make_artid("v-sign")))
        self.v_spin = UnitSpin(self.app, grid, onchange=self.height_changed)
        grid.pack(self.v_spin)
        grid.pack(UnitLabel(self.app, grid))
        self.pack(grid, align_center=False, padding=5)
        self.proportion = wal.Checkbox(self, _("Keep ratio"), True)
        self.pack(self.proportion, align_center=False, padding=5)
        self.active_widgets = [self.h_spin, self.v_spin, self.proportion]

    def height_changed(self):
        """Route height edits to on_reset with the height flag set."""
        self.on_reset(True)

    def on_reset(self, height_changed=False):
        """Keep the two fields in ratio after a manual edit."""
        self.user_changes = True
        # A zero value would break the ratio math and division below.
        if not self.h_spin.get_point_value():
            self.h_spin.set_point_value(1.0)
        if not self.v_spin.get_point_value():
            self.v_spin.set_point_value(1.0)
        if self.proportion.get_value():
            w, h = self.get_selection_size()
            # NOTE(review): assumes a selection with nonzero w/h; a
            # zero-sized selection would raise ZeroDivisionError here.
            if height_changed:
                new_h = self.v_spin.get_point_value()
                self.h_spin.set_point_value(w * new_h / h)
            else:
                new_w = self.h_spin.get_point_value()
                self.v_spin.set_point_value(h * new_w / w)

    def set_enable(self, state):
        """Enable/disable widgets, dropping any pending user edits."""
        self.user_changes = False
        AbstractTransform.set_enable(self, state)

    def set_orientation(self, orientation=(0.0, 0.0)):
        """Base-point change: refresh, keeping the user_changes flag."""
        self.orientation = orientation
        self.update()

    def update(self):
        """Show the selection's current size in the fields."""
        if not self.app.insp.is_selection():
            return
        if self.user_changes:
            return
        w, h = self.get_selection_size()
        self.h_spin.set_point_value(w)
        self.v_spin.set_point_value(h)

    def get_trafo(self):
        """Return a scale matrix that resizes about the base point."""
        trafo = [] + sk2const.NORMAL_TRAFO
        bbox = self.get_selection_bbox()
        w, h = self.get_selection_size()
        new_w = self.h_spin.get_point_value()
        new_h = self.v_spin.get_point_value()
        trafo[0] = new_w / w
        trafo[3] = new_h / h
        # Translate so the selected base point stays fixed under scaling.
        bp = [
            bbox[0] + w * (1.0 + self.orientation[0]) / 2.0,
            bbox[1] + h * (1.0 + self.orientation[1]) / 2.0,
        ]
        new_bp = libgeom.apply_trafo_to_point(bp, trafo)
        trafo[4] = bp[0] - new_bp[0]
        trafo[5] = bp[1] - new_bp[1]
        return trafo
class ScaleTransform(AbstractTransform):
    """Transform tab: scale by percentage and/or mirror the selection."""

    name = _("Scale and mirror")
    # Percentages remembered from the previous edit; used for ratio keeping.
    v_scale = 100.0
    h_scale = 100.0
    h_spin = None  # horizontal percent field
    h_mirror = None  # horizontal flip toggle
    v_spin = None  # vertical percent field
    v_mirror = None  # vertical flip toggle
    proportion = None  # "Keep ratio" checkbox

    def build(self):
        """Create percent fields, mirror toggles and keep-ratio checkbox."""
        grid = wal.GridPanel(self, 2, 5, 2, 2)
        grid.pack(get_bmp(grid, make_artid("h-sign")))
        self.h_spin = wal.FloatSpin(
            grid, 100.0, (0.01, 10000.0), 1.0, onchange=self.on_reset
        )
        grid.pack(self.h_spin)
        grid.pack(wal.Label(grid, "%"))
        grid.pack((5, 5))
        self.h_mirror = wal.ImageToggleButton(
            grid,
            False,
            make_artid("h-mirror"),
            tooltip=_("Flip horizontal"),
            flat=False,
        )
        grid.pack(self.h_mirror)
        grid.pack(get_bmp(grid, make_artid("v-sign")))
        self.v_spin = wal.FloatSpin(
            grid, 100.0, (0.01, 10000.0), 1.0, onchange=self.height_changed
        )
        grid.pack(self.v_spin)
        grid.pack(wal.Label(grid, "%"))
        grid.pack((5, 5))
        self.v_mirror = wal.ImageToggleButton(
            grid, False, make_artid("v-mirror"), tooltip=_("Flip vertical"), flat=False
        )
        grid.pack(self.v_mirror)
        self.pack(grid, align_center=False, padding=5)
        self.proportion = wal.Checkbox(self, _("Keep ratio"), True)
        self.pack(self.proportion, align_center=False, padding=5)
        self.active_widgets = [
            self.h_spin,
            self.h_mirror,
            self.v_spin,
            self.v_mirror,
            self.proportion,
        ]

    def height_changed(self):
        """Route vertical edits to on_reset with the height flag set."""
        self.on_reset(True)

    def on_reset(self, height_changed=False):
        """Mirror a manual edit onto the other axis when keeping ratio."""
        self.user_changes = True
        if self.proportion.get_value():
            h = self.h_spin.get_value()
            v = self.v_spin.get_value()
            # Apply the same relative change to the other axis, based on
            # the percentages remembered from the previous edit.
            if height_changed:
                self.h_spin.set_value(v * h / self.v_scale)
            else:
                self.v_spin.set_value(v * h / self.h_scale)
        self.v_scale = self.v_spin.get_value()
        self.h_scale = self.h_spin.get_value()

    def set_enable(self, state):
        """Enable/disable widgets, dropping any pending user edits."""
        self.user_changes = False
        AbstractTransform.set_enable(self, state)

    def set_orientation(self, orientation=(0.0, 0.0)):
        """Base-point change: refresh the fields."""
        self.orientation = orientation
        self.update()

    def update(self):
        """Reset both fields to 100% for the current selection."""
        if not self.app.insp.is_selection():
            return
        if self.user_changes:
            return
        self.h_spin.set_value(100.0)
        self.v_spin.set_value(100.0)
        self.v_scale = self.h_scale = 100.0

    def get_trafo(self):
        """Return a scale/mirror matrix fixed at the base point."""
        trafo = [] + sk2const.NORMAL_TRAFO
        bbox = self.get_selection_bbox()
        w, h = self.get_selection_size()
        trafo[0] = self.h_spin.get_value() / 100.0
        trafo[3] = self.v_spin.get_value() / 100.0
        # Mirroring is a negative scale factor on the respective axis.
        if self.h_mirror.get_value():
            trafo[0] *= -1.0
        if self.v_mirror.get_value():
            trafo[3] *= -1.0
        bp = [
            bbox[0] + w * (1.0 + self.orientation[0]) / 2.0,
            bbox[1] + h * (1.0 + self.orientation[1]) / 2.0,
        ]
        new_bp = libgeom.apply_trafo_to_point(bp, trafo)
        trafo[4] = bp[0] - new_bp[0]
        trafo[5] = bp[1] - new_bp[1]
        return trafo
class RotateTransform(AbstractTransform):
    """Transform tab: rotate the selection about a center point."""

    name = _("Rotation")
    angle = None  # rotation angle field
    h_spin = None  # center x field
    v_spin = None  # center y field
    center = None  # "Relative center" checkbox

    def build(self):
        """Create the angle field, center fields and relative-center box."""
        grid = wal.GridPanel(self, 1, 3, 2, 2)
        grid.pack(wal.Label(grid, _("Angle:")))
        self.angle = AngleSpin(grid, val_range=(-360.0, 360.0), check_focus=True)
        grid.pack(self.angle)
        grid.pack(wal.Label(grid, "°"))
        self.pack(grid, align_center=False, padding=5)
        self.pack(wal.Label(grid, _("Center:")), align_center=False, padding=5)
        grid = wal.GridPanel(self, 2, 3, 2, 2)
        grid.pack(get_bmp(grid, make_artid("h-sign")))
        self.h_spin = UnitSpin(self.app, grid, can_be_negative=True)
        grid.pack(self.h_spin)
        grid.pack(UnitLabel(self.app, grid))
        grid.pack(get_bmp(grid, make_artid("v-sign")))
        self.v_spin = UnitSpin(self.app, grid, can_be_negative=True)
        grid.pack(self.v_spin)
        grid.pack(UnitLabel(self.app, grid))
        self.pack(grid, align_center=False, padding_all=5)
        self.center = wal.Checkbox(self, _("Relative center"), onclick=self.on_click)
        self.pack(self.center, align_center=False, padding=5)
        self.active_widgets = [self.angle, self.center]
        self.on_click()

    def on_click(self):
        """Enable the custom-center fields when "Relative center" is on."""
        state = False
        if self.center.get_value():
            state = True
            self.user_changes = True
            if self.callback:
                self.callback()
        self.v_spin.set_enable(state)
        self.h_spin.set_enable(state)

    def set_enable(self, state):
        """Enable/disable; center fields follow only in relative mode."""
        if self.center.get_value():
            self.v_spin.set_enable(state)
            self.h_spin.set_enable(state)
        else:
            self.user_changes = False
        AbstractTransform.set_enable(self, state)

    def update(self):
        """Show the base point as the default rotation center."""
        if not self.app.insp.is_selection():
            return
        if self.user_changes:
            return
        bbox = self.get_selection_bbox()
        w, h = self.get_selection_size()
        bp = self.doc_to_coords(
            [
                bbox[0] + w * (1.0 + self.orientation[0]) / 2.0,
                bbox[1] + h * (1.0 + self.orientation[1]) / 2.0,
            ]
        )
        self.h_spin.set_point_value(bp[0])
        self.v_spin.set_point_value(bp[1])

    def get_trafo(self):
        """Return a rotation matrix about the chosen center point."""
        angle = self.angle.get_angle_value()
        center_x = self.h_spin.get_point_value()
        center_y = self.v_spin.get_point_value()
        center_x, center_y = self.coords_to_doc([center_x, center_y])
        # 2x2 rotation block in sk2 [m11, m21, m12, m22, dx, dy] order.
        m21 = math.sin(angle)
        m11 = m22 = math.cos(angle)
        m12 = -m21
        # Translation chosen so the center point stays fixed.
        dx = center_x - m11 * center_x + m21 * center_y
        dy = center_y - m21 * center_x - m11 * center_y
        return [m11, m21, m12, m22, dx, dy]
class ShearTransform(AbstractTransform):
    """Transform tab: shear the selection along either axis."""

    name = _("Shearing")
    h_shear = None  # horizontal shear angle field
    v_shear = None  # vertical shear angle field

    def build(self):
        """Create the two shear-angle fields."""
        grid = wal.GridPanel(self, 3, 3, 2, 2)
        grid.pack(get_bmp(grid, make_artid("h-sign")))
        self.h_shear = AngleSpin(grid, val_range=(-89.0, 89.0), check_focus=True)
        grid.pack(self.h_shear)
        grid.pack(wal.Label(grid, _("degrees")))
        grid.pack(get_bmp(grid, make_artid("v-sign")))
        self.v_shear = AngleSpin(grid, val_range=(-89.0, 89.0), check_focus=True)
        grid.pack(self.v_shear)
        grid.pack(wal.Label(grid, _("degrees")))
        self.pack(grid, align_center=False, padding=5)
        self.active_widgets = [self.h_shear, self.v_shear]

    def get_trafo(self):
        """Return a shear matrix anchored at the base point."""
        angle1 = self.h_shear.get_angle_value()
        angle2 = self.v_shear.get_angle_value()
        m12 = math.tan(angle1)
        m21 = math.tan(angle2)
        m11 = 1.0
        # Compensation keeps the matrix determinant at 1:
        # det = m11*m22 - (-m12)*m21 = (1 - m12*m21) + m12*m21 = 1.
        m22 = 1.0 - (m12 * m21)
        trafo = [m11, m21, -m12, m22, 0, 0]
        bbox = self.get_selection_bbox()
        w, h = self.get_selection_size()
        bp = [
            bbox[0] + w * (1.0 + self.orientation[0]) / 2.0,
            bbox[1] + h * (1.0 + self.orientation[1]) / 2.0,
        ]
        new_bp = libgeom.apply_trafo_to_point(bp, trafo)
        trafo[4] = bp[0] - new_bp[0]
        trafo[5] = bp[1] - new_bp[1]
        return trafo
|
clientScripts | get_aip_storage_locations | #!/usr/bin/env python
import json
import django
# storageService requires Django to be set up
django.setup()
# archivematicaCommon
import storageService as storage_service
from custom_handlers import get_script_logger

# Logger shared by every job processed in call() below.
logger = get_script_logger("archivematica.mcp.client.get_aip_storage_locations")
def get_aip_storage_locations(purpose, job):
    """Print a JSON dict of AIP Storage Locations and their descriptions.

    The mapping (location UUID -> {description, uri}) is written to the
    job's output via ``job.pyprint``; nothing is returned.
    """
    storage_directories = storage_service.get_location(purpose=purpose)
    logger.debug(
        "Storage Directories: {}".format(
            json.dumps(storage_directories, indent=4, sort_keys=True)
        )
    )
    # Fall back to the relative path when a location has no description.
    choices = {
        storage_dir["uuid"]: {
            "description": storage_dir["description"] or storage_dir["relative_path"],
            "uri": storage_dir["resource_uri"],
        }
        for storage_dir in storage_directories
    }
    choices["default"] = {
        "description": "Default Location",
        "uri": f"/api/v2/location/default/{purpose}/",
    }
    job.pyprint(json.dumps(choices, indent=4, sort_keys=True))
def call(jobs):
    """Job entry point: print storage-location choices for each job."""
    for job in jobs:
        with job.JobContext(logger=logger):
            # Optional first argument selects the location purpose;
            # defaults to "AS" (presumably AIP Storage).
            purpose = job.args[1] if len(job.args) > 1 else "AS"
            job.set_status(get_aip_storage_locations(purpose, job))
|
lib | cache_poisoning | import hashlib
import hmac
from pylons import app_globals as g
# A map of cache policies to their respective cache headers
# loggedout omitted because loggedout responses are intentionally cacheable
# Each policy maps a lower-cased header name to the exact set of values
# (individual directives, for Cache-Control) the response must carry;
# an empty set means the header must be absent or empty.
CACHE_POLICY_DIRECTIVES = {
    "loggedin_www": {
        "cache-control": {"private", "no-cache"},
        "pragma": {"no-cache"},
        "expires": set(),
    },
    "loggedin_www_new": {
        "cache-control": {"private", "max-age=0", "must-revalidate"},
        "pragma": set(),
        "expires": {"-1"},
    },
    "loggedin_mweb": {
        "cache-control": {"private", "no-cache"},
        "pragma": set(),
        "expires": set(),
    },
}
def make_poisoning_report_mac(
    poisoner_canary,
    poisoner_name,
    poisoner_id,
    cache_policy,
    source,
    # Can't MAC based on URL, some caches don't care about the
    # order of query params and suchlike.
    route_name,
):
    """
    Make a MAC to send with cache poisoning reports for this page

    Returns the hex HMAC-SHA1 of the report fields under the
    "cache_poisoning" secret, so the report endpoint can check that the
    fields were issued by us rather than forged by a client.
    """
    mac_key = g.secrets["cache_poisoning"]
    # NOTE(review): fields are joined with "|" without escaping; a field
    # containing "|" would let two different tuples MAC identically —
    # confirm none of these inputs can contain "|".
    mac_data = (
        poisoner_canary,
        poisoner_name,
        str(poisoner_id),
        cache_policy,
        source,
        route_name,
    )
    # Python 2 code path: hmac.new operates on byte strings here.
    return hmac.new(mac_key, "|".join(mac_data), hashlib.sha1).hexdigest()
def cache_headers_valid(policy_name, headers):
    """Check if a response's headers make sense given a cache policy"""
    expected_policy = CACHE_POLICY_DIRECTIVES[policy_name]
    for header_name, expected_vals in expected_policy.items():
        found_vals = set(headers.get(header_name, []))
        if header_name == "cache-control":
            # Cache-Control is a little special: directives may be spread
            # over several headers, each holding a comma-separated list,
            # so normalize everything before comparing.
            directives = {
                part.strip().lower()
                for header_value in found_vals
                for part in header_value.split(",")
            }
            if directives != expected_vals:
                return False
        elif found_vals != expected_vals:
            return False
    return True
|
snippet | template | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Save the current document as a snippet that appears in File->New from Template.
"""
import app
import documentinfo
import widgets.dialog
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QCheckBox, QCompleter, QMessageBox, QVBoxLayout, QWidget
from . import model, snippets
class TemplateDialog(widgets.dialog.TextDialog):
    """Dialog asking for a template name plus a "run LilyPond" option."""

    def __init__(self, parent):
        self._lineEdit = None
        super().__init__(parent)
        self.setWindowTitle(app.caption(_("Save as Template")))
        self.setMessage(_("Please enter a template name:"))
        self.setMinimumWidth(320)
        # The name must start and end with a word character.
        self.setValidateRegExp(r"\w(.*\w)?")
        # Keep the base dialog's line edit, then re-parent it into a new
        # main widget that also holds the checkbox.
        e = self._lineEdit = self.mainWidget()
        w = QWidget()
        self.setMainWidget(w)
        c = self._runCheck = QCheckBox(
            _("Run LilyPond when creating a new document from this template")
        )
        layout = QVBoxLayout(margin=0)
        w.setLayout(layout)
        layout.addWidget(e)
        layout.addWidget(c)
        e.setFocus()

    def lineEdit(self):
        """Return the QLineEdit widget."""
        return self._lineEdit or self.mainWidget()

    def runCheck(self):
        """Return the Run LilyPond checkbox."""
        return self._runCheck
def _placeholder_text(text, cursor_pos, anchor_pos=None):
    """Return *text* converted to snippet text.

    Every ``$`` is escaped as ``$$``; ``${CURSOR}`` is inserted at
    *cursor_pos* and, when *anchor_pos* is not None, ``${ANCHOR}`` at
    *anchor_pos*.  Positions are character indexes into the original text.
    """
    repls = [(cursor_pos, "${CURSOR}")]
    if anchor_pos is not None:
        repls.append((anchor_pos, "${ANCHOR}"))
    repls.sort()
    parts = []
    prev = 0
    for pos, what in repls:
        parts.append(text[prev:pos].replace("$", "$$"))
        parts.append(what)
        prev = pos
    parts.append(text[prev:].replace("$", "$$"))
    return "".join(parts)


def save(mainwindow):
    """Save the current document as a template snippet.

    Asks for a title (with completion over existing template titles),
    confirms overwriting, marks cursor/selection with snippet
    placeholders and stores the result via the snippet model.
    """
    # Map titles of existing template snippets to their internal names.
    titles = {
        snippets.title(name): name
        for name in model.model().names()
        if "template" in snippets.get(name).variables
    }
    # would it make sense to run LilyPond after creating a document from this
    # template?
    cursor = mainwindow.textCursor()
    template_run = False
    if documentinfo.mode(cursor.document()) == "lilypond":
        dinfo = documentinfo.docinfo(cursor.document())
        if dinfo.complete() and dinfo.has_output():
            template_run = True
    dlg = TemplateDialog(mainwindow)
    c = QCompleter(sorted(titles), dlg.lineEdit())
    dlg.lineEdit().setCompleter(c)
    dlg.runCheck().setChecked(template_run)
    accepted = dlg.exec_()
    dlg.deleteLater()
    if not accepted:
        return  # cancelled
    title = dlg.text()
    template_run = dlg.runCheck().isChecked()
    if title in titles:
        if (
            QMessageBox.critical(
                mainwindow,
                _("Overwrite Template?"),
                _(
                    'A template named "{name}" already exists.\n\n'
                    "Do you want to overwrite it?"
                ).format(name=title),
                QMessageBox.Yes | QMessageBox.Cancel,
            )
            != QMessageBox.Yes
        ):
            return
        name = titles[title]
    else:
        name = None
    # get the text and insert cursor position or selection
    text = _placeholder_text(
        cursor.document().toPlainText(),
        cursor.position(),
        cursor.anchor() if cursor.hasSelection() else None,
    )
    # add header line, if desired enable autorun
    headerline = "-*- template; indent: no;"
    if template_run:
        headerline += " template-run;"
    text = headerline + "\n" + text
    # save the new snippet
    model.model().saveSnippet(name, text, title)
|
equations | electrostatic_writer | # ***************************************************************************
# * Copyright (c) 2017 Markus Hovorka <m.hovorka@live.de> *
# * Copyright (c) 2020 Bernd Hahnebach <bernd@bimstatik.org> *
# * Copyright (c) 2022 Uwe Stöhr <uwestoehr@lyx.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
# Module metadata used by FreeCAD's documentation tooling.
__title__ = "FreeCAD FEM Electrostatics Elmer writer"
__author__ = "Markus Hovorka, Bernd Hahnebach, Uwe Stöhr"
__url__ = "https://www.freecad.org"
## \addtogroup FEM
# @{
from .. import sifio
class ESwriter:
    """Writes the electrostatics sections of an Elmer solver input file.

    ``writer`` is the general Elmer SIF writer object providing the
    low-level emit helpers; ``solver`` is the FreeCAD solver object.
    """

    def __init__(self, writer, solver):
        # NOTE(review): the attribute is named "write" although it holds
        # the writer object, not a method; kept as-is since all methods
        # below rely on it.
        self.write = writer
        self.solver = solver

    def getElectrostaticSolver(self, equation):
        """Return the populated SIF solver section for *equation*."""
        # check if we need to update the equation
        self._updateElectrostaticSolver(equation)
        # output the equation parameters
        s = self.write.createLinearSolver(equation)
        s["Equation"] = "Stat Elec Solver"  # equation.Name
        s["Procedure"] = sifio.FileAttr("StatElecSolve/StatElecSolver")
        s["Variable"] = self.write.getUniqueVarName("Potential")
        s["Variable DOFs"] = 1
        # Optional result quantities, emitted only when enabled.
        if equation.CalculateCapacitanceMatrix is True:
            s["Calculate Capacitance Matrix"] = equation.CalculateCapacitanceMatrix
            s["Capacitance Matrix Filename"] = equation.CapacitanceMatrixFilename
        if equation.CalculateElectricEnergy is True:
            s["Calculate Electric Energy"] = equation.CalculateElectricEnergy
        if equation.CalculateElectricField is True:
            s["Calculate Electric Field"] = equation.CalculateElectricField
        if equation.CalculateElectricFlux is True:
            s["Calculate Electric Flux"] = equation.CalculateElectricFlux
        if equation.CalculateSurfaceCharge is True:
            s["Calculate Surface Charge"] = equation.CalculateSurfaceCharge
        if equation.ConstantWeights is True:
            s["Constant Weights"] = equation.ConstantWeights
        s["Exec Solver"] = "Always"
        s["Optimize Bandwidth"] = True
        # A potential difference is only meaningful when no capacitance
        # matrix is being computed.
        if equation.CalculateCapacitanceMatrix is False and (
            equation.PotentialDifference != 0.0
        ):
            s["Potential Difference"] = equation.PotentialDifference
        s["Stabilize"] = equation.Stabilize
        return s

    def _updateElectrostaticSolver(self, equation):
        """Add properties that equations from older FreeCAD versions lack."""
        # updates older Electrostatic equations
        if not hasattr(equation, "CapacitanceMatrixFilename"):
            equation.addProperty(
                "App::PropertyFile",
                "CapacitanceMatrixFilename",
                "Electrostatic",
                (
                    "File where capacitance matrix is being saved\n"
                    "Only used if 'CalculateCapacitanceMatrix' is true"
                ),
            )
            equation.CapacitanceMatrixFilename = "cmatrix.dat"
        if not hasattr(equation, "ConstantWeights"):
            equation.addProperty(
                "App::PropertyBool",
                "ConstantWeights",
                "Electrostatic",
                "Use constant weighting for results",
            )
        if not hasattr(equation, "PotentialDifference"):
            equation.addProperty(
                "App::PropertyFloat",
                "PotentialDifference",
                "Electrostatic",
                (
                    "Potential difference in Volt for which capacitance is\n"
                    "calculated if 'CalculateCapacitanceMatrix' is false"
                ),
            )
            equation.PotentialDifference = 0.0

    def handleElectrostaticConstants(self):
        """Emit the vacuum permittivity constant converted to SI units."""
        permittivity = self.write.convert(
            self.write.constsdef["PermittivityOfVacuum"], "T^4*I^2/(L^3*M)"
        )
        permittivity = round(permittivity, 20)  # to get rid of numerical artifacts
        self.write.constant("Permittivity Of Vacuum", permittivity)

    def handleElectrostaticMaterial(self, bodies):
        """Write name/permittivity for each material on a body in *bodies*."""
        for obj in self.write.getMember("App::MaterialObject"):
            m = obj.Material
            # Without explicit references the material applies to all bodies.
            refs = obj.References[0][1] if obj.References else self.write.getAllBodies()
            for name in (n for n in refs if n in bodies):
                self.write.material(name, "Name", m["Name"])
                if "RelativePermittivity" in m:
                    self.write.material(
                        name, "Relative Permittivity", float(m["RelativePermittivity"])
                    )

    def handleElectrostaticBndConditions(self):
        """Write boundary conditions from the potential constraint objects."""
        for obj in self.write.getMember("Fem::ConstraintElectrostaticPotential"):
            if obj.References:
                for name in obj.References[0][1]:
                    # output the FreeCAD label as comment
                    if obj.Label:
                        self.write.boundary(name, "! FreeCAD Name", obj.Label)
                    if obj.PotentialEnabled:
                        if hasattr(obj, "Potential"):
                            # Potential was once a float and scaled not fitting SI units
                            if isinstance(obj.Potential, float):
                                savePotential = obj.Potential
                                obj.removeProperty("Potential")
                                obj.addProperty(
                                    "App::PropertyElectricPotential",
                                    "Potential",
                                    "Parameter",
                                    "Electric Potential",
                                )
                                # scale to match SI units
                                obj.Potential = savePotential * 1e6
                            potential = float(obj.Potential.getValueAs("V"))
                            self.write.boundary(name, "Potential", potential)
                    if obj.PotentialConstant:
                        self.write.boundary(name, "Potential Constant", True)
                    if obj.ElectricInfinity:
                        self.write.boundary(name, "Electric Infinity BC", True)
                    if obj.ElectricForcecalculation:
                        self.write.boundary(name, "Calculate Electric Force", True)
                    if obj.CapacitanceBodyEnabled:
                        if hasattr(obj, "CapacitanceBody"):
                            self.write.boundary(
                                name, "Capacitance Body", obj.CapacitanceBody
                            )
                self.write.handled(obj)
## @}
|
neubot | utils_random | #!/usr/bin/env python
#
# Copyright (c) 2011 Simone Basso <bassosimone@gmail.com>,
# NEXA Center for Internet & Society at Politecnico di Torino
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
""" Unit test for neubot/utils_random.py """
import sys
# Make the source tree importable when the test is run in place.
sys.path.insert(0, ".")
from neubot import utils
# Time the import of the module under test: building the random-block
# pool happens at import time and should not be noticeably slow.
BEFORE = utils.ticks()
from neubot.utils_random import RANDOMBLOCKS, RandomBody
ELAPSED = utils.ticks() - BEFORE
print("Time to import: %s" % (utils.time_formatter(ELAPSED)))
def main():
    """Unit test for neubot/utils_random.py"""
    # Blocks come from the pre-generated pool; two consecutive draws
    # should differ.
    assert len(RANDOMBLOCKS.get_block()) == RANDOMBLOCKS.blocksiz
    assert RANDOMBLOCKS.get_block() != RANDOMBLOCKS.get_block()
    # Reading in small chunks must eventually yield the whole body.
    filep, total = RandomBody(RANDOMBLOCKS.blocksiz + 789), 0
    while True:
        block = filep.read(128)
        if not block:
            break
        total += len(block)
    assert total == RANDOMBLOCKS.blocksiz + 789
    # An unsized read returns at most one block; the remainder follows.
    # NOTE(review): per these asserts, tell() reports the *remaining*
    # byte count for RandomBody — confirm against its implementation.
    filep = RandomBody(RANDOMBLOCKS.blocksiz + 789)
    assert len(filep.read()) == RANDOMBLOCKS.blocksiz
    assert filep.tell() == 789
    assert len(filep.read()) == 789
    filep.seek(7)
    # Throughput check: generate 1 GiB worth of blocks and time it.
    begin, total = utils.ticks(), 0
    while total < 1073741824:
        total += len(RANDOMBLOCKS.get_block())
    elapsed = utils.ticks() - begin
    print("Elapsed: %s" % utils.time_formatter(elapsed))
    # NOTE(review): a sub-resolution timer could make elapsed == 0 and
    # raise ZeroDivisionError here.
    print("Speed: %s" % utils.speed_formatter(total / elapsed))
if __name__ == "__main__":
    main()
|
cura | ApplicationMetadata | # Copyright (c) 2022 UltiMaker
# Cura is released under the terms of the LGPLv3 or higher.
# ---------
# General constants used in Cura
# ---------
# Build-time defaults, overridden below by values from the generated
# cura.CuraVersion module when it is present and non-empty.
DEFAULT_CURA_APP_NAME = "cura"
DEFAULT_CURA_DISPLAY_NAME = "UltiMaker Cura"
DEFAULT_CURA_VERSION = "dev"
DEFAULT_CURA_BUILD_TYPE = ""
DEFAULT_CURA_DEBUG_MODE = False
DEFAULT_CURA_LATEST_URL = "https://software.ultimaker.com/latest.json"
# Each release has a fixed SDK version coupled with it. It doesn't make sense to make it configurable because, for
# example Cura 3.2 with SDK version 6.1 will not work. So the SDK version is hard-coded here and left out of the
# CuraVersion.py.in template.
CuraSDKVersion = "8.4.0"
# Pull values from the build-generated cura.CuraVersion module, falling
# back to the defaults above when it is missing or a value is empty.
try:
    from cura.CuraVersion import CuraLatestURL
    if CuraLatestURL == "":
        CuraLatestURL = DEFAULT_CURA_LATEST_URL
except ImportError:
    CuraLatestURL = DEFAULT_CURA_LATEST_URL
try:
    from cura.CuraVersion import CuraAppName  # type: ignore
    if CuraAppName == "":
        CuraAppName = DEFAULT_CURA_APP_NAME
except ImportError:
    CuraAppName = DEFAULT_CURA_APP_NAME
try:
    from cura.CuraVersion import CuraVersion  # type: ignore
    if CuraVersion == "":
        CuraVersion = DEFAULT_CURA_VERSION
except ImportError:
    CuraVersion = DEFAULT_CURA_VERSION  # [CodeStyle: Reflecting imported value]
# CURA-6569
# This string indicates what type of version it is. For example, "enterprise". By default it's empty which indicates
# a default/normal Cura build.
try:
    from cura.CuraVersion import CuraBuildType  # type: ignore
except ImportError:
    CuraBuildType = DEFAULT_CURA_BUILD_TYPE
try:
    from cura.CuraVersion import CuraDebugMode  # type: ignore
except ImportError:
    CuraDebugMode = DEFAULT_CURA_DEBUG_MODE
# CURA-6569
# Various convenience flags indicating what kind of Cura build it is.
__ENTERPRISE_VERSION_TYPE = "enterprise"
IsEnterpriseVersion = CuraBuildType.lower() == __ENTERPRISE_VERSION_TYPE
# Any build type other than "" (normal) or "enterprise" counts as an
# alternate version.
IsAlternateVersion = CuraBuildType.lower() not in [
    DEFAULT_CURA_BUILD_TYPE,
    __ENTERPRISE_VERSION_TYPE,
]
# NOTE: IsAlternateVersion is to make it possible to have 'non-numbered' versions, at least as presented to the user.
# (Internally, it'll still have some sort of version-number, but the user is never meant to see it in the GUI).
# Warning: This will also change (some of) the icons/splash-screen to the 'work in progress' alternatives!
# Display name from the build metadata, with default fallback and an
# "Enterprise" suffix for enterprise builds.
try:
    from cura.CuraVersion import CuraAppDisplayName  # type: ignore
    if CuraAppDisplayName == "":
        CuraAppDisplayName = DEFAULT_CURA_DISPLAY_NAME
    if IsEnterpriseVersion:
        # Mark Enterprise builds in the user-visible name; the previous
        # self-assignment here was a no-op that dropped the suffix.
        CuraAppDisplayName = CuraAppDisplayName + " Enterprise"
except ImportError:
    CuraAppDisplayName = DEFAULT_CURA_DISPLAY_NAME
# Dependency metadata produced by the Conan build, if available.
# Missing or unreadable files leave DEPENDENCY_INFO empty (best effort).
DEPENDENCY_INFO = {}
try:
    from pathlib import Path

    conan_install_info = Path(__file__).parent.parent.joinpath(
        "conan_install_info.json"
    )
    if conan_install_info.exists():
        import json

        with open(conan_install_info, "r") as f:
            DEPENDENCY_INFO = json.loads(f.read())
except Exception:
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
    # no longer swallowed; any real error still falls back to {}.
    pass
|
comictaggerlib | comicvinetalker | """A python class to manage communication with Comic Vine's REST API"""
# Copyright 2012-2014 Anthony Beville
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import re
import sys
import time
import urllib
import urllib2
from bs4 import BeautifulSoup
try:
    from PyQt4.QtCore import QByteArray, QObject, QUrl, pyqtSignal
    from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest
except ImportError:
    # No Qt, so define a few dummy QObjects to help us compile
    class QObject():
        def __init__(self, *args):
            pass
    class pyqtSignal():
        def __init__(self, *args):
            pass
        # NOTE(review): no "self" parameter — ``a`` receives the instance
        # when this is called as a bound method; harmless since the body
        # is a stub.
        def emit(a, b, c):
            pass
import ctversion
import utils
from comicvinecacher import ComicVineCacher
from genericmetadata import GenericMetadata
from issuestring import IssueString
from settings import ComicTaggerSettings
try:
    import requests
except:
    # requests may be shipped next to the application rather than in
    # site-packages; retry with the settings base dir's parent on the path.
    try:
        lib_path = os.path.join(ComicTaggerSettings.baseDir(), '..')
        sys.path.append(lib_path)
        import requests
    except ImportError:
        # NOTE(review): despite saying "Exiting", this only prints (Python 2
        # print statement) — execution continues and later requests.* calls
        # will raise NameError.
        print "Unable to use requests module. This is a CRITICAL error and ComicTagger cannot proceed. Exiting."
class CVTypeID:
    """Comic Vine resource-type ID prefixes used in API object IDs/URLs."""
    Volume = "4050"
    Issue = "4000"
class ComicVineTalkerException(Exception):
    """Error raised for failed Comic Vine queries.

    ``code`` is either one of the local sentinels (Unknown, Network) or a
    status code returned by the Comic Vine API (e.g. InvalidKey, RateLimit).
    """

    Unknown = -1
    Network = -2
    InvalidKey = 100
    RateLimit = 107

    def __init__(self, code=-1, desc=""):
        self.desc = desc
        self.code = code

    def __str__(self):
        # Local sentinels carry their own message; API errors get a
        # formatted prefix with the numeric code.
        local_codes = (
            ComicVineTalkerException.Unknown,
            ComicVineTalkerException.Network,
        )
        if self.code in local_codes:
            return self.desc
        return "CV error #{0}: [{1}]. \n".format(self.code, self.desc)
class ComicVineTalker(QObject):
logo_url = "http://static.comicvine.com/bundles/comicvinesite/images/logo.png"
api_key = ""
@staticmethod
def getRateLimitMessage():
if ComicVineTalker.api_key == "":
return "Comic Vine rate limit exceeded. You should configue your own Comic Vine API key."
else:
return "Comic Vine rate limit exceeded. Please wait a bit."
    def __init__(self):
        """Set up the API endpoint, API key and request headers."""
        QObject.__init__(self)
        self.api_base_url = "http://comicvine.gamespot.com/api"
        # When True, getCVContent() sleeps and retries on rate-limit errors
        # instead of raising immediately.
        self.wait_for_rate_limit = False
        # key that is registered to comictagger
        default_api_key = '27431e6787042105bd3e47e169a624521f89f3a4'
        if ComicVineTalker.api_key == "":
            self.api_key = default_api_key
        else:
            self.api_key = ComicVineTalker.api_key
        # Identify this client (and fork) to the Comic Vine API.
        self.cv_headers = {'User-Agent': 'ComicTagger ' + str(ctversion.version) + ' [' + ctversion.fork + ' / ' + ctversion.fork_tag + ']'}
        self.log_func = None
    def setLogFunc(self, log_func):
        """Install a callable that receives log text instead of stderr."""
        self.log_func = log_func
    def writeLog(self, text):
        """Send `text` to the installed log function, or stderr if none set."""
        if self.log_func is None:
            # sys.stdout.write(text.encode(errors='replace'))
            # sys.stdout.flush()
            print >> sys.stderr, text
        else:
            self.log_func(text)
def parseDateStr(self, date_str):
day = None
month = None
year = None
if date_str is not None:
parts = date_str.split('-')
year = parts[0]
if len(parts) > 1:
month = parts[1]
if len(parts) > 2:
day = parts[2]
return day, month, year
    def testKey(self, key):
        """Return True if `key` is accepted by the Comic Vine API."""
        test_url = self.api_base_url + "/issue/1/?api_key=" + \
            key + "&format=json&field_list=name"
        r = requests.get(test_url, headers=self.cv_headers)
        cv_response = r.json()

        # Bogus request, but if the key is wrong, you get error 100: "Invalid
        # API Key"
        return cv_response['status_code'] != 100
"""
Get the contect from the CV server. If we're in "wait mode" and status code is a rate limit error
sleep for a bit and retry.
"""
def getCVContent(self, url):
total_time_waited = 0
limit_wait_time = 1
counter = 0
wait_times = [1, 2, 3, 4]
while True:
cv_response = self.getUrlContent(url)
if self.wait_for_rate_limit and cv_response[
'status_code'] == ComicVineTalkerException.RateLimit:
self.writeLog(
"Rate limit encountered. Waiting for {0} minutes\n".format(limit_wait_time))
time.sleep(limit_wait_time * 60)
total_time_waited += limit_wait_time
limit_wait_time = wait_times[counter]
if counter < 3:
counter += 1
# don't wait much more than 20 minutes
if total_time_waited < 20:
continue
if cv_response['status_code'] != 1:
self.writeLog(
"Comic Vine query failed with error #{0}: [{1}]. \n".format(
cv_response['status_code'],
cv_response['error']))
raise ComicVineTalkerException(
cv_response['status_code'], cv_response['error'])
else:
# it's all good
break
return cv_response
def getUrlContent(self, url):
# connect to server:
# if there is a 500 error, try a few more times before giving up
# any other error, just bail
# print "ATB---", url
for tries in range(3):
try:
r = requests.get(url, headers=self.cv_headers)
return r.json()
except Exception as e:
ecode = type(e).__name__
if ecode == 500:
self.writeLog("Try #{0}: ".format(tries + 1))
time.sleep(1)
self.writeLog(str(e) + "\n")
if ecode != 500:
break
except Exception as e:
self.writeLog(str(e) + "\n")
raise ComicVineTalkerException(
ComicVineTalkerException.Network, "Network Error!")
raise ComicVineTalkerException(
ComicVineTalkerException.Unknown, "Error on Comic Vine server")
    def searchForSeries(self, series_name, callback=None, refresh_cache=False):
        """Search Comic Vine for volumes matching `series_name`.

        Serves results from the local cache unless refresh_cache is True,
        and writes fresh results back to it. `callback`, when given, is
        invoked with (fetched_so_far, total) after each page instead of
        logging progress.
        """
        # remove cruft from the search string
        series_name = utils.removearticles(series_name).lower().strip()

        # before we search online, look in our cache, since we might have
        # done this same search recently
        cvc = ComicVineCacher()
        if not refresh_cache:
            cached_search_results = cvc.get_search_results(series_name)

            if len(cached_search_results) > 0:
                return cached_search_results

        original_series_name = series_name

        # We need to make the series name into an "AND"ed query list
        query_word_list = series_name.split()
        and_list = ['AND'] * (len(query_word_list) - 1)
        and_list.append('')
        # zipper up the two lists
        query_list = zip(query_word_list, and_list)
        # flatten the list
        query_list = [item for sublist in query_list for item in sublist]
        # convert back to a string
        query_string = " ".join(query_list).strip()
        # print "Query string = ", query_string

        query_string = urllib.quote_plus(query_string.encode("utf-8"))

        search_url = self.api_base_url + "/search/?api_key=" + self.api_key + "&format=json&resources=volume&query=" + \
            query_string + \
            "&field_list=name,id,start_year,publisher,image,description,count_of_issues"
        cv_response = self.getCVContent(search_url + "&page=1")

        search_results = list()

        # see http://api.comicvine.com/documentation/#handling_responses
        limit = cv_response['limit']
        current_result_count = cv_response['number_of_page_results']
        total_result_count = cv_response['number_of_total_results']

        if callback is None:
            self.writeLog(
                "Found {0} of {1} results\n".format(
                    cv_response['number_of_page_results'],
                    cv_response['number_of_total_results']))
        search_results.extend(cv_response['results'])
        page = 1

        if callback is not None:
            callback(current_result_count, total_result_count)

        # see if we need to keep asking for more pages...
        while (current_result_count < total_result_count):
            if callback is None:
                self.writeLog(
                    "getting another page of results {0} of {1}...\n".format(
                        current_result_count,
                        total_result_count))
            page += 1

            cv_response = self.getCVContent(search_url + "&page=" + str(page))

            search_results.extend(cv_response['results'])
            current_result_count += cv_response['number_of_page_results']

            if callback is not None:
                callback(current_result_count, total_result_count)

        # for record in search_results:
            #print(u"{0}: {1} ({2})".format(record['id'], record['name'] , record['start_year']))
            # print(record)
            #record['count_of_issues'] = record['count_of_isssues']
        #print(u"{0}: {1} ({2})".format(search_results['results'][0]['id'], search_results['results'][0]['name'] , search_results['results'][0]['start_year']))

        # cache these search results
        cvc.add_search_results(original_series_name, search_results)

        return search_results
def fetchVolumeData(self, series_id):
# before we search online, look in our cache, since we might already
# have this info
cvc = ComicVineCacher()
cached_volume_result = cvc.get_volume_info(series_id)
if cached_volume_result is not None:
return cached_volume_result
volume_url = self.api_base_url + "/volume/" + CVTypeID.Volume + "-" + \
str(series_id) + "/?api_key=" + self.api_key + \
"&field_list=name,id,start_year,publisher,count_of_issues&format=json"
cv_response = self.getCVContent(volume_url)
volume_results = cv_response['results']
cvc.add_volume_info(volume_results)
return volume_results
    def fetchIssuesByVolume(self, series_id):
        """Return the complete issue list for a volume, paging as needed.

        Results are cached; placeholder images are filled in via repairUrls().
        """
        # before we search online, look in our cache, since we might already
        # have this info
        cvc = ComicVineCacher()
        cached_volume_issues_result = cvc.get_volume_issues_info(series_id)

        if cached_volume_issues_result is not None:
            return cached_volume_issues_result

        #---------------------------------
        issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + "&filter=volume:" + \
            str(series_id) + \
            "&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json"
        cv_response = self.getCVContent(issues_url)

        #------------------------------------
        limit = cv_response['limit']
        current_result_count = cv_response['number_of_page_results']
        total_result_count = cv_response['number_of_total_results']
        # print "ATB total_result_count", total_result_count

        #print("ATB Found {0} of {1} results".format(cv_response['number_of_page_results'], cv_response['number_of_total_results']))
        volume_issues_result = cv_response['results']
        page = 1
        offset = 0

        # see if we need to keep asking for more pages...
        while (current_result_count < total_result_count):
            #print("ATB getting another page of issue results {0} of {1}...".format(current_result_count, total_result_count))
            page += 1
            offset += cv_response['number_of_page_results']

            # print issues_url+ "&offset="+str(offset)
            cv_response = self.getCVContent(
                issues_url + "&offset=" + str(offset))

            volume_issues_result.extend(cv_response['results'])
            current_result_count += cv_response['number_of_page_results']

        self.repairUrls(volume_issues_result)

        cvc.add_volume_issues_info(series_id, volume_issues_result)

        return volume_issues_result
def fetchIssuesByVolumeIssueNumAndYear(
self, volume_id_list, issue_number, year):
volume_filter = "volume:"
for vid in volume_id_list:
volume_filter += str(vid) + "|"
year_filter = ""
if year is not None and str(year).isdigit():
year_filter = ",cover_date:{0}-1-1|{1}-1-1".format(
year, int(year) + 1)
issue_number = urllib.quote_plus(unicode(issue_number).encode("utf-8"))
filter = "&filter=" + volume_filter + \
year_filter + ",issue_number:" + issue_number
issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + filter + \
"&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json"
cv_response = self.getCVContent(issues_url)
#------------------------------------
limit = cv_response['limit']
current_result_count = cv_response['number_of_page_results']
total_result_count = cv_response['number_of_total_results']
# print "ATB total_result_count", total_result_count
#print("ATB Found {0} of {1} results\n".format(cv_response['number_of_page_results'], cv_response['number_of_total_results']))
filtered_issues_result = cv_response['results']
page = 1
offset = 0
# see if we need to keep asking for more pages...
while (current_result_count < total_result_count):
#print("ATB getting another page of issue results {0} of {1}...\n".format(current_result_count, total_result_count))
page += 1
offset += cv_response['number_of_page_results']
# print issues_url+ "&offset="+str(offset)
cv_response = self.getCVContent(
issues_url + "&offset=" + str(offset))
filtered_issues_result.extend(cv_response['results'])
current_result_count += cv_response['number_of_page_results']
self.repairUrls(filtered_issues_result)
return filtered_issues_result
def fetchIssueData(self, series_id, issue_number, settings):
volume_results = self.fetchVolumeData(series_id)
issues_list_results = self.fetchIssuesByVolume(series_id)
found = False
for record in issues_list_results:
if IssueString(issue_number).asString() is None:
issue_number = 1
if IssueString(record['issue_number']).asString().lower() == IssueString(
issue_number).asString().lower():
found = True
break
if (found):
issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
str(record['id']) + "/?api_key=" + \
self.api_key + "&format=json"
cv_response = self.getCVContent(issue_url)
issue_results = cv_response['results']
else:
return None
# Now, map the Comic Vine data to generic metadata
return self.mapCVDataToMetadata(
volume_results, issue_results, settings)
def fetchIssueDataByIssueID(self, issue_id, settings):
issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
str(issue_id) + "/?api_key=" + self.api_key + "&format=json"
cv_response = self.getCVContent(issue_url)
issue_results = cv_response['results']
volume_results = self.fetchVolumeData(issue_results['volume']['id'])
# Now, map the Comic Vine data to generic metadata
md = self.mapCVDataToMetadata(volume_results, issue_results, settings)
md.isEmpty = False
return md
    def mapCVDataToMetadata(self, volume_results, issue_results, settings):
        """Convert raw CV volume/issue result dicts into GenericMetadata.

        `settings` supplies remove_html_tables and use_series_start_as_volume.
        """
        # Now, map the Comic Vine data to generic metadata
        metadata = GenericMetadata()

        metadata.series = issue_results['volume']['name']

        num_s = IssueString(issue_results['issue_number']).asString()
        metadata.issue = num_s
        metadata.title = issue_results['name']

        metadata.publisher = volume_results['publisher']['name']
        metadata.day, metadata.month, metadata.year = self.parseDateStr(
            issue_results['cover_date'])

        #metadata.issueCount = volume_results['count_of_issues']
        # Description arrives as HTML; flatten it to text.
        metadata.comments = self.cleanup_html(
            issue_results['description'], settings.remove_html_tables)
        if settings.use_series_start_as_volume:
            metadata.volume = volume_results['start_year']

        metadata.notes = "Tagged with the {0} fork of ComicTagger {1} using info from Comic Vine on {2}. [Issue ID {3}]".format(
            ctversion.fork,
            ctversion.version,
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            issue_results['id'])
        #metadata.notes += issue_results['site_detail_url']

        metadata.webLink = issue_results['site_detail_url']

        person_credits = issue_results['person_credits']

        # A person's 'role' field may list several comma-separated roles.
        for person in person_credits:
            if 'role' in person:
                roles = person['role'].split(',')
                for role in roles:
                    # can we determine 'primary' from CV??
                    metadata.addCredit(
                        person['name'], role.title().strip(), False)

        character_credits = issue_results['character_credits']

        character_list = list()
        for character in character_credits:
            character_list.append(character['name'])
        metadata.characters = utils.listToString(character_list)

        team_credits = issue_results['team_credits']

        team_list = list()
        for team in team_credits:
            team_list.append(team['name'])
        metadata.teams = utils.listToString(team_list)

        location_credits = issue_results['location_credits']

        location_list = list()
        for location in location_credits:
            location_list.append(location['name'])
        metadata.locations = utils.listToString(location_list)

        story_arc_credits = issue_results['story_arc_credits']

        arc_list = []
        for arc in story_arc_credits:
            arc_list.append(arc['name'])
        if len(arc_list) > 0:
            metadata.storyArc = utils.listToString(arc_list)

        return metadata
def cleanup_html(self, string, remove_html_tables):
"""
converter = html2text.HTML2Text()
#converter.emphasis_mark = '*'
#converter.ignore_links = True
converter.body_width = 0
print(html2text.html2text(string))
return string
#return converter.handle(string)
"""
if string is None:
return ""
# find any tables
soup = BeautifulSoup(string, "html.parser")
tables = soup.findAll('table')
# remove all newlines first
string = string.replace("\n", "")
# put in our own
string = string.replace("<br>", "\n")
string = string.replace("</p>", "\n\n")
string = string.replace("<h4>", "*")
string = string.replace("</h4>", "*\n")
# remove the tables
p = re.compile(r'<table[^<]*?>.*?<\/table>')
if remove_html_tables:
string = p.sub('', string)
string = string.replace("*List of covers and their creators:*", "")
else:
string = p.sub('{}', string)
# now strip all other tags
p = re.compile(r'<[^<]*?>')
newstring = p.sub('', string)
newstring = newstring.replace(' ', ' ')
newstring = newstring.replace('&', '&')
newstring = newstring.strip()
if not remove_html_tables:
# now rebuild the tables into text from BSoup
try:
table_strings = []
for table in tables:
rows = []
hdrs = []
col_widths = []
for hdr in table.findAll('th'):
item = hdr.string.strip()
hdrs.append(item)
col_widths.append(len(item))
rows.append(hdrs)
for row in table.findAll('tr'):
cols = []
col = row.findAll('td')
i = 0
for c in col:
item = c.string.strip()
cols.append(item)
if len(item) > col_widths[i]:
col_widths[i] = len(item)
i += 1
if len(cols) != 0:
rows.append(cols)
# now we have the data, make it into text
fmtstr = ""
for w in col_widths:
fmtstr += " {{:{}}}|".format(w + 1)
width = sum(col_widths) + len(col_widths) * 2
print "width=", width
table_text = ""
counter = 0
for row in rows:
table_text += fmtstr.format(*row) + "\n"
if counter == 0 and len(hdrs) != 0:
table_text += "-" * width + "\n"
counter += 1
table_strings.append(table_text)
newstring = newstring.format(*table_strings)
except:
# we caught an error rebuilding the table.
# just bail and remove the formatting
print("table parse error")
newstring.replace("{}", "")
return newstring
def fetchIssueDate(self, issue_id):
details = self.fetchIssueSelectDetails(issue_id)
day, month, year = self.parseDateStr(details['cover_date'])
return month, year
    def fetchIssueCoverURLs(self, issue_id):
        """Return (full_image_url, thumbnail_url) for an issue."""
        details = self.fetchIssueSelectDetails(issue_id)
        return details['image_url'], details['thumb_image_url']
    def fetchIssuePageURL(self, issue_id):
        """Return the Comic Vine web page URL for an issue."""
        details = self.fetchIssueSelectDetails(issue_id)
        return details['site_detail_url']
def fetchIssueSelectDetails(self, issue_id):
#cached_image_url,cached_thumb_url,cached_month,cached_year = self.fetchCachedIssueSelectDetails(issue_id)
cached_details = self.fetchCachedIssueSelectDetails(issue_id)
if cached_details['image_url'] is not None:
return cached_details
issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
str(issue_id) + "/?api_key=" + self.api_key + \
"&format=json&field_list=image,cover_date,site_detail_url"
details = dict()
details['image_url'] = None
details['thumb_image_url'] = None
details['cover_date'] = None
details['site_detail_url'] = None
cv_response = self.getCVContent(issue_url)
details['image_url'] = cv_response['results']['image']['super_url']
details['thumb_image_url'] = cv_response[
'results']['image']['thumb_url']
details['cover_date'] = cv_response['results']['cover_date']
details['site_detail_url'] = cv_response['results']['site_detail_url']
if details['image_url'] is not None:
self.cacheIssueSelectDetails(issue_id,
details['image_url'],
details['thumb_image_url'],
details['cover_date'],
details['site_detail_url'])
# print(details['site_detail_url'])
return details
    def fetchCachedIssueSelectDetails(self, issue_id):
        """Return the cached select-details dict for an issue (cache lookup only)."""
        # before we search online, look in our cache, since we might already
        # have this info
        cvc = ComicVineCacher()
        return cvc.get_issue_select_details(issue_id)
    def cacheIssueSelectDetails(
            self, issue_id, image_url, thumb_url, cover_date, page_url):
        """Write an issue's select details into the local cache."""
        cvc = ComicVineCacher()
        cvc.add_issue_select_details(
            issue_id, image_url, thumb_url, cover_date, page_url)
def fetchAlternateCoverURLs(self, issue_id, issue_page_url):
url_list = self.fetchCachedAlternateCoverURLs(issue_id)
if url_list is not None:
return url_list
# scrape the CV issue page URL to get the alternate cover URLs
resp = urllib2.urlopen(issue_page_url)
content = resp.read()
alt_cover_url_list = self.parseOutAltCoverUrls(content)
# cache this alt cover URL list
self.cacheAlternateCoverURLs(issue_id, alt_cover_url_list)
return alt_cover_url_list
    def parseOutAltCoverUrls(self, page_html):
        """Scrape alternate-cover image URLs out of a CV issue page.

        Skips the first matching cover (assumed primary) and returns the rest.
        """
        soup = BeautifulSoup(page_html, "html.parser")

        alt_cover_url_list = []

        # Using knowledge of the layout of the Comic Vine issue page here:
        # look for the divs that are in the classes 'content-pod' and
        # 'alt-cover'
        div_list = soup.find_all('div')
        covers_found = 0
        for d in div_list:
            # NOTE(review): in modern bs4, `'class' in d` tests the tag's
            # *children*, not its attributes — `d.has_attr('class')` is likely
            # what's intended. Verify against the bs4 version in use.
            if 'class' in d:
                c = d['class']
                if 'imgboxart' in c and 'issue-cover' in c:
                    covers_found += 1
                    if covers_found != 1:
                        alt_cover_url_list.append(d.img['src'])

        return alt_cover_url_list
def fetchCachedAlternateCoverURLs(self, issue_id):
# before we search online, look in our cache, since we might already
# have this info
cvc = ComicVineCacher()
url_list = cvc.get_alt_covers(issue_id)
if url_list is not None:
return url_list
else:
return None
    def cacheAlternateCoverURLs(self, issue_id, url_list):
        """Write an issue's alternate-cover URL list into the local cache."""
        cvc = ComicVineCacher()
        cvc.add_alt_covers(issue_id, url_list)
    #-------------------------------------------------------------------------
    # Emitted with (image_url, thumb_url, issue_id) when an async fetch ends.
    urlFetchComplete = pyqtSignal(str, str, int)

    def asyncFetchIssueCoverURLs(self, issue_id):
        """Fetch cover URLs via Qt networking.

        The result is delivered on the urlFetchComplete signal — immediately
        when the cache already has it.
        """
        self.issue_id = issue_id
        details = self.fetchCachedIssueSelectDetails(issue_id)
        if details['image_url'] is not None:
            # Cache hit: answer synchronously through the same signal.
            self.urlFetchComplete.emit(
                details['image_url'],
                details['thumb_image_url'],
                self.issue_id)
            return

        issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
            str(issue_id) + "/?api_key=" + self.api_key + \
            "&format=json&field_list=image,cover_date,site_detail_url"
        # Keep a reference on self so the manager survives until the reply.
        self.nam = QNetworkAccessManager()
        self.nam.finished.connect(self.asyncFetchIssueCoverURLComplete)
        self.nam.get(QNetworkRequest(QUrl(issue_url)))
    def asyncFetchIssueCoverURLComplete(self, reply):
        """Qt slot: decode the cover-URL reply, cache it, and emit the signal."""
        # read in the response
        data = reply.readAll()

        try:
            cv_response = json.loads(str(data))
        except:
            print >> sys.stderr, "Comic Vine query failed to get JSON data"
            print >> sys.stderr, str(data)
            return

        if cv_response['status_code'] != 1:
            print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format(
                cv_response['error'])
            return

        image_url = cv_response['results']['image']['super_url']
        thumb_url = cv_response['results']['image']['thumb_url']
        cover_date = cv_response['results']['cover_date']
        page_url = cv_response['results']['site_detail_url']

        self.cacheIssueSelectDetails(
            self.issue_id, image_url, thumb_url, cover_date, page_url)

        self.urlFetchComplete.emit(image_url, thumb_url, self.issue_id)
    # Emitted with (url_list, issue_id) when an async alt-cover fetch ends.
    altUrlListFetchComplete = pyqtSignal(list, int)

    def asyncFetchAlternateCoverURLs(self, issue_id, issue_page_url):
        """Scrape alternate covers via Qt networking.

        Results arrive on the altUrlListFetchComplete signal — immediately
        when the cache already has them.
        """
        # This async version requires the issue page url to be provided!
        self.issue_id = issue_id
        url_list = self.fetchCachedAlternateCoverURLs(issue_id)
        if url_list is not None:
            self.altUrlListFetchComplete.emit(url_list, int(self.issue_id))
            return

        # Keep a reference on self so the manager survives until the reply.
        self.nam = QNetworkAccessManager()
        self.nam.finished.connect(self.asyncFetchAlternateCoverURLsComplete)
        self.nam.get(QNetworkRequest(QUrl(str(issue_page_url))))
    def asyncFetchAlternateCoverURLsComplete(self, reply):
        """Qt slot: parse the issue page HTML, cache URLs, emit the signal."""
        # read in the response
        html = str(reply.readAll())
        alt_cover_url_list = self.parseOutAltCoverUrls(html)

        # cache this alt cover URL list
        self.cacheAlternateCoverURLs(self.issue_id, alt_cover_url_list)

        self.altUrlListFetchComplete.emit(
            alt_cover_url_list, int(self.issue_id))
def repairUrls(self, issue_list):
# make sure there are URLs for the image fields
for issue in issue_list:
if issue['image'] is None:
issue['image'] = dict()
issue['image']['super_url'] = ComicVineTalker.logo_url
issue['image']['thumb_url'] = ComicVineTalker.logo_url
|
ui | rich_progress | from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
from httpie.context import Environment
if TYPE_CHECKING:
from rich.console import Console
@dataclass
class BaseDisplay:
    """Common interface for transfer-progress displays.

    Subclasses override start/update/stop; the base versions are no-ops.
    """
    env: Environment

    def start(self, *, total: Optional[float], at: float, description: str) -> None:
        ...

    def update(self, steps: float) -> None:
        ...

    def stop(self, time_spent: float) -> None:
        ...

    @property
    def console(self) -> "Console":
        """Returns the default console to be used with displays (stderr)."""
        return self.env.rich_error_console

    def _print_summary(self, is_finished: bool, observed_steps: int, time_spent: float):
        # Render a one-line "Done/Interrupted. <size> in <time> (<speed>/s)".
        from rich import filesize

        if is_finished:
            verb = "Done"
        else:
            verb = "Interrupted"

        total_size = filesize.decimal(observed_steps)
        # NOTE(review): divides by time_spent — callers guard with `if time_spent:`.
        avg_speed = filesize.decimal(observed_steps / time_spent)

        minutes, seconds = divmod(time_spent, 60)
        hours, minutes = divmod(int(minutes), 60)
        if hours:
            total_time = f"{hours:d}:{minutes:02d}:{seconds:0.5f}"
        else:
            total_time = f"{minutes:02d}:{seconds:0.5f}"

        self.console.print(
            f"[progress.description]{verb}. {total_size} in {total_time} ({avg_speed}/s)"
        )
class DummyDisplay(BaseDisplay):
    """
    A dummy display object to be used when the progress bars,
    spinners etc. are disabled globally (or during tests).

    Inherits the no-op start/update/stop from BaseDisplay.
    """
class StatusDisplay(BaseDisplay):
    """Indeterminate progress: a spinner plus a running byte count."""

    def start(self, *, total: Optional[float], at: float, description: str) -> None:
        self.observed = at
        self.description = f"[progress.description]{description}[/progress.description]"

        self.status = self.console.status(self.description, spinner="line")
        self.status.start()

    def update(self, steps: float) -> None:
        from rich import filesize

        self.observed += steps
        # Shown as e.g. "12.3/? MB" — the total is unknown for this display.
        observed_amount, observed_unit = filesize.decimal(self.observed).split()
        self.status.update(
            status=f"{self.description} [progress.download]{observed_amount}/? {observed_unit}[/progress.download]"
        )

    def stop(self, time_spent: float) -> None:
        self.status.stop()
        self.console.print(self.description)
        if time_spent:
            self._print_summary(
                is_finished=True,
                observed_steps=self.observed,
                time_spent=time_spent,
            )
class ProgressDisplay(BaseDisplay):
    """Determinate progress: bar, percentage, byte counts, ETA and speed."""

    def start(self, *, total: Optional[float], at: float, description: str) -> None:
        from rich.progress import (
            BarColumn,
            DownloadColumn,
            Progress,
            TimeRemainingColumn,
            TransferSpeedColumn,
        )

        # This display is only usable when the total size is known.
        assert total is not None
        self.console.print(f"[progress.description]{description}")
        self.progress_bar = Progress(
            "[",
            BarColumn(),
            "]",
            "[progress.percentage]{task.percentage:>3.0f}%",
            "(",
            DownloadColumn(),
            ")",
            TimeRemainingColumn(),
            TransferSpeedColumn(),
            console=self.console,
            transient=True,
        )
        self.progress_bar.start()
        self.transfer_task = self.progress_bar.add_task(
            description, completed=at, total=total
        )

    def update(self, steps: float) -> None:
        self.progress_bar.advance(self.transfer_task, steps)

    def stop(self, time_spent: Optional[float]) -> None:
        # NOTE(review): signature differs from BaseDisplay.stop (Optional
        # here, plain float on the base) — harmless, but worth unifying.
        self.progress_bar.stop()

        if time_spent:
            [task] = self.progress_bar.tasks
            self._print_summary(
                is_finished=task.finished,
                observed_steps=task.completed,
                time_spent=time_spent,
            )
|
protos | square_box_coder_pb2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/square_box_coder.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pb2
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE(review): machine-generated protobuf descriptors (see the file header:
# "Generated by the protocol buffer compiler. DO NOT EDIT!"). Regenerate with
# protoc from square_box_coder.proto rather than hand-editing.
DESCRIPTOR = _descriptor.FileDescriptor(
    name="object_detection/protos/square_box_coder.proto",
    package="object_detection.protos",
    syntax="proto2",
    serialized_pb=_b(
        '\n.object_detection/protos/square_box_coder.proto\x12\x17object_detection.protos"S\n\x0eSquareBoxCoder\x12\x13\n\x07y_scale\x18\x01 \x01(\x02:\x02\x31\x30\x12\x13\n\x07x_scale\x18\x02 \x01(\x02:\x02\x31\x30\x12\x17\n\x0clength_scale\x18\x03 \x01(\x02:\x01\x35'
    ),
)
# Descriptor for the SquareBoxCoder message (y_scale/x_scale/length_scale,
# all optional floats with defaults 10, 10, 5).
_SQUAREBOXCODER = _descriptor.Descriptor(
    name="SquareBoxCoder",
    full_name="object_detection.protos.SquareBoxCoder",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="y_scale",
            full_name="object_detection.protos.SquareBoxCoder.y_scale",
            index=0,
            number=1,
            type=2,
            cpp_type=6,
            label=1,
            has_default_value=True,
            default_value=float(10),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="x_scale",
            full_name="object_detection.protos.SquareBoxCoder.x_scale",
            index=1,
            number=2,
            type=2,
            cpp_type=6,
            label=1,
            has_default_value=True,
            default_value=float(10),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="length_scale",
            full_name="object_detection.protos.SquareBoxCoder.length_scale",
            index=2,
            number=3,
            type=2,
            cpp_type=6,
            label=1,
            has_default_value=True,
            default_value=float(5),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=75,
    serialized_end=158,
)
# Register the descriptor and synthesize the concrete message class.
DESCRIPTOR.message_types_by_name["SquareBoxCoder"] = _SQUAREBOXCODER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SquareBoxCoder = _reflection.GeneratedProtocolMessageType(
    "SquareBoxCoder",
    (_message.Message,),
    dict(
        DESCRIPTOR=_SQUAREBOXCODER,
        __module__="object_detection.protos.square_box_coder_pb2",
        # @@protoc_insertion_point(class_scope:object_detection.protos.SquareBoxCoder)
    ),
)
_sym_db.RegisterMessage(SquareBoxCoder)
# @@protoc_insertion_point(module_scope)
|
prompt | prompt | from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone
class Prompt(models.Model):
    """A single step (tooltip, modal, ...) shown as part of a PromptSequence."""

    # Position of this prompt within its sequence (see PromptSequence.prompts).
    step: models.IntegerField = models.IntegerField()
    type: models.CharField = models.CharField(max_length=200)  # tooltip, modal, etc
    title: models.CharField = models.CharField(max_length=200)
    text: models.CharField = models.CharField(max_length=1000)
    placement: models.CharField = models.CharField(
        max_length=200, default="top"
    )  # top, bottom, left, right, top-start, bottom-start, etc.
    # Button definitions rendered with the prompt (arbitrary JSON).
    buttons: models.JSONField = models.JSONField()
    reference: models.CharField = models.CharField(
        max_length=200, default=None, null=True
    )  # should match a `data-attr` reference to attach to a component
    icon: models.CharField = models.CharField(
        max_length=200
    )  # sync with iconMap in frontend
class PromptSequence(models.Model):
    """An ordered collection of Prompts, shown on URLs matching path_match."""

    class Meta:
        constraints = [
            models.UniqueConstraint(fields=["key"], name="unique_prompt_sequence"),
        ]

    # Stable identifier for the sequence (unique via the constraint above).
    key: models.CharField = models.CharField(max_length=200)
    type: models.CharField = models.CharField(
        max_length=200
    )  # we use this to toggle different behaviors in the frontend
    path_match: ArrayField = ArrayField(
        models.CharField(max_length=200)
    )  # wildcard path to match the current URL
    path_exclude: ArrayField = ArrayField(
        models.CharField(max_length=200)
    )  # wildcard path to exclude the current URL
    status: models.CharField = models.CharField(max_length=200)  # active, inactive, etc
    # Sequences the user must have finished before this one becomes eligible.
    must_have_completed: models.ManyToManyField = models.ManyToManyField(
        "self", blank=True, symmetrical=False
    )
    requires_opt_in: models.BooleanField = models.BooleanField(default=False)
    prompts: models.ManyToManyField = models.ManyToManyField(Prompt)
    autorun: models.BooleanField = models.BooleanField(
        default=True
    )  # whether to run this sequence automatically for all users
class UserPromptState(models.Model):
    """Per-user progress through a PromptSequence (one row per user/sequence)."""

    class Meta:
        constraints = [
            models.UniqueConstraint(
                fields=["user", "sequence"], name="unique_user_prompt_state"
            )
        ]

    user: models.ForeignKey = models.ForeignKey("User", on_delete=models.CASCADE)
    sequence: models.ForeignKey = models.ForeignKey(
        PromptSequence, on_delete=models.CASCADE
    )
    last_updated_at: models.DateTimeField = models.DateTimeField(default=timezone.now)
    # Last step reached; None means the sequence has not been started yet.
    step: models.IntegerField = models.IntegerField(default=None, null=True)
    completed: models.BooleanField = models.BooleanField(default=False)
    dismissed: models.BooleanField = models.BooleanField(default=False)
|
core | display_context_test | __copyright__ = "Copyright (C) 2014-2016 Martin Blais"
__license__ = "GNU GPLv2"
import unittest
from decimal import Decimal
from beancount.core import display_context
from beancount.core.display_context import Align, Precision
def decimalize(number_list):
    """Convert test inputs to Decimals.

    Plain strings become Decimal; tuples (number, currency, ...) have only
    their first element converted, the rest passed through unchanged.
    """
    return [
        Decimal(element)
        if isinstance(element, str)
        else (Decimal(element[0]),) + element[1:]
        for element in number_list
    ]
class DisplayContextBaseTest(unittest.TestCase):
    """Shared harness: feed sample numbers into a DisplayContext, then check
    how each renders under the subclass's alignment."""

    # Subclasses set this to an Align member (NATURAL / RIGHT / DOT).
    alignment = None

    def assertFormatNumbers(self, number_strings, expected_fmt_numbers, **build_args):
        dcontext = display_context.DisplayContext()
        numbers = decimalize(number_strings)
        # noinit=True skips the update() pass, exercising default behavior.
        if not build_args.pop("noinit", None):
            for number in numbers:
                if isinstance(number, Decimal):
                    dcontext.update(number)
                else:
                    number, currency = number
                    dcontext.update(number, currency)
        dformat = dcontext.build(alignment=self.alignment, **build_args)

        # Format each sample (with its currency, when one was provided).
        fmt_numbers = []
        for number in numbers:
            if isinstance(number, Decimal):
                fmt_numbers.append(dformat.format(number))
            else:
                number, currency = number
                fmt_numbers.append(dformat.format(number, currency))

        self.assertEqual(expected_fmt_numbers, fmt_numbers)
class TestDisplayContext(DisplayContextBaseTest):
    def test_dump(self):
        """Smoke-test the string rendering of a populated DisplayContext."""
        dcontext = display_context.DisplayContext()
        dcontext.update(Decimal("1.234"))
        dcontext.update(Decimal("1.23"), "USD")
        dcontext.update(Decimal("7"), "HOOL")
        self.assertRegex(str(dcontext), "sign=")
class TestDisplayContextNatural(DisplayContextBaseTest):
    """Formatting under NATURAL alignment."""

    alignment = Align.NATURAL

    def test_natural_uninitialized(self):
        """With no numbers observed, values render unchanged."""
        self.assertFormatNumbers(
            ["1.2345", "764", "-7409.01", "0.00000125"],
            ["1.2345", "764", "-7409.01", "0.00000125"],
            noinit=True,
        )

    def test_natural_no_clear_mode(self):
        """Without a dominant precision, outputs pad to the widest seen (8)."""
        self.assertFormatNumbers(
            ["1.2345", "764", "-7409.01", "0.00000125"],
            ["1.23450000", "764.00000000", "-7409.01000000", "0.00000125"],
        )

    def test_natural_clear_mode(self):
        """With a dominant precision (two decimals), outputs round to it."""
        self.assertFormatNumbers(
            ["1.2345", "1.23", "234.26", "38.019"], ["1.23", "1.23", "234.26", "38.02"]
        )

    def test_natural_maximum(self):
        """Precision.MAXIMUM pads every value to the widest precision seen."""
        self.assertFormatNumbers(
            ["1.2345", "1.23", "234.26", "38.019"],
            ["1.2345", "1.2300", "234.2600", "38.0190"],
            precision=Precision.MAXIMUM,
        )

    def test_natural_commas(self):
        """commas=True inserts thousands separators."""
        self.assertFormatNumbers(
            ["0.2345", "1.23", "12234.26"], ["0.23", "1.23", "12,234.26"], commas=True
        )

    def test_natural_reserved(self):
        """A reserved width does not change NATURAL rendering."""
        self.assertFormatNumbers(
            ["1.2345", "1.23", "234.26", "38.019"],
            ["1.23", "1.23", "234.26", "38.02"],
            reserved=10,
        )
class TestDisplayContextRight(DisplayContextBaseTest):
    """Formatting under RIGHT alignment (values padded to a common width)."""

    alignment = Align.RIGHT

    def test_right_uninitialized(self):
        """With no numbers observed, values render unchanged."""
        self.assertFormatNumbers(
            ["1.2345", "764", "-7409.01", "0.00000125"],
            ["1.2345", "764", "-7409.01", "0.00000125"],
            noinit=True,
        )

    def test_right_sign(self):
        """Negative signs widen the common field; positives are space-padded."""
        self.assertFormatNumbers(["7409.01", "0.1"], ["7409.01", "   0.10"])
        self.assertFormatNumbers(["-7409.01", "0.1"], ["-7409.01", "    0.10"])

    def test_right_integer(self):
        """Integers right-align to the widest; MAXIMUM pads fractions too."""
        self.assertFormatNumbers(
            ["1", "20", "300", "4000", "50000"],
            ["    1", "   20", "  300", " 4000", "50000"],
        )
        self.assertFormatNumbers(
            ["1", "20", "300", "4000", "50000", "0.001"],
            [
                "    1.000",
                "   20.000",
                "  300.000",
                " 4000.000",
                "50000.000",
                "    0.001",
            ],
            precision=Precision.MAXIMUM,
        )

    def test_right_integer_commas(self):
        """Comma separators count toward the common width."""
        self.assertFormatNumbers(
            ["1", "20", "300", "4000", "50000"],
            ["     1", "    20", "   300", " 4,000", "50,000"],
            commas=True,
        )

    def test_right_fractional(self):
        """Fractions round to the dominant two-decimal precision."""
        self.assertFormatNumbers(
            ["4000", "0.01", "0.02", "0.0002"],
            ["4000.00", "   0.01", "   0.02", "   0.00"],
        )

    def test_right_fractional_commas(self):
        """Same as above, with thousands separators enabled."""
        self.assertFormatNumbers(
            ["4000", "0.01", "0.02", "0.0002"],
            ["4,000.00", "    0.01", "    0.02", "    0.00"],
            commas=True,
        )
class TestDisplayContextDot(DisplayContextBaseTest):
    """Formatting behaviour under DOT alignment (aligned on the period)."""

    alignment = Align.DOT

    def test_dot_uninitialized(self):
        self.assertFormatNumbers(
            ["1.2345", "764", "-7409.01", "0.00000125"],
            ["1.23450000", "764.00000000", "-7409.01000000", "0.00000125"],
            noinit=True,
        )

    def test_dot_basic(self):
        self.assertFormatNumbers(
            ["1.2345", "764", "-7409.01", "0.00", "0.00000125"],
            [" 1.23", " 764.00", "-7409.01", " 0.00", " 0.00"],
        )

    def test_dot_basic_multi(self):
        # Per-currency precisions: each currency keeps its own fraction
        # width, padded on the right so the dots line up.
        self.assertFormatNumbers(
            [
                ("1.2345", "USD"),
                ("764", "CAD"),
                ("-7409.01", "EUR"),
                ("0.00", "XAU"),
                ("0.00000125", "RBFF"),
            ],
            [
                " 1.2345 ",
                " 764 ",
                "-7409.01 ",
                " 0.00 ",
                " 0.00000125",
            ],
        )

    def test_dot_sign(self):
        self.assertFormatNumbers([("7409.01", "USD"), "0.1"], ["7409.01", " 0.1 "])
        self.assertFormatNumbers([("-7409.01", "USD"), "0.1"], ["-7409.01", " 0.1 "])

    def test_dot_integer(self):
        self.assertFormatNumbers(
            ["1", "20", "300", "4000", "50000"],
            [" 1", " 20", " 300", " 4000", "50000"],
        )
        self.assertFormatNumbers(
            ["1", "20", "300", "4000", "50000", "0.001", ("0.1", "USD")],
            [
                " 1.000",
                " 20.000",
                " 300.000",
                " 4000.000",
                "50000.000",
                " 0.001",
                " 0.1 ",
            ],
            precision=Precision.MAXIMUM,
        )

    def test_dot_integer_commas(self):
        self.assertFormatNumbers(
            ["1", "20", "300", "4000", "50000"],
            [" 1", " 20", " 300", " 4,000", "50,000"],
            commas=True,
        )

    def test_dot_fractional(self):
        self.assertFormatNumbers(
            [("4000", "USD"), "0.01", "0.02", "0.0002"],
            ["4000 ", " 0.01", " 0.02", " 0.00"],
        )

    def test_dot_fractional_commas(self):
        self.assertFormatNumbers(
            [("4000", "USD"), "0.01", "0.02", "0.0002"],
            ["4,000 ", " 0.01", " 0.02", " 0.00"],
            commas=True,
        )
class TestDisplayContextQuantize(unittest.TestCase):
    """Tests for DisplayContext.quantize()."""

    def test_quantize_basic(self):
        dcontext = display_context.DisplayContext()
        dcontext.update(Decimal("1.23"), "USD")
        # Two fractional digits seen for USD -> quantize to two digits.
        self.assertEqual(Decimal("3.23"), dcontext.quantize(Decimal("3.23253343"), "USD"))
        dcontext.update(Decimal("1.2301"), "USD")
        dcontext.update(Decimal("1.2302"), "USD")
        # Four-digit precision now dominates -> quantize widens accordingly.
        self.assertEqual(Decimal("3.2325"), dcontext.quantize(Decimal("3.23253343"), "USD"))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
from pathlib import PurePath
from apps.integrations.metadata.heartbeat._heartbeat_text_creator import (
HeartBeatTextCreator,
)
# The integration key is derived from this module's file name
# (e.g. "formatted_webhook"); the creator builds the standard heartbeat
# notification texts for that integration.
integration_verbal = PurePath(__file__).stem
creator = HeartBeatTextCreator(integration_verbal)
heartbeat_text = creator.get_heartbeat_texts()

heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
# Alert payload emitted when a heartbeat EXPIRES (state "alerting").
heartbeat_expired_payload = {
    "alert_uid": "fbdad422-b27d-454a-8553-84d1517e0005",
    "title": heartbeat_expired_title,
    "image_url": None,
    "state": "alerting",
    "link_to_upstream_details": None,
    "message": heartbeat_expired_message,
    "is_oncall_heartbeat": True,
    "is_oncall_heartbeat_restored": False,
    "is_amixr_heartbeat": True,  # Keep for backwards compatibility
    "is_amixr_heartbeat_restored": False,  # Keep for backwards compatibility
}

heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
# Alert payload emitted when the heartbeat comes back (state "ok").
heartbeat_restored_payload = {
    "alert_uid": "fbdad422-b27d-454a-8553-84d1517e0005",
    "title": heartbeat_restored_title,
    "image_url": None,
    "state": "ok",
    "link_to_upstream_details": None,
    "message": heartbeat_restored_message,
    "is_oncall_heartbeat": True,
    "is_oncall_heartbeat_restored": True,
    "is_amixr_heartbeat": True,  # Keep for backwards compatibility
    "is_amixr_heartbeat_restored": True,  # Keep for backwards compatibility
}
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from copy import deepcopy
import wal
from dashedit_dlg import dash_editor_dlg
from sk1 import _, config
from sk1.pwidgets import (
ArrowChoice,
CapChoice,
DashChoice,
JoinChoice,
SolidFill,
StaticUnitLabel,
UnitSpin,
)
from sk1.resources import icons
from uc2 import uc2const
# Stroke style used when the dialog is opened without an existing stroke.
# List layout mirrors StrokeStyle.get_stroke() below:
#   [0] flags (not edited by this dialog) -- TODO confirm semantics
#   [1] line width (0.2834... looks like 0.1 mm in points -- verify units)
#   [2] color: solid CMYK black
#   [3] dash pattern ([] == solid line)
#   [4] caps style id
#   [5] join style id
#   [6] miter limit  (NOTE(review): negative default is outside the
#       FloatSpin range (0.0, 1000.0) used below -- confirm intended)
#   [7] "behind fill" flag
#   [8] "scalable stroke" flag
#   [9] arrow markers ([] == none)
FALLBACK_STROKE = [
    0,
    0.28346456692913385,
    [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 1.0], 1.0, ""],
    [],
    1,
    0,
    -2.0526525391269526,
    0,
    0,
    [],
]
class StrokeDialog(wal.OkCancelDialog):
    """Modal stroke-editing dialog: a notebook with a color tab and a
    style tab.  The edited stroke is returned by show()."""

    color_tab = None
    stroke_tab = None
    presenter = None
    orig_stroke = None
    new_stroke = None
    orig_color = None
    new_color = None
    nb = None

    def __init__(self, parent, title, presenter, stroke_style=None):
        # Normalize empty stroke styles ([], None) to None.
        stroke_style = stroke_style or None
        self.presenter = presenter
        self.app = presenter.app
        self.orig_stroke = stroke_style
        if self.orig_stroke:
            self.new_stroke = deepcopy(stroke_style)
            self.orig_color = stroke_style[2]
            self.new_color = deepcopy(stroke_style[2])
        else:
            # No existing stroke: start from the fallback style, no color.
            self.new_stroke = deepcopy(FALLBACK_STROKE)
            self.orig_color = []
            self.new_color = []
        size = config.stroke_dlg_size
        wal.OkCancelDialog.__init__(
            self,
            parent,
            title,
            style=wal.VERTICAL,
            resizable=True,
            size=size,
            add_line=False,
            action_button=wal.BUTTON_APPLY,
        )
        self.set_minsize(config.stroke_dlg_minsize)

    def build(self):
        # Called by the wal dialog framework to create dialog content.
        self.nb = wal.Notebook(self)
        self.color_tab = StrokeColor(self.nb, self, self.orig_color)
        self.stroke_tab = StrokeStyle(self.nb, self, self.new_stroke)
        self.nb.add_page(self.color_tab, _("Stroke Color"))
        self.nb.add_page(self.stroke_tab, _("Stroke Style"))
        if self.new_color:
            self.nb.set_active_index(1)
        else:
            # Without a stroke color the style tab is irrelevant -- hide it.
            self.nb.remove_page_by_index(1)
        self.pack(self.nb, fill=True, expand=True)

    def set_color(self, color):
        # Callback from the color tab: show/hide the style tab depending
        # on whether a stroke color is currently set.
        if not self.new_color and color:
            self.nb.add_page(self.stroke_tab, _("Stroke Style"))
        self.new_color = color
        if not self.new_color and self.stroke_tab:
            self.nb.remove_page(self.stroke_tab)

    def get_result(self):
        # Collect the edited stroke; [] means "no stroke".
        if self.new_color:
            self.new_stroke = self.stroke_tab.get_stroke()
            self.new_stroke[2] = self.color_tab.get_color()
            return self.new_stroke
        return []

    def show(self):
        """Run the dialog modally.

        Returns the new stroke list, [] for "no stroke", or None if the
        dialog was cancelled.  Also persists the dialog size in config.
        """
        ret = None
        if self.show_modal() == wal.BUTTON_OK:
            ret = self.get_result()
        w, h = self.get_size()
        if wal.is_unity_16_04():
            # Compensate for Unity 16.04 window decorations when saving.
            h = max(h - 28, config.stroke_dlg_minsize[1])
        config.stroke_dlg_size = (w, h)
        self.destroy()
        return ret
def stroke_dlg(parent, presenter, stroke_style, title=_("Stroke")):
    """Open the stroke dialog modally and return the resulting stroke."""
    dialog = StrokeDialog(parent, title, presenter, stroke_style)
    return dialog.show()
class StrokeStyle(wal.VPanel):
    """Notebook tab for editing stroke parameters: width, arrow markers,
    dashes, caps, join and misc options."""

    def __init__(self, parent, dlg, new_stroke):
        self.dlg = dlg
        self.app = dlg.app
        self.stroke = new_stroke
        wal.VPanel.__init__(self, parent)

        # --- Width and markers row ---
        hp = wal.HPanel(self)
        width_p = wal.LabeledPanel(hp, _("Width:"))
        p = wal.HPanel(width_p)
        self.width_spin = UnitSpin(self.app, p, self.stroke[1], step=0.1)
        p.pack(self.width_spin)
        p.pack((5, 5))
        p.pack(StaticUnitLabel(self.app, p))
        width_p.pack(p, padding_all=5)
        hp.pack(width_p, fill=True)
        hp.pack((5, 5))
        arrow_p = wal.LabeledPanel(hp, _("Markers:"))
        p = wal.HPanel(arrow_p)
        end, start = [], []
        if self.stroke[9]:
            # Markers are stored as [end, start] (see get_stroke below).
            end, start = self.stroke[9]
        self.end_arrow = ArrowChoice(p, arrow=end, end=True)
        self.end_arrow.set_arrow(end)
        p.pack(self.end_arrow)
        p.pack((5, 5))
        self.start_arrow = ArrowChoice(p, arrow=start)
        self.start_arrow.set_arrow(start)
        p.pack(self.start_arrow)
        arrow_p.pack(p)
        hp.pack(arrow_p, expand=True, fill=True)
        self.pack(hp, fill=True, padding_all=10)

        # --- Dash pattern row ---
        p = wal.HPanel(self)
        p.pack(wal.Label(p, _("Dashes:")), padding=5)
        self.dashes = DashChoice(p, self.stroke[3])
        p.pack(self.dashes)
        txt = _("Edit dash pattern")
        p.pack(
            wal.ImageButton(
                p,
                icons.PD_EDIT,
                art_size=wal.SIZE_16,
                tooltip=txt,
                flat=False,
                onclick=self.edit_dash,
            ),
            padding=5,
        )
        self.pack(p)

        # --- Caps / Join / Options grid ---
        grid = wal.GridPanel(self, rows=1, cols=3, vgap=15, hgap=15)
        grid.add_growable_col(2)
        caps_p = wal.LabeledPanel(grid, _("Caps:"))
        self.caps = CapChoice(caps_p, self.stroke[4])
        caps_p.pack(self.caps, align_center=False, padding_all=10)
        grid.pack(caps_p)
        join_p = wal.LabeledPanel(grid, _("Join:"))
        self.join = JoinChoice(join_p, self.stroke[5])
        join_p.pack(self.join, align_center=False, padding_all=10)
        grid.pack(join_p)
        opt_p = wal.LabeledPanel(grid, _("Options:"))
        p = wal.HPanel(opt_p)
        p.pack(wal.Label(p, _("Miter limit:")), padding=5)
        self.miter_limit = wal.FloatSpin(
            p, self.stroke[6], range_val=(0.0, 1000.0), digits=5
        )
        p.pack(self.miter_limit)
        opt_p.pack(p, align_center=False, padding_all=10)
        p = wal.VPanel(opt_p)
        self.behind = wal.NumCheckbox(p, _("Behind fill"), self.stroke[7])
        p.pack(self.behind, align_center=False)
        self.scalable = wal.NumCheckbox(p, _("Scalable stroke"), self.stroke[8])
        p.pack(self.scalable, align_center=False, padding=10)
        opt_p.pack(p, align_center=False, padding_all=10)
        grid.pack(opt_p, fill=True)
        self.pack(grid, padding_all=10, fill=True)
        self.layout()

    def edit_dash(self):
        # Open the dash-pattern editor; None means the editor was cancelled.
        ret = dash_editor_dlg(self.dlg, self.dashes.get_dash())
        if ret is not None:
            self.dashes.set_dash(ret)

    def get_stroke(self):
        """Write the widget values back into the stroke list and return it."""
        self.stroke[1] = self.width_spin.get_point_value()
        self.stroke[3] = self.dashes.get_dash()
        self.stroke[4] = self.caps.get_cap()
        self.stroke[5] = self.join.get_join()
        self.stroke[6] = self.miter_limit.get_value()
        self.stroke[7] = self.behind.get_value()
        self.stroke[8] = self.scalable.get_value()
        start = self.start_arrow.get_arrow()
        end = self.end_arrow.get_arrow()
        # Markers are stored [end, start]; [] when both are unset.
        self.stroke[9] = [] if start == [] and end == [] else [end, start]
        return self.stroke
class StrokeColor(wal.VPanel):
    """Notebook tab hosting the solid-fill color picker for the stroke."""

    def __init__(self, parent, dlg, orig_color):
        self.dlg = dlg
        wal.VPanel.__init__(self, parent)
        cms = dlg.presenter.cms
        self.color_panel = SolidFill(self, dlg, cms)
        self.pack(self.color_panel, fill=True, expand=True)
        fill = []
        if orig_color:
            # SolidFill expects a fill structure with the color at index 2.
            fill = [0, 0, orig_color]
        self.color_panel.activate(
            fill, use_rule=False, onmodechange=self.on_mode_change
        )

    def on_mode_change(self):
        # Propagate color (or "no color") changes to the parent dialog.
        self.dlg.set_color(self.get_color())

    def get_color(self):
        """Return the currently selected color, or [] when unset."""
        fill = self.color_panel.get_result()
        if fill:
            return fill[2]
        return []
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
from django.conf import settings
from django.db import transaction
from kombu.utils.uuid import uuid as celery_uuid
from .compare_escalations import compare_escalations
from .task_logger import task_logger
@shared_dedicated_queue_retry_task(
    autoretry_for=(Exception,),
    retry_backoff=True,
    # No retry limit outside DEBUG (max_retries=None).
    max_retries=0 if settings.DEBUG else None,
)
def escalate_alert_group(alert_group_pk):
    """
    This task is on duty to send escalated alerts and schedule further escalation.

    Executes the current escalation step for the alert group under a
    row lock and, unless escalation is stopped or paused, schedules
    itself again for the next step.  Returns a short status string.
    """
    from apps.alerts.models import AlertGroup

    task_logger.debug(f"Start escalate_alert_group for alert_group {alert_group_pk}")
    log_message = ""
    with transaction.atomic():
        try:
            alert_group = AlertGroup.objects.filter(
                pk=alert_group_pk
            ).select_for_update()[0]  # Lock alert_group:
        except IndexError:
            return f"Alert group with pk {alert_group_pk} doesn't exist"
        # Only the task whose id matches active_escalation_id may proceed;
        # anything else is a duplicate or a superseded escalation run.
        if not compare_escalations(
            escalate_alert_group.request.id, alert_group.active_escalation_id
        ):
            return "Active escalation ID mismatch. Duplication or non-active escalation triggered. Active: {}".format(
                alert_group.active_escalation_id
            )
        if (
            alert_group.resolved
            or alert_group.acknowledged
            or alert_group.is_silenced_forever
        ):
            task_logger.info(
                f"alert_group {alert_group.pk} resolved, acked or silenced forever. No need to escalate."
            )
            alert_group.stop_escalation()
            return
        if alert_group.is_silenced_for_period:
            # escalation will be restarted by unsilence_task
            task_logger.info(
                f"alert_group {alert_group.pk} silenced for period. Escalation will be restarted by unsilence_task"
            )
            return
        if alert_group.root_alert_group is not None:
            # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
            return "Alert is dependent on another. No need to activate escalation."
        if alert_group.wiped_at is not None:
            # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
            return "Alert is wiped. No need to activate escalation."
        escalation_snapshot = alert_group.escalation_snapshot
        if escalation_snapshot is None:
            return (
                f"alert_group {alert_group_pk} has no saved escalation snapshot. "
                f"Probably its channel filter was deleted or has no attached escalation chain."
            )
        escalation_snapshot.execute_actual_escalation_step()
        alert_group.raw_escalation_snapshot = escalation_snapshot.convert_to_dict()
        if escalation_snapshot.stop_escalation:
            alert_group.is_escalation_finished = True
            alert_group.save(
                update_fields=["is_escalation_finished", "raw_escalation_snapshot"]
            )
            log_message += "Alert lifecycle finished. OnCall will be silent about this incident from now. "
        elif escalation_snapshot.pause_escalation:
            alert_group.save(update_fields=["raw_escalation_snapshot"])
            log_message += "Escalation is paused. "
        else:
            # Schedule the next step only after this transaction commits,
            # storing the new task id so its run passes the check above.
            eta = escalation_snapshot.next_step_eta
            task_id = celery_uuid()
            alert_group.active_escalation_id = task_id
            transaction.on_commit(
                lambda: escalate_alert_group.apply_async(
                    (alert_group.pk,), immutable=True, eta=eta, task_id=task_id
                )
            )
            alert_group.save(
                update_fields=["active_escalation_id", "raw_escalation_snapshot"]
            )
            log_message += "Next escalation poked, id: {} ".format(task_id)
        task_logger.debug(
            f"end of transaction in escalate_alert_group for alert_group {alert_group_pk}"
        )
    task_logger.debug(f"Finish escalate_alert_group for alert_group {alert_group_pk}")
    return log_message + "Escalation executed."
# Copyright (c) 2023 UltiMaker
# Cura is released under the terms of the LGPLv3 or higher.
import configparser
import io
import re
from typing import List, Tuple
from UM.VersionUpgrade import VersionUpgrade
class VersionUpgrade54to55(VersionUpgrade):
    """Upgrades configuration stacks from the 5.4 format to the 5.5 format."""

    # Matches UltiMaker S-line profile ids, capturing the parts needed to
    # rebuild the id with a branded ("um-") material segment.
    profile_regex = re.compile(
        r"um\_(?P<machine>s(3|5|7))_(?P<core_type>aa|cc|bb)(?P<nozzle_size>0\.(6|4|8))_(?P<material>pla|petg|abs|cpe|cpe_plus|nylon|pc|petcf|tough_pla|tpu)_(?P<layer_height>0\.\d{1,2}mm)"
    )

    @staticmethod
    def _isUpgradedUltimakerDefinitionId(definition_id: str) -> bool:
        """Whether the machine definition is one of the reworked S-line ids."""
        return definition_id.startswith(
            ("ultimaker_s5", "ultimaker_s3", "ultimaker_s7")
        )

    @staticmethod
    def _isBrandedMaterialID(material_id: str) -> bool:
        """Whether the material id refers to an UltiMaker-branded material."""
        return material_id.startswith("ultimaker_")

    @staticmethod
    def upgradeStack(serialized: str, filename: str) -> Tuple[List[str], List[str]]:
        """
        Upgrades stacks to have the new version number.

        :param serialized: The original contents of the stack.
        :param filename: The original file name of the stack.
        :return: A list of new file names, and a list of the new contents for
        those files.
        """
        parser = configparser.ConfigParser(interpolation=None)
        parser.read_string(serialized)

        if not parser.has_section("general"):
            parser.add_section("general")

        containers = parser["containers"]
        extruder_definition_id = containers["7"]
        is_um_extruder = (
            parser["metadata"]["type"] == "extruder_train"
            and VersionUpgrade54to55._isUpgradedUltimakerDefinitionId(
                extruder_definition_id
            )
        )
        # Only certain Ultimaker extruder stacks need their ids rewritten,
        # and only when they use an Ultimaker-branded material.
        if is_um_extruder and VersionUpgrade54to55._isBrandedMaterialID(
            containers["4"]
        ):
            # Rewrite the quality ("3") and intent ("2") container ids to
            # the new "um-<material>" form.
            replacement = r"um_\g<machine>_\g<core_type>\g<nozzle_size>_um-\g<material>_\g<layer_height>"
            for slot in ("3", "2"):
                containers[slot] = VersionUpgrade54to55.profile_regex.sub(
                    replacement, containers[slot]
                )

        # We're not changing any settings, but we are changing how certain
        # stacks are handled.
        parser["general"]["version"] = "6"

        stream = io.StringIO()
        parser.write(stream)
        return [filename], [stream.getvalue()]
import argparse
import errno
import os
import re
import sys
from argparse import RawDescriptionHelpFormatter
from textwrap import dedent
from urllib.parse import urlsplit
from requests.utils import get_netrc_auth
from ..context import Environment
from ..plugins.registry import plugin_manager
from ..utils import ExplicitNullAuth, get_content_type
from .argtypes import (
PARSED_DEFAULT_FORMAT_OPTIONS,
AuthCredentials,
KeyValueArgType,
SSLCredentials,
parse_auth,
parse_format_options,
)
from .constants import (
BASE_OUTPUT_OPTIONS,
HTTP_GET,
HTTP_POST,
OUT_RESP_BODY,
OUTPUT_OPTIONS,
OUTPUT_OPTIONS_DEFAULT,
OUTPUT_OPTIONS_DEFAULT_OFFLINE,
OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED,
PRETTY_MAP,
PRETTY_STDOUT_TTY_ONLY,
SEPARATOR_CREDENTIALS,
SEPARATOR_GROUP_ALL_ITEMS,
SEPARATOR_GROUP_DATA_ITEMS,
URL_SCHEME_RE,
RequestType,
)
from .exceptions import ParseError
from .requestitems import RequestItems
class HTTPieHelpFormatter(RawDescriptionHelpFormatter):
    """A nicer help formatter.

    Help for arguments can be indented and contain new lines.
    It will be de-dented and arguments in the help
    will be separated by a blank line for better readability.
    """

    def __init__(self, max_help_position=6, *args, **kwargs):
        # Use a smaller indent for the per-argument help column.
        kwargs["max_help_position"] = max_help_position
        super().__init__(*args, **kwargs)

    def _split_lines(self, text, width):
        # De-dent the help text and follow it with a blank line so that
        # arguments are visually separated.
        normalized = dedent(text).strip() + "\n\n"
        return normalized.splitlines()

    def add_usage(self, usage, actions, groups, prefix=None):
        # Keep only positional arguments in the usage line.
        positionals = [action for action in actions if not action.option_strings]
        exc = sys.exc_info()[1]
        if (
            isinstance(exc, argparse.ArgumentError)
            and len(exc.args) >= 1
            and isinstance(exc.args[0], argparse.Action)
        ):
            # add_usage path is also taken when you pass an invalid option,
            # e.g --style=invalid. If something like that happens, we want
            # to include to action that caused to the invalid usage into
            # the list of actions we are displaying.
            positionals.insert(0, exc.args[0])
        super().add_usage(usage, positionals, groups, prefix="usage:\n ")
# TODO: refactor and design type-annotated data structures
# for raw args + parsed args and keep things immutable.
class BaseHTTPieArgumentParser(argparse.ArgumentParser):
    """Common base for HTTPie argument parsers.

    Tracks the runtime Environment and whether request body data is
    available from stdin or --raw.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.env = None  # Environment; set by parse_args()
        self.args = None  # parsed argparse.Namespace
        self.has_stdin_data = False  # stdin is piped and not ignored
        self.has_input_data = False  # stdin data or --raw given

    # noinspection PyMethodOverriding
    def parse_args(
        self, env: Environment, args=None, namespace=None
    ) -> argparse.Namespace:
        """Parse arguments and record stdin/--raw availability flags."""
        self.env = env
        self.args, no_options = self.parse_known_args(args, namespace)
        if self.args.debug:
            # --debug implies full tracebacks.
            self.args.traceback = True
        self.has_stdin_data = (
            self.env.stdin
            and not getattr(self.args, "ignore_stdin", False)
            and not self.env.stdin_isatty
        )
        self.has_input_data = (
            self.has_stdin_data or getattr(self.args, "raw", None) is not None
        )
        return self.args

    # noinspection PyShadowingBuiltins
    def _print_message(self, message, file=None):
        # Sneak in our stderr/stdout: route argparse's output through the
        # Environment's streams so it can be redirected/captured.
        if hasattr(self, "root"):
            env = self.root.env
        else:
            env = self.env
        if env is not None:
            file = {
                sys.stdout: env.stdout,
                sys.stderr: env.stderr,
                None: env.stderr,
            }.get(file, file)
            if not hasattr(file, "buffer") and isinstance(message, str):
                message = message.encode(env.stdout_encoding)
        super()._print_message(message, file)
class HTTPieManagerArgumentParser(BaseHTTPieArgumentParser):
    def parse_known_args(self, args=None, namespace=None):
        """Parse args, converting a top-level argparse usage exit (code 2)
        into an ArgumentError so callers can handle it."""
        try:
            return super().parse_known_args(args, namespace)
        except SystemExit as exc:
            # A root parser, when present, handles the exit itself.
            if exc.code == 2 and not hasattr(self, "root"):  # Argument Parser Error
                raise argparse.ArgumentError(None, None)
            raise
class HTTPieArgumentParser(BaseHTTPieArgumentParser):
"""Adds additional logic to `argparse.ArgumentParser`.
Handles all input (CLI args, file args, stdin), applies defaults,
and performs extra validation.
"""
def __init__(self, *args, formatter_class=HTTPieHelpFormatter, **kwargs):
kwargs.setdefault("add_help", False)
super().__init__(*args, formatter_class=formatter_class, **kwargs)
    # noinspection PyMethodOverriding
    def parse_args(
        self, env: Environment, args=None, namespace=None
    ) -> argparse.Namespace:
        """Parse CLI args and run the full post-processing pipeline.

        The order of the processing steps below matters (e.g. items must
        be parsed before the URL/auth are processed).
        """
        self.env = env
        self.env.args = namespace = namespace or argparse.Namespace()
        self.args, no_options = super().parse_known_args(args, namespace)
        if self.args.debug:
            # --debug implies full tracebacks.
            self.args.traceback = True
        self.has_stdin_data = (
            self.env.stdin and not self.args.ignore_stdin and not self.env.stdin_isatty
        )
        self.has_input_data = self.has_stdin_data or self.args.raw is not None
        # Arguments processing and environment setup.
        self._apply_no_options(no_options)
        self._process_request_type()
        self._process_download_options()
        self._setup_standard_streams()
        self._process_output_options()
        self._process_pretty_options()
        self._process_format_options()
        self._guess_method()
        self._parse_items()
        self._process_url()
        self._process_auth()
        self._process_ssl_cert()
        if self.args.raw is not None:
            self._body_from_input(self.args.raw)
        elif self.has_stdin_data:
            self._body_from_file(self.env.stdin)
        if self.args.compress:
            # TODO: allow --compress with --chunked / --multipart
            if self.args.chunked:
                self.error("cannot combine --compress and --chunked")
            if self.args.multipart:
                self.error("cannot combine --compress and --multipart")
        return self.args
def _process_request_type(self):
request_type = self.args.request_type
self.args.json = request_type is RequestType.JSON
self.args.multipart = request_type is RequestType.MULTIPART
self.args.form = request_type in {
RequestType.FORM,
RequestType.MULTIPART,
}
    def _process_url(self):
        """Normalize args.url: strip the paste shortcut, add a default
        scheme, and expand the curl-style localhost shorthand."""
        if self.args.url.startswith("://"):
            # Paste URL & add space shortcut: `http ://pie.dev` → `http://pie.dev`
            self.args.url = self.args.url[3:]
        if not URL_SCHEME_RE.match(self.args.url):
            if os.path.basename(self.env.program_name) == "https":
                # Invoked via the `https` executable name — default to HTTPS.
                scheme = "https://"
            else:
                scheme = self.args.default_scheme + "://"
            # See if we're using curl style shorthand for localhost (:3000/foo)
            shorthand = re.match(r"^:(?!:)(\d*)(/?.*)$", self.args.url)
            if shorthand:
                port = shorthand.group(1)
                rest = shorthand.group(2)
                self.args.url = scheme + "localhost"
                if port:
                    self.args.url += ":" + port
                self.args.url += rest
            else:
                self.args.url = scheme + self.args.url
    def _setup_standard_streams(self):
        """
        Modify `env.stdout` and `env.stdout_isatty` based on args, if needed.
        """
        self.args.output_file_specified = bool(self.args.output_file)
        if self.args.download:
            # FIXME: Come up with a cleaner solution.
            if not self.args.output_file and not self.env.stdout_isatty:
                # Use stdout as the download output file.
                self.args.output_file = self.env.stdout
            # With `--download`, we write everything that would normally go to
            # `stdout` to `stderr` instead. Let's replace the stream so that
            # we don't have to use many `if`s throughout the codebase.
            # The response body will be treated separately.
            self.env.stdout = self.env.stderr
            self.env.stdout_isatty = self.env.stderr_isatty
        elif self.args.output_file:
            # When not `--download`ing, then `--output` simply replaces
            # `stdout`. The file is opened for appending, which isn't what
            # we want in this case.
            self.args.output_file.seek(0)
            try:
                self.args.output_file.truncate()
            except OSError as e:
                if e.errno == errno.EINVAL:
                    # E.g. /dev/null on Linux.
                    pass
                else:
                    raise
            self.env.stdout = self.args.output_file
            self.env.stdout_isatty = False
        if self.args.quiet:
            # --quiet silences stderr; stdout is silenced too unless it is
            # explicitly redirected with --output (and not --download).
            self.env.quiet = self.args.quiet
            self.env.stderr = self.env.devnull
            if not (self.args.output_file_specified and not self.args.download):
                self.env.stdout = self.env.devnull
        self.env.apply_warnings_filter()
    def _process_ssl_cert(self):
        """Prompt for the client-cert key passphrase when the key file is
        encrypted and no passphrase was provided."""
        from httpie.ssl_ import _is_key_file_encrypted

        if self.args.cert_key_pass is None:
            # Normalize to an SSLCredentials holder so `.value` is usable.
            self.args.cert_key_pass = SSLCredentials(None)
        if (
            self.args.cert_key is not None
            and self.args.cert_key_pass.value is None
            and _is_key_file_encrypted(self.args.cert_key)
        ):
            self.args.cert_key_pass.prompt_password(self.args.cert_key)
    def _process_auth(self):
        """Resolve authentication from --auth/--auth-type, URL credentials,
        or .netrc into an auth object on ``self.args.auth`` via the
        selected auth plugin."""
        # TODO: refactor & simplify this method.
        self.args.auth_plugin = None
        default_auth_plugin = plugin_manager.get_auth_plugins()[0]
        auth_type_set = self.args.auth_type is not None
        url = urlsplit(self.args.url)
        if self.args.auth is None and not auth_type_set:
            if url.username is not None:
                # Handle http://username:password@hostname/
                username = url.username
                password = url.password or ""
                self.args.auth = AuthCredentials(
                    key=username,
                    value=password,
                    sep=SEPARATOR_CREDENTIALS,
                    orig=SEPARATOR_CREDENTIALS.join([username, password]),
                )
        if self.args.auth is not None or auth_type_set:
            if not self.args.auth_type:
                self.args.auth_type = default_auth_plugin.auth_type
            plugin = plugin_manager.get_auth_plugin(self.args.auth_type)()
            # Fall back to .netrc credentials when no explicit auth given
            # and the plugin allows it.
            if (
                not self.args.ignore_netrc
                and self.args.auth is None
                and plugin.netrc_parse
            ):
                # Only host needed, so it’s OK URL not finalized.
                netrc_credentials = get_netrc_auth(self.args.url)
                if netrc_credentials:
                    self.args.auth = AuthCredentials(
                        key=netrc_credentials[0],
                        value=netrc_credentials[1],
                        sep=SEPARATOR_CREDENTIALS,
                        orig=SEPARATOR_CREDENTIALS.join(netrc_credentials),
                    )
            if plugin.auth_require and self.args.auth is None:
                self.error("--auth required")
            plugin.raw_auth = self.args.auth
            self.args.auth_plugin = plugin
            already_parsed = isinstance(self.args.auth, AuthCredentials)
            if self.args.auth is None or not plugin.auth_parse:
                self.args.auth = plugin.get_auth()
            else:
                if already_parsed:
                    # from the URL
                    credentials = self.args.auth
                else:
                    credentials = parse_auth(self.args.auth)
                if not credentials.has_password() and plugin.prompt_password:
                    if self.args.ignore_stdin:
                        # Non-tty stdin read by now
                        self.error(
                            "Unable to prompt for passwords because"
                            " --ignore-stdin is set."
                        )
                    credentials.prompt_password(url.netloc)
                if credentials.key and credentials.value:
                    plugin.raw_auth = credentials.key + ":" + credentials.value
                self.args.auth = plugin.get_auth(
                    username=credentials.key,
                    password=credentials.value,
                )
        if not self.args.auth and self.args.ignore_netrc:
            # Set a no-op auth to force requests to ignore .netrc
            # <https://github.com/psf/requests/issues/2773#issuecomment-174312831>
            self.args.auth = ExplicitNullAuth()
def _apply_no_options(self, no_options):
"""For every `--no-OPTION` in `no_options`, set `args.OPTION` to
its default value. This allows for un-setting of options, e.g.,
specified in config.
"""
invalid = []
for option in no_options:
if not option.startswith("--no-"):
invalid.append(option)
continue
# --no-option => --option
inverted = "--" + option[5:]
for action in self._actions:
if inverted in action.option_strings:
setattr(self.args, action.dest, action.default)
break
else:
invalid.append(option)
if invalid:
self.error(f'unrecognized arguments: {" ".join(invalid)}')
    def _body_from_file(self, fd):
        """Read the data from a file-like object.

        Bytes are always read.
        """
        self._ensure_one_data_source(self.args.data, self.args.files)
        # Prefer the underlying byte buffer of text-mode streams.
        self.args.data = getattr(fd, "buffer", fd)
    def _body_from_input(self, data):
        """Read the data from the CLI (i.e. the --raw value)."""
        self._ensure_one_data_source(
            self.has_stdin_data, self.args.data, self.args.files
        )
        # The body is always sent as bytes.
        self.args.data = data.encode()
def _ensure_one_data_source(self, *other_sources):
"""There can only be one source of input request data."""
if any(other_sources):
self.error(
"Request body (from stdin, --raw or a file) and request "
"data (key=value) cannot be mixed. Pass "
"--ignore-stdin to let key/value take priority. "
"See https://httpie.io/docs#scripting for details."
)
    def _guess_method(self):
        """Set `args.method` if not specified to either POST or GET
        based on whether the request has data or not.
        """
        if self.args.method is None:
            # Invoked as `http URL'.
            assert not self.args.request_items
            if self.has_input_data:
                self.args.method = HTTP_POST
            else:
                self.args.method = HTTP_GET
        # FIXME: False positive, e.g., "localhost" matches but is a valid URL.
        elif not re.match("^[a-zA-Z]+$", self.args.method):
            # Invoked as `http URL item+'. The URL is now in `args.method`
            # and the first ITEM is now incorrectly in `args.url`.
            try:
                # Parse the URL as an ITEM and store it as the first ITEM arg.
                self.args.request_items.insert(
                    0,
                    KeyValueArgType(*SEPARATOR_GROUP_ALL_ITEMS).__call__(self.args.url),
                )
            except argparse.ArgumentTypeError as e:
                if self.args.traceback:
                    raise
                self.error(e.args[0])
            else:
                # Set the URL correctly
                self.args.url = self.args.method
                # Infer the method: POST when any data item is present.
                has_data = self.has_input_data or any(
                    item.sep in SEPARATOR_GROUP_DATA_ITEMS
                    for item in self.args.request_items
                )
                self.args.method = HTTP_POST if has_data else HTTP_GET
    def _parse_items(self):
        """
        Parse `args.request_items` into `args.headers`, `args.data`,
        `args.params`, and `args.files`.
        """
        try:
            request_items = RequestItems.from_args(
                request_item_args=self.args.request_items,
                request_type=self.args.request_type,
            )
        except ParseError as e:
            if self.args.traceback:
                raise
            self.error(e.args[0])
        else:
            self.args.headers = request_items.headers
            self.args.data = request_items.data
            self.args.files = request_items.files
            self.args.params = request_items.params
            self.args.multipart_data = request_items.multipart_data
        if self.args.files and not self.args.form:
            # `http url @/path/to/file`
            # Outside --form, a file item means "use this file as the
            # request body": exactly one, and it must be unnamed ("").
            request_file = None
            for key, file in self.args.files.items():
                if key != "":
                    self.error(
                        "Invalid file fields (perhaps you meant --form?):"
                        f' {",".join(self.args.files.keys())}'
                    )
                if request_file is not None:
                    self.error("Can't read request from multiple files")
                request_file = file
            fn, fd, ct = request_file
            self.args.files = {}
            self._body_from_file(fd)
            if "Content-Type" not in self.args.headers:
                # Infer Content-Type from the file name when not explicit.
                content_type = get_content_type(fn)
                if content_type:
                    self.args.headers["Content-Type"] = content_type
def _process_output_options(self):
"""Apply defaults to output options, or validate the provided ones.
The default output options are stdout-type-sensitive.
"""
def check_options(value, option):
unknown = set(value) - OUTPUT_OPTIONS
if unknown:
self.error(f'Unknown output options: {option}={",".join(unknown)}')
if self.args.verbose:
self.args.all = True
if self.args.output_options is None:
if self.args.verbose >= 2:
self.args.output_options = "".join(OUTPUT_OPTIONS)
elif self.args.verbose == 1:
self.args.output_options = "".join(BASE_OUTPUT_OPTIONS)
elif self.args.offline:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT_OFFLINE
elif not self.env.stdout_isatty:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED
else:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT
if self.args.output_options_history is None:
self.args.output_options_history = self.args.output_options
check_options(self.args.output_options, "--print")
check_options(self.args.output_options_history, "--history-print")
if self.args.download and OUT_RESP_BODY in self.args.output_options:
# Response body is always downloaded with --download and it goes
# through a different routine, so we remove it.
self.args.output_options = str(
set(self.args.output_options) - set(OUT_RESP_BODY)
)
def _process_pretty_options(self):
if self.args.prettify == PRETTY_STDOUT_TTY_ONLY:
self.args.prettify = PRETTY_MAP["all" if self.env.stdout_isatty else "none"]
elif self.args.prettify and self.env.is_windows and self.args.output_file:
self.error("Only terminal output can be colorized on Windows.")
else:
# noinspection PyTypeChecker
self.args.prettify = PRETTY_MAP[self.args.prettify]
def _process_download_options(self):
    """Validate the --download / --continue / --offline flag combination."""
    args = self.args
    if args.offline:
        # Offline mode never transfers anything, so downloads are moot.
        args.download = False
        args.download_resume = False
        return
    if not args.download and args.download_resume:
        self.error("--continue only works with --download")
    if args.download_resume and not (args.download and args.output_file):
        self.error("--continue requires --output to be specified")
def _process_format_options(self):
    """Fold all --format-options groups, left to right, over the defaults."""
    parsed = PARSED_DEFAULT_FORMAT_OPTIONS
    for options_group in self.args.format_options or []:
        # Later groups override values set by earlier ones.
        parsed = parse_format_options(options_group, defaults=parsed)
    self.args.format_options = parsed
def print_manual(self):
    """Show the installed man page if available, else page the help text."""
    from httpie.output.ui import man_pages

    if man_pages.is_available(self.env.program_name):
        man_pages.display_for(self.env, self.env.program_name)
        return None

    # No man page installed: fall back to the argparse-generated help,
    # rendered through the rich console's pager.
    help_text = self.format_help()
    with self.env.rich_console.pager():
        self.env.rich_console.print(help_text, highlight=False)
def print_usage(self, file):
    """Render the rich usage line, including the offending option if any."""
    from httpie.output.ui import rich_help
    from rich.text import Text

    shown_options = set()
    _, exception, _ = sys.exc_info()
    caused_by_option = (
        isinstance(exception, argparse.ArgumentError)
        and len(exception.args) >= 1
        and isinstance(exception.args[0], argparse.Action)
        and exception.args[0].option_strings
    )
    if caused_by_option:
        # add_usage path is also taken when you pass an invalid option,
        # e.g --style=invalid. If something like that happens, we want
        # to include to action that caused to the invalid usage into
        # the list of actions we are displaying.
        shown_options.add(exception.args[0].option_strings[0])

    usage_text = Text("usage", style="bold")
    usage_text.append(":\n ")
    usage_text.append(rich_help.to_usage(self.spec, whitelist=shown_options))
    self.env.rich_error_console.print(usage_text)
def error(self, message):
    """Prints a usage message incorporating the message to stderr and
    exits.

    Mirrors argparse's ``error()`` contract: never returns — terminates
    the process with exit status 2 after printing usage plus a rich-styled
    error panel to the error console.
    """
    self.print_usage(sys.stderr)
    self.env.rich_error_console.print(
        dedent(
            f"""
            [bold]error[/bold]:
            {message}
            [bold]for more information[/bold]:
            run '{self.prog} --help' or visit https://httpie.io/docs/cli
            """.rstrip()
        )
    )
    self.exit(2)
|
extractor | packtpub | from __future__ import unicode_literals
import json
import re
from ..compat import compat_HTTPError # compat_str,
from ..utils import (
ExtractorError,
clean_html, # remove_end,; urljoin,
str_or_none,
strip_or_none,
unified_timestamp,
)
from .common import InfoExtractor
class PacktPubBaseIE(InfoExtractor):
    """Shared base for the Packt Publishing extractors (common constants)."""

    # _PACKT_BASE = 'https://www.packtpub.com'
    # Static CDN endpoint serving per-product JSON ("<id>/toc", "<id>/summary").
    _STATIC_PRODUCTS_BASE = "https://static.packt-cdn.com/products/"
class PacktPubIE(PacktPubBaseIE):
    """Extractor for a single PacktPub course video (mapt/subscription URLs)."""

    _VALID_URL = r"https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>[^/]+)/(?P<id>[^/]+)(?:/(?P<display_id>[^/?&#]+))?"
    _TESTS = [
        {
            "url": "https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro",
            "md5": "1e74bd6cfd45d7d07666f4684ef58f70",
            "info_dict": {
                "id": "20530",
                "ext": "mp4",
                "title": "Project Intro",
                "thumbnail": r"re:(?i)^https?://.*\.jpg",
                "timestamp": 1490918400,
                "upload_date": "20170331",
            },
        },
        {
            "url": "https://subscription.packtpub.com/video/web_development/9781787122215/20528/20530/project-intro",
            "only_matching": True,
        },
        {
            "url": "https://subscription.packtpub.com/video/programming/9781838988906/p1/video1_1/business-card-project",
            "only_matching": True,
        },
    ]
    _NETRC_MACHINE = "packtpub"
    # Bearer token obtained at login; stays None for anonymous sessions.
    _TOKEN = None

    def _real_initialize(self):
        # Authenticate once per run when .netrc/CLI credentials are present.
        username, password = self._get_login_info()
        if username is None:
            return
        try:
            self._TOKEN = self._download_json(
                "https://services.packtpub.com/auth-v1/users/tokens",
                None,
                "Downloading Authorization Token",
                data=json.dumps(
                    {
                        "username": username,
                        "password": password,
                    }
                ).encode(),
            )["data"]["access"]
        except ExtractorError as e:
            # Surface the API's own error message for client-side failures.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (
                400,
                401,
                404,
            ):
                message = self._parse_json(e.cause.read().decode(), None)["message"]
                raise ExtractorError(message, expected=True)
            raise

    def _real_extract(self, url):
        course_id, chapter_id, video_id, display_id = re.match(
            self._VALID_URL, url
        ).groups()
        headers = {}
        if self._TOKEN:
            headers["Authorization"] = "Bearer " + self._TOKEN
        try:
            # The products endpoint returns the direct media URL in "data".
            video_url = self._download_json(
                "https://services.packtpub.com/products-v1/products/%s/%s/%s"
                % (course_id, chapter_id, video_id),
                video_id,
                "Downloading JSON video",
                headers=headers,
            )["data"]
        except ExtractorError as e:
            # A 400 here indicates the video requires a (valid) subscription.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
                self.raise_login_required("This video is locked")
            raise
        # TODO: find a better way to avoid duplicating course requests
        # metadata = self._download_json(
        #     '%s/products/%s/chapters/%s/sections/%s/metadata'
        #     % (self._MAPT_REST, course_id, chapter_id, video_id),
        #     video_id)['data']
        # title = metadata['pageTitle']
        # course_title = metadata.get('title')
        # if course_title:
        #     title = remove_end(title, ' - %s' % course_title)
        # timestamp = unified_timestamp(metadata.get('publicationDate'))
        # thumbnail = urljoin(self._PACKT_BASE, metadata.get('filepath'))
        return {
            "id": video_id,
            "url": video_url,
            "title": display_id or video_id,  # title,
            # 'thumbnail': thumbnail,
            # 'timestamp': timestamp,
        }
class PacktPubCourseIE(PacktPubBaseIE):
    """Playlist extractor for a whole PacktPub course (all video sections)."""

    _VALID_URL = r"(?P<url>https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<id>\d+))"
    _TESTS = [
        {
            "url": "https://www.packtpub.com/mapt/video/web-development/9781787122215",
            "info_dict": {
                "id": "9781787122215",
                "title": "Learn Nodejs by building 12 projects [Video]",
                "description": "md5:489da8d953f416e51927b60a1c7db0aa",
            },
            "playlist_count": 90,
        },
        {
            "url": "https://subscription.packtpub.com/video/web_development/9781787122215",
            "only_matching": True,
        },
    ]

    @classmethod
    def suitable(cls, url):
        # Defer to PacktPubIE for URLs that point at an individual video;
        # only course-level URLs are handled here.
        return (
            False
            if PacktPubIE.suitable(url)
            else super(PacktPubCourseIE, cls).suitable(url)
        )

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        url, course_id = mobj.group("url", "id")
        # Table of contents: chapters and their sections.
        course = self._download_json(
            self._STATIC_PRODUCTS_BASE + "%s/toc" % course_id, course_id
        )
        # Summary (title/description/cover) is optional — non-fatal fetch.
        metadata = (
            self._download_json(
                self._STATIC_PRODUCTS_BASE + "%s/summary" % course_id,
                course_id,
                fatal=False,
            )
            or {}
        )
        entries = []
        for chapter_num, chapter in enumerate(course["chapters"], 1):
            chapter_id = str_or_none(chapter.get("id"))
            sections = chapter.get("sections")
            if not chapter_id or not isinstance(sections, list):
                continue
            chapter_info = {
                "chapter": chapter.get("title"),
                "chapter_number": chapter_num,
                "chapter_id": chapter_id,
            }
            for section in sections:
                section_id = str_or_none(section.get("id"))
                # Skip non-video sections (quizzes, text, etc.).
                if not section_id or section.get("contentType") != "video":
                    continue
                entry = {
                    "_type": "url_transparent",
                    "url": "/".join([url, chapter_id, section_id]),
                    "title": strip_or_none(section.get("title")),
                    "description": clean_html(section.get("summary")),
                    "thumbnail": metadata.get("coverImage"),
                    "timestamp": unified_timestamp(metadata.get("publicationDate")),
                    "ie_key": PacktPubIE.ie_key(),
                }
                entry.update(chapter_info)
                entries.append(entry)
        return self.playlist_result(
            entries, course_id, metadata.get("title"), clean_html(metadata.get("about"))
        )
|
pyelliptic | arithmetic | """
Arithmetic Expressions
"""
import hashlib
import re
P = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1
A = 0
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
G = (Gx, Gy)
def inv(a, n):
    """Modular inverse of ``a`` mod ``n`` via the extended Euclidean
    algorithm (assumes gcd(a, n) == 1)."""
    coeff, prev_coeff = 1, 0
    remainder, prev_remainder = a % n, n
    while remainder > 1:
        quotient = prev_remainder // remainder
        coeff, prev_coeff = prev_coeff - coeff * quotient, coeff
        remainder, prev_remainder = (
            prev_remainder - remainder * quotient,
            remainder,
        )
    return coeff % n
def get_code_string(base):
    """Return the digit alphabet (as bytes) for the given base.

    Supported bases: 2, 10, 16, 58 (Bitcoin base58 alphabet) and
    256 (raw byte values).

    Raises:
        ValueError: if the base is not one of the supported values.
    """
    if base == 2:
        return b"01"
    if base == 10:
        return b"0123456789"
    if base == 16:
        return b"0123456789abcdef"
    if base == 58:
        return b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    if base == 256:
        # On Python 3 the original try branch (b"".join of str chars) always
        # raised TypeError and fell through to bytes(range(256)); build the
        # result directly instead of relying on that dead code path.
        return bytes(range(256))
    raise ValueError("Invalid base!")
def encode(val, base, minlen=0):
    """Encode non-negative integer ``val`` in ``base``, left-padding the
    result with the zero digit up to ``minlen`` characters."""
    alphabet = get_code_string(base)
    digits = b""
    while val > 0:
        val, remainder = divmod(val, base)
        digits = alphabet[remainder : remainder + 1] + digits
    padding_needed = minlen - len(digits)
    if padding_needed > 0:
        digits = alphabet[0:1] * padding_needed + digits
    return digits
def decode(string, base):
    """Decode ``string`` (digits in ``base``) back into an integer."""
    code_string = get_code_string(base)
    if base == 16:
        # Alphabet uses lowercase hex digits only.
        string = string.lower()
    result = 0
    # Iterating bytes yields ints and str yields chars; bytes.find accepts
    # both, matching the original per-character loop.
    for token in string:
        result = result * base + code_string.find(token)
    return result
def changebase(string, frm, to, minlen=0):
    """Re-encode ``string`` from base ``frm`` to base ``to``."""
    as_integer = decode(string, frm)
    return encode(as_integer, to, minlen)
def base10_add(a, b):
    """Add two secp256k1 points given as (x, y) tuples.

    ``None`` denotes the point at infinity (the group identity).
    Returns the sum as an (x, y) tuple, or ``None`` when the points are
    inverses of each other.
    """
    if a is None:
        return b[0], b[1]
    if b is None:
        return a[0], a[1]
    if a[0] == b[0]:
        if a[1] == b[1]:
            # P + P: use the point-doubling formula.
            # Fix: base10_double() takes a single point tuple; the original
            # passed the coordinates as two positional args, which raised
            # TypeError (masked by a pylint disable comment).
            return base10_double(a)
        # Same x, different y: points are inverses; the sum is infinity.
        return None
    # Chord slope through the two distinct points, mod the field prime P.
    m = ((b[1] - a[1]) * inv(b[0] - a[0], P)) % P
    x = (m * m - a[0] - b[0]) % P
    y = (m * (a[0] - x) - a[1]) % P
    return (x, y)
def base10_double(a):
    """Double a secp256k1 point (``None`` = point at infinity)."""
    if a is None:
        return None
    x1, y1 = a
    # Tangent slope at the point, mod the field prime P.
    slope = ((3 * x1 * x1 + A) * inv(2 * y1, P)) % P
    x = (slope * slope - 2 * x1) % P
    y = (slope * (x1 - x) - y1) % P
    return (x, y)
def base10_multiply(a, n):
    """Scalar-multiply point ``a`` by ``n`` (recursive double-and-add)."""
    # NOTE(review): n == 0 returning the generator G (rather than the point
    # at infinity) looks mathematically suspicious, but callers may depend
    # on it — behavior intentionally preserved.
    if n == 0:
        return G
    if n == 1:
        return a
    n, parity = divmod(n, 2)
    if parity == 0:
        return base10_double(base10_multiply(a, n))
    if parity == 1:
        return base10_add(base10_double(base10_multiply(a, n)), a)
    return None
def hex_to_point(h):
    """Parse an uncompressed public key (hex: 04 || X || Y) into (x, y)."""
    x_hex = h[2:66]
    y_hex = h[66:]
    return (decode(x_hex, 16), decode(y_hex, 16))
def point_to_hex(p):
    """Serialize a point (x, y) as an uncompressed key: 04 || X || Y (hex)."""
    x_part = encode(p[0], 16, 64)
    y_part = encode(p[1], 16, 64)
    return b"04" + x_part + y_part
def multiply(privkey, pubkey):
    """Multiply a public-key point by a private-key scalar (ECDH-style)."""
    point = hex_to_point(pubkey)
    scalar = decode(privkey, 16)
    return point_to_hex(base10_multiply(point, scalar))
def privtopub(privkey):
    """Derive the public key G * privkey from a hex-encoded private key."""
    scalar = decode(privkey, 16)
    return point_to_hex(base10_multiply(G, scalar))
def add(p1, p2):
    """Add two keys: scalar addition mod P for 32-char hex inputs,
    elliptic-curve point addition otherwise."""
    if len(p1) == 32:
        # Fix: the original computed p1 + (p2 % P) because % binds tighter
        # than +; the intended scalar sum is (p1 + p2) mod P.
        return encode((decode(p1, 16) + decode(p2, 16)) % P, 16, 32)
    return point_to_hex(base10_add(hex_to_point(p1), hex_to_point(p2)))
def hash_160(string):
    """Bitcoin HASH160: RIPEMD160(SHA256(data)) of a public key."""
    sha_digest = hashlib.sha256(string).digest()
    # NOTE(review): "ripemd160" availability depends on the OpenSSL build
    # backing hashlib; hashlib.new() raises where it is not compiled in.
    ripemd = hashlib.new("ripemd160")
    ripemd.update(sha_digest)
    return ripemd.digest()
def dbl_sha256(string):
    """SHA-256 applied twice — Bitcoin's checksum hash."""
    first_round = hashlib.sha256(string).digest()
    return hashlib.sha256(first_round).digest()
def bin_to_b58check(inp):
    """Base58Check-encode raw bytes with a 0x00 (mainnet) version prefix.

    Fix: the original mixed str and bytes ("\\x00" + inp, a str regex, and
    "1" * n + bytes), all of which raise TypeError on Python 3 for the
    bytes input that changebase() produces. Everything now stays in the
    bytes domain and the result is bytes, consistent with this module.
    """
    versioned = b"\x00" + inp
    # Base58Check emits one leading '1' per leading zero byte of the payload.
    leading_zero_bytes = len(re.match(b"^\x00*", versioned).group(0))
    checksum = dbl_sha256(versioned)[:4]
    return b"1" * leading_zero_bytes + changebase(versioned + checksum, 256, 58)
def pubkey_to_address(pubkey):
    """Derive a Bitcoin address from a hex-encoded public key."""
    raw_pubkey = changebase(pubkey, 16, 256)
    return bin_to_b58check(hash_160(raw_pubkey))
|
migrator | oncall_api_client | from contextlib import suppress
from time import sleep
from urllib.parse import urljoin
import requests
from migrator.config import ONCALL_API_TOKEN, ONCALL_API_URL
from requests import HTTPError
from requests.adapters import HTTPAdapter, Retry
def api_call(method: str, path: str, **kwargs) -> requests.Response:
    """Perform an authenticated request against the OnCall API.

    Network errors are retried via urllib3's ``Retry``; 429 responses are
    retried after honouring ``Retry-After``; 400 responses are re-raised
    with the JSON error payload appended for easier debugging.

    :raises requests.exceptions.HTTPError: for any non-retried 4xx/5xx.
    """
    url = urljoin(ONCALL_API_URL, path)
    # Retry on network errors
    retries = Retry(total=5, backoff_factor=0.1)
    # Fix: close the Session when done — the original created a new Session
    # per call and leaked its connection pool.
    with requests.Session() as session:
        session.mount("http://", HTTPAdapter(max_retries=retries))
        session.mount("https://", HTTPAdapter(max_retries=retries))
        # Body is fully read here (callers don't pass stream=True), so the
        # response stays usable after the session is closed.
        response = session.request(
            method, url, headers={"Authorization": ONCALL_API_TOKEN}, **kwargs
        )
    try:
        response.raise_for_status()
    except HTTPError as e:
        if e.response.status_code == 429:
            # Robustness: fall back to a 1s cooldown if Retry-After is
            # missing or malformed instead of raising KeyError/ValueError.
            try:
                cooldown_seconds = int(e.response.headers["Retry-After"])
            except (KeyError, ValueError):
                cooldown_seconds = 1
            sleep(cooldown_seconds)
            return api_call(method, path, **kwargs)
        elif e.response.status_code == 400:
            resp_json = None
            with suppress(requests.exceptions.JSONDecodeError):
                resp_json = response.json()
            # if no JSON payload is available, just raise the original exception
            if not resp_json:
                raise
            # this is mostly taken from requests.models.Response.raise_for_status, but with additional JSON payload
            http_error_msg = (
                "%s Client Error: %s for url: %s, response payload JSON: %s"
                % (response.status_code, e.response.reason, response.url, resp_json)
            )
            raise requests.exceptions.HTTPError(
                http_error_msg, response=e.response
            ) from e
        else:
            raise
    return response
def list_all(path: str) -> list[dict]:
    """Fetch every page of a paginated list endpoint and concatenate the
    ``results`` arrays."""
    data = api_call("get", path).json()
    results = list(data["results"])
    # Follow the "next" cursor until the API reports no further pages.
    while data["next"]:
        data = api_call("get", data["next"]).json()
        results.extend(data["results"])
    return results
def create(path: str, payload: dict) -> dict:
    """POST ``payload`` to ``path`` and return the decoded JSON response."""
    return api_call("post", path, json=payload).json()
def delete(path: str) -> None:
    """DELETE the resource at ``path``; a 404 is treated as success."""
    try:
        api_call("delete", path)
    except requests.exceptions.HTTPError as exc:
        # A resource removed manually mid-run should not abort the script.
        if exc.response.status_code != 404:
            raise
def update(path: str, payload: dict) -> dict:
    """PUT ``payload`` to ``path`` and return the decoded JSON response."""
    return api_call("put", path, json=payload).json()
|
httpie | downloads | """
Download mode implementation.
"""
import mimetypes
import os
import re
from mailbox import Message
from time import monotonic
from typing import IO, Optional, Tuple
from urllib.parse import urlsplit
import requests
from .context import Environment
from .models import HTTPResponse, OutputOptions
from .output.streams import RawStream
PARTIAL_CONTENT = 206
class ContentRangeError(ValueError):
    """Raised when a Content-Range response header is missing, malformed,
    or inconsistent with the requested Range."""
    pass
def parse_content_range(content_range: str, resumed_from: int) -> int:
    """
    Parse and validate Content-Range header.

    <https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html>

    :param content_range: the value of a Content-Range response header
                          eg. "bytes 21010-47021/47022"
    :param resumed_from: first byte pos. from the Range request header
    :return: total size of the response body when fully downloaded.
    """
    if content_range is None:
        raise ContentRangeError("Missing Content-Range")

    match = re.match(
        r"^bytes (?P<first_byte_pos>\d+)-(?P<last_byte_pos>\d+)"
        r"/(\*|(?P<instance_length>\d+))$",
        content_range,
    )
    if not match:
        raise ContentRangeError(f"Invalid Content-Range format {content_range!r}")

    groups = match.groupdict()
    first = int(groups["first_byte_pos"])
    last = int(groups["last_byte_pos"])
    total = int(groups["instance_length"]) if groups["instance_length"] else None

    # RFC 2616: a spec whose last-byte-pos precedes first-byte-pos, or whose
    # instance-length is not strictly greater than last-byte-pos, is invalid
    # and must be rejected.
    if first > last or (total is not None and total <= last):
        raise ContentRangeError(f"Invalid Content-Range returned: {content_range!r}")

    # The server must resume exactly where we asked and, when the total is
    # known, the range must run to the end of the resource.
    if first != resumed_from or (total is not None and last + 1 != total):
        raise ContentRangeError(
            f"Unexpected Content-Range returned ({content_range!r})"
            f' for the requested Range ("bytes={resumed_from}-")'
        )

    return last + 1
def filename_from_content_disposition(content_disposition: str) -> Optional[str]:
    """
    Extract and validate filename from a Content-Disposition header.

    :param content_disposition: Content-Disposition value
                                e.g. ``attachment; filename=pkg-0.4.1.tar.gz``
    :return: the filename if present and valid, otherwise `None`
    """
    parsed = Message(f"Content-Disposition: {content_disposition}")
    raw_name = parsed.get_filename()
    if not raw_name:
        return None
    # Basic sanitation: drop any path component and leading dots.
    sanitized = os.path.basename(raw_name).lstrip(".").strip()
    return sanitized or None
def filename_from_url(url: str, content_type: Optional[str]) -> str:
    """Guess a download filename from the URL path, adding an extension
    derived from the Content-Type when the path has none."""
    path = urlsplit(url).path.rstrip("/")
    name = os.path.basename(path) if path else "index"
    if "." in name or not content_type:
        return name
    mime = content_type.split(";")[0]
    if mime == "text/plain":
        # mimetypes returns '.ksh'
        extension = ".txt"
    else:
        extension = mimetypes.guess_extension(mime)
    if extension == ".htm":
        extension = ".html"
    if extension:
        name += extension
    return name
def trim_filename(filename: str, max_len: int) -> str:
    """Shorten ``filename`` to at most ``max_len`` characters, preserving
    the extension when the stem is long enough to absorb the cut."""
    excess = len(filename) - max_len
    if excess <= 0:
        return filename
    stem, extension = os.path.splitext(filename)
    if excess >= len(stem):
        # The stem alone can't absorb the cut; trim from the whole name.
        return filename[:-excess]
    return stem[:-excess] + extension
def get_filename_max_length(directory: str) -> int:
    """Return the filesystem's maximum filename length for ``directory``,
    falling back to the common 255-character limit."""
    if hasattr(os, "pathconf") and "PC_NAME_MAX" in os.pathconf_names:
        return os.pathconf(directory, "PC_NAME_MAX")
    return 255
def trim_filename_if_needed(filename: str, directory=".", extra=0) -> str:
    """Trim ``filename`` only when it exceeds the filesystem limit minus
    ``extra`` reserved characters (e.g. for a uniquifying suffix)."""
    budget = get_filename_max_length(directory) - extra
    if len(filename) <= budget:
        return filename
    return trim_filename(filename, budget)
def get_unique_filename(filename: str, exists=os.path.exists) -> str:
    """Find a non-existing filename by appending ``-1``, ``-2``, ... while
    keeping the candidate within the filesystem's length limit."""
    counter = 0
    while True:
        suffix = f"-{counter}" if counter else ""
        candidate = trim_filename_if_needed(filename, extra=len(suffix)) + suffix
        if not exists(candidate):
            return candidate
        counter += 1
class Downloader:
    """Implements ``--download`` mode: chooses the output file, validates
    resumed (Range) responses, and wires a RawStream that reports progress
    through a DownloadStatus instance."""

    def __init__(self, env: Environment, output_file: IO = None, resume: bool = False):
        """
        :param resume: Should the download resume if partial download
                       already exists.
        :param output_file: The file to store response body in. If not
                            provided, it will be guessed from the response.
        """
        self.finished = False
        # Tracks byte counts, timing, and the progress display.
        self.status = DownloadStatus(env=env)
        self._output_file = output_file
        self._resume = resume
        # Byte offset the server was asked to resume from (0 = full download).
        self._resumed_from = 0

    def pre_request(self, request_headers: dict):
        """Called just before the HTTP request is sent.

        Might alter `request_headers`.
        """
        # Ask the server not to encode the content so that we can resume, etc.
        request_headers["Accept-Encoding"] = "identity"
        if self._resume:
            bytes_have = os.path.getsize(self._output_file.name)
            if bytes_have:
                # Set ``Range`` header to resume the download
                # TODO: Use "If-Range: mtime" to make sure it's fresh?
                request_headers["Range"] = f"bytes={bytes_have}-"
                self._resumed_from = bytes_have

    def start(
        self, initial_url: str, final_response: requests.Response
    ) -> Tuple[RawStream, IO]:
        """
        Initiate and return a stream for `response` body with progress
        callback attached. Can be called only once.

        :param initial_url: The original requested URL
        :param final_response: Initiated response object with headers already fetched
        :return: RawStream, output_file
        """
        assert not self.status.time_started

        # FIXME: some servers still might sent Content-Encoding: gzip
        # <https://github.com/httpie/cli/issues/423>
        try:
            total_size = int(final_response.headers["Content-Length"])
        except (KeyError, ValueError, TypeError):
            # Unknown total size (missing/invalid Content-Length header).
            total_size = None

        if not self._output_file:
            self._output_file = self._get_output_file_from_response(
                initial_url=initial_url,
                final_response=final_response,
            )
        else:
            # `--output, -o` provided
            if self._resume and final_response.status_code == PARTIAL_CONTENT:
                # Server honoured the Range request; validate its reply and
                # derive the true total size from Content-Range.
                total_size = parse_content_range(
                    final_response.headers.get("Content-Range"), self._resumed_from
                )
            else:
                # Full (non-partial) response: start over from byte zero.
                self._resumed_from = 0
                try:
                    self._output_file.seek(0)
                    self._output_file.truncate()
                except OSError:
                    pass  # stdout

        output_options = OutputOptions.from_message(
            final_response, headers=False, body=True
        )
        stream = RawStream(
            msg=HTTPResponse(final_response),
            output_options=output_options,
            # Every downloaded chunk feeds the progress display.
            on_body_chunk_downloaded=self.chunk_downloaded,
        )

        self.status.started(
            output_file=self._output_file,
            resumed_from=self._resumed_from,
            total_size=total_size,
        )

        return stream, self._output_file

    def finish(self):
        """Mark the download as complete (may be called only once)."""
        assert not self.finished
        self.finished = True
        self.status.finished()

    def failed(self):
        """Tear down the progress display after a failed download."""
        self.status.terminate()

    @property
    def interrupted(self) -> bool:
        # True when we finished but received fewer/more bytes than the
        # expected total (only meaningful when the total size was known).
        return (
            self.finished
            and self.status.total_size
            and self.status.total_size != self.status.downloaded
        )

    def chunk_downloaded(self, chunk: bytes):
        """
        A download progress callback.

        :param chunk: A chunk of response body data that has just
                      been downloaded and written to the output.
        """
        self.status.chunk_downloaded(len(chunk))

    @staticmethod
    def _get_output_file_from_response(
        initial_url: str,
        final_response: requests.Response,
    ) -> IO:
        # Output file not specified. Pick a name that doesn't exist yet.
        filename = None
        if "Content-Disposition" in final_response.headers:
            filename = filename_from_content_disposition(
                final_response.headers["Content-Disposition"]
            )
        if not filename:
            filename = filename_from_url(
                url=initial_url,
                content_type=final_response.headers.get("Content-Type"),
            )
        unique_filename = get_unique_filename(filename)
        # Unbuffered binary append so partial data survives interruption.
        return open(unique_filename, buffering=0, mode="a+b")
class DownloadStatus:
    """Holds details about the download status."""

    def __init__(self, env):
        self.env = env
        self.downloaded = 0       # bytes written so far (includes resumed part)
        self.total_size = None    # expected total, None when unknown
        self.resumed_from = 0     # byte offset the download resumed from
        self.time_started = None
        self.time_finished = None

    def started(self, output_file, resumed_from=0, total_size=None):
        """Record start time/sizes and bring up the progress display."""
        assert self.time_started is None
        self.total_size = total_size
        self.downloaded = self.resumed_from = resumed_from
        self.time_started = monotonic()
        self.start_display(output_file=output_file)

    def start_display(self, output_file):
        """Choose and start the appropriate rich display for this run."""
        # Imported lazily to avoid pulling in rich at module import time.
        from httpie.output.ui.rich_progress import (
            DummyDisplay,
            ProgressDisplay,
            StatusDisplay,
        )

        message = f"Downloading to {output_file.name}"
        if self.env.show_displays:
            if self.total_size is None:
                # Rich does not support progress bars without a total
                # size given. Instead we use status objects.
                self.display = StatusDisplay(self.env)
            else:
                self.display = ProgressDisplay(self.env)
        else:
            self.display = DummyDisplay(self.env)

        self.display.start(
            total=self.total_size, at=self.downloaded, description=message
        )

    def chunk_downloaded(self, size):
        """Advance the byte counter and the display by ``size`` bytes."""
        assert self.time_finished is None
        self.downloaded += size
        self.display.update(size)

    @property
    def has_finished(self):
        # True once finished() has recorded an end time.
        return self.time_finished is not None

    @property
    def time_spent(self):
        # Elapsed seconds, or None when the download never started/finished.
        if self.time_started is not None and self.time_finished is not None:
            return self.time_finished - self.time_started
        else:
            return None

    def finished(self):
        """Record the end time and stop the display (normal completion)."""
        assert self.time_started is not None
        assert self.time_finished is None
        self.time_finished = monotonic()
        if hasattr(self, "display"):
            self.display.stop(self.time_spent)

    def terminate(self):
        """Stop the display without asserting state (failure path)."""
        if hasattr(self, "display"):
            self.display.stop(self.time_spent)
|
interaction-payloads | interactive_messages | """
[Documentation](https://api.slack.com/legacy/interactive-messages#receiving-action-invocations)
"""
import enum
import typing
from apps.slack.types.common import BaseEvent, Channel, PayloadType
class InteractiveMessageActionType(enum.StrEnum):
    """The kinds of legacy interactive-message components Slack can send."""

    SELECT = "select"
    BUTTON = "button"
class InteractiveMessageAction(typing.TypedDict):
    """
    A single action the user took on a legacy interactive message.

    [Documentation](https://api.slack.com/legacy/interactive-messages#checking-action-type)
    """

    name: str
    type: InteractiveMessageActionType
class OriginalMessage(typing.TypedDict):
    """
    The message the interactive component was attached to, as it existed
    when the user acted on it.

    [Documentation](https://api.slack.com/legacy/interactive-messages#checking-action-type)
    """

    text: str
    username: str
    bot_id: str
    # Attachment payloads; the element schema is not pinned down here.
    attachments: typing.List
    type: typing.Literal["message"]
    subtype: str
    # Message timestamp identifier (string, per Slack convention).
    ts: str
class InteractiveMessagesPayload(BaseEvent):
    """
    Payload Slack sends when a user interacts with a legacy interactive message.

    [Documentation](https://api.slack.com/legacy/interactive-messages#receiving-action-invocations)
    """

    type: typing.Literal[PayloadType.INTERACTIVE_MESSAGE]
    """
    Helps identify which type of interactive component sent the payload.

    An interactive element in a block will have a type of `block_actions`, whereas an interactive element in a
    [message attachment](https://api.slack.com/reference/messaging/attachments) will have a type of
    `interactive_message`.
    """

    trigger_id: str
    """
    A short-lived ID that can be [used to open modals](https://api.slack.com/interactivity/handling#modal_responses).

    Triggers expire in three seconds. Use them before you lose them. You'll receive a `trigger_expired` error when
    using a method with an expired `trigger_id`.

    Triggers may only be used once. You may perform just one operation with a `trigger_id`. Subsequent attempts are presented with a `trigger_exchanged` error.

    For more info see [here](https://api.slack.com/interactivity/handling#modal_responses).
    """

    actions: typing.List[InteractiveMessageAction]

    token: str
    """
    Represents a deprecated verification token feature.

    You should validate the request payload, however, and the best way to do so is to
    [use the signing secret provided to your app](https://api.slack.com/reference/interaction-payloads/block-actions#:~:text=use%20the%20signing%20secret%20provided%20to%20your%20app).
    """  # noqa: E501

    channel: Channel

    original_message: OriginalMessage
|
command | drive | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Copyright (C) 2009 Thomas Vander Stichele
# This file is part of whipper.
#
# whipper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# whipper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with whipper. If not, see <http://www.gnu.org/licenses/>.
import logging
from whipper.command.basecommand import BaseCommand
from whipper.common import config, drive
from whipper.extern.task import task
from whipper.program import cdparanoia
logger = logging.getLogger(__name__)
class Analyze(BaseCommand):
    summary = "analyze caching behaviour of drive"
    description = (
        """Determine whether cdparanoia can defeat the audio cache of the drive.""" # noqa: E501
    )
    device_option = True

    def do(self):
        """Run cdparanoia's cache analysis and persist the outcome."""
        runner = task.SyncRunner()
        analyze_task = cdparanoia.AnalyzeTask(self.options.device)
        runner.run(analyze_task)

        if analyze_task.defeatsCache is None:
            # Analysis produced no verdict — most likely an empty drive.
            logger.critical("cannot analyze the drive: is there a CD in it?")
            return
        if analyze_task.defeatsCache:
            logger.info("cdparanoia can defeat the audio cache on this drive")
        else:
            logger.info("cdparanoia cannot defeat the audio cache on this drive")

        info = drive.getDeviceInfo(self.options.device)
        if not info:
            logger.error(
                "drive caching behaviour not saved: could not get device info"
            )
            return

        logger.info("adding drive cache behaviour to configuration file")
        vendor, model, release = info
        config.Config().setDefeatsCache(
            vendor, model, release, analyze_task.defeatsCache
        )
class List(BaseCommand):
    summary = "list drives"
    description = """list available CD-DA drives"""

    def do(self):
        """Print every detected drive with its configured offset and
        cache-defeat status."""
        device_paths = drive.getAllDevicePaths()
        self.config = config.Config()

        if not device_paths:
            logger.critical(
                "no drives found. Create /dev/cdrom "
                "if you have a CD drive, or install "
                "pycdio for better detection"
            )
            return

        try:
            import cdio as _  # noqa: F401 (TODO: fix it in a separate PR?)
        except ImportError:
            logger.error("install pycdio for vendor/model/release detection")
            return

        for device_path in device_paths:
            vendor, model, release = drive.getDeviceInfo(device_path)
            print(
                "drive: %s, vendor: %s, model: %s, release: %s"
                % (device_path, vendor, model, release)
            )

            try:
                offset = self.config.getReadOffset(vendor, model, release)
                print(" Configured read offset: %d" % offset)
            except KeyError:
                # Note spaces at the beginning for pretty terminal output
                logger.warning("no read offset found. Run 'whipper offset find'")

            try:
                defeats = self.config.getDefeatsCache(vendor, model, release)
                print(" Can defeat audio cache: %s" % defeats)
            except KeyError:
                logger.warning(
                    "unknown whether audio cache can be "
                    "defeated. Run 'whipper drive analyze'"
                )
class Drive(BaseCommand):
    """Parent command grouping the drive-related subcommands."""

    summary = "handle drives"
    description = """Drive utilities."""
    # Maps CLI subcommand names to their implementing command classes.
    subcommands = {"analyze": Analyze, "list": List}
|
qltk | appwindow | # Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
class AppWindow:
    """Interface shared by both main application windows (QL and EF)."""

    def open_file(self, filename):
        """Try to open ``filename`` and play it.

        The file can be missing or a directory..

        Args:
            filename (fsnative)
        Returns:
            bool: If opening worked (this base implementation never does)
        """
        return False

    def get_is_persistent(self):
        """Whether closing this window should shut down the application.

        Returns:
            bool
        """
        return True

    def set_as_osx_window(self, osx_app):
        """Configure the passed-in macOS application instance.

        FIXME: split this into getters..

        Args:
            osx_app (GtkosxApplication.Application)
        """
|
api | event | import json
import urllib
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from django.db.models.query import Prefetch
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import OpenApiParameter
from posthog.api.documentation import PropertiesSerializer, extend_schema
from posthog.api.routing import StructuredViewSetMixin
from posthog.client import query_with_columns, sync_execute
from posthog.hogql.constants import DEFAULT_RETURNED_ROWS, MAX_SELECT_RETURNED_ROWS
from posthog.models import Element, Filter, Person
from posthog.models.event.query_event_list import query_events_list
from posthog.models.event.sql import GET_CUSTOM_EVENTS, SELECT_ONE_EVENT_SQL
from posthog.models.event.util import ClickhouseEventSerializer
from posthog.models.person.util import get_persons_by_distinct_ids
from posthog.models.team import Team
from posthog.models.utils import UUIDT
from posthog.permissions import (
ProjectMembershipNecessaryPermissions,
TeamMemberAccessPermission,
)
from posthog.queries.property_values import get_property_values_for_key
from posthog.rate_limit import (
ClickHouseBurstRateThrottle,
ClickHouseSustainedRateThrottle,
)
from posthog.utils import convert_property_value, flatten
from rest_framework import mixins, request, response, serializers, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound
from rest_framework.permissions import IsAuthenticated
from rest_framework.settings import api_settings
from rest_framework_csv import renderers as csvrenderers
from sentry_sdk import capture_exception
QUERY_DEFAULT_EXPORT_LIMIT = 3_500
class ElementSerializer(serializers.ModelSerializer):
    """Serializes DOM ``Element`` rows along with an associated event name."""

    # "event" is declared explicitly (not a model field) — presumably
    # supplied by the queryset/annotation; confirm against callers.
    event = serializers.CharField()

    class Meta:
        model = Element
        fields = [
            "event",
            "text",
            "tag_name",
            "attr_class",
            "href",
            "attr_id",
            "nth_child",
            "nth_of_type",
            "attributes",
            "order",
        ]
class EventViewSet(
StructuredViewSetMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet,
):
renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (
csvrenderers.PaginatedCSVRenderer,
)
serializer_class = ClickhouseEventSerializer
permission_classes = [
IsAuthenticated,
ProjectMembershipNecessaryPermissions,
TeamMemberAccessPermission,
]
throttle_classes = [ClickHouseBurstRateThrottle, ClickHouseSustainedRateThrottle]
def _build_next_url(
    self,
    request: request.Request,
    last_event_timestamp: datetime,
    order_by: List[str],
) -> str:
    """Build the absolute URL for the next page of events, cursoring on
    the timestamp of the last event returned."""
    params = request.GET.dict()
    is_descending = "-timestamp" in order_by
    cursor = last_event_timestamp.astimezone().isoformat()
    # Descending order pages backwards in time; ascending pages forwards.
    params["before" if is_descending else "after"] = cursor
    query = urllib.parse.urlencode(params)
    return request.build_absolute_uri(f"{request.path}?{query}")
@extend_schema(
parameters=[
OpenApiParameter(
"event",
OpenApiTypes.STR,
description="Filter list by event. For example `user sign up` or `$pageview`.",
),
OpenApiParameter(
"select",
OpenApiTypes.STR,
description="(Experimental) JSON-serialized array of HogQL expressions to return",
many=True,
),
OpenApiParameter(
"where",
OpenApiTypes.STR,
description="(Experimental) JSON-serialized array of HogQL expressions that must pass",
many=True,
),
OpenApiParameter(
"person_id", OpenApiTypes.INT, description="Filter list by person id."
),
OpenApiParameter(
"distinct_id",
OpenApiTypes.INT,
description="Filter list by distinct id.",
),
OpenApiParameter(
"before",
OpenApiTypes.DATETIME,
description="Only return events with a timestamp before this time.",
),
OpenApiParameter(
"after",
OpenApiTypes.DATETIME,
description="Only return events with a timestamp after this time.",
),
OpenApiParameter(
"limit",
OpenApiTypes.INT,
description="The maximum number of results to return",
),
PropertiesSerializer(required=False),
]
)
def list(
self, request: request.Request, *args: Any, **kwargs: Any
) -> response.Response:
try:
is_csv_request = self.request.accepted_renderer.format == "csv"
if self.request.GET.get("limit", None):
limit = int(self.request.GET.get("limit")) # type: ignore
elif is_csv_request:
limit = QUERY_DEFAULT_EXPORT_LIMIT
else:
limit = DEFAULT_RETURNED_ROWS
limit = min(limit, MAX_SELECT_RETURNED_ROWS)
try:
offset = int(request.GET["offset"]) if request.GET.get("offset") else 0
except ValueError:
offset = 0
team = self.team
filter = Filter(request=request, team=self.team)
order_by: List[str] = (
list(json.loads(request.GET["orderBy"]))
if request.GET.get("orderBy")
else ["-timestamp"]
)
query_result = query_events_list(
filter=filter,
team=team,
limit=limit,
offset=offset,
request_get_query_dict=request.GET.dict(),
order_by=order_by,
action_id=request.GET.get("action_id"),
)
# Retry the query without the 1 day optimization
if len(query_result) < limit and not request.GET.get("after"):
query_result = query_events_list(
unbounded_date_from=True, # only this changed from the query above
filter=filter,
team=team,
limit=limit,
offset=offset,
request_get_query_dict=request.GET.dict(),
order_by=order_by,
action_id=request.GET.get("action_id"),
)
result = ClickhouseEventSerializer(
query_result[0:limit],
many=True,
context={"people": self._get_people(query_result, team)},
).data
next_url: Optional[str] = None
if not is_csv_request and len(query_result) > limit:
next_url = self._build_next_url(
request, query_result[limit - 1]["timestamp"], order_by
)
return response.Response({"next": next_url, "results": result})
except Exception as ex:
capture_exception(ex)
raise ex
def _get_people(self, query_result: List[Dict], team: Team) -> Dict[str, Any]:
distinct_ids = [event["distinct_id"] for event in query_result]
persons = get_persons_by_distinct_ids(team.pk, distinct_ids)
persons = persons.prefetch_related(
Prefetch("persondistinctid_set", to_attr="distinct_ids_cache")
)
distinct_to_person: Dict[str, Person] = {}
for person in persons:
for distinct_id in person.distinct_ids:
distinct_to_person[distinct_id] = person
return distinct_to_person
def retrieve(
self,
request: request.Request,
pk: Optional[Union[int, str]] = None,
*args: Any,
**kwargs: Any,
) -> response.Response:
if not isinstance(pk, str) or not UUIDT.is_valid_uuid(pk):
return response.Response(
{
"detail": "Invalid UUID",
"code": "invalid",
"type": "validation_error",
},
status=400,
)
query_result = query_with_columns(
SELECT_ONE_EVENT_SQL,
{"team_id": self.team.pk, "event_id": pk.replace("-", "")},
team_id=self.team.pk,
)
if len(query_result) == 0:
raise NotFound(detail=f"No events exist for event UUID {pk}")
query_context = {}
if request.query_params.get("include_person", False):
query_context["people"] = self._get_people(query_result, self.team)
res = ClickhouseEventSerializer(
query_result[0], many=False, context=query_context
).data
return response.Response(res)
@action(methods=["GET"], detail=False)
def values(self, request: request.Request, **kwargs) -> response.Response:
team = self.team
key = request.GET.get("key")
event_names = request.GET.getlist("event_name", None)
flattened = []
if key == "custom_event":
events = sync_execute(
GET_CUSTOM_EVENTS, {"team_id": team.pk}, team_id=team.pk
)
return response.Response([{"name": event[0]} for event in events])
elif key:
result = get_property_values_for_key(
key, team, event_names, value=request.GET.get("value")
)
for value in result:
try:
# Try loading as json for dicts or arrays
flattened.append(json.loads(value[0]))
except json.decoder.JSONDecodeError:
flattened.append(value[0])
return response.Response(
[{"name": convert_property_value(value)} for value in flatten(flattened)]
)
class LegacyEventViewSet(EventViewSet):
    # NOTE(review): flag presumably consumed by StructuredViewSetMixin to
    # support pre-project-scoped API routes — confirm against the mixin.
    legacy_team_compatibility = True
|
ConcurrentLogHandler | cloghandler | # Copyright 2013 Lowell Alleman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" cloghandler.py: A smart replacement for the standard RotatingFileHandler
ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in
replacement for the python standard log handler 'RotateFileHandler', the primary
difference being that this handler will continue to write to the same file if
the file cannot be rotated for some reason, whereas the RotatingFileHandler will
strictly adhere to the maximum file size. Unfortunately, if you are using the
RotatingFileHandler on Windows, you will find that once an attempted rotation
fails, all subsequent log messages are dropped. The other major advantage of
this module is that multiple processes can safely write to a single log file.
To put it another way: This module's top priority is preserving your log
records, whereas the standard library attempts to limit disk usage, which can
potentially drop log messages. If you are trying to determine which module to
use, there are number of considerations: What is most important: strict disk
space usage or preservation of log messages? What OSes are you supporting? Can
you afford to have processes blocked by file locks?
Concurrent access is handled by using file locks, which should ensure that log
messages are not dropped or clobbered. This means that a file lock is acquired
and released for every log message that is written to disk. (On Windows, you may
also run into a temporary situation where the log file must be opened and closed
for each log message.) This can have potentially performance implications. In my
testing, performance was more than adequate, but if you need a high-volume or
low-latency solution, I suggest you look elsewhere.
This module currently only support the 'nt' and 'posix' platforms due to the
usage of the portalocker module. I do not have access to any other platforms
for testing, patches are welcome.
See the README file for an example usage of this module.
This module supports Python 2.6 and later.
"""
__version__ = "0.9.1"
__revision__ = (
"lowell87@gmail.com-20130711022321-doutxl7zyzuwss5a 2013-07-10 22:23:21 -0400 [0]"
)
__author__ = "Lowell Alleman"
__all__ = [
"ConcurrentRotatingHandler",
]
import os
import sys
from logging import Handler, LogRecord
from logging.handlers import BaseRotatingHandler
from random import randint
try:
import codecs
except ImportError:
codecs = None
# Question/TODO: Should we have a fallback mode if we can't load portalocker /
# we should still be better off than with the standard RotattingFileHandler
# class, right? We do some rename checking... that should prevent some file
# clobbering that the builtin class allows.
# sibling module than handles all the ugly platform-specific details of file locking
from portalocker import LOCK_EX, LOCK_NB, LockException, lock, unlock
# Workaround for handleError() in Python 2.7+ where record is written to stderr
class NullLogRecord(LogRecord):
    """Do-nothing LogRecord stand-in passed to handleError().

    Skips LogRecord.__init__ entirely and answers None for any missing
    attribute so that error reporting itself cannot fail.
    """

    def __init__(self):
        pass

    def __getattr__(self, attr):
        # Only invoked for attributes not found normally; report them as unset.
        return None
class ConcurrentRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file to the
    next when the current file reaches a certain size. Multiple processes can
    write to the log file concurrently, but this may mean that the file will
    exceed the given size.
    """

    def __init__(
        self,
        filename,
        mode="a",
        maxBytes=0,
        backupCount=0,
        encoding=None,
        debug=True,
        delay=0,
    ):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.

        On Windows, it is not possible to rename a file that is currently opened
        by another process. This means that it is not possible to rotate the
        log files if multiple processes are using the same log file. In this
        case, the current log file will continue to grow until the rotation can
        be completed successfully. In order for rotation to be possible, all of
        the other processes need to close the file first. A mechanism, called
        "degraded" mode, has been created for this scenario. In degraded mode,
        the log file is closed after each log message is written. So once all
        processes have entered degraded mode, the next rotation attempt should
        be successful and then normal logging can be resumed. Using the 'delay'
        parameter may help reduce contention in some usage patterns.

        This log handler assumes that all concurrent processes logging to a
        single file are using only this class, and that the exact same
        parameters are provided to each instance of this class. If, for
        example, two different processes are using this class, but with
        different values for 'maxBytes' or 'backupCount', then odd behavior is
        expected. The same is true if this class is used by one application, but
        the RotatingFileHandler is used by another.
        """
        # Absolute file name handling done by FileHandler since Python 2.5
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.delay = delay
        # True while rotation is currently impossible (degraded mode).
        self._rotateFailed = False
        self.maxBytes = maxBytes
        self.backupCount = backupCount
        self._open_lockfile()
        # For debug mode, swap out the "_degrade()" method with a more verbose one.
        if debug:
            self._degrade = self._degrade_debug

    def _open_lockfile(self):
        # Use 'file.lock' and not 'file.log.lock' (Only handles the normal "*.log" case.)
        if self.baseFilename.endswith(".log"):
            lock_file = self.baseFilename[:-4]
        else:
            lock_file = self.baseFilename
        lock_file += ".lock"
        # A separate lock file is used so the log file itself can be
        # closed/renamed during rotation while the lock stays held.
        self.stream_lock = open(lock_file, "w")

    def _open(self, mode=None):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.

        Note: Copied from stdlib. Added option to override 'mode'
        """
        if mode is None:
            mode = self.mode
        if self.encoding is None:
            stream = open(self.baseFilename, mode)
        else:
            stream = codecs.open(self.baseFilename, mode, self.encoding)
        return stream

    def _close(self):
        """Close file stream. Unlike close(), we don't tear anything down, we
        expect the log to be re-opened after rotation."""
        if self.stream:
            try:
                if not self.stream.closed:
                    # Flushing probably isn't technically necessary, but it feels right
                    self.stream.flush()
                    self.stream.close()
            finally:
                self.stream = None

    def acquire(self):
        """Acquire thread and file locks. Re-opening log for 'degraded' mode."""
        # handle thread lock
        Handler.acquire(self)
        # Issue a file lock. (This is inefficient for multiple active threads
        # within a single process. But if you're worried about high-performance,
        # you probably aren't using this log handler.)
        if self.stream_lock:
            # If stream_lock=None, then assume close() was called or something
            # else weird and ignore all file-level locks.
            if self.stream_lock.closed:
                # Daemonization can close all open file descriptors, see
                # https://bugzilla.redhat.com/show_bug.cgi?id=952929
                # Try opening the lock file again. Should we warn() here?!?
                try:
                    self._open_lockfile()
                except Exception:
                    self.handleError(NullLogRecord())
                    # Don't try to open the stream lock again
                    self.stream_lock = None
                    return
            lock(self.stream_lock, LOCK_EX)
        # Stream will be opened as part of FileHandler.emit()

    def release(self):
        """Release file and thread locks. If in 'degraded' mode, close the
        stream to reduce contention until the log files can be rotated."""
        try:
            if self._rotateFailed:
                # Degraded mode: close after every record so another process
                # can eventually complete the rotation.
                self._close()
        except Exception:
            self.handleError(NullLogRecord())
        finally:
            try:
                if self.stream_lock and not self.stream_lock.closed:
                    unlock(self.stream_lock)
            except Exception:
                self.handleError(NullLogRecord())
            finally:
                # release thread lock
                Handler.release(self)

    def close(self):
        """
        Close log stream and stream_lock."""
        try:
            self._close()
            if not self.stream_lock.closed:
                self.stream_lock.close()
        finally:
            self.stream_lock = None
            Handler.close(self)

    def _degrade(self, degrade, msg, *args):
        """Set degrade mode or not. Ignore msg."""
        self._rotateFailed = degrade
        del msg, args  # avoid pychecker warnings

    def _degrade_debug(self, degrade, msg, *args):
        """A more colorful version of _degrade(). (This is enabled by passing
        "debug=True" at initialization).
        """
        if degrade:
            if not self._rotateFailed:
                sys.stderr.write(
                    "Degrade mode - ENTERING - (pid=%d) %s\n"
                    % (os.getpid(), msg % args)
                )
                self._rotateFailed = True
        else:
            if self._rotateFailed:
                sys.stderr.write(
                    "Degrade mode - EXITING - (pid=%d) %s\n"
                    % (os.getpid(), msg % args)
                )
                self._rotateFailed = False

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        self._close()
        if self.backupCount <= 0:
            # Don't keep any backups, just overwrite the existing backup file
            # Locking doesn't much matter here; since we are overwriting it anyway
            self.stream = self._open("w")
            return
        try:
            # Determine if we can rename the log file or not. Windows refuses to
            # rename an open file, Unix is inode base so it doesn't care.
            # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable
            tmpname = None
            while not tmpname or os.path.exists(tmpname):
                tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0, 99999999))
            try:
                # Do a rename test to determine if we can successfully rename the log file
                os.rename(self.baseFilename, tmpname)
            except (IOError, OSError):
                exc_value = sys.exc_info()[1]
                self._degrade(
                    True, "rename failed. File in use? " "exception=%s", exc_value
                )
                return
            # Q: Is there some way to protect this code from a KeyboardInterrupt?
            # This isn't necessarily a data loss issue, but it certainly does
            # break the rotation process during stress testing.
            # There is currently no mechanism in place to handle the situation
            # where one of these log files cannot be renamed. (Example, user
            # opens "logfile.3" in notepad); we could test rename each file, but
            # nobody's complained about this being an issue; so the additional
            # code complexity isn't warranted.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    # print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(tmpname, dfn)
            # print "%s -> %s" % (self.baseFilename, dfn)
            self._degrade(False, "Rotation completed")
        finally:
            # Re-open the output stream, but if "delay" is enabled then wait
            # until the next emit() call. This could reduce rename contention in
            # some usage patterns.
            if not self.delay:
                self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        For those that are keeping track. This differs from the standard
        library's RotatingLogHandler class. Because there is no promise to keep
        the file size under maxBytes we ignore the length of the current record.
        """
        del record  # avoid pychecker warnings
        # If stream is not yet open, skip rollover check. (Check will occur on
        # next message, after emit() calls _open())
        if self.stream is None:
            return False
        if self._shouldRollover():
            # If some other process already did the rollover (which is possible
            # on Unix) the file our stream may now be named "log.1", thus
            # triggering another rollover. Avoid this by closing and opening
            # "log" again.
            self._close()
            self.stream = self._open()
            return self._shouldRollover()
        return False

    def _shouldRollover(self):
        if self.maxBytes > 0:  # are we rolling over?
            self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
            if self.stream.tell() >= self.maxBytes:
                return True
            else:
                self._degrade(False, "Rotation done or not needed at this time")
        return False
# Publish this class to the "logging.handlers" module so that it can be used
# from a logging config file via logging.config.fileConfig().
import logging.handlers

logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler
|
schema | session_replay_events | from typing import Dict, List
from posthog.hogql.database.models import (
DatabaseField,
DateTimeDatabaseField,
FieldOrTable,
FieldTraverser,
IntegerDatabaseField,
LazyJoin,
LazyTable,
StringDatabaseField,
Table,
)
from posthog.hogql.database.schema.person_distinct_ids import (
PersonDistinctIdsTable,
join_with_person_distinct_ids_table,
)
# Columns shared by the raw and aggregated session replay tables, plus the
# lazy join to person_distinct_ids (and field traversers through it).
SESSION_REPLAY_EVENTS_COMMON_FIELDS: Dict[str, FieldOrTable] = {
    "session_id": StringDatabaseField(name="session_id"),
    "team_id": IntegerDatabaseField(name="team_id"),
    "distinct_id": StringDatabaseField(name="distinct_id"),
    "min_first_timestamp": DateTimeDatabaseField(name="min_first_timestamp"),
    "max_last_timestamp": DateTimeDatabaseField(name="max_last_timestamp"),
    "first_url": DatabaseField(name="first_url"),
    "click_count": IntegerDatabaseField(name="click_count"),
    "keypress_count": IntegerDatabaseField(name="keypress_count"),
    "mouse_activity_count": IntegerDatabaseField(name="mouse_activity_count"),
    "active_milliseconds": IntegerDatabaseField(name="active_milliseconds"),
    "console_log_count": IntegerDatabaseField(name="console_log_count"),
    "console_warn_count": IntegerDatabaseField(name="console_warn_count"),
    "console_error_count": IntegerDatabaseField(name="console_error_count"),
    "size": IntegerDatabaseField(name="size"),
    "event_count": IntegerDatabaseField(name="event_count"),
    "message_count": IntegerDatabaseField(name="message_count"),
    "pdi": LazyJoin(
        from_field="distinct_id",
        join_table=PersonDistinctIdsTable(),
        join_function=join_with_person_distinct_ids_table,
    ),
    "person": FieldTraverser(chain=["pdi", "person"]),
    "person_id": FieldTraverser(chain=["pdi", "person_id"]),
}
class RawSessionReplayEventsTable(Table):
    """Un-aggregated view over the ClickHouse session_replay_events table."""

    fields: Dict[str, FieldOrTable] = {
        **SESSION_REPLAY_EVENTS_COMMON_FIELDS,
        "min_first_timestamp": DateTimeDatabaseField(name="min_first_timestamp"),
        "max_last_timestamp": DateTimeDatabaseField(name="max_last_timestamp"),
        "first_url": DatabaseField(name="first_url"),
    }

    def avoid_asterisk_fields(self) -> List[str]:
        # first_url is read via argMinMerge in the aggregated select below,
        # so it is excluded from `SELECT *` expansion here.
        return ["first_url"]

    def to_printed_clickhouse(self, context):
        return "session_replay_events"

    def to_printed_hogql(self):
        return "raw_session_replay_events"
def select_from_session_replay_events_table(requested_fields: Dict[str, List[str]]):
    """Build the aggregating SELECT over raw_session_replay_events.

    Requested fields that have a known aggregate expression are aliased to
    that expression; everything else is selected verbatim and added to the
    GROUP BY clause.
    """
    from posthog.hogql import ast

    table_name = "raw_session_replay_events"

    def _col(column: str):
        # Field reference qualified with the raw table name.
        return ast.Field(chain=[table_name, column])

    def _sum_of(column: str):
        return ast.Call(name="sum", args=[_col(column)])

    aggregate_fields = {
        "start_time": ast.Call(name="min", args=[_col("min_first_timestamp")]),
        "end_time": ast.Call(name="max", args=[_col("max_last_timestamp")]),
        "first_url": ast.Call(name="argMinMerge", args=[_col("first_url")]),
        "click_count": _sum_of("click_count"),
        "keypress_count": _sum_of("keypress_count"),
        "mouse_activity_count": _sum_of("mouse_activity_count"),
        "active_milliseconds": _sum_of("active_milliseconds"),
        "console_log_count": _sum_of("console_log_count"),
        "console_warn_count": _sum_of("console_warn_count"),
        "console_error_count": _sum_of("console_error_count"),
        "distinct_id": ast.Call(name="any", args=[_col("distinct_id")]),
        "size": _sum_of("size"),
        "event_count": _sum_of("event_count"),
        "message_count": _sum_of("message_count"),
    }

    select_fields: List[ast.Expr] = []
    group_by_fields: List[ast.Expr] = []
    for name, chain in requested_fields.items():
        aggregate = aggregate_fields.get(name)
        if aggregate is not None:
            select_fields.append(ast.Alias(alias=name, expr=aggregate))
        else:
            select_fields.append(
                ast.Alias(alias=name, expr=ast.Field(chain=[table_name] + chain))
            )
            group_by_fields.append(ast.Field(chain=[table_name] + chain))

    return ast.SelectQuery(
        select=select_fields,
        select_from=ast.JoinExpr(table=ast.Field(chain=[table_name])),
        group_by=group_by_fields,
    )
class SessionReplayEventsTable(LazyTable):
    """Aggregated (one row per session) view over session_replay_events."""

    fields: Dict[str, FieldOrTable] = {
        **SESSION_REPLAY_EVENTS_COMMON_FIELDS,
        "start_time": DateTimeDatabaseField(name="start_time"),
        "end_time": DateTimeDatabaseField(name="end_time"),
        "first_url": StringDatabaseField(name="first_url"),
    }

    def lazy_select(self, requested_fields: Dict[str, List[str]]):
        # Delegates to the module-level builder that aggregates the raw table.
        return select_from_session_replay_events_table(requested_fields)

    def to_printed_clickhouse(self, context):
        return "session_replay_events"

    def to_printed_hogql(self):
        return "session_replay_events"
|
util | importhelper | # Copyright 2012,2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import importlib
import os
import sys
from os.path import basename, join, splitext
from quodlibet import print_d, util
def load_dir_modules(path, package):
    """Load all modules and packages in path (recursive).

    In case a module is already loaded, doesn't reload it.  Returns the
    list of successfully loaded modules; import errors for individual
    modules are logged and skipped.
    """
    # needed for pickle etc.
    assert package in sys.modules

    try:
        # get_importables can yield py and pyc for the same module, so
        # collect names into a set directly to load each only once.
        modules = {entry[0] for entry in get_importables(path)}
    except OSError:
        util.print_w("%r not found" % path)
        return []

    loaded = []
    for name in modules:
        try:
            mod = load_module(name, package, path)
        except Exception:
            util.print_exc()
            continue
        if mod:
            loaded.append(mod)
    return loaded
def get_importables(folder):
    """Searches a folder and its subfolders for modules and packages to import.

    No subfolders in packages, .so supported.
    The root folder will not be considered a package.

    Yields tuples of (name, import path, list of possible dependencies).
    """

    def is_ok(f):
        # Importable: .py extension and not private ("_" prefixed).
        if f.startswith("_"):
            return False
        if f.endswith(".py"):
            return True
        return False

    def is_init(f):
        return f == "__init__.py"

    first = True
    for root, dirs, names in os.walk(folder):
        # Prune ignored packages like "_shared" in-place so os.walk does not
        # descend into them.  The previous code removed entries from `dirs`
        # while iterating it, which skips the element following each removal
        # (two adjacent "_foo"/"_bar" dirs: only one was pruned).
        ignored = [d for d in dirs if d.startswith("_") or d.startswith(".")]
        for d in ignored:
            print_d("Ignoring %r" % os.path.join(root, d))
        dirs[:] = [d for d in dirs if d not in ignored]
        if not first and any((is_init(n) for n in names)):
            # A package directory below the root: yield it as a whole.
            yield (
                basename(root),
                root,
                # NOTE(review): is_ok sees full paths here, so its leading
                # underscore check effectively never fires for dependencies;
                # behavior preserved as-is.
                [d for d in (join(root, name) for name in names) if is_ok(d)],
            )
        else:
            for name in filter(is_ok, names):
                yield (splitext(name)[0], join(root, name), [join(root, name)])
        first = False
def load_module(name, package, path):
    """Load a module/package. Returns the module or None.

    Doesn't catch any exceptions during the actual import.
    """
    fullname = package + "." + name
    # Already imported? Reuse the cached module.
    try:
        return sys.modules[fullname]
    except KeyError:
        pass

    spec = importlib.machinery.PathFinder.find_spec(fullname, [path])
    if spec is None:
        return

    # modules need a parent package
    if package not in sys.modules:
        # Synthesize an empty namespace-style parent so the import succeeds.
        spec = importlib.machinery.ModuleSpec(package, None, is_package=True)
        sys.modules[package] = importlib.util.module_from_spec(spec)

    # NOTE(review): loader.load_module() is deprecated in favor of
    # exec_module(); kept as-is to preserve existing import semantics.
    mod = spec.loader.load_module(fullname)

    # make it accessible from the parent, like __import__ does
    vars(sys.modules[package])[name] = mod

    return mod
|
deluge | minify_web_js | #!/usr/bin/env python
#
# Copyright (C) 2014 Calum Lind <calumlind@gmail.com>
# Copyright (C) 2010 Damien Churchill <damoxc@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
"""Minifies the WebUI JS files.
Usage: python minify_web_js.py deluge/ui/web/js/deluge-all
"""
import fileinput
import fnmatch
import os
import subprocess
import sys
from shutil import which
# Locate an available Closure compiler executable on PATH; the first match
# wins.  Stays None when no Closure install is found (rJSmin fallback below).
closure_cmd = None
for cmd in ["closure-compiler", "closure"]:
    if which(cmd):
        closure_cmd = cmd
        break
def minify_closure(file_in, file_out):
    """Minify file_in into file_out using the Closure compiler CLI.

    Returns True on success, False when the compiler exits non-zero.
    """
    command = [
        closure_cmd,
        "--warning_level",
        "QUIET",
        "--language_in=ECMASCRIPT5",
        "--js",
        file_in,
        "--js_output_file",
        file_out,
    ]
    try:
        subprocess.check_call(command)
    except subprocess.CalledProcessError:
        return False
    return True
# Closure outputs smallest files but is a java-based command; can use rJSmin
# as a python-only fallback.
#
# deluge-all.js: Closure 131K, rJSmin: 148K
#
if not closure_cmd:
    try:
        from rjsmin import jsmin as minify
    except ImportError:
        print("Warning: No minifying command found.")
        # No minifier at all: minify_file() will report failure.
        minify = None
def source_files_list(source_dir):
    """Return an ordered list of .js files under source_dir (recursive).

    Files are sorted alphabetically within each directory.  A `.order` file
    in a directory can promote files to the front of that directory's list
    with lines of the form `+ <filename>`; entries naming files that do not
    exist are ignored (previously they raised ValueError via list.index).
    Files in the root directory are kept at the bottom of the result.
    """
    scripts = []
    for root, dirnames, filenames in os.walk(source_dir):
        dirnames.sort(reverse=True)
        files = fnmatch.filter(filenames, "*.js")
        files.sort()
        order_file = os.path.join(root, ".order")
        if os.path.isfile(order_file):
            with open(order_file) as _file:
                for line in _file:
                    if line.startswith("+ "):
                        order_filename = line.split()[1]
                        # Skip stale entries for files that no longer exist.
                        if order_filename in files:
                            files.remove(order_filename)
                            files.insert(0, order_filename)
        # Ensure root directory files are bottom of list.
        if dirnames:
            scripts.extend([os.path.join(root, f) for f in files])
        else:
            for filename in reversed(files):
                scripts.insert(0, os.path.join(root, filename))
    return scripts
def concat_src_files(file_list, fileout_path):
    """Concatenate file_list (in order) into a single file at fileout_path.

    The fileinput stream is now closed via its context manager; previously
    it was left open, leaking file handles.
    """
    with open(fileout_path, "w") as file_out:
        with fileinput.input(file_list) as file_in:
            file_out.writelines(file_in)
def minify_file(file_debug, file_minified):
    """Minify file_debug into file_minified.

    Prefers the Closure compiler when installed, otherwise falls back to
    the pure-python rJSmin minifier.  Returns True on success, False when
    Closure failed, None when no minifier is available.
    """
    if closure_cmd:
        return minify_closure(file_debug, file_minified)
    if minify:
        with open(file_minified, "w") as file_out, open(file_debug) as file_in:
            file_out.write(minify(file_in.read()))
        return True
def minify_js_dir(source_dir):
    """Concatenate then minify all .js files under source_dir.

    Writes `<dir>-debug.js` (concatenated) and `<dir>.js` (minified) next
    to source_dir.  On minification failure, the minified file is removed
    so only the debug build remains.
    """
    build_name = os.path.basename(source_dir)
    build_dir = os.path.dirname(source_dir)
    file_debug_js = os.path.join(build_dir, build_name + "-debug.js")
    file_minified_js = os.path.join(build_dir, build_name + ".js")
    source_files = source_files_list(source_dir)
    if not source_files:
        print("No js files found, skipping %s" % source_dir)
        return
    concat_src_files(source_files, file_debug_js)
    print("Minifying %s" % source_dir)
    if not minify_file(file_debug_js, file_minified_js):
        print("Warning: Failed minifying files %s, debug only" % source_dir)
        # Drop a stale/partial minified file so it cannot be served.
        if os.path.isfile(file_minified_js):
            os.remove(file_minified_js)
if __name__ == "__main__":
    # Without an argument, build the default WebUI JS bundles; otherwise
    # build only the directory given on the command line.
    if len(sys.argv) != 2:
        JS_SOURCE_DIRS = [
            "deluge/ui/web/js/deluge-all",
            "deluge/ui/web/js/extjs/ext-extensions",
        ]
    else:
        JS_SOURCE_DIRS = [os.path.abspath(sys.argv[1])]
    for js_source_dir in JS_SOURCE_DIRS:
        minify_js_dir(js_source_dir)
|
scripts | dynamicattributes | #!/usr/bin/env python2.7
"""
This script will generate a dynamicItemAttributes.json file using res files
"""
import argparse
import json
import os
from shutil import copyfile
parser = argparse.ArgumentParser(
    description="This script updates module icons for pyfa"
)
parser.add_argument("-e", "--eve", required=True, type=str, help="path to eve's ")
parser.add_argument(
    "-s",
    "--server",
    required=False,
    default="tq",
    type=str,
    help="which server to use (defaults to tq)",
)
args = parser.parse_args()

# Virtual resource paths inside EVE's shared cache.
LOADER_FILE = "app:/bin/dynamicItemAttributesLoader.pyd"
RES_FILE = "res:/staticdata/dynamicitemattributes.fsdbinary"
binaryfile = os.path.split(RES_FILE)[1]

# The index files map virtual resource paths to file names in the local
# "ResFiles" cache: first the app index, which points at the res index.
eve_path = os.path.join(args.eve, "index_{}.txt".format(args.server))
with open(eve_path, "r") as f:
    lines = f.readlines()
file_index = {x.split(",")[0]: x.split(",") for x in lines}
resfileindex = file_index["app:/resfileindex.txt"]
res_cache = os.path.join(args.eve, "ResFiles")
with open(os.path.join(res_cache, resfileindex[1]), "r") as f:
    lines = f.readlines()
res_index = {x.split(",")[0].lower(): x.split(",") for x in lines}
# Need to copy the loader to our current directory so it can be imported
attribute_loader_file = os.path.join(res_cache, file_index[LOADER_FILE][1])
to_path = os.path.dirname(os.path.abspath(__file__))
copyfile(
    attribute_loader_file,
    os.path.join(
        os.path.dirname(os.path.abspath(__file__)), os.path.split(LOADER_FILE)[1]
    ),
)
# The loader expects the data file under its original name, so copy it as well
dynattribute_file = os.path.join(res_cache, res_index[RES_FILE.lower()][1])
to_path = os.path.dirname(os.path.abspath(__file__))
copyfile(
    dynattribute_file,
    os.path.join(os.path.dirname(os.path.abspath(__file__)), binaryfile),
)
# Imported late on purpose: the .pyd was only just copied next to this script.
import dynamicItemAttributesLoader

attributes = dynamicItemAttributesLoader.load(os.path.join(to_path, binaryfile))
attributes_obj = {}
# convert top level to dict
attributes = dict(attributes)
# This is such a brute force method. todo: recursively generate this by inspecting the objects
for k, v in attributes.items():
    attributes_obj[k] = {
        "attributeIDs": dict(v.attributeIDs),
        "inputOutputMapping": list(v.inputOutputMapping),
    }
    for i, x in enumerate(v.inputOutputMapping):
        attributes_obj[k]["inputOutputMapping"][i] = {
            "resultingType": x.resultingType,
            "applicableTypes": list(x.applicableTypes),
        }
    for k2, v2 in v.attributeIDs.items():
        attributes_obj[k]["attributeIDs"][k2] = {"min": v2.min, "max": v2.max}
with open("dynamicattributes.json", "w") as outfile:
    json.dump(attributes_obj, outfile)
|
USBPrinting | USBPrinterOutputDeviceManager | # Copyright (c) 2020 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import threading
import time
from os import environ
from re import search
import serial.tools.list_ports
from cura.PrinterOutput.PrinterOutputDevice import ConnectionState
from PyQt6.QtCore import QObject, pyqtSignal
from UM.i18n import i18nCatalog
from UM.OutputDevice.OutputDevicePlugin import OutputDevicePlugin
from UM.Platform import Platform
from UM.Signal import Signal, signalemitter
from . import USBPrinterOutputDevice
i18n_catalog = i18nCatalog("cura")
@signalemitter
class USBPrinterOutputDeviceManager(QObject, OutputDevicePlugin):
    """Manager class that ensures that an USBPrinterOutput device is created for every connected USB printer."""

    # UM.Signal used to hop from the background polling thread to the main
    # (QML) thread, where the output device objects must be created.
    addUSBOutputDeviceSignal = Signal()
    progressChanged = pyqtSignal()

    def __init__(self, application, parent=None):
        """Set up the singleton and the (not yet started) polling thread.

        :param application: The Cura application instance (used for config
            stack access and lifecycle signals).
        :raises RuntimeError: when a second instance is constructed.
        """
        if USBPrinterOutputDeviceManager.__instance is not None:
            raise RuntimeError(
                "Try to create singleton '%s' more than once" % self.__class__.__name__
            )
        super().__init__(parent=parent)
        USBPrinterOutputDeviceManager.__instance = self
        self._application = application
        self._serial_port_list = []  # port names seen during the last poll
        self._usb_output_devices = {}  # serial port name -> output device
        self._usb_output_devices_model = None
        # Daemon polling thread; started by start(), stopped via _check_updates.
        self._update_thread = threading.Thread(target=self._updateThread)
        self._update_thread.daemon = True
        self._check_updates = True
        self._application.applicationShuttingDown.connect(self.stop)
        # Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
        self.addUSBOutputDeviceSignal.connect(self.addOutputDevice)
        self._application.globalContainerStackChanged.connect(
            self.updateUSBPrinterOutputDevices
        )

    # The method updates/reset the USB settings for all connected USB devices
    def updateUSBPrinterOutputDevices(self):
        for device in self._usb_output_devices.values():
            if isinstance(device, USBPrinterOutputDevice.USBPrinterOutputDevice):
                device.resetDeviceSettings()

    def start(self):
        """Start the background serial-port polling thread."""
        self._check_updates = True
        self._update_thread.start()

    def stop(self, store_data: bool = True):
        # Only clears the flag; the daemon thread exits after its current
        # 5-second sleep.  ``store_data`` is unused here (kept for the
        # slot signature expected by applicationShuttingDown).
        self._check_updates = False

    def _onConnectionStateChanged(self, serial_port):
        # (Un)register the device with the global output-device manager
        # whenever its serial connection connects/disconnects.
        if serial_port not in self._usb_output_devices:
            return
        changed_device = self._usb_output_devices[serial_port]
        if changed_device.connectionState == ConnectionState.Connected:
            self.getOutputDeviceManager().addOutputDevice(changed_device)
        else:
            self.getOutputDeviceManager().removeOutputDevice(serial_port)

    def _updateThread(self):
        # Poll loop, runs in the daemon thread until stop() clears the flag.
        while self._check_updates:
            container_stack = self._application.getGlobalContainerStack()
            if container_stack is None:
                time.sleep(5)
                continue
            port_list = []  # Just an empty list; all USB devices will be removed.
            if container_stack.getMetaDataEntry("supports_usb_connection"):
                machine_file_formats = [
                    file_type.strip()
                    for file_type in container_stack.getMetaDataEntry(
                        "file_formats"
                    ).split(";")
                ]
                if "text/x-gcode" in machine_file_formats:
                    # We only limit listing usb on windows is a fix for connecting tty/cu printers on MacOS and Linux
                    port_list = self.getSerialPortList(
                        only_list_usb=Platform.isWindows()
                    )
            self._addRemovePorts(port_list)
            time.sleep(5)

    def _addRemovePorts(self, serial_ports):
        """Helper to identify serial ports (and scan for them)"""
        # First, find and add all new or changed keys
        for serial_port in list(serial_ports):
            if serial_port not in self._serial_port_list:
                self.addUSBOutputDeviceSignal.emit(
                    serial_port
                )  # Hack to ensure its created in main thread
                continue
        self._serial_port_list = list(serial_ports)
        # Close devices whose port disappeared from the current scan.
        for port, device in self._usb_output_devices.items():
            if port not in self._serial_port_list:
                device.close()

    def addOutputDevice(self, serial_port):
        """Because the model needs to be created in the same thread as the QMLEngine, we use a signal."""
        device = USBPrinterOutputDevice.USBPrinterOutputDevice(serial_port)
        device.connectionStateChanged.connect(self._onConnectionStateChanged)
        self._usb_output_devices[serial_port] = device
        device.connect()

    def getSerialPortList(self, only_list_usb=False):
        """Create a list of serial ports on the system.

        :param only_list_usb: If true, only usb ports are listed

        Ports can additionally be filtered via the environment variables
        CURA_DEVICENAMES / CURA_DEVICETYPES / CURA_DEVICEINFOS (regex
        patterns matched against name, description and hwid respectively).
        """
        base_list = []
        try:
            port_list = serial.tools.list_ports.comports()
        except TypeError:  # Bug in PySerial causes a TypeError if port gets disconnected while processing.
            port_list = []
        for port in port_list:
            # Normalize to the old (device, description, hwid) tuple form.
            if not isinstance(port, tuple):
                port = (port.device, port.description, port.hwid)
            if not port[
                2
            ]:  # HWID may be None if the device is not USB or the system doesn't report the type.
                continue
            if only_list_usb and not port[2].startswith("USB"):
                continue
            # To prevent cura from messing with serial ports of other devices,
            # filter by regular expressions passed in as environment variables.
            # Get possible patterns with python3 -m serial.tools.list_ports -v
            # set CURA_DEVICENAMES=USB[1-9] -> e.g. not matching /dev/ttyUSB0
            pattern = environ.get("CURA_DEVICENAMES")
            if pattern and not search(pattern, port[0]):
                continue
            # set CURA_DEVICETYPES=CP2102 -> match a type of serial converter
            pattern = environ.get("CURA_DEVICETYPES")
            if pattern and not search(pattern, port[1]):
                continue
            # set CURA_DEVICEINFOS=LOCATION=2-1.4 -> match a physical port
            # set CURA_DEVICEINFOS=VID:PID=10C4:EA60 -> match a vendor:product
            pattern = environ.get("CURA_DEVICEINFOS")
            if pattern and not search(pattern, port[2]):
                continue
            base_list += [port[0]]
        return list(base_list)

    # Singleton storage; note the name mangling (_USBPrinterOutputDeviceManager__instance).
    __instance = None  # type: USBPrinterOutputDeviceManager

    @classmethod
    def getInstance(cls, *args, **kwargs) -> "USBPrinterOutputDeviceManager":
        return cls.__instance
|
src | itunes |
# https://github.com/albertz/itunes-scripts
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs # utf8
import os
import sys
# Open the iTunes library XML (UTF-8).  If it does not exist (no iTunes
# library on this machine), fall back to None so the module still imports.
try:
    libraryXmlFile = codecs.open(os.path.expanduser("~/Music/iTunes/iTunes Music Library.xml"), "r", "utf-8")
except IOError: # file not found or so
    libraryXmlFile = None
def parse_xml(stream):
    """Minimal streaming XML tokenizer (generator).

    Reads `stream` one character at a time and yields a tuple
    (tag, args, data) for every closed angle-bracket construct, where:
      tag  -- the node name including any prefix/postfix markers
              (e.g. "key", "/key", "dict", "true/", "!--", "?xml"),
      args -- list of raw attribute tokens (attribute *values* in quotes
              are skipped, see states 3/5),
      data -- the character data collected since the previous tag.

    This is a hand-written state machine, not a real XML parser; it is
    just good enough for the iTunes library plist file.
    """
    state = 0
    spaces = " \t\n"
    data = ""
    node = ""
    nodeprefix = ""
    nodepostfix = ""
    nodeargs = []
    while True:
        c = stream.read(1)
        if c == "": break
        oldstate = state
        if state == 0:  # in character data, outside any tag
            if c == "<": state = 1
            else: data += c
        elif state == 1: # in node
            if c in spaces:
                if node == "": pass
                else: state = 2
            elif c in "!/?":
                # markers before the name (e.g. "</x>", "<?xml") become the
                # prefix, markers after it (e.g. "<x/>") the postfix
                if node == "": nodeprefix += c
                else: nodepostfix += c
            elif c == ">": state = 0
            elif c == "\"": state = 3
            else: node += c
        elif state == 2: # in nodeargs
            if c in spaces: pass
            elif c == ">": state = 0
            elif c == "\"": state = 3
            elif c == "/": nodepostfix += c
            else:
                nodeargs.append(c)
                state = 4
        elif state == 3: # in nodearg str
            if c == "\\": state = 5
            elif c == "\"": state = 2
            else: pass # we dont store it right now
        elif state == 4: # in nodearg
            if c in spaces: state = 2
            elif c == ">": state = 0
            elif c == "\"": state = 3
            elif c == "/": nodepostfix += c
            else: nodeargs[-1] += c
        elif state == 5: # in escaped nodearg str
            # we dont store it right now
            state = 3
        # A transition back to state 0 means a tag just closed: emit it.
        if oldstate > 0 and state == 0:
            yield nodeprefix + node + nodepostfix, nodeargs, data
            nodeprefix, node, nodepostfix = "", "", ""
            nodeargs = []
            data = ""
import base64
def _plistDataConv(data):
data = data.replace(" ", "")
data = data.replace("\t", "")
data = data.replace("\n", "")
return base64.b64decode(data)
# code from here: http://wiki.python.org/moin/EscapingXml
import xml.parsers.expat
def xmlUnescape(s):
    """Expand XML entities (&amp; etc.) in `s` using expat.

    NOTE(review): Python 2 only -- relies on the `unicode` builtin and on
    `ParserCreate().returns_unicode`, both removed in Python 3.
    Returns unicode iff the input was unicode.
    """
    want_unicode = False
    if isinstance(s, unicode):
        s = s.encode("utf-8")
        want_unicode = True

    # the rest of this assumes that `s` is UTF-8
    list = []  # NOTE(review): shadows the builtin `list`

    # create and initialize a parser object
    p = xml.parsers.expat.ParserCreate("utf-8")
    p.buffer_text = True
    p.returns_unicode = want_unicode
    p.CharacterDataHandler = list.append

    # parse the data wrapped in a dummy element
    # (needed so the "document" is well-formed)
    p.Parse("<e>", 0)
    p.Parse(s, 0)
    p.Parse("</e>", 1)

    # join the extracted strings and return
    es = ""
    if want_unicode:
        es = u""
    return es.join(list)
# Map of plist leaf-element names to the converter applied to their text
# content ("data" is base64-decoded, "string" is XML-unescaped).
plistPrimitiveTypes = {"integer": int, "real": float, "string": xmlUnescape, "date": str, "data": _plistDataConv}
def parse_plist_content(xmlIter, prefix, nodeExceptions = {}):
    """Yield (key-path, value) pairs for one plist value read from `xmlIter`.

    `prefix` is the key path (list) leading to this value.  `nodeExceptions`
    maps a tag name to an exception *instance* that is raised when the tag is
    seen -- the enclosing array/dict parser uses this to detect its own
    closing tag (exception-as-control-flow).
    """
    for node, nodeargs, data in xmlIter:
        if node in nodeExceptions:
            raise nodeExceptions[node]
        elif node == "array":
            for entry in parse_plist_arrayContent(xmlIter, prefix): yield entry
        elif node == "dict":
            for entry in parse_plist_dictContent(xmlIter, prefix): yield entry
        elif node in plistPrimitiveTypes:
            for entry in parse_plist_primitiveContent(xmlIter, prefix, node): yield entry
        elif node == "true/":
            yield prefix, True
        elif node == "false/":
            yield prefix, False
        else:
            print >>sys.stderr, "didnt expected node", repr(node), "in content in prefix", repr(prefix)
            break
def parse_plist_primitiveContent(xmlIter, prefix, contentType):
    """Yield the converted value of one primitive element (<integer> etc.).

    Expects the very next tag to be the matching close tag; its `data`
    is converted via plistPrimitiveTypes[contentType].
    """
    for node, nodeargs, data in xmlIter:
        if node == "/" + contentType:
            yield prefix, plistPrimitiveTypes[contentType](data)
        else:
            print >>sys.stderr, "didnt expected node", repr(node), "in primitive content", repr(contentType), "in prefix", repr(prefix)
        break
# Sentinel marker classes yielded around array contents.  PlistMarkerArrayEnd
# is also *raised* (old-style class, Python 2 only) to signal "</array>" seen.
class PlistMarkerArrayBegin: pass
class PlistMarkerArrayEnd: pass
def parse_plist_arrayContent(xmlIter, prefix):
    """Yield the entries of a plist <array>, keyed by numeric index.

    Emits Begin/End markers around the content; the end of the array is
    detected by parse_plist_content raising PlistMarkerArrayEnd when it
    sees "</array>".
    """
    yield prefix, PlistMarkerArrayBegin
    index = 0
    while True:
        try:
            for entry in parse_plist_content(xmlIter, prefix + [index], {"/array": PlistMarkerArrayEnd()}):
                yield entry
        except PlistMarkerArrayEnd:
            break
        index += 1
    yield prefix, PlistMarkerArrayEnd
# Sentinel marker classes yielded around dict contents.
class PlistMarkerDictBegin: pass
class PlistMarkerDictEnd: pass
# dict in plist is a list of key/value pairs
def parse_plist_dictContent(xmlIter, prefix):
    """Yield the entries of a plist <dict>, keyed by the preceding <key> text.

    Emits Begin/End markers around the content.  Each "</key>" captures the
    key string, then the following value is parsed under prefix + [key].
    """
    lastkey = None
    yield prefix, PlistMarkerDictBegin
    for node, nodeargs, data in xmlIter:
        if node == "key": pass
        elif node == "/key":
            # lastkey should have been consumed by the previous value
            if lastkey is not None: print >>sys.stderr, "expected value after key in dict content in prefix", repr(prefix)
            lastkey = data
            for entry in parse_plist_content(xmlIter, prefix + [lastkey]):
                yield entry
            lastkey = None
        elif node == "/dict": break
        else:
            print >>sys.stderr, "didn't expected node", repr(node), "in dict content in prefix", repr(prefix)
    yield prefix, PlistMarkerDictEnd
def parse_plist(xmlIter):
    """Top-level plist parser: skip everything until <plist>, then stream
    the flattened (key-path, value) entries of its content."""
    for node, nodeargs, data in xmlIter:
        if node == "plist":
            for entry in parse_plist_content(xmlIter, []): yield entry
# Lazy iterator over the whole iTunes library plist (empty if the file
# could not be opened above).
if libraryXmlFile:
    libraryPlistIter = parse_plist(parse_xml(libraryXmlFile))
else:
    libraryPlistIter = []
def songsIter(plistIter):
    """Yield one dict per track from the flattened plist stream.

    A track starts at a DictBegin under ["Tracks", <track-id>]; all entries
    until the matching DictEnd are collected into `song` keyed by the third
    path component (the track property name).  "Rating" is normalized to
    None when absent.
    """
    for prefix, value in plistIter:
        if len(prefix) == 2 and prefix[0] == "Tracks" and value is PlistMarkerDictBegin:
            song = {}
            for prefix2, value2 in plistIter:
                if prefix2 == prefix and value2 is PlistMarkerDictEnd: break
                song[prefix2[2]] = value2
            if "Rating" not in song: song["Rating"] = None
            yield song
# Shared, consumable iterator over all library tracks.
librarySongsIter = songsIter(libraryPlistIter)
def ratingsIter():
    """Yield (filename, rating) for every rated song.

    Ratings are scaled from iTunes' 0..100 to 0.0..1.0 and the track
    Location URL is converted to a local unicode filesystem path.
    NOTE(review): Python 2 only (`urllib.unquote`); `utils` is a
    project-local module.
    """
    import re
    import urllib

    import utils
    for song in librarySongsIter:
        rating = song["Rating"]
        if rating is None: continue # print only songs with any rating set
        rating /= 100.0 # the maximum is 100
        fn = song["Location"]
        fn = urllib.unquote(fn)
        fn = re.sub("^file://(localhost)?", "", fn)
        fn = utils.convertToUnicode(fn)
        yield (fn, rating)
# Script mode: dump "rating filename" for every rated track, then exit so
# the background-loading code below never runs.
if __name__ == "__main__":
    for fn, rating in ratingsIter():
        print rating, repr(fn)
    sys.exit()
def loadRatings():
    """Fill the module-level `ratings` dict ({filename: rating}) by running
    ratingsIter() in an AsyncTask and draining its queue; a (None, None)
    sentinel marks the end of the stream."""
    def doCalc(queue):
        for fn, rating in ratingsIter():
            queue.put((fn,rating))
        queue.put((None,None))
    from utils import AsyncTask
    queue = AsyncTask(func=doCalc, name="iTunes load ratings")
    while True:
        fn, rating = queue.get()
        if fn is None: return
        ratings[fn] = rating
# do some extra check in case we are reloading this module. don't reload the ratings. takes too long
# NOTE(review): `loadRatingsThread` is never assigned anywhere in this file,
# so the NameError branch (fresh load) appears to run on every (re)import --
# verify whether a caller sets it, otherwise the reload guard is ineffective.
try:
    loadRatingsThread
except NameError:
    ratings = {}
    from utils import daemonThreadCall
    daemonThreadCall(loadRatings, name = "iTunes ratings loader")
|
core | log_factory | # -*- coding: utf-8 -*-
import locale
import logging
import logging.handlers
import os
import sys
from contextlib import closing
try:
import colorlog
except ImportError:
colorlog = None
class LogFactory:
    """Create and manage the pyLoad loggers.

    Handlers (console, syslog, logfile) are attached according to the
    ``log`` section of the core config; the log level follows the core
    ``debug`` flag.  Created loggers are tracked in ``self.loggers`` so
    they can be reset/removed/closed later.
    """

    FILE_EXTENSION = ".log"
    LINESTYLE = "{"
    LINEFORMAT = "[{asctime}] {levelname:8} {name:>16} {message}"
    LINEFORMAT_COLORED = "{badge_log_color}[{asctime}] {levelname:^8} {reset}{log_color} {name:>16} {message} {reset}{exc_log_color}"
    DATEFORMAT = "%Y-%m-%d %H:%M:%S"
    # colorlog primary colors, keyed by level name
    PRIMARY_COLORS = {
        "DEBUG": "bold,black,bg_white",
        "INFO": "black,bg_white",
        "WARNING": "red,bg_yellow",
        "ERROR": "bold,white,bg_red",
        "CRITICAL": "bold,white,bg_black",
    }
    # colorlog secondary colors for the level badge and exception text
    SECONDARY_COLORS = {
        "badge": {
            "DEBUG": "bold,white,bg_cyan",
            "INFO": "bold,white,bg_green",
            "WARNING": "bold,white,bg_yellow",
            "ERROR": "bold,white,bg_red",
            "CRITICAL": "bold,white,bg_black",
        },
        "exc": {"ERROR": "bold,black,bg_white", "CRITICAL": "bold,black,bg_white"},
    }

    def __init__(self, core):
        self.pyload = core
        self._ = core._
        # name -> logging.Logger for every logger created by this factory
        self.loggers = {}

    def init_logger(self, name):
        """Create (or re-configure) the logger ``name``, attach the
        configured handlers and return it."""
        logger = logging.getLogger(name)
        self.loggers[name] = logger
        self._init_logger(logger)
        return logger

    def _init_logger(self, logger):
        """Set the level and attach console/syslog/file handlers per config."""
        console = self.pyload.config.get("log", "console")
        syslog = self.pyload.config.get("log", "syslog")
        filelog = self.pyload.config.get("log", "filelog")
        level = logging.DEBUG if self.pyload.debug else logging.INFO
        logger.setLevel(level)
        if console:
            self._init_console_handler(logger)
        if syslog:
            self._init_syslog_handler(logger)
        if filelog:
            self._init_filelog_handler(logger)

    def get_logger(self, name):
        """Return the logger ``name``, creating it on first use.

        NOTE: must not be written as ``self.loggers.get(name,
        self.init_logger(name))`` -- ``dict.get`` evaluates its default
        eagerly, so every call would re-run ``init_logger`` and attach a
        duplicate set of handlers to an already-initialized logger.
        """
        try:
            return self.loggers[name]
        except KeyError:
            return self.init_logger(name)

    def remove_logger(self, name):
        """Forget the logger ``name`` and close its handlers (no-op when
        the name is unknown)."""
        # pop() with a default: removing an unknown logger must not raise
        logger = self.loggers.pop(name, None)
        if not logger:
            return
        self._removeHandlers(logger)

    def reset_logger(self, name):
        """Re-apply the current config to an existing logger."""
        logger = self.loggers.get(name)
        if not logger:
            return
        # Drop the old handlers first, otherwise re-initialization would
        # leave the logger with two sets of handlers.
        self._removeHandlers(logger)
        self._init_logger(logger)

    def _removeHandlers(self, logger):
        """Close and detach every handler of ``logger``."""
        # Iterate over a copy: removeHandler() mutates logger.handlers,
        # and mutating the list while iterating it skips every other entry.
        for handler in list(logger.handlers):
            with closing(handler) as hdlr:
                logger.removeHandler(hdlr)

    def shutdown(self):
        """Close all handlers of all managed loggers and forget them."""
        for logger in self.loggers.values():
            self._removeHandlers(logger)
        self.loggers.clear()

    def _init_console_handler(self, logger):
        """Attach a stdout handler, colored when colorlog is available and
        enabled in the config."""
        # `and colorlog` short-circuits: falls back to plain formatting
        # when the colorlog package could not be imported.
        color = self.pyload.config.get("log", "console_color") and colorlog
        if color:
            consoleform = colorlog.ColoredFormatter(
                self.LINEFORMAT_COLORED,
                datefmt=self.DATEFORMAT,
                log_colors=self.PRIMARY_COLORS,
                secondary_log_colors=self.SECONDARY_COLORS,
                style=self.LINESTYLE,
            )
        else:
            consoleform = logging.Formatter(
                self.LINEFORMAT, self.DATEFORMAT, self.LINESTYLE
            )
        consolehdlr = logging.StreamHandler(sys.stdout)
        consolehdlr.setFormatter(consoleform)
        logger.addHandler(consolehdlr)

    def _init_syslog_handler(self, logger):
        # try to mimic to normal syslog messages
        fmt = "{asctime} {name}: {message}"
        datefmt = "%b %e %H:%M:%S"
        syslog_form = logging.Formatter(fmt, datefmt, self.LINESTYLE)
        syslog_addr = None
        location = self.pyload.config.get("log", "syslog_location")
        if location == "remote":
            host = self.pyload.config.get("log", "syslog_host")
            port = self.pyload.config.get("log", "syslog_port")
            syslog_addr = (host, port)
        else:
            folder = self.pyload.config.get("log", "syslog_folder")
            if folder:
                syslog_addr = folder
            elif sys.platform == "darwin":
                syslog_addr = "/var/run/syslog"
            elif os.name == "nt":
                # TODO: Recheck
                syslog_addr = os.path.join(self.pyload.userdir, "logs", "syslog")
                # Only this locally constructed path may need its parent
                # directory created.  The remote (host, port) tuple would
                # make os.makedirs() raise TypeError, and the system socket
                # paths ("/dev/log", "/var/run/syslog") must not be turned
                # into directories.
                os.makedirs(os.path.dirname(syslog_addr), exist_ok=True)
            else:
                syslog_addr = "/dev/log"
        sysloghdlr = logging.handlers.SysLogHandler(syslog_addr)
        sysloghdlr.setFormatter(syslog_form)
        logger.addHandler(sysloghdlr)

    def _init_filelog_handler(self, logger):
        """Attach a (optionally rotating) file handler named after the logger."""
        filename = logger.name + self.FILE_EXTENSION
        filelog_folder = self.pyload.config.get("log", "filelog_folder")
        if not filelog_folder:
            filelog_folder = os.path.join(self.pyload.userdir, "logs")
        os.makedirs(filelog_folder, exist_ok=True)
        filelog_form = logging.Formatter(
            self.LINEFORMAT, self.DATEFORMAT, self.LINESTYLE
        )
        filelog_path = os.path.join(filelog_folder, filename)
        encoding = locale.getpreferredencoding(do_setlocale=False)
        if self.pyload.config.get("log", "filelog_rotate"):
            # config stores the size in KiB; RotatingFileHandler wants bytes
            max_size = self.pyload.config.get("log", "filelog_size") << 10
            max_entries = self.pyload.config.get("log", "filelog_entries")
            filehdlr = logging.handlers.RotatingFileHandler(
                filelog_path,
                maxBytes=max_size,
                backupCount=max_entries,
                encoding=encoding,
            )
        else:
            filehdlr = logging.FileHandler(filelog_path, encoding=encoding)
        filehdlr.setFormatter(filelog_form)
        logger.addHandler(filehdlr)
|
WebServicesTool-CocoaBindings | RPCMethod | import objc
from Foundation import *
class RPCMethod(NSObject):
    """Model object for one RPC method exposed by a web service document.

    Signature and description are fetched lazily via the owning document;
    setters are marked with objc.accessor so Cocoa KVO/bindings see them.
    """

    def initWithDocument_name_(self, aDocument, aName):
        # Cocoa-style designated initializer (init* returns self or None).
        self = super(RPCMethod, self).init()
        self.document = aDocument
        self.k_methodName = aName
        self.k_methodSignature = None
        self.k_methodDescription = None
        return self

    def methodName(self):
        return self.k_methodName

    def displayName(self):
        # Prefer the full signature once it has been fetched.
        if self.k_methodSignature is None:
            return self.k_methodName
        else:
            return self.k_methodSignature

    def setMethodSignature_(self, aSignature):
        self.k_methodSignature = aSignature

    # Wrap as a KVC-compliant accessor so bindings pick up changes.
    setMethodSignature_ = objc.accessor(setMethodSignature_)

    def methodDescription(self):
        # Lazily request the description; placeholder is shown until the
        # document's async fetch calls setMethodDescription_().
        if self.k_methodDescription is None:
            self.setMethodDescription_("<description not yet received>")
            self.document.fetchMethodDescription_(self)
        return self.k_methodDescription

    def setMethodDescription_(self, aDescription):
        self.k_methodDescription = aDescription

    setMethodDescription_ = objc.accessor(setMethodDescription_)
|
network | pac | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Evaluation of PAC scripts."""
import functools
import sys
from typing import Optional, cast
from qutebrowser.qt import machinery
from qutebrowser.qt.core import QObject, QUrl, pyqtSignal, pyqtSlot
from qutebrowser.qt.network import (
QHostAddress,
QHostInfo,
QNetworkAccessManager,
QNetworkProxy,
QNetworkReply,
QNetworkRequest,
)
from qutebrowser.qt.qml import QJSEngine, QJSValue
from qutebrowser.utils import log, qtlog, qtutils, resources, urlutils, utils
# Raised by PACResolver._parse_proxy_* on malformed FindProxyForURL results.
class ParseProxyError(Exception):

    """Error while parsing PAC result string."""


# Raised when the PAC JavaScript itself fails to evaluate or misbehaves.
class EvalProxyError(Exception):

    """Error while evaluating PAC script."""
def _js_slot(*args):
    """Wrap a method as a JavaScript function.

    Register a PACContext method as a JavaScript function, and catch
    exceptions returning them as JavaScript Error objects.

    Args:
        args: Types of method arguments.

    Return: Wrapped method.
    """

    def _decorator(method):
        @functools.wraps(method)
        def new_method(self, *args, **kwargs):
            """Call the underlying function."""
            try:
                return method(self, *args, **kwargs)
            except Exception:
                # Catch Exception, not a bare except: KeyboardInterrupt and
                # SystemExit must propagate instead of being converted into
                # a JavaScript Error object.
                # The *type* of the exception (e.g. "<class 'ValueError'>")
                # is passed to the JS Error constructor.
                e = str(sys.exc_info()[0])
                log.network.exception("PAC evaluation error")
                # pylint: disable=protected-access
                return self._error_con.callAsConstructor([e])
                # pylint: enable=protected-access

        # FIXME:mypy PyQt6 stubs issue, passing type should work too
        deco = pyqtSlot(*args, result="QJSValue")
        return deco(new_method)

    return _decorator
class _PACContext(QObject):

    """Implementation of PAC API functions that require native calls.

    Exposed to the JS engine as the global ``PAC`` object; JS_DEFINITIONS
    provides the thin JS wrappers the PAC spec expects.

    See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Necko/Proxy_Auto-Configuration_(PAC)_file
    """

    JS_DEFINITIONS = """
    function dnsResolve(host) {
        return PAC.dnsResolve(host);
    }
    function myIpAddress() {
        return PAC.myIpAddress();
    }
    """

    def __init__(self, engine):
        """Create a new PAC API implementation instance.

        Args:
            engine: QJSEngine which is used for running PAC.
        """
        super().__init__(parent=engine)
        self._engine = engine
        # JS Error constructor, used by _js_slot to surface Python errors.
        self._error_con = engine.globalObject().property("Error")

    @_js_slot(str)
    def dnsResolve(self, host):
        """Resolve a DNS hostname.

        Resolves the given DNS hostname into an IP address, and returns it
        in the dot-separated format as a string.

        Args:
            host: hostname to resolve.
        """
        # Blocking lookup; returns JS null when resolution fails.
        ips = QHostInfo.fromName(host)
        if ips.error() != QHostInfo.HostInfoError.NoError or not ips.addresses():
            err_f = "Failed to resolve host during PAC evaluation: {}"
            log.network.info(err_f.format(host))
            return QJSValue(QJSValue.SpecialValue.NullValue)
        else:
            return ips.addresses()[0].toString()

    @_js_slot()
    def myIpAddress(self):
        """Get host IP address.

        Return the server IP address of the current machine, as a string in
        the dot-separated integer format.
        """
        # NOTE(review): always returns 127.0.0.1, not the outbound address.
        return QHostAddress(QHostAddress.SpecialAddress.LocalHost).toString()
class PACResolver:

    """Evaluate PAC script files and resolve proxies."""

    @staticmethod
    def _parse_proxy_host(host_str):
        """Split "host:port" and return (host, port) with port as int.

        Raises ParseProxyError on a missing/non-numeric port.
        """
        host, _colon, port_str = host_str.partition(":")
        try:
            port = int(port_str)
        except ValueError:
            raise ParseProxyError("Invalid port number")
        return (host, port)

    @staticmethod
    def _parse_proxy_entry(proxy_str):
        """Parse one proxy string entry, as described in PAC specification."""
        # Split on whitespace, dropping empty tokens from repeated spaces.
        config = [c.strip() for c in proxy_str.split(" ") if c]
        if not config:
            raise ParseProxyError("Empty proxy entry")

        if config[0] == "DIRECT":
            if len(config) != 1:
                raise ParseProxyError("Invalid number of parameters for " + "DIRECT")
            return QNetworkProxy(QNetworkProxy.ProxyType.NoProxy)
        elif config[0] == "PROXY":
            if len(config) != 2:
                raise ParseProxyError("Invalid number of parameters for PROXY")
            host, port = PACResolver._parse_proxy_host(config[1])
            return QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, host, port)
        elif config[0] in ["SOCKS", "SOCKS5"]:
            if len(config) != 2:
                raise ParseProxyError("Invalid number of parameters for SOCKS")
            host, port = PACResolver._parse_proxy_host(config[1])
            return QNetworkProxy(QNetworkProxy.ProxyType.Socks5Proxy, host, port)
        else:
            err = "Unknown proxy type: {}"
            raise ParseProxyError(err.format(config[0]))

    @staticmethod
    def _parse_proxy_string(proxy_str):
        """Parse a full semicolon-separated PAC result into proxy objects."""
        proxies = proxy_str.split(";")
        return [PACResolver._parse_proxy_entry(x) for x in proxies]

    def _evaluate(self, js_code, js_file):
        """Run js_code in the engine, raising EvalProxyError on JS errors."""
        ret = self._engine.evaluate(js_code, js_file)
        if ret.isError():
            err = "JavaScript error while evaluating PAC file: {}"
            raise EvalProxyError(err.format(ret.toString()))

    def __init__(self, pac_str):
        """Create a PAC resolver.

        Args:
            pac_str: JavaScript code containing PAC resolver.

        Raises:
            EvalProxyError: when the script fails to evaluate or does not
                define a callable FindProxyForURL.
        """
        self._engine = QJSEngine()
        self._engine.installExtensions(QJSEngine.Extension.ConsoleExtension)
        # Native helpers (dnsResolve/myIpAddress) exposed as global "PAC".
        self._ctx = _PACContext(self._engine)
        self._engine.globalObject().setProperty(
            "PAC", self._engine.newQObject(self._ctx)
        )
        self._evaluate(_PACContext.JS_DEFINITIONS, "pac_js_definitions")
        self._evaluate(resources.read_file("javascript/pac_utils.js"), "pac_utils")
        # Dummy ProxyConfig object some PAC scripts expect to exist.
        proxy_config = self._engine.newObject()
        proxy_config.setProperty("bindings", self._engine.newObject())
        self._engine.globalObject().setProperty("ProxyConfig", proxy_config)

        self._evaluate(pac_str, "pac")
        global_js_object = self._engine.globalObject()
        self._resolver = global_js_object.property("FindProxyForURL")
        if not self._resolver.isCallable():
            err = "Cannot resolve FindProxyForURL function, got '{}' instead"
            raise EvalProxyError(err.format(self._resolver.toString()))

    def resolve(self, query, from_file=False):
        """Resolve a proxy via PAC.

        Args:
            query: QNetworkProxyQuery.
            from_file: Whether the proxy info is coming from a file.

        Return:
            A list of QNetworkProxy objects in order of preference.
        """
        qtutils.ensure_valid(query.url())
        # Strip credentials (and, for https, path/query) from the URL that
        # is handed to the untrusted PAC script -- privacy measure.
        string_flags: urlutils.UrlFlagsType
        if from_file:
            string_flags = QUrl.ComponentFormattingOption.PrettyDecoded
        else:
            string_flags = QUrl.UrlFormattingOption.RemoveUserInfo
            if query.url().scheme() == "https":
                https_opts = (
                    QUrl.UrlFormattingOption.RemovePath
                    | QUrl.UrlFormattingOption.RemoveQuery
                )
                if machinery.IS_QT5:
                    string_flags |= cast(QUrl.UrlFormattingOption, https_opts)
                else:
                    string_flags |= https_opts

        result = self._resolver.call(
            [query.url().toString(string_flags), query.peerHostName()]
        )
        result_str = result.toString()
        if not result.isString():
            err = "Got strange value from FindProxyForURL: '{}'"
            raise EvalProxyError(err.format(result_str))
        return self._parse_proxy_string(result_str)
class PACFetcher(QObject):

    """Asynchronous fetcher of PAC files."""

    # Emitted once the download (and evaluation) finished, success or not.
    finished = pyqtSignal()

    def __init__(self, url, parent=None):
        """Resolve a PAC proxy from URL.

        Args:
            url: QUrl of a PAC proxy.
        """
        super().__init__(parent)
        # Strip the "pac+" prefix to get the real scheme (http/https/file).
        pac_prefix = "pac+"
        assert url.scheme().startswith(pac_prefix)
        url.setScheme(url.scheme()[len(pac_prefix) :])
        self._pac_url = url
        with qtlog.disable_qt_msghandler():
            # WORKAROUND for a hang when messages are printed, see our
            # NetworkAccessManager subclass for details.
            self._manager: Optional[QNetworkAccessManager] = QNetworkAccessManager()
        # Fetch the PAC file itself without going through any proxy.
        self._manager.setProxy(QNetworkProxy(QNetworkProxy.ProxyType.NoProxy))
        self._pac = None  # PACResolver once successfully evaluated
        self._error_message = None  # set instead of _pac on failure
        self._reply = None

    def __eq__(self, other):
        # NOTE(review): assumes `other` is a PACFetcher (no type check /
        # NotImplemented fallback) and no matching __hash__ is defined.
        return self._pac_url == other._pac_url

    def __repr__(self):
        return utils.get_repr(self, url=self._pac_url, constructor=True)

    def fetch(self):
        """Fetch the proxy from the remote URL."""
        assert self._manager is not None
        self._reply = self._manager.get(QNetworkRequest(self._pac_url))
        assert self._reply is not None
        self._reply.finished.connect(self._finish)

    @pyqtSlot()
    def _finish(self):
        """Handle the completed download: decode and evaluate the script.

        On any failure _error_message is set instead of _pac.  Setting
        _manager to None marks the fetch as finished for _wait().
        """
        assert self._reply is not None
        if self._reply.error() != QNetworkReply.NetworkError.NoError:
            error = "Can't fetch PAC file from URL, error code {}: {}"
            self._error_message = error.format(
                self._reply.error(), self._reply.errorString()
            )
            log.network.error(self._error_message)
        else:
            try:
                pacscript = bytes(self._reply.readAll()).decode("utf-8")
            except UnicodeError as e:
                error = "Invalid encoding of a PAC file: {}"
                self._error_message = error.format(e)
                log.network.exception(self._error_message)
                return
            try:
                self._pac = PACResolver(pacscript)
                log.network.debug("Successfully evaluated PAC file.")
            except EvalProxyError as e:
                error = "Error in PAC evaluation: {}"
                self._error_message = error.format(e)
                log.network.exception(self._error_message)
        self._manager = None
        self._reply = None
        self.finished.emit()

    def _wait(self):
        """Wait until a reply from the remote server is received."""
        if self._manager is not None:
            # Spin a local event loop until finished fires.
            loop = qtutils.EventLoop()
            self.finished.connect(loop.quit)
            loop.exec()

    def fetch_error(self):
        """Check if PAC script is successfully fetched.

        Return None iff PAC script is downloaded and evaluated successfully,
        error string otherwise.
        """
        self._wait()
        return self._error_message

    def resolve(self, query):
        """Resolve a query via PAC.

        Args: QNetworkProxyQuery.

        Return a list of QNetworkProxy objects in order of preference.
        """
        self._wait()
        assert self._pac is not None
        from_file = self._pac_url.scheme() == "file"
        try:
            return self._pac.resolve(query, from_file=from_file)
        except (EvalProxyError, ParseProxyError) as e:
            log.network.exception("Error in PAC resolution: {}.".format(e))
            # .invalid is guaranteed to be inaccessible in RFC 6761.
            # Port 9 is for DISCARD protocol -- DISCARD servers act like
            # /dev/null.
            # Later NetworkManager.createRequest will detect this and display
            # an error message.
            error_host = "pac-resolve-error.qutebrowser.invalid"
            return [QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, error_host, 9)]
|
versions | 012_e5ca33a5d445_add_resources | # encoding: utf-8
"""012 Add resources
Revision ID: e5ca33a5d445
Revises: 866f6370b4ac
Create Date: 2018-09-04 18:48:52.303211
"""
import sqlalchemy as sa
from alembic import op
from ckan.migration import skip_based_on_legacy_engine_version
# revision identifiers, used by Alembic.
revision = "e5ca33a5d445"
down_revision = "866f6370b4ac"
branch_labels = None
depends_on = None
def upgrade():
    """Migration 012: move package.download_url into new resource tables."""
    # Bail out when a legacy (pre-alembic) database already applied this.
    if skip_based_on_legacy_engine_version(op, __name__):
        return
    # Current state of a package's resources.
    op.create_table(
        "package_resource",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("package_id", sa.Integer, sa.ForeignKey("package.id")),
        sa.Column("url", sa.UnicodeText, nullable=False),
        sa.Column("format", sa.UnicodeText),
        sa.Column("description", sa.UnicodeText),
        sa.Column("position", sa.Integer),
        sa.Column("state_id", sa.Integer),
        sa.Column("revision_id", sa.UnicodeText, sa.ForeignKey("revision.id")),
    )
    # Versioned history of the table above (vdm-style continuity).
    op.create_table(
        "package_resource_revision",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("package_id", sa.Integer, sa.ForeignKey("package.id")),
        sa.Column("url", sa.UnicodeText, nullable=False),
        sa.Column("format", sa.UnicodeText),
        sa.Column("description", sa.UnicodeText),
        sa.Column("position", sa.Integer),
        sa.Column("state_id", sa.Integer),
        sa.Column(
            "revision_id",
            sa.UnicodeText,
            sa.ForeignKey("revision.id"),
            primary_key=True,
        ),
        sa.Column("continuity_id", sa.Integer, sa.ForeignKey("package_resource.id")),
    )
    # The single URL column is superseded by the resource tables.
    op.drop_column("package", "download_url")
    op.drop_column("package_revision", "download_url")
def downgrade():
    """Revert 012: drop the resource tables and restore download_url."""
    # Drop the revision table first so its FK to package_resource is gone
    # before the parent table is removed.
    for table_name in ("package_resource_revision", "package_resource"):
        op.drop_table(table_name)
    # Restore the single download_url column on both package tables; a new
    # Column object is built per table since a Column cannot be shared.
    for table_name in ("package", "package_revision"):
        op.add_column(
            table_name,
            sa.Column("download_url", sa.UnicodeText()),
        )
|
Gui | FeatureExtension | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2019 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import FreeCADGui
import Path
import Path.Base.Gui.Util as PathGuiUtil
import Path.Op.FeatureExtension as FeatureExtensions
import Path.Op.Gui.Base as PathOpGui
# lazily loaded modules
from lazy_loader.lazy_loader import LazyLoader
from pivy import coin
from PySide import QtCore, QtGui
Part = LazyLoader("Part", globals(), "Part")
__title__ = "Path Feature Extensions UI"
__author__ = "sliptonic (Brad Collette)"
__url__ = "https://www.freecad.org"
__doc__ = "Extensions feature page controller."
translate = FreeCAD.Qt.translate
if False:
Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule())
Path.Log.trackModule(Path.Log.thisModule())
else:
Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
class _Extension(object):
ColourEnabled = (1.0, 0.5, 1.0)
ColourDisabled = (1.0, 1.0, 0.5)
TransparencySelected = 0.0
TransparencyDeselected = 0.7
def __init__(self, obj, base, face, edge):
self.obj = obj
self.base = base
self.face = face
self.edge = edge
self.ext = None
if edge:
self.ext = FeatureExtensions.createExtension(obj, base, face, edge)
self.switch = self.createExtensionSoSwitch(self.ext)
self.root = self.switch
def createExtensionSoSwitch(self, ext):
if not ext:
return None
sep = coin.SoSeparator()
pos = coin.SoTranslation()
mat = coin.SoMaterial()
crd = coin.SoCoordinate3()
fce = coin.SoFaceSet()
hnt = coin.SoShapeHints()
numVert = list() # track number of vertices in each polygon face
try:
wire = ext.getWire()
except FreeCAD.Base.FreeCADError:
wire = None
if not wire:
return None
if isinstance(wire, (list, tuple)):
p0 = [p for p in wire[0].discretize(Deflection=0.02)]
p1 = [p for p in wire[1].discretize(Deflection=0.02)]
p2 = list(reversed(p1))
polygon = [(p.x, p.y, p.z) for p in (p0 + p2)]
else:
if ext.extFaces:
# Create polygon for each extension face in compound extensions
allPolys = list()
extFaces = ext.getExtensionFaces(wire)
for f in extFaces:
pCnt = 0
wCnt = 0
for w in f.Wires:
if wCnt == 0:
poly = [p for p in w.discretize(Deflection=0.01)]
else:
poly = [p for p in w.discretize(Deflection=0.01)][:-1]
pCnt += len(poly)
allPolys.extend(poly)
numVert.append(pCnt)
polygon = [(p.x, p.y, p.z) for p in allPolys]
else:
# poly = [p for p in wire.discretize(Deflection=0.02)][:-1]
poly = [p for p in wire.discretize(Deflection=0.02)]
polygon = [(p.x, p.y, p.z) for p in poly]
crd.point.setValues(polygon)
mat.diffuseColor = self.ColourDisabled
mat.transparency = self.TransparencyDeselected
hnt.faceType = coin.SoShapeHints.UNKNOWN_FACE_TYPE
hnt.vertexOrdering = coin.SoShapeHints.CLOCKWISE
if numVert:
# Transfer vertex counts for polygon faces
fce.numVertices.setValues(tuple(numVert))
sep.addChild(pos)
sep.addChild(mat)
sep.addChild(hnt)
sep.addChild(crd)
sep.addChild(fce)
# Finalize SoSwitch
switch = coin.SoSwitch()
switch.addChild(sep)
switch.whichChild = coin.SO_SWITCH_NONE
self.material = mat
return switch
def _setColour(self, r, g, b):
    """Set the preview material's diffuse colour to the given RGB triple."""
    colour = (r, g, b)
    self.material.diffuseColor = colour
def isValid(self):
    """Return True when a preview scenegraph node was created for this extension."""
    # PEP 8: use `is not None` rather than `not ... is None`.
    return self.root is not None
def show(self):
    """Make the extension preview visible (render all switch children)."""
    if not self.switch:
        return
    self.switch.whichChild = coin.SO_SWITCH_ALL
def hide(self):
    """Hide the extension preview (render no switch children)."""
    if not self.switch:
        return
    self.switch.whichChild = coin.SO_SWITCH_NONE
def enable(self, ena=True):
    """Colour the preview as enabled, or delegate to disable() when ena is falsy."""
    if not ena:
        self.disable()
    else:
        self.material.diffuseColor = self.ColourEnabled
def disable(self):
    """Colour the preview with the disabled colour."""
    mat = self.material
    mat.diffuseColor = self.ColourDisabled
def select(self):
    """Apply the selected-state transparency to the preview material."""
    mat = self.material
    mat.transparency = self.TransparencySelected
def deselect(self):
    """Apply the deselected-state transparency to the preview material."""
    mat = self.material
    mat.transparency = self.TransparencyDeselected
class TaskPanelExtensionPage(PathOpGui.TaskPanelPage):
    """Task panel page for managing face/edge extensions of a Path operation."""

    # Custom Qt item-data roles used to attach Python objects to model items.
    DataObject = QtCore.Qt.ItemDataRole.UserRole
    DataSwitch = QtCore.Qt.ItemDataRole.UserRole + 2

    # Human-readable, translated labels for the extension direction constants.
    Direction = {
        FeatureExtensions.Extension.DirectionNormal: translate("PathPocket", "Normal"),
        FeatureExtensions.Extension.DirectionX: translate("PathPocket", "X"),
        FeatureExtensions.Extension.DirectionY: translate("PathPocket", "Y"),
    }
def initPage(self, obj):
    """One-time page setup: state flags, widgets, scenegraph switch, tree model."""
    self.setTitle("Extensions")
    self.OpIcon = ":/icons/view-axonometric.svg"
    self.setIcon(self.OpIcon)
    # Bookkeeping for the edge-count based auto enable/disable feature.
    self.initialEdgeCount = -1
    self.edgeCountThreshold = 30
    self.fieldsSet = False
    # The `useOutline` checkbox lives on the Operation page; located lazily
    # by _getUseOutlineState(). -1 means "state unknown".
    self.useOutlineCheckbox = None
    self.useOutline = -1
    self.extensionsCache = dict()
    self.extensionsReady = False
    self.enabled = True
    self.lastDefaultLength = ""
    self.extensions = list()
    self.defaultLength = PathGuiUtil.QuantitySpinBox(
        self.form.defaultLength, obj, "ExtensionLengthDefault"
    )
    self.form.extensionTree.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
    self.form.extensionTree.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
    # Parent switch holding every extension preview subgraph.
    self.switch = coin.SoSwitch()
    self.obj.ViewObject.RootNode.addChild(self.switch)
    self.switch.whichChild = coin.SO_SWITCH_ALL
    self.model = QtGui.QStandardItemModel(self.form.extensionTree)
    self.model.setHorizontalHeaderLabels(["Base", "Extension"])
    """
    # russ4262: This `if` block shows all available extensions upon edit of operation with any extension enabled.
    # This can cause the model(s) to overly obscured due to previews of extensions.
    # Would be great if only enabled extensions were shown.
    if 0 < len(obj.ExtensionFeature):
        self.form.showExtensions.setCheckState(QtCore.Qt.Checked)
    else:
        self.form.showExtensions.setCheckState(QtCore.Qt.Unchecked)
    """
    self.form.showExtensions.setCheckState(QtCore.Qt.Unchecked)
    # Guards against updateData() feedback while getFields() writes to obj.
    self.blockUpdateData = False
def cleanupPage(self, obj):
    """Detach the preview switch from the view object's scenegraph on close."""
    try:
        self.obj.ViewObject.RootNode.removeChild(self.switch)
    except ReferenceError:
        # The underlying C++ object may already be gone when the document closes.
        Path.Log.debug("obj already destroyed - no cleanup required")
def getForm(self):
    """Load and return this page's Qt form from the compiled resource file."""
    return FreeCADGui.PySideUic.loadUi(":/panels/PageOpPocketExtEdit.ui")
def forAllItemsCall(self, cb):
    """Invoke cb(item, ext) for every leaf (edge/wire) item of the tree model.

    The model is three levels deep: base model -> face -> edge items; the
    _Extension object is stored on each leaf under the DataObject role.
    """
    for baseRow in range(self.model.rowCount()):
        baseItem = self.model.item(baseRow, 0)
        for faceRow in range(baseItem.rowCount()):
            faceItem = baseItem.child(faceRow, 0)
            for edgeRow in range(faceItem.rowCount()):
                leaf = faceItem.child(edgeRow, 0)
                cb(leaf, leaf.data(self.DataObject))
def currentExtensions(self):
    """Return the FeatureExtensions objects of all checked edge items.

    Returns an empty list when the Extensions feature is disabled.
    """
    Path.Log.debug("currentExtensions()")
    extensions = []

    def extractExtension(item, ext):
        # Collect only real edge extensions whose tree item is checked.
        if ext and ext.edge and item.checkState() == QtCore.Qt.Checked:
            extensions.append(ext.ext)

    if self.form.enableExtensions.isChecked():
        self.forAllItemsCall(extractExtension)
    Path.Log.track("extensions", extensions)
    return extensions
def updateProxyExtensions(self, obj):
    """Push the currently checked extensions into the operation's proxy."""
    Path.Log.debug("updateProxyExtensions()")
    exts = self.currentExtensions()
    self.extensions = exts
    FeatureExtensions.setExtensions(obj, exts)
def getFields(self, obj):
    """Transfer the page's widget values into the operation object *obj*."""
    Path.Log.track(obj.Label, self.model.rowCount(), self.model.columnCount())
    # Block updateData() while we write properties, to avoid feedback loops.
    self.blockUpdateData = True
    if obj.ExtensionCorners != self.form.extendCorners.isChecked():
        obj.ExtensionCorners = self.form.extendCorners.isChecked()
    self.defaultLength.updateProperty()
    self.updateProxyExtensions(obj)
    self.blockUpdateData = False
def setFields(self, obj):
    """Transfer values from the operation object *obj* into the page widgets."""
    Path.Log.track(obj.Label)
    if obj.ExtensionCorners != self.form.extendCorners.isChecked():
        self.form.extendCorners.toggle()
    self._autoEnableExtensions()  # Check edge count for auto-disable Extensions on initial Task Panel loading
    self._initializeExtensions(obj)  # Efficiently initialize Extensions
    self.defaultLength.updateSpinBox()
    self._getUseOutlineState()  # Find `useOutline` checkbox and get its boolean value
    self.lastDefaultLength = self.form.defaultLength.text()  # set last DL value
    self.fieldsSet = True  # flag to identify initial values set
def _initializeExtensions(self, obj):
    """_initializeExtensions()...
    Subroutine called inside `setFields()` to initialize Extensions efficiently.

    When the feature is enabled the stored extensions are loaded; when it is
    disabled but the object already carries extensions, the feature is turned
    back on; otherwise the edit group is greyed out.
    """
    if self.enabled:
        self.extensions = FeatureExtensions.getExtensions(obj)
    elif len(obj.ExtensionFeature) > 0:
        # Object has stored extensions: re-enable the feature to show them.
        self.extensions = FeatureExtensions.getExtensions(obj)
        self.form.enableExtensions.setChecked(True)
        self._includeEdgesAndWires()
    else:
        self.form.extensionEdit.setDisabled(True)
    self.setExtensions(self.extensions)
def _applyDefaultLengthChange(self, index=None):
    """_applyDefaultLengthChange(index=None)...
    Helper method to update Default Length spinbox,
    and update extensions due to change in Default Length.
    *index* is unused; it absorbs the argument Qt signals may pass.
    """
    self.defaultLength.updateSpinBox()
    if self.form.defaultLength.text() != self.lastDefaultLength:
        self.lastDefaultLength = self.form.defaultLength.text()
        self._resetCachedExtensions()  # Reset extension cache because extension dimensions likely changed
        self._enableExtensions()  # Recalculate extensions
def createItemForBaseModel(self, base, sub, edges, extensions):
    """Build the tree item for one face (*sub*) of base model *base*.

    Creates one child item per extendable edge — or per connected wire when
    "extend corners" is checked — and attaches the matching _Extension
    preview object under the DataObject role. *extensions* holds the
    already-active extensions used to pre-check items.
    """
    Path.Log.track(
        base.Label, sub, "+", len(edges), len(base.Shape.getElement(sub).Edges)
    )
    # Path.Log.debug("createItemForBaseModel() label: {}, sub: {}, {}, edgeCnt: {}, subEdges: {}".format(base.Label, sub, '+', len(edges), len(base.Shape.getElement(sub).Edges)))
    extendCorners = self.form.extendCorners.isChecked()
    subShape = base.Shape.getElement(sub)

    def createSubItem(label, ext0):
        # Append a checkable child row for one edge/wire to the face item.
        if ext0.root:
            self.switch.addChild(ext0.root)
        item0 = QtGui.QStandardItem()
        item0.setData(label, QtCore.Qt.EditRole)
        item0.setData(ext0, self.DataObject)
        item0.setCheckable(True)
        for e in extensions:
            # Pre-check rows whose extension is already active on the op.
            if e.obj == base and e.sub == label:
                item0.setCheckState(QtCore.Qt.Checked)
                ext0.enable()
                break
        item.appendRow([item0])

    # ext = self._cachedExtension(self.obj, base, sub, None)
    ext = None
    item = QtGui.QStandardItem()
    item.setData(sub, QtCore.Qt.EditRole)
    item.setData(ext, self.DataObject)
    item.setSelectable(False)
    extensionEdges = {}
    if self.useOutline == 1 and sub.startswith("Face"):
        # Only show exterior extensions if `Use Outline` is True
        subEdges = subShape.Wires[0].Edges
    else:
        # Show all exterior and interior extensions if `Use Outline` is False
        subEdges = subShape.Edges
    # NOTE(review): nesting below reconstructed from flattened source —
    # confirm createSubItem() is intended per matching edge, not per valid ext.
    for edge in subEdges:
        for e, label in edges:
            if edge.isSame(e):
                ext1 = self._cachedExtension(self.obj, base, sub, label)
                if ext1.isValid():
                    extensionEdges[e] = label[4:]  # isolate edge number
                if not extendCorners:
                    createSubItem(label, ext1)
    if extendCorners:

        def edgesMatchShape(e0, e1):
            # Compare edges irrespective of their orientation.
            flipped = Path.Geom.flipEdge(e1)
            if flipped:
                return Path.Geom.edgesMatch(e0, e1) or Path.Geom.edgesMatch(
                    e0, flipped
                )
            else:
                return Path.Geom.edgesMatch(e0, e1)

        self.extensionEdges = extensionEdges
        Path.Log.debug(
            "extensionEdges.values(): {}".format(extensionEdges.values())
        )
        for edgeList in Part.sortEdges(
            list(extensionEdges)
        ):  # Identify connected edges that form wires
            self.edgeList = edgeList
            if len(edgeList) == 1:
                label = (
                    "Edge%s"
                    % [
                        extensionEdges[keyEdge]
                        for keyEdge in extensionEdges.keys()
                        if edgesMatchShape(keyEdge, edgeList[0])
                    ][0]
                )
            else:
                # Wire label lists the member edge numbers in ascending order.
                label = "Wire(%s)" % ",".join(
                    sorted(
                        [
                            extensionEdges[keyEdge]
                            for e in edgeList
                            for keyEdge in extensionEdges.keys()
                            if edgesMatchShape(e, keyEdge)
                        ],
                        key=lambda s: int(s),
                    )
                )
            ext2 = self._cachedExtension(self.obj, base, sub, label)
            createSubItem(label, ext2)
    return item
def setExtensions(self, extensions):
    """Rebuild the extension tree for *extensions*, preserving visual state.

    Skips the (expensive) rebuild entirely while ``extensionsReady`` is set;
    callers clear that flag to force a refresh. Selection and collapsed
    branches are remembered and restored across the rebuild.
    """
    Path.Log.track(len(extensions))
    Path.Log.debug("setExtensions()")
    if self.extensionsReady:
        Path.Log.debug("setExtensions() returning per `extensionsReady` flag")
        return
    self.form.extensionTree.blockSignals(True)
    # remember current visual state
    if hasattr(self, "selectionModel"):
        selectedExtensions = [
            self.model.itemFromIndex(index).data(self.DataObject).ext
            for index in self.selectionModel.selectedIndexes()
        ]
    else:
        # selectionModel only exists after registerSignalHandlers().
        selectedExtensions = []
    collapsedModels = []
    collapsedFeatures = []
    for modelRow in range(self.model.rowCount()):
        model = self.model.item(modelRow, 0)
        modelName = model.data(QtCore.Qt.EditRole)
        if not self.form.extensionTree.isExpanded(model.index()):
            collapsedModels.append(modelName)
        for featureRow in range(model.rowCount()):
            feature = model.child(featureRow, 0)
            if not self.form.extensionTree.isExpanded(feature.index()):
                collapsedFeatures.append(
                    "%s.%s" % (modelName, feature.data(QtCore.Qt.EditRole))
                )

    # remove current extensions and all their visuals
    def removeItemSwitch(item, ext):
        ext.hide()
        if ext.root:
            self.switch.removeChild(ext.root)

    self.forAllItemsCall(removeItemSwitch)
    self.model.clear()
    # create extensions for model and given argument
    if self.enabled:
        for base in self.obj.Base:
            show = False
            edges = [
                (edge, "Edge%d" % (i + 1))
                for i, edge in enumerate(base[0].Shape.Edges)
            ]
            baseItem = QtGui.QStandardItem()
            baseItem.setData(base[0].Label, QtCore.Qt.EditRole)
            baseItem.setSelectable(False)
            for sub in sorted(base[1]):
                # Only face selections are extendable.
                if sub.startswith("Face"):
                    show = True
                    baseItem.appendRow(
                        self.createItemForBaseModel(base[0], sub, edges, extensions)
                    )
            if show:
                self.model.appendRow(baseItem)
    self.form.extensionTree.setModel(self.model)
    self.form.extensionTree.expandAll()
    self.form.extensionTree.resizeColumnToContents(0)
    # restore previous state - at least the parts that are still valid
    for modelRow in range(self.model.rowCount()):
        model = self.model.item(modelRow, 0)
        modelName = model.data(QtCore.Qt.EditRole)
        if modelName in collapsedModels:
            self.form.extensionTree.setExpanded(model.index(), False)
        for featureRow in range(model.rowCount()):
            feature = model.child(featureRow, 0)
            featureName = "%s.%s" % (modelName, feature.data(QtCore.Qt.EditRole))
            if featureName in collapsedFeatures:
                self.form.extensionTree.setExpanded(feature.index(), False)
    if hasattr(self, "selectionModel") and selectedExtensions:
        self.restoreSelection(selectedExtensions)
    self.form.extensionTree.blockSignals(False)
    self.extensionsReady = True
    Path.Log.debug(" setExtensions() finished and setting `extensionsReady=True`")
def updateData(self, obj, prop):
    """Document observer: refresh the page when a relevant property changes.

    ``blockUpdateData`` suppresses re-entry while getFields() is writing
    properties back to *obj*.
    """
    Path.Log.track(obj.Label, prop, self.blockUpdateData)
    # Path.Log.debug("updateData({})".format(prop))
    # NOTE(review): nesting reconstructed from flattened source — confirm the
    # trailing `Base` branch is the extensions-disabled fallback.
    if not self.blockUpdateData:
        if self.fieldsSet:
            if self.form.enableExtensions.isChecked():
                if prop == "ExtensionLengthDefault":
                    self._applyDefaultLengthChange()
                elif prop == "Base":
                    # Base geometry changed: rebuild the whole tree.
                    self.extensionsReady = False
                    self.setExtensions(FeatureExtensions.getExtensions(obj))
                elif prop == "UseOutline":
                    self._getUseOutlineState()  # Find `useOutline` checkbox and get its boolean value
                    self._includeEdgesAndWires()
            elif prop == "Base":
                # Feature disabled: just invalidate so a later enable rebuilds.
                self.extensionsReady = False
def restoreSelection(self, selection):
    """Sync button enablement, 3D selection and preview visuals with *selection*."""
    Path.Log.debug("restoreSelection()")
    Path.Log.track()
    if 0 == self.model.rowCount():
        # Empty tree: nothing can be cleared, enabled or disabled.
        Path.Log.track("-")
        self.form.buttonClear.setEnabled(False)
        self.form.buttonDisable.setEnabled(False)
        self.form.buttonEnable.setEnabled(False)
    else:
        self.form.buttonClear.setEnabled(True)
        if selection or self.selectionModel.selectedIndexes():
            self.form.buttonDisable.setEnabled(True)
            self.form.buttonEnable.setEnabled(True)
        else:
            self.form.buttonDisable.setEnabled(False)
            self.form.buttonEnable.setEnabled(False)
    FreeCADGui.Selection.clearSelection()

    def selectItem(item, ext):
        # True when ext corresponds to one of the requested selections.
        for sel in selection:
            if ext.base == sel.obj and ext.edge == sel.sub:
                return True
        return False

    def setSelectionVisuals(item, ext):
        if selectItem(item, ext):
            self.selectionModel.select(
                item.index(), QtCore.QItemSelectionModel.Select
            )
        selected = self.selectionModel.isSelected(item.index())
        if selected:
            # Mirror the tree selection into the 3D view.
            FreeCADGui.Selection.addSelection(ext.base, ext.face)
            ext.select()
        else:
            ext.deselect()
        if self.form.showExtensions.isChecked() or selected:
            ext.show()
        else:
            ext.hide()

    self.forAllItemsCall(setSelectionVisuals)
def selectionChanged(self):
    """Qt slot: refresh buttons/visuals when the tree selection changes."""
    Path.Log.debug("selectionChanged()")
    self.restoreSelection([])
def extensionsClear(self):
    """Uncheck and visually disable every extension item in the tree."""
    Path.Log.debug("extensionsClear()")

    def disableItem(item, ext):
        item.setCheckState(QtCore.Qt.Unchecked)
        ext.disable()

    self.forAllItemsCall(disableItem)
    self.setDirty()
def _extensionsSetState(self, state):
    """Set the check *state* on all currently selected edge items.

    Items without an edge (face-level rows) are skipped.
    """
    Path.Log.debug("_extensionsSetState()")
    Path.Log.track(state)
    for index in self.selectionModel.selectedIndexes():
        item = self.model.itemFromIndex(index)
        ext = item.data(self.DataObject)
        if ext.edge:
            item.setCheckState(state)
            ext.enable(state == QtCore.Qt.Checked)
    self.setDirty()
def extensionsDisable(self):
    """Uncheck the selected extension items."""
    self._extensionsSetState(QtCore.Qt.Unchecked)

def extensionsEnable(self):
    """Check the selected extension items."""
    self._extensionsSetState(QtCore.Qt.Checked)
def updateItemEnabled(self, item):
    """Qt slot: sync an item's preview and the proxy when its check state changes."""
    Path.Log.track(item)
    ext = item.data(self.DataObject)
    if item.checkState() == QtCore.Qt.Checked:
        ext.enable()
    else:
        ext.disable()
    self.updateProxyExtensions(self.obj)
    self.setDirty()
def showHideExtension(self):
    """Qt slot for the 'show extensions' checkbox: toggle all previews.

    When hiding, previews of currently selected items stay visible.
    """
    if self.form.showExtensions.isChecked():

        def enableExtensionEdit(item, ext):
            ext.show()

        self.forAllItemsCall(enableExtensionEdit)
    else:

        def disableExtensionEdit(item, ext):
            if not self.selectionModel.isSelected(item.index()):
                ext.hide()

        self.forAllItemsCall(disableExtensionEdit)
    # self.setDirty()
def toggleExtensionCorners(self):
    """Qt slot for the 'extend corners' checkbox: rebuild the extension tree."""
    Path.Log.debug("toggleExtensionCorners()")
    Path.Log.track()
    # Force setExtensions() to rebuild rather than short-circuit.
    self.extensionsReady = False
    extensions = FeatureExtensions.getExtensions(self.obj)
    self.setExtensions(extensions)
    self.selectionChanged()
    self.setDirty()
def getSignalsForUpdate(self, obj):
    """Return the Qt signals that should trigger a data update for *obj*."""
    Path.Log.track(obj.Label)
    return [
        self.form.defaultLength.editingFinished,
        self.form.enableExtensions.toggled,
    ]
def registerSignalHandlers(self, obj):
    """Connect all page widgets to their handlers and prime selection state."""
    self.form.showExtensions.clicked.connect(self.showHideExtension)
    self.form.extendCorners.clicked.connect(self.toggleExtensionCorners)
    self.form.buttonClear.clicked.connect(self.extensionsClear)
    self.form.buttonDisable.clicked.connect(self.extensionsDisable)
    self.form.buttonEnable.clicked.connect(self.extensionsEnable)
    self.form.enableExtensions.toggled.connect(self._enableExtensions)
    self.form.defaultLength.editingFinished.connect(self._applyDefaultLengthChange)
    self.model.itemChanged.connect(self.updateItemEnabled)
    # The selection model only exists once the tree has its model set.
    self.selectionModel = self.form.extensionTree.selectionModel()
    self.selectionModel.selectionChanged.connect(self.selectionChanged)
    # Initialize button enablement to match the (empty) selection.
    self.selectionChanged()
# Support methods
def _getUseOutlineState(self):
    """_getUseOutlineState() ...
    This method locates the `useOutline` form checkbox in the `Operation` tab,
    and saves that reference to self.useOutlineCheckbox. If found, then the
    boolean value of the checkbox is saved to self.useOutline
    (1 checked, 0 unchecked, -1 when the checkbox cannot be found).
    """
    if self.useOutlineCheckbox:
        # Checkbox located on a previous call; refresh the cached value.
        self.useOutline = self.useOutlineCheckbox.isChecked()
    if hasattr(self, "parent"):
        parent = getattr(self, "parent")
        if parent and hasattr(parent, "featurePages"):
            # Scan sibling task-panel pages for the Operation page's checkbox.
            for page in parent.featurePages:
                if hasattr(page, "panelTitle"):
                    if page.panelTitle == "Operation" and hasattr(
                        page.form, "useOutline"
                    ):
                        Path.Log.debug("Found useOutline checkbox")
                        self.useOutlineCheckbox = page.form.useOutline
                        if page.form.useOutline.isChecked():
                            self.useOutline = 1
                            return
                        else:
                            self.useOutline = 0
                            return
    # Checkbox not found: mark the state as unknown.
    self.useOutline = -1
# Methods for enable and disablement of Extensions feature
def _autoEnableExtensions(self):
    """Determine whether the Extensions feature should start enabled.

    Mirrors the `enableExtensions` checkbox state into ``self.enabled``.
    The edge-count based auto-disable described by ``edgeCountThreshold``
    (quick Task Panel loading for faces with many edges) is handled
    elsewhere; this method only reads the checkbox. Sets ``self.enabled``
    and returns nothing — the old docstring's claim of a boolean return
    value was incorrect.
    """
    enabled = self.form.enableExtensions.isChecked()
    Path.Log.debug("_autoEnableExtensions() is {}".format(enabled))
    self.enabled = enabled
def _enableExtensions(self):
    """_enableExtensions() ...
    This method is called when the enableExtensions push button is toggled.
    This method manages the enabled or disabled state of the extensionsEdit
    Task Panel input group.
    """
    Path.Log.debug("_enableExtensions()")
    if self.form.enableExtensions.isChecked():
        self.enabled = True
        # Force a rebuild of the tree with the stored extensions.
        self.extensionsReady = False
        self.form.extensionEdit.setEnabled(True)
        self.extensions = FeatureExtensions.getExtensions(self.obj)
        self.setExtensions(self.extensions)
    else:
        self.form.extensionEdit.setDisabled(True)
        self.enabled = False
def _includeEdgesAndWires(self):
    """_includeEdgesAndWires() ...
    Refresh the cached `useOutline` state and rebuild the extensions tree.
    Called when the outline-related inputs change.
    """
    self._getUseOutlineState()  # Find `useOutline` checkbox and get its boolean value
    Path.Log.debug("_includeEdgesAndWires()")
    self.extensionsReady = False
    self._enableExtensions()
# Methods for creating and managing cached extensions
def _cachedExtension(self, obj, base, sub, label):
    """_cachedExtension(obj, base, sub, label)...
    Return the cached _Extension for (base, sub, label), creating and
    caching a new one when not present. A falsy label is keyed as "None".
    """
    key = "_".join([base.Name, sub, label if label else "None"])
    ext = self.extensionsCache.get(key)
    if ext is None:
        ext = _Extension(obj, base, sub, label)
        self.extensionsCache[key] = ext  # cache the extension
    return ext
def _resetCachedExtensions(self):
    """Drop every cached _Extension and force the tree to rebuild."""
    Path.Log.debug("_resetCachedExtensions()")
    self.extensionsCache = dict()
    self.extensionsReady = False
# Eclass
# Report successful module load to the FreeCAD console log.
FreeCAD.Console.PrintLog("Loading PathFeatureExtensionsGui... done\n")
|
src | qidenticon | ###
# qidenticon.py is Licensed under FreeBSD License.
# (http://www.freebsd.org/copyright/freebsd-license.html)
#
# Copyright 1994-2009 Shin Adachi. All rights reserved.
# Copyright 2013 "Sendiulo". All rights reserved.
# Copyright 2018-2021 The Bitmessage Developers. All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###
# pylint: disable=too-many-locals,too-many-arguments,too-many-function-args
"""
Usage
-----
>>> import qidenticon
>>> qidenticon.render_identicon(code, size)
Returns an instance of :class:`QPixmap` which have generated identicon image.
``size`` specifies `patch size`. Generated image size is 3 * ``size``.
"""
from six.moves import range
try:
from PyQt5 import QtCore, QtGui
except (ImportError, RuntimeError):
from PyQt4 import QtCore, QtGui
class IdenticonRendererBase(object):
    """Encapsulate methods around rendering identicons.

    Subclasses supply PATH_SET (unit-square patch outlines) and decode().
    """

    PATH_SET = []

    def __init__(self, code):
        """
        :param code: code for icon
        """
        # Coerce to int so the bit shifts in decode() work for any numeric input.
        if not isinstance(code, int):
            code = int(code)
        self.code = code

    def render(self, size, twoColor, opacity, penwidth):
        """
        render identicon to QPixmap

        :param size: identicon patchsize. (image size is 3 * [size])
        :returns: :class:`QPixmap`
        """
        # decode the code
        middle, corner, side, foreColor, secondColor, swap_cross = self.decode(
            self.code, twoColor
        )
        # make image
        image = QtGui.QPixmap(QtCore.QSize(size * 3 + penwidth, size * 3 + penwidth))
        # fill background
        backColor = QtGui.QColor(255, 255, 255, opacity)
        image.fill(backColor)
        kwds = {
            "image": image,
            "size": size,
            "foreColor": foreColor if swap_cross else secondColor,
            "penwidth": penwidth,
            "backColor": backColor,
        }
        # middle patch (positional args map to pos, turn, invert, patch_type)
        image = self.drawPatchQt((1, 1), middle[2], middle[1], middle[0], **kwds)
        # side patch: four edge cells, rotated one quarter-turn per cell
        kwds["foreColor"] = foreColor
        kwds["patch_type"] = side[0]
        for i in range(4):
            pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i]
            image = self.drawPatchQt(pos, side[2] + 1 + i, side[1], **kwds)
        # corner patch: four corner cells, likewise rotated
        kwds["foreColor"] = secondColor
        kwds["patch_type"] = corner[0]
        for i in range(4):
            pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i]
            image = self.drawPatchQt(pos, corner[2] + 1 + i, corner[1], **kwds)
        return image

    def drawPatchQt(
        self, pos, turn, invert, patch_type, image, size, foreColor, backColor, penwidth
    ):  # pylint: disable=unused-argument
        """Paint one patch onto *image* at grid cell *pos*.

        :param size: patch size
        *turn* rotates the patch by quarter turns; *invert* fills the
        patch's complement instead. The mutated pixmap is also returned.
        """
        path = self.PATH_SET[patch_type]
        if not path:
            # blank patch: use the full square with inverted colouring
            invert = not invert
            path = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0), (0.0, 0.0)]
        polygon = QtGui.QPolygonF([QtCore.QPointF(x * size, y * size) for x, y in path])
        rot = turn % 4
        rect = [
            QtCore.QPointF(0.0, 0.0),
            QtCore.QPointF(size, 0.0),
            QtCore.QPointF(size, size),
            QtCore.QPointF(0.0, size),
        ]
        rotation = [0, 90, 180, 270]
        nopen = QtGui.QPen(foreColor, QtCore.Qt.NoPen)
        foreBrush = QtGui.QBrush(foreColor, QtCore.Qt.SolidPattern)
        if penwidth > 0:
            # white border pen used when outlines are requested
            pen_color = QtGui.QColor(255, 255, 255)
            pen = QtGui.QPen(pen_color, QtCore.Qt.SolidPattern)
            pen.setWidth(penwidth)
        painter = QtGui.QPainter()
        painter.begin(image)
        painter.setPen(nopen)
        # position the painter at the cell, then rotate about the cell corner
        painter.translate(pos[0] * size + penwidth / 2, pos[1] * size + penwidth / 2)
        painter.translate(rect[rot])
        painter.rotate(rotation[rot])
        if invert:
            # subtract the actual polygon from a rectangle to invert it
            poly_rect = QtGui.QPolygonF(rect)
            polygon = poly_rect.subtracted(polygon)
        painter.setBrush(foreBrush)
        if penwidth > 0:
            # draw the borders
            painter.setPen(pen)
            painter.drawPolygon(polygon, QtCore.Qt.WindingFill)
        # draw the fill
        painter.setPen(nopen)
        painter.drawPolygon(polygon, QtCore.Qt.WindingFill)
        painter.end()
        return image

    def decode(self, code, twoColor):
        """virtual functions"""
        raise NotImplementedError
class DonRenderer(IdenticonRendererBase):
    """
    Don Park's implementation of identicon, see:
    https://blog.docuverse.com/2007/01/18/identicon-updated-and-source-released
    """

    PATH_SET = [
        # [0] full square:
        [(0, 0), (4, 0), (4, 4), (0, 4)],
        # [1] right-angled triangle pointing top-left:
        [(0, 0), (4, 0), (0, 4)],
        # [2] upwardy triangle:
        [(2, 0), (4, 4), (0, 4)],
        # [3] left half of square, standing rectangle:
        [(0, 0), (2, 0), (2, 4), (0, 4)],
        # [4] square standing on diagonale:
        [(2, 0), (4, 2), (2, 4), (0, 2)],
        # [5] kite pointing topleft:
        [(0, 0), (4, 2), (4, 4), (2, 4)],
        # [6] Sierpinski triangle, fractal triangles:
        [(2, 0), (4, 4), (2, 4), (3, 2), (1, 2), (2, 4), (0, 4)],
        # [7] sharp angled lefttop pointing triangle:
        [(0, 0), (4, 2), (2, 4)],
        # [8] small centered square:
        [(1, 1), (3, 1), (3, 3), (1, 3)],
        # [9] two small triangles:
        [(2, 0), (4, 0), (0, 4), (0, 2), (2, 2)],
        # [10] small topleft square:
        [(0, 0), (2, 0), (2, 2), (0, 2)],
        # [11] downpointing right-angled triangle on bottom:
        [(0, 2), (4, 2), (2, 4)],
        # [12] uppointing right-angled triangle on bottom:
        [(2, 2), (4, 4), (0, 4)],
        # [13] small rightbottom pointing right-angled triangle on topleft:
        [(2, 0), (2, 2), (0, 2)],
        # [14] small lefttop pointing right-angled triangle on topleft:
        [(0, 0), (2, 0), (0, 2)],
        # [15] empty:
        [],
    ]
    # get the [0] full square, [4] square standing on diagonale,
    # [8] small centered square, or [15] empty tile:
    MIDDLE_PATCH_SET = [0, 4, 8, 15]
    # modify path set: scale the 0..4 grid to the 0..1 unit square and
    # close each polygon by repeating its first point.
    for idx, path in enumerate(PATH_SET):
        if path:
            p = [(vec[0] / 4.0, vec[1] / 4.0) for vec in path]
            PATH_SET[idx] = p + p[:1]

    def decode(self, code, twoColor):
        """decode the code

        Unpacks patch types, rotations, inversion flags and two 5-bit-per-
        channel colours from the integer *code*.
        """
        shift = 0
        middleType = (code >> shift) & 0x03
        shift += 2
        middleInvert = (code >> shift) & 0x01
        shift += 1
        cornerType = (code >> shift) & 0x0F
        shift += 4
        cornerInvert = (code >> shift) & 0x01
        shift += 1
        cornerTurn = (code >> shift) & 0x03
        shift += 2
        sideType = (code >> shift) & 0x0F
        shift += 4
        sideInvert = (code >> shift) & 0x01
        shift += 1
        sideTurn = (code >> shift) & 0x03
        shift += 2
        blue = (code >> shift) & 0x1F
        shift += 5
        green = (code >> shift) & 0x1F
        shift += 5
        red = (code >> shift) & 0x1F
        shift += 5
        second_blue = (code >> shift) & 0x1F
        shift += 5
        second_green = (code >> shift) & 0x1F
        shift += 5
        second_red = (code >> shift) & 0x1F
        # NOTE(review): advances by 1, not 5, so swap_cross overlaps
        # second_red's bits — looks like an off-by-shift, but changing it
        # would alter every generated identicon; confirm before fixing.
        shift += 1
        swap_cross = (code >> shift) & 0x01
        middleType = self.MIDDLE_PATCH_SET[middleType]
        # 5-bit channels are scaled to 0..248 by the 3-bit left shift.
        foreColor = (red << 3, green << 3, blue << 3)
        foreColor = QtGui.QColor(*foreColor)
        if twoColor:
            # NOTE(review): channel order here is (blue, green, red) while
            # foreColor uses (red, green, blue) — verify this is intended.
            secondColor = (second_blue << 3, second_green << 3, second_red << 3)
            secondColor = QtGui.QColor(*secondColor)
        else:
            secondColor = foreColor
        return (
            (middleType, middleInvert, 0),
            (cornerType, cornerInvert, cornerTurn),
            (sideType, sideInvert, sideTurn),
            foreColor,
            secondColor,
            swap_cross,
        )
def render_identicon(
    code, size, twoColor=False, opacity=255, penwidth=0, renderer=None
):
    """Render an identicon image for *code* at patch size *size*.

    Uses DonRenderer unless another renderer class is supplied.
    """
    cls = renderer if renderer else DonRenderer
    return cls(code).render(size, twoColor, opacity, penwidth)
|
Draft | Init | # ***************************************************************************
# * Copyright (c) 2009 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Initialization file of the workbench, non-GUI."""
import FreeCAD as App

# add Import/Export types
App.addImportType("Autodesk DXF 2D (*.dxf)", "importDXF")
App.addImportType("SVG as geometry (*.svg)", "importSVG")
App.addImportType("Open CAD Format (*.oca *.gcad)", "importOCA")
App.addImportType("Common airfoil data (*.dat)", "importAirfoilDAT")
App.addExportType("Autodesk DXF 2D (*.dxf)", "importDXF")
App.addExportType("Flattened SVG (*.svg)", "importSVG")
App.addExportType("Open CAD Format (*.oca)", "importOCA")
# DWG goes through the same importDWG module for both directions.
App.addImportType("Autodesk DWG 2D (*.dwg)", "importDWG")
App.addExportType("Autodesk DWG 2D (*.dwg)", "importDWG")

# Register the Draft test suite with FreeCAD's unit-test framework.
App.__unit_test__ += ["TestDraft"]
|
ui | diagrampage | import functools
import importlib
import logging
from typing import Optional
from gaphas.guide import GuidePainter
from gaphas.painter import FreeHandPainter, HandlePainter, PainterChain
from gaphas.segment import LineSegmentPainter
from gaphas.tool.rubberband import RubberbandPainter, RubberbandState
from gaphas.view import GtkView
from gaphor.core import event_handler, gettext
from gaphor.core.modeling import StyleSheet
from gaphor.core.modeling.diagram import Diagram, StyledDiagram
from gaphor.core.modeling.event import AttributeUpdated, ElementDeleted
from gaphor.diagram.diagramtoolbox import get_tool_def, tooliter
from gaphor.diagram.painter import DiagramTypePainter, ItemPainter
from gaphor.diagram.selection import Selection
from gaphor.diagram.tools import (
apply_default_tool_set,
apply_magnet_tool_set,
apply_placement_tool_set,
)
from gaphor.diagram.tools.magnet import MagnetPainter
from gaphor.ui.event import DiagramClosed, DiagramSelectionChanged, ToolSelected
from gi.repository import Adw, Gdk, GdkPixbuf, Gtk
log = logging.getLogger(__name__)
@functools.lru_cache(maxsize=1)
def placement_icon_base():
    """Load (once) the 64x64 base pixbuf that placement cursors are drawn on."""
    f = importlib.resources.files("gaphor") / "ui" / "placement-icon-base.png"
    return GdkPixbuf.Pixbuf.new_from_file_at_scale(str(f), 64, 64, True)
# Give diagram views a CSS node name so style sheets can target them;
# skipped silently when the installed GtkView lacks set_css_name.
if hasattr(GtkView, "set_css_name"):
    GtkView.set_css_name("diagramview")
@functools.cache
def get_placement_icon(display, icon_name):
    """Compose a placement-cursor texture: the tool icon drawn onto the base pixbuf.

    Cached per (display, icon_name); pass display=None to use the default display.
    """
    if display is None:
        display = Gdk.Display.get_default()
    pixbuf = placement_icon_base().copy()
    theme_icon = Gtk.IconTheme.get_for_display(display).lookup_icon(
        icon_name,
        None,
        24,
        1,
        Gtk.TextDirection.NONE,
        Gtk.IconLookupFlags.FORCE_SYMBOLIC,
    )
    icon = GdkPixbuf.Pixbuf.new_from_file_at_scale(
        theme_icon.get_file().get_path(), 32, 32, True
    )
    # Paste the 32x32 icon at a fixed offset inside the 64x64 base image.
    icon.copy_area(
        0,
        0,
        icon.get_width(),
        icon.get_height(),
        pixbuf,
        9,
        15,
    )
    return Gdk.Texture.new_for_pixbuf(pixbuf)
def get_placement_cursor(display, icon_name):
    """Return a Gdk.Cursor showing the placement icon for *icon_name*."""
    return Gdk.Cursor.new_from_texture(get_placement_icon(display, icon_name), 1, 1)
class DiagramPage:
def __init__(self, diagram, event_manager, modeling_language):
    """Set up page state and subscribe to model/style events.

    The GTK widgets are created later by construct().
    """
    self.event_manager = event_manager
    self.diagram = diagram
    self.modeling_language = modeling_language
    self.style_manager = Adw.StyleManager.get_default()
    self.view: Optional[GtkView] = None
    self.widget: Optional[Gtk.Widget] = None
    self.diagram_css: Optional[Gtk.CssProvider] = None
    self.rubberband_state = RubberbandState()
    # Track dark-mode switches so the drawing style can follow the theme.
    self._notify_dark_id = self.style_manager.connect(
        "notify::dark", self._on_notify_dark
    )
    self.event_manager.subscribe(self._on_element_delete)
    self.event_manager.subscribe(self._on_attribute_updated)
    self.event_manager.subscribe(self._on_tool_selected)
# Page title: the diagram's name, falling back to a localized "<None>".
title = property(lambda s: s.diagram and s.diagram.name or gettext("<None>"))
def get_diagram(self):
    """Return the diagram shown on this page."""
    return self.diagram

def get_view(self):
    """Return the page's GtkView (None before construct() runs)."""
    return self.view
def construct(self):
    """Create the widget.

    Builds the GtkView inside a scrolled window, installs the per-diagram
    CSS provider, selects the pointer tool and applies the drawing style.

    Returns: the newly created widget.
    """
    assert self.diagram
    view = GtkView(selection=Selection())
    # Per-instance CSS class lets the style sheet target just this view.
    view.add_css_class(self._css_class())
    self.diagram_css = Gtk.CssProvider.new()
    Gtk.StyleContext.add_provider_for_display(
        Gdk.Display.get_default(),
        self.diagram_css,
        Gtk.STYLE_PROVIDER_PRIORITY_USER,
    )
    scrolled_window = Gtk.ScrolledWindow()
    scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
    scrolled_window.set_child(view)
    view.selection.add_handler(self._on_view_selection_changed)
    self.view = view
    self.widget = scrolled_window
    self.select_tool("toolbox-pointer")
    self.update_drawing_style()
    # Set model only after the painters are set
    view.model = self.diagram
    return self.widget
def apply_tool_set(self, tool_name):
    """Apply the tool set associated with an id (action name) to the view.

    The pointer and magnet tools are built-in; any other id is looked up
    in the modeling language's toolbox and becomes a placement tool.
    """
    if tool_name == "toolbox-pointer":
        return apply_default_tool_set(
            self.view,
            self.modeling_language,
            self.event_manager,
            self.rubberband_state,
        )
    elif tool_name == "toolbox-magnet":
        return apply_magnet_tool_set(
            self.view,
            self.modeling_language,
            self.event_manager,
        )
    tool_def = get_tool_def(self.modeling_language, tool_name)
    item_factory = tool_def.item_factory
    handle_index = tool_def.handle_index
    return apply_placement_tool_set(
        self.view,
        item_factory=item_factory,
        modeling_language=self.modeling_language,
        event_manager=self.event_manager,
        handle_index=handle_index,
    )
def get_tool_icon_name(self, tool_name):
    """Return the icon name of a toolbox tool, or None for the pointer tool.

    Raises StopIteration when *tool_name* is not in the toolbox definition.
    """
    if tool_name == "toolbox-pointer":
        return None
    matches = (
        tool.icon_name
        for tool in tooliter(self.modeling_language.toolbox_definition)
        if tool.id == tool_name
    )
    return next(matches)
def _css_class(self):
return f"diagram-{id(self)}"
    @event_handler(ToolSelected)
    def _on_tool_selected(self, event: ToolSelected):
        # Switch the active tool when a ToolSelected event arrives on the bus.
        self.select_tool(event.tool_name)
    @event_handler(ElementDeleted)
    def _on_element_delete(self, event: ElementDeleted):
        # If the diagram itself was deleted, this page must close too.
        if event.element is self.diagram:
            self.event_manager.handle(DiagramClosed(self.diagram))
    @event_handler(AttributeUpdated)
    def _on_attribute_updated(self, event: AttributeUpdated):
        # A style sheet (or its natural language) change invalidates the
        # rendered style of every item; a diagram rename only needs a redraw.
        if (
            event.property is StyleSheet.styleSheet
            or event.property is StyleSheet.naturalLanguage
        ):
            self.update_drawing_style()
            diagram = self.diagram
            for item in diagram.get_all_items():
                diagram.request_update(item)
        elif event.property is Diagram.name and self.view:
            self.view.update_back_buffer()
    def _on_notify_dark(self, style_manager, gparam):
        # System light/dark preference changed: recompute diagram colors.
        self.update_drawing_style()
    def close(self):
        """Tab is destroyed.
        Do the same thing that would be done if Close was pressed.

        Disconnects the dark-mode signal, removes the CSS provider and
        unsubscribes all event handlers registered in __init__.
        """
        assert self.widget
        if self._notify_dark_id:
            # disconnect() also resets the stored handler id (to its
            # return value) so a second close() is a no-op.
            self._notify_dark_id = self.style_manager.disconnect(self._notify_dark_id)
        Gtk.StyleContext.remove_provider_for_display(
            Gdk.Display.get_default(),
            self.diagram_css,
        )
        self.event_manager.unsubscribe(self._on_element_delete)
        self.event_manager.unsubscribe(self._on_attribute_updated)
        self.event_manager.unsubscribe(self._on_tool_selected)
        self.view = None
    def select_tool(self, tool_name: str):
        """Activate *tool_name* and update the mouse cursor to match."""
        if not self.view:
            return
        self.apply_tool_set(tool_name)
        # Placement tools show their icon as the cursor; the pointer tool
        # (no icon) restores the default cursor.
        if icon_name := self.get_tool_icon_name(tool_name):
            self.view.set_cursor(get_placement_cursor(None, icon_name))
        else:
            self.view.set_cursor(None)
    def update_drawing_style(self):
        """Set the drawing style for the diagram based on the active style
        sheet.

        Updates the background color via the per-page CSS provider and
        rebuilds the painter chain (handles, guides, rubberband, ...).
        """
        assert self.view
        assert self.diagram_css
        dark_mode = self.style_manager.get_dark()
        style = self.diagram.style(StyledDiagram(self.diagram, dark_mode=dark_mode))
        # Background color as (r, g, b, a) floats in 0..1; defaults to fully
        # transparent when the style sheet sets none.
        bg = style.get("background-color", (0.0, 0.0, 0.0, 0.0))
        # TODO: Temporary, until this is supported by PyGObject
        # Newer GTK (> 4.8) accepts (str, length); older releases want bytes.
        if (Gtk.get_major_version(), Gtk.get_minor_version()) > (4, 8):
            self.diagram_css.load_from_data(
                f".{self._css_class()} {{ background-color: rgba({int(255*bg[0])}, {int(255*bg[1])}, {int(255*bg[2])}, {bg[3]}); }}",
                -1,
            )
        else:
            self.diagram_css.load_from_data(
                f".{self._css_class()} {{ background-color: rgba({int(255*bg[0])}, {int(255*bg[1])}, {int(255*bg[2])}, {bg[3]}); }}".encode()
            )
        view = self.view
        item_painter = ItemPainter(view.selection, dark_mode)
        # A non-zero "line-style" renders items through the free-hand painter.
        if sloppiness := style.get("line-style", 0.0):
            item_painter = FreeHandPainter(item_painter, sloppiness=sloppiness)
        view.bounding_box_painter = item_painter
        view.painter = (
            PainterChain()
            .append(item_painter)
            .append(HandlePainter(view))
            .append(LineSegmentPainter(view.selection))
            .append(GuidePainter(view))
            .append(MagnetPainter(view))
            .append(RubberbandPainter(self.rubberband_state))
            .append(DiagramTypePainter(self.diagram))
        )
        view.request_update(self.diagram.get_all_items())
    def _on_view_selection_changed(self, item):
        # Broadcast focus/selection changes on the event bus so other UI
        # parts can follow the selection.
        view = self.view
        if not view:
            return
        selection = view.selection
        self.event_manager.handle(
            DiagramSelectionChanged(
                view, selection.focused_item, selection.selected_items
            )
        )
|
options | tags | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007, 2011 Lukáš Lalinský
# Copyright (C) 2009 Nikolai Prokoschenko
# Copyright (C) 2009-2010, 2018-2021 Philipp Wolfer
# Copyright (C) 2012 Erik Wasser
# Copyright (C) 2012 Johannes Weißl
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2013, 2017 Sophist-UK
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2017-2018, 2020-2022 Laurent Monin
# Copyright (C) 2022 Marcin Szalowicz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.config import BoolOption, ListOption, get_config
from picard.ui.options import OptionsPage, register_options_page
from picard.ui.ui_options_tags import Ui_TagsOptionsPage
class TagsOptionsPage(OptionsPage):
    """Options page controlling how Picard writes tags to files."""

    NAME = "tags"
    TITLE = N_("Tags")
    PARENT = None
    SORT_ORDER = 30
    ACTIVE = True
    HELP_URL = "/config/options_tags.html"
    options = [
        BoolOption("setting", "dont_write_tags", False),
        BoolOption("setting", "preserve_timestamps", False),
        BoolOption("setting", "clear_existing_tags", False),
        BoolOption("setting", "preserve_images", False),
        BoolOption("setting", "remove_id3_from_flac", False),
        BoolOption("setting", "remove_ape_from_mp3", False),
        BoolOption("setting", "fix_missing_seekpoints_flac", False),
        ListOption("setting", "preserved_tags", []),
    ]

    def __init__(self, parent=None):
        """Build the page and wire up the generated Qt UI."""
        super().__init__(parent)
        self.ui = Ui_TagsOptionsPage()
        self.ui.setupUi(self)

    def load(self):
        """Populate the widgets from the current configuration."""
        setting = get_config().setting
        ui = self.ui
        # "Write tags" is presented positively but stored inverted.
        ui.write_tags.setChecked(not setting["dont_write_tags"])
        ui.preserve_timestamps.setChecked(setting["preserve_timestamps"])
        ui.clear_existing_tags.setChecked(setting["clear_existing_tags"])
        ui.preserve_images.setChecked(setting["preserve_images"])
        ui.remove_ape_from_mp3.setChecked(setting["remove_ape_from_mp3"])
        ui.remove_id3_from_flac.setChecked(setting["remove_id3_from_flac"])
        ui.fix_missing_seekpoints_flac.setChecked(setting["fix_missing_seekpoints_flac"])
        ui.preserved_tags.update(setting["preserved_tags"])
        ui.preserved_tags.set_user_sortable(False)

    def save(self):
        """Write the widget state back into the configuration."""
        setting = get_config().setting
        ui = self.ui
        setting["dont_write_tags"] = not ui.write_tags.isChecked()
        setting["preserve_timestamps"] = ui.preserve_timestamps.isChecked()
        clear = ui.clear_existing_tags.isChecked()
        if clear != setting["clear_existing_tags"]:
            # The metadata comparison box renders differently when existing
            # tags are cleared, so refresh it on change.
            setting["clear_existing_tags"] = clear
            self.tagger.window.metadata_box.update()
        setting["preserve_images"] = ui.preserve_images.isChecked()
        setting["remove_ape_from_mp3"] = ui.remove_ape_from_mp3.isChecked()
        setting["remove_id3_from_flac"] = ui.remove_id3_from_flac.isChecked()
        setting["fix_missing_seekpoints_flac"] = ui.fix_missing_seekpoints_flac.isChecked()
        setting["preserved_tags"] = list(ui.preserved_tags.tags)
        # Keep the main window's "save tags" toggle in sync.
        self.tagger.window.enable_tag_saving_action.setChecked(
            not setting["dont_write_tags"]
        )


register_options_page(TagsOptionsPage)
|
PyObjCTest | test_cgcolor | from PyObjCTools.TestSupport import *
from Quartz.CoreGraphics import *
try:
    long
except NameError:
    # Python 3: the "long" type was merged into "int".
    long = int
try:
    unicode
except NameError:
    # Python 3: "unicode" was renamed to "str".
    unicode = str
class TestCGColor(TestCase):
    """Sanity checks for the CGColor bindings in Quartz.CoreGraphics."""
    def testTypes(self):
        # CGColorRef must be wrapped as a CoreFoundation type.
        self.assertIsCFType(CGColorRef)
    @min_os_level("10.5")
    def testFunctions10_5(self):
        # Generic color-space constructors added in macOS 10.5; each result
        # follows the CF "Create" (retained) ownership rule.
        self.assertResultIsCFRetained(CGColorCreateGenericGray)
        color = CGColorCreateGenericGray(0.75, 0.8)
        self.assertIsInstance(color, CGColorRef)
        self.assertResultIsCFRetained(CGColorCreateGenericRGB)
        color = CGColorCreateGenericRGB(0.75, 0.8, 1.0, 0.5)
        self.assertIsInstance(color, CGColorRef)
        self.assertResultIsCFRetained(CGColorCreateGenericCMYK)
        color = CGColorCreateGenericCMYK(0.75, 0.8, 0.5, 1.0, 0.5)
        self.assertIsInstance(color, CGColorRef)
        color = CGColorGetConstantColor(kCGColorWhite)
        self.assertIsInstance(color, CGColorRef)
    def testFunctions(self):
        self.assertResultIsCFRetained(CGColorCreate)
        color = CGColorCreate(CGColorSpaceCreateDeviceRGB(), [1.0, 0.5, 0.5])
        self.assertIsInstance(color, CGColorRef)
        self.assertResultIsCFRetained(CGColorCreateCopy)
        v = CGColorCreateCopy(color)
        self.assertIsInstance(v, CGColorRef)
        self.assertResultIsCFRetained(CGColorCreateCopyWithAlpha)
        v = CGColorCreateCopyWithAlpha(color, 0.7)
        self.assertIsInstance(v, CGColorRef)
        # Manual retain/release should round-trip without error.
        CGColorRetain(color)
        CGColorRelease(color)
        self.assertResultHasType(CGColorEqualToColor, objc._C_BOOL)
        self.assertTrue(CGColorEqualToColor(color, color) is True)
        # v differs from color only in alpha, so they compare unequal.
        self.assertTrue(CGColorEqualToColor(color, v) is False)
        self.assertEqual(CGColorGetNumberOfComponents(color), 4)
        v = CGColorGetComponents(color)
        self.assertIsInstance(v, objc.varlist)
        self.assertIsInstance(v[0], float)
        v = CGColorGetAlpha(color)
        self.assertIsInstance(v, float)
        v = CGColorGetColorSpace(color)
        self.assertIsInstance(v, CGColorSpaceRef)
        # Not a pattern color, so no pattern object is attached.
        v = CGColorGetPattern(color)
        self.assertTrue(v is None)
        self.assertIsInstance(CGColorGetTypeID(), (int, long))
        # CGColorCreateWithPattern, CGColorGetPattern: tested in test_cgpattern
    @min_os_level("10.5")
    def testConstants(self):
        # Constant-color names are string constants.
        self.assertIsInstance(kCGColorWhite, unicode)
        self.assertIsInstance(kCGColorBlack, unicode)
        self.assertIsInstance(kCGColorClear, unicode)
if __name__ == "__main__":
    # main() is provided by PyObjCTools.TestSupport (star-imported above).
    main()
|
beetsplug | export | # This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Exports data from beets
"""
import codecs
import csv
import json
import sys
from datetime import date, datetime
from xml.etree import ElementTree
import mediafile
from beets import ui, util
from beets.plugins import BeetsPlugin
from beetsplug.info import library_data, tag_data
class ExportEncoder(json.JSONEncoder):
    """JSON encoder that serializes dates/datetimes as ISO-8601 strings,
    since JSON has no standard date representation."""

    def default(self, o):
        if not isinstance(o, (datetime, date)):
            # Anything else falls back to the base encoder (which raises
            # TypeError for unserializable objects).
            return super().default(o)
        return o.isoformat()
class ExportPlugin(BeetsPlugin):
    """Provide the ``beet export`` command: dump library or tag data as
    JSON, JSON Lines, CSV, or XML."""

    def __init__(self):
        super().__init__()
        self.config.add(
            {
                "default_format": "json",
                "json": {
                    # JSON module formatting options.
                    "formatting": {
                        "ensure_ascii": False,
                        "indent": 4,
                        "separators": (",", ": "),
                        "sort_keys": True,
                    }
                },
                "jsonlines": {
                    # JSON Lines formatting options.
                    "formatting": {
                        "ensure_ascii": False,
                        "separators": (",", ": "),
                        "sort_keys": True,
                    }
                },
                "csv": {
                    # CSV module formatting options.
                    "formatting": {
                        # The delimiter used to separate columns.
                        "delimiter": ",",
                        # The dialect to use when formatting the file output.
                        "dialect": "excel",
                    }
                },
                "xml": {
                    # XML module formatting options.
                    "formatting": {}
                },
                # TODO: Use something like the edit plugin
                # 'item_fields': []
            }
        )

    def commands(self):
        """Register the ``export`` subcommand and its CLI options."""
        cmd = ui.Subcommand("export", help="export data from beets")
        cmd.func = self.run
        cmd.parser.add_option(
            "-l",
            "--library",
            action="store_true",
            help="show library fields instead of tags",
        )
        cmd.parser.add_option(
            "-a",
            "--album",
            action="store_true",
            help='show album fields instead of tracks (implies "--library")',
        )
        cmd.parser.add_option(
            "--append",
            action="store_true",
            default=False,
            help="if should append data to the file",
        )
        cmd.parser.add_option(
            "-i",
            "--include-keys",
            default=[],
            action="append",
            dest="included_keys",
            help="comma separated list of keys to show",
        )
        cmd.parser.add_option(
            "-o",
            "--output",
            help="path for the output file. If not given, will print the data",
        )
        cmd.parser.add_option(
            "-f",
            "--format",
            # BUG FIX: the previous default of "json" made opts.format always
            # truthy, so the `default_format` config option was never
            # consulted in run(). Leave the default unset so the config value
            # ("json" by default) takes effect.
            default=None,
            help="the output format: json (default), jsonlines, csv, or xml",
        )
        return [cmd]

    def run(self, lib, opts, args):
        """Collect the requested data and hand it to the chosen exporter.

        Line-based formats (jsonlines) are written record by record; the
        others accumulate everything and write once at the end.
        """
        file_path = opts.output
        file_mode = "a" if opts.append else "w"
        # Fall back to the configured default format when -f is not given.
        file_format = opts.format or self.config["default_format"].get(str)
        file_format_is_line_based = file_format == "jsonlines"
        format_options = self.config[file_format]["formatting"].get(dict)
        export_format = ExportFormat.factory(
            file_type=file_format, file_path=file_path, file_mode=file_mode
        )
        # --library/--album read from the database; otherwise read file tags.
        if opts.library or opts.album:
            data_collector = library_data
        else:
            data_collector = tag_data
        included_keys = []
        for keys in opts.included_keys:
            included_keys.extend(keys.split(","))
        items = []
        for data_emitter in data_collector(
            lib,
            ui.decargs(args),
            album=opts.album,
        ):
            try:
                data, item = data_emitter(included_keys or "*")
            except (mediafile.UnreadableFileError, OSError) as ex:
                self._log.error("cannot read file: {0}", ex)
                continue
            # Byte-string values (e.g. paths) are made printable first.
            for key, value in data.items():
                if isinstance(value, bytes):
                    data[key] = util.displayable_path(value)
            if file_format_is_line_based:
                export_format.export(data, **format_options)
            else:
                items += [data]
        if not file_format_is_line_based:
            export_format.export(items, **format_options)
class ExportFormat:
    """Base class for the supported output format types."""

    def __init__(self, file_path, file_mode="w", encoding="utf-8"):
        self.path = file_path
        self.mode = file_mode
        self.encoding = encoding
        # Write to the given file, or to stdout when no path was supplied.
        if self.path:
            self.out_stream = codecs.open(self.path, self.mode, self.encoding)
        else:
            self.out_stream = sys.stdout

    @classmethod
    def factory(cls, file_type, **kwargs):
        """Instantiate the concrete format matching *file_type*."""
        if file_type in ("json", "jsonlines"):
            return JsonFormat(**kwargs)
        if file_type == "csv":
            return CSVFormat(**kwargs)
        if file_type == "xml":
            return XMLFormat(**kwargs)
        raise NotImplementedError()

    def export(self, data, **kwargs):
        """Write *data* to the output stream; implemented by subclasses."""
        raise NotImplementedError()
class JsonFormat(ExportFormat):
    """Saves in a json file"""

    # The previous no-op __init__ override (same signature, same defaults,
    # only calling super()) was removed; ExportFormat.__init__ is inherited.

    def export(self, data, **kwargs):
        """Serialize *data* as JSON (dates via ExportEncoder) plus a newline.

        The trailing newline makes repeated calls produce valid JSON Lines.
        """
        json.dump(data, self.out_stream, cls=ExportEncoder, **kwargs)
        self.out_stream.write("\n")
class CSVFormat(ExportFormat):
    """Saves in a csv file"""

    # The previous no-op __init__ override (same signature, same defaults,
    # only calling super()) was removed; ExportFormat.__init__ is inherited.

    def export(self, data, **kwargs):
        """Write *data* (a list of dicts) as CSV with a header row.

        The header is taken from the keys of the first row; empty *data*
        yields just an empty header line.
        """
        header = list(data[0].keys()) if data else []
        writer = csv.DictWriter(self.out_stream, fieldnames=header, **kwargs)
        writer.writeheader()
        writer.writerows(data)
class XMLFormat(ExportFormat):
    """Saves in a xml file"""

    # The previous no-op __init__ override (same signature, same defaults,
    # only calling super()) was removed; ExportFormat.__init__ is inherited.

    def export(self, data, **kwargs):
        """Render *data* as ``<library><tracks><track>...`` XML.

        Each dict in *data* becomes a <track> with one child element per
        key. Values are assumed to be strings already (run() converts
        bytes); non-string values would fail at serialization time.
        """
        library = ElementTree.Element("library")
        tracks = ElementTree.SubElement(library, "tracks")
        if data and isinstance(data[0], dict):
            # The enumerate index used here previously was never used.
            for item in data:
                track = ElementTree.SubElement(tracks, "track")
                for key, value in item.items():
                    track_details = ElementTree.SubElement(track, key)
                    track_details.text = value
        # Depending on the version of python the encoding needs to change
        try:
            out = ElementTree.tostring(library, encoding="unicode", **kwargs)
        except LookupError:
            # NOTE(review): this fallback yields bytes, which a text stream
            # cannot write — confirm whether it is still reachable.
            out = ElementTree.tostring(library, encoding="utf-8", **kwargs)
        self.out_stream.write(out)
|
fta | topevent | """Top Event item definition."""
from gaphas.geometry import Rectangle
from gaphor.core.modeling import DrawContext
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import Box, Text, draw_border
from gaphor.diagram.support import represents
from gaphor.diagram.text import FontStyle, FontWeight
from gaphor.RAAML import raaml
from gaphor.UML.recipes import stereotypes_str
@represents(raaml.TopEvent)
class TopEventItem(Classified, ElementPresentation):
    """Diagram presentation for a RAAML fault-tree Top Event element."""
    def __init__(self, diagram, id=None):
        super().__init__(diagram, id)
        # Redraw when the element is renamed or moved to another package.
        self.watch("subject[NamedElement].name").watch(
            "subject[NamedElement].namespace.name"
        )
    def update_shapes(self, event=None):
        # Declarative shape: stereotype line, bold name, and the owning
        # package in small print, inside a plain rectangular border.
        self.shape = Box(
            Box(
                Text(
                    text=lambda: stereotypes_str(
                        self.subject, [self.diagram.gettext("Top Event")]
                    ),
                ),
                Text(
                    text=lambda: self.subject.name or "",
                    width=lambda: self.width - 4,
                    style={
                        "font-weight": FontWeight.BOLD,
                        "font-style": FontStyle.NORMAL,
                    },
                ),
                Text(
                    text=lambda: from_package_str(self),
                    style={"font-size": "x-small"},
                ),
                style={"padding": (12, 4, 12, 4)},
            ),
            draw=draw_top_event,
        )
def draw_top_event(box, context: DrawContext, bounding_box: Rectangle):
    """Draw the Top Event symbol: a plain rectangular border."""
    draw_border(box, context, bounding_box)
|
extractors | tumblr | #!/usr/bin/env python
__all__ = ["tumblr_download"]
from ..common import *
from .dailymotion import dailymotion_download
from .universal import *
from .vimeo import vimeo_download
from .vine import vine_download
def tumblr_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
    """Download media from a Tumblr URL.

    Direct media-host links are delegated to the universal extractor.
    Otherwise the page is fetched (bypassing the GDPR consent wall when
    present) and parsed: photo feeds are scraped for images first; if no
    photos are found, video/audio sources and embedded players
    (Vimeo/Dailymotion/Vine) are tried.
    """
    # Direct media host: nothing to parse, let the generic extractor handle it.
    if re.match(r"https?://\d+\.media\.tumblr\.com/", url):
        universal_download(url, output_dir, merge=merge, info_only=info_only)
        return
    import ssl
    ssl_context = request.HTTPSHandler(
        context=ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    )  # server requires TLS v1.2
    cookie_handler = request.HTTPCookieProcessor()
    opener = request.build_opener(ssl_context, cookie_handler)
    request.install_opener(opener)
    page = get_html(url)
    form_key = match1(page, r'id="tumblr_form_key" content="([^"]+)"')
    if form_key is not None:
        # bypass GDPR consent page: post an accept-all consent, then
        # re-fetch the page with a faked user agent.
        referer = (
            "https://www.tumblr.com/privacy/consent?redirect=%s" % parse.quote_plus(url)
        )
        post_content(
            "https://www.tumblr.com/svc/privacy/consent",
            headers={
                "Content-Type": "application/json",
                "User-Agent": fake_headers["User-Agent"],
                "Referer": referer,
                "X-tumblr-form-key": form_key,
                "X-Requested-With": "XMLHttpRequest",
            },
            post_data_raw='{"eu_resident":true,"gdpr_is_acceptable_age":true,"gdpr_consent_core":true,"gdpr_consent_first_party_ads":true,"gdpr_consent_third_party_ads":true,"gdpr_consent_search_history":true,"redirect_to":"%s","gdpr_reconsent":false}'
            % url,
        )
        page = get_html(url, faker=True)
    html = parse.unquote(page).replace("\/", "/")
    feed = r1(r'<meta property="og:type" content="tumblr-feed:(\w+)" />', html)
    if feed in ["photo", "photoset", "entry"] or feed is None:
        # try to extract photos
        page_title = (
            r1(r'<meta name="description" content="([^"\n]+)', html)
            or r1(r'<meta property="og:description" content="([^"\n]+)', html)
            or r1(r"<title>([^<\n]*)", html)
        )
        # Collect legacy (tumblr_*_NNN.ext) and newer (/sWxH/) media URLs.
        urls = (
            re.findall(r'(https?://[^;"&]+/tumblr_[^;"&]+_\d+\.jpg)', html)
            + re.findall(r'(https?://[^;"&]+/tumblr_[^;"&]+_\d+\.png)', html)
            + re.findall(r'(https?://[^;"&]+/tumblr_[^;"&]+_\d+\.gif)', html)
            + re.findall(
                r'(https?://\d+\.media\.tumblr\.com/[^;"&]+/s\d+x\d+/[^;"&]+\.jpg)',
                html,
            )
            + re.findall(
                r'(https?://\d+\.media\.tumblr\.com/[^;"&]+/s\d+x\d+/[^;"&]+\.png)',
                html,
            )
            + re.findall(
                r'(https?://\d+\.media\.tumblr\.com/[^;"&]+/s\d+x\d+/[^;"&]+\.gif)',
                html,
            )
        )
        # Deduplicate by tumblr id, keeping only the highest-quality variant.
        tuggles = {}
        for url in urls:
            if url.endswith(".gif"):
                hd_url = url
            elif url.endswith(".jpg"):
                hd_url = url  # FIXME: decide actual quality # r1(r'(.+)_\d+\.jpg$', url) + '_1280.jpg'
            elif url.endswith(".png"):
                hd_url = url  # FIXME: decide actual quality # r1(r'(.+)_\d+\.png$', url) + '_1280.png'
            else:
                continue
            filename = parse.unquote(hd_url.split("/")[-1])
            title = ".".join(filename.split(".")[:-1])
            tumblr_id = r1(r"^tumblr_(.+)_\d+$", title) or title
            # Quality is the numeric suffix of legacy names, or the /sWxH/
            # width of newer URLs.
            try:
                quality = int(r1(r"^tumblr_.+_(\d+)$", title))
            except:
                quality = int(r1(r"/s(\d+)x\d+/", hd_url))
            ext = filename.split(".")[-1]
            try:
                size = int(get_head(hd_url)["Content-Length"])
                if tumblr_id not in tuggles or tuggles[tumblr_id]["quality"] < quality:
                    tuggles[tumblr_id] = {
                        "title": title,
                        "url": hd_url,
                        "quality": quality,
                        "ext": ext,
                        "size": size,
                    }
            except:
                # Media whose size cannot be determined is silently skipped.
                pass
        if tuggles:
            size = sum([tuggles[t]["size"] for t in tuggles])
            print_info(site_info, page_title, None, size)
            if not info_only:
                for t in tuggles:
                    title = tuggles[t]["title"]
                    ext = tuggles[t]["ext"]
                    size = tuggles[t]["size"]
                    url = tuggles[t]["url"]
                    print_info(site_info, title, ext, size)
                    download_urls([url], title, ext, size, output_dir=output_dir)
            # Photos found and handled; do not fall through to video/audio.
            return
    # feed == 'audio' or feed == 'video' or feed is None
    # try to extract video / audio
    real_url = r1(r"source src=\\x22([^\\]+)\\", html)
    if not real_url:
        real_url = r1(r"audio_file=([^&]+)&", html)
        if real_url:
            real_url = (
                real_url
                + "?plead=please-dont-download-this-or-our-lawyers-wont-let-us-host-audio"
            )
    if not real_url:
        real_url = r1(r'<source src="([^"]*)"', html)
    if not real_url:
        # No inline media; look for an embedded player iframe.
        iframe_url = r1(
            r'<[^>]+tumblr_video_container[^>]+><iframe[^>]+src=[\'"]([^\'"]*)[\'"]',
            html,
        )
        if iframe_url is None:
            universal_download(
                url, output_dir, merge=merge, info_only=info_only, **kwargs
            )
            return
        if iframe_url:
            iframe_html = get_content(iframe_url, headers=fake_headers)
            real_url = r1(
                r'<video[^>]*>[\n ]*<source[^>]+src=[\'"]([^\'"]*)[\'"]', iframe_html
            )
        else:
            # NOTE(review): with the None case returning above, this branch
            # is only reachable when the regex captured an empty string;
            # third-party embeds are dispatched from here.
            iframe_url = r1(r'<iframe[^>]+src=[\'"]([^\'"]*)[\'"]', html)
            if iframe_url[:2] == "//":
                iframe_url = "http:" + iframe_url
            if re.search(r"player\.vimeo\.com", iframe_url):
                vimeo_download(
                    iframe_url,
                    output_dir,
                    merge=merge,
                    info_only=info_only,
                    referer="http://tumblr.com/",
                    **kwargs,
                )
                return
            elif re.search(r"dailymotion\.com", iframe_url):
                dailymotion_download(
                    iframe_url, output_dir, merge=merge, info_only=info_only, **kwargs
                )
                return
            elif re.search(r"vine\.co", iframe_url):
                vine_download(
                    iframe_url, output_dir, merge=merge, info_only=info_only, **kwargs
                )
                return
            else:
                iframe_html = get_content(iframe_url)
                real_url = r1(r'<source src="([^"]*)"', iframe_html)
    title = unescape_html(
        r1(r'<meta property="og:title" content="([^"]*)" />', html)
        or r1(r'<meta property="og:description" content="([^"]*)" />', html)
        or r1(r"<title>([^<\n]*)", html)
        or url.split("/")[4]
    ).replace("\n", "")
    # this is better: rebuild a canonical CDN URL from the video code.
    vcode = r1(r"tumblr_(\w+)", real_url)
    real_url = "https://vt.media.tumblr.com/tumblr_%s.mp4" % vcode
    type, ext, size = url_info(real_url, faker=True)
    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir, merge=merge)
site_info = "Tumblr.com"
# Extractor entry points used by the you-get framework.
download = tumblr_download
download_playlist = playlist_not_supported("tumblr")
|
base | chat_bot | # -*- coding: utf-8 -*-
from threading import Thread
from pyload.core.api import FileDoesNotExists, PackageDoesNotExists
from pyload.core.utils import format
from .addon import BaseAddon
class ChatBot(Thread, BaseAddon):
__name__ = "ChatBot"
__type__ = "addon"
__version__ = "0.02"
__status__ = "testing"
__config__ = [
("enabled", "bool", "Activated", False),
]
__description__ = """Base chat bot plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
SHORTCUT_COMMANDS = {
"a": "add",
"c": "collector",
"ca": "captcha",
"f": "freespace",
"h": "help",
"i": "info",
"l": "getLog",
"m": "more",
"p": "packinfo",
"q": "queue",
"r": "restart",
"rf": "restartfile",
"rp": "restartpackage",
"s": "status",
}
    def __init__(self, *args, **kwargs):
        # Cap on lines returned per reply; overflow is kept in self.more and
        # fetched with the "more" command.
        self.max_lines = 256
        self.more = []
        BaseAddon.__init__(self, *args, **kwargs)
        Thread.__init__(self)
        # Daemon thread: do not keep the process alive on shutdown.
        self.daemon = True
    def init(self):
        # Map pyLoad core events to handler method names on this addon.
        self.event_map = {
            "all_downloads_processed": "all_downloads_processed",
            "pyload_updated": "pyload_updated",
        }
    def all_downloads_processed(self):
        # Event hook; concrete bots may override.
        pass
    def pyload_updated(self, etag):
        # Event hook; concrete bots may override.
        pass
    def activate(self):
        # Start the bot's main loop in its own thread.
        Thread.start(self)
    def run(self):
        # Concrete bot implementations must provide the chat loop.
        raise NotImplementedError
    def do_bot_command(self, cmd, args):
        """Dispatch a chat command (or its shortcut) to the matching _cmd_*
        handler; unknown commands fall back to _cmd_error."""
        cmd = self.SHORTCUT_COMMANDS.get(cmd.lower(), cmd.lower())
        handler = getattr(self, "_cmd_{}".format(cmd), self._cmd_error)
        return handler(args)
    def _cmd_error(self, args):
        # Fallback handler for unrecognized commands.
        return [self._("ERROR: invalid command, for a list of commands enter: help")]
    def _cmd_status(self, args):
        """Summarize all active downloads, one formatted line each."""
        downloads = self.pyload.api.status_downloads()
        if not downloads:
            return [self._("INFO: There are no active downloads currently.")]
        lines = [self._("ID - Name - Status - Speed - ETA - Progress")]
        for data in downloads:
            if data.status == 5:
                # Status code 5 apparently means "waiting": show the wait
                # time instead of a percentage — confirm against the API's
                # status enum.
                temp_progress = data.format_wait
            else:
                temp_progress = "{}% ({})".format(data.percent, data.format_size)
            lines.append(
                "#{} - {} - {} - {} - {} - {}".format(
                    data.fid,
                    data.name,
                    data.statusmsg,
                    "{}".format(format.speed(data.speed)),
                    "{}".format(data.format_eta),
                    temp_progress,
                )
            )
        return lines
def _cmd_queue(self, args):
packages = self.pyload.api.get_queue_data()
if not packages:
return [self._("INFO: There are no packages in queue.")]
lines = []
for pack in packages:
lines.append(
'PACKAGE #{}: "{}" with {} links.'.format(
pack.pid, pack.name, len(pack.links)
)
)
return lines
def _cmd_collector(self, args):
packages = self.pyload.api.get_collector_data()
if not packages:
return [self._("INFO: No packages in collector!")]
lines = []
for pack in packages:
lines.append(
'PACKAGE #{}: "{}" with {} links.'.format(
pack.pid, pack.name, len(pack.links)
)
)
return lines
def _cmd_info(self, args):
try:
file_id = int(args[0])
except IndexError:
return [
self._("ERROR: Missing argument"),
self._("Use info command like this: info <link id>"),
]
except ValueError:
return [self._("ERROR: invalid link id {}").format(args[0])]
try:
info = self.pyload.api.get_file_data(int(file_id))
except FileDoesNotExists:
return [self._("ERROR: Link doesn't exists.")]
return [
self._("LINK #{}: {} ({}) [{}][{}]").format(
info.fid, info.name, info.format_size, info.statusmsg, info
)
]
    def _cmd_packinfo(self, args):
        """List a package's links; long listings are truncated to max_lines
        and can be paged with the "more" command."""
        try:
            id_or_name = args[0]
        except IndexError:
            return [
                self._("ERROR: Missing argument"),
                self._("ERROR: Use packinfo like this: packinfo <name|id>"),
            ]
        lines = []
        pack = self._get_package_by_name_or_id(id_or_name)
        if not pack:
            return [self._("ERROR: Package doesn't exists.")]
        self.more = []
        lines.append(
            'PACKAGE #{}: "{}" with {} links:'.format(
                pack.pid, pack.name, len(pack.links)
            )
        )
        for pyfile in pack.links:
            self.more.append(
                "LINK #{}: {} ({}) [{}]".format(
                    pyfile.fid,
                    pyfile.name,
                    pyfile.format_size,
                    pyfile.statusmsg,
                )
            )
        if len(self.more) < self.max_lines:
            # Everything fits in one reply.
            lines.extend(self.more)
            self.more = []
        else:
            # Reply with the first batch; the rest stays for "more".
            lines.extend(self.more[: self.max_lines])
            self.more = self.more[self.max_lines :]
            lines.append("{} more links to display.".format(len(self.more)))
        return lines
def _cmd_more(self, args):
if not self.more:
return [self._("No more information to display.")]
lines = self.more[: self.max_lines]
self.more = self.more[self.max_lines :]
lines.append("{} more links to display.".format(len(self.more)))
return lines
    def _cmd_unpause(self, args):
        # Resume starting new downloads.
        self.pyload.api.unpause_server()
        return [self._("INFO: Starting downloads.")]
    def _cmd_pause(self, args):
        # Running downloads continue; no new ones are started.
        self.pyload.api.pause_server()
        return [self._("INFO: No new downloads will be started.")]
    def _cmd_togglepause(self, args):
        # A truthy result from toggle_pause() is reported as "starting".
        if self.pyload.api.toggle_pause():
            return [self._("INFO: Starting downloads.")]
        else:
            return [self._("INFO: No new downloads will be started.")]
def _cmd_add(self, args):
if len(args) < 2:
return [
self._('ERROR: Add links like this: "add <name|id> link(s)". '),
self._(
"This will add the link <link> to to the package name <name> / the package with id <id>!"
),
]
id_or_name = args[0].strip()
links = [x.strip() for x in args[1:]]
pack = self._get_package_by_name_or_id(id_or_name)
if not pack:
#: Create new package
id = self.pyload.api.add_package(id_or_name, links, 1)
return [
self._("INFO: Created new Package {} [#{}] with {} links.").format(
id_or_name, id, len(links)
)
]
self.pyload.api.add_files(pack.pid, links)
return [
self._("INFO: Added {} links to Package {} [#{}]").format(
len(links), pack.name, pack.pid
)
]
def _cmd_del(self, args):
if len(args) < 2:
return [
self._("ERROR: Use del command like this: del -p|-l <id> [...]"),
self._("(-p indicates that the ids are from packages,"),
self._("-l indicates that the ids are from links"),
]
if args[0] == "-p":
ret = self.pyload.api.delete_packages(int(arg) for arg in args[1:])
return [self._("INFO: Deleted {} packages!").format(len(args[1:]))]
elif args[0] == "-l":
ret = self.pyload.api.del_links(int(arg) for arg in args[1:])
return [self._("INFO: Deleted {} links!").format(len(args[1:]))]
else:
return [
self._("ERROR: Use del command like this: del <-p|-l> <id> [...]"),
self._("-p indicates that the ids are from packages,"),
self._("-l indicates that the ids are from links"),
]
    def _cmd_push(self, args):
        """Move a package from the collector into the download queue."""
        try:
            package_id = int(args[0])
        except IndexError:
            return [
                self._("ERROR: Missing argument"),
                self._("Push package to queue like this: push <package id>"),
            ]
        except ValueError:
            return [self._("ERROR: invalid package id {}").format(args[0])]
        try:
            # Existence check only; the returned info is discarded.
            self.pyload.api.get_package_info(package_id)
        except PackageDoesNotExists:
            return [self._("ERROR: Package #{} does not exist.").format(package_id)]
        self.pyload.api.push_to_queue(package_id)
        return [self._("INFO: Pushed package #{} to queue.").format(package_id)]
    def _cmd_pull(self, args):
        """Move a package from the queue back to the collector."""
        try:
            package_id = int(args[0])
        except IndexError:
            return [
                self._("ERROR: Missing argument"),
                self._("Pull package from queue like this: pull <package id>"),
            ]
        except ValueError:
            return [self._("ERROR: invalid package id {}").format(args[0])]
        # NOTE(review): _cmd_push guards the same kind of lookup with a
        # PackageDoesNotExists handler; confirm get_package_data really
        # returns a falsy value (rather than raising) for unknown ids.
        if not self.pyload.api.get_package_data(package_id):
            return [self._("ERROR: Package #{} does not exist.").format(package_id)]
        self.pyload.api.pull_from_queue(package_id)
        return [
            self._("INFO: Pulled package #{} from queue to collector.").format(
                package_id
            )
        ]
    def _cmd_captcha(self, args):
        """
        Captcha answer.

        Usage: captcha <task id> <answer...>; the answer may contain spaces.
        """
        if not args:
            return [self._("ERROR: Captcha ID missing.")]
        task = self.pyload.captcha_manager.get_task_by_id(args[0])
        if not task:
            return [
                self._("ERROR: Captcha Task with ID {} does not exists.").format(
                    args[0]
                )
            ]
        task.set_result(" ".join(args[1:]))
        return [self._("INFO: Result {} saved.").format(" ".join(args[1:]))]
def _cmd_freespace(self, args):
b = format.size(int(self.pyload.api.free_space()))
return [self._("INFO: Free space is {}.").format(b)]
    def _cmd_restart(self, args):
        # Restart the whole pyLoad core.
        self.pyload.api.restart()
        return [self._("INFO: Done.")]
    def _cmd_restartfailed(self, args):
        # Queue all failed downloads for another attempt.
        self.pyload.api.restart_failed()
        return [self._("INFO: Restarting all failed downloads.")]
def _cmd_restartfile(self, args):
try:
file_id = int(args[0])
except IndexError:
return [
self._("ERROR: Missing argument"),
self._("Use restartfile command like this: pull <package id>"),
]
except ValueError:
return [self._("ERROR: Invalid file id")]
if not self.pyload.api.get_file_data(file_id):
return [self._("ERROR: File #{} does not exist.").format(file_id)]
self.pyload.api.restart_file(file_id)
return [self._("INFO: Restart file #{}.").format(file_id)]
    def _cmd_restartpackage(self, args):
        """Restart a package (by name or id), resetting all its files."""
        try:
            id_or_name = args[0]
        except IndexError:
            return [self._("ERROR: missing argument")]
        pack = self._get_package_by_name_or_id(id_or_name)
        if not pack:
            return [self._("ERROR: Package {} does not exist.").format(id_or_name)]
        self.pyload.api.restart_package(pack.pid)
        return [self._("INFO: Restart package {} (#{}).").format(pack.name, pack.pid)]
    def _cmd_deletefinished(self, args):
        """Remove finished files/packages and report the deleted ids."""
        return [
            self._("INFO: Deleted package ids: {}.").format(
                self.pyload.api.delete_finished()
            )
        ]
    def _cmd_getlog(self, args):
        """Returns most recent log entries.

        "getlog last [n]" limits output to the last n entries (default:
        max_lines); long output is paged via the "more" command.
        """
        self.more = []
        lines = []
        log = self.pyload.api.get_log()
        for line in log:
            if line:
                # Strip a single trailing newline before prefixing.
                if line[-1] == "\n":
                    line = line[:-1]
                self.more.append("LOG: {}".format(line))
        if args and args[0] == "last":
            if len(args) < 2:
                self.more = self.more[-self.max_lines :]
            else:
                self.more = self.more[-(int(args[1])) :]
        if len(self.more) < self.max_lines:
            # Everything fits in one reply.
            lines.extend(self.more)
            self.more = []
        else:
            # Reply with the first batch; the rest stays for "more".
            lines.extend(self.more[: self.max_lines])
            self.more = self.more[self.max_lines :]
            lines.append("{} more logs to display.".format(len(self.more)))
        return lines
def _cmd_help(self, args):
lines = [
"The following commands are available:",
"add <package|packid> <links> [...] Adds link to package. (creates new package if it does not exist)",
"captcha <id> <answer> Solve a captcha task with id <id>",
"collector Shows all packages in collector",
"del -p|-l <id> [...] Deletes all packages|links with the ids specified",
"deletefinished Deletes all finished files and completly finished packages",
"freespace Available free space at download directory in bytes",
"getlog [last [nb]] Returns most recent log entries",
"help Shows this help message",
"info <id> Shows info of the link with id <id>",
"more Shows more info when the result was truncated",
"packinfo <package|packid> Shows info of the package with id <id>",
"pause Stops the download (but not abort active downloads)",
"pull <id> Pull package from queue",
"push <id> Push package to queue",
"queue Shows all packages in the queue",
"restart Restart pyload core",
"restartfailed Restarts all failed files",
"restartfile <id> Resets file status, so it will be downloaded again",
"restartpackage <package|packid> Restarts a package, resets every containing files",
"status Show general download status",
"togglepause Toggle pause state",
"unpause Starts all downloads",
]
lines.append("Shortcuts:")
lines.append(
", ".join(
cmd_short + ": " + cmd_long
for cmd_short, cmd_long in self.SHORTCUT_COMMANDS.items()
)
)
return lines
def _get_package_by_name_or_id(self, id_or_name):
"""Return the first PackageData found or None."""
if id_or_name.isdigit():
try:
package_id = int(id_or_name)
pack = self.pyload.api.get_package_data(package_id)
except PackageDoesNotExists:
pack = self._get_package_by_name(id_or_name)
else:
pack = self._get_package_by_name(id_or_name)
return pack
def _get_package_by_name(self, name):
"""Return the first PackageData found or None."""
pq = self.pyload.api.get_queue_data()
for pack in pq:
if pack.name == name:
return pack
pc = self.pyload.api.get_collector()
for pack in pc:
if pack.name == name:
return pack
return None
|
example-iconfigurer | plugin | # encoding: utf-8
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckanext.example_iconfigurer.blueprint as blueprint
from ckan.common import CKANConfig
from ckan.types import Schema
class ExampleIConfigurerPlugin(plugins.SingletonPlugin):
    """Example IConfigurer plugin demonstrating two things:

    1. Adding a custom config tab to the admin pages by extending the
       ``ckan/templates/admin/base.html`` template.
    2. Making CKAN configuration options runtime-editable through the
       web frontend or the API.
    """

    plugins.implements(plugins.IConfigurer)
    plugins.implements(plugins.IBlueprint)

    # IConfigurer

    def update_config(self, config: CKANConfig):
        """Register this extension's template directory with CKAN."""
        toolkit.add_template_directory(config, "templates")

    def update_config_schema(self, schema: Schema):
        """Declare which configuration options may be edited at runtime."""
        validators = {
            name: toolkit.get_validator(name)
            for name in ("ignore_missing", "unicode_safe", "is_positive_integer")
        }
        # An existing CKAN core option, exposed as runtime-editable
        schema["ckan.datasets_per_page"] = [
            validators["ignore_missing"],
            validators["is_positive_integer"],
        ]
        # A custom option belonging to this extension
        schema["ckanext.example_iconfigurer.test_conf"] = [
            validators["ignore_missing"],
            validators["unicode_safe"],
        ]
        return schema

    # IBlueprint

    def get_blueprint(self):
        """Expose this extension's Flask blueprint."""
        return blueprint.example_iconfigurer
|
Code | GestorPGN | # -*- coding: latin-1 -*-
import os
import random
import sys
import Code.SQL.Base as SQLBase
from Code import PGN, Gestor, Partida, Util
from Code.Constantes import *
from Code.QT import Iconos, PantallaPGN, QTUtil, QTUtil2, QTVarios
class GestorPGN(Gestor.Gestor):
    """Game manager that displays PGN games coming from the clipboard, from
    files/storerooms, from the bundled daily-miniature store, or from an
    external command-line invocation.

    Doc-only review pass: code is unchanged; Spanish comments translated.
    """

    def inicio(self, opcion):
        """Enter PGN mode: reset state, prepare board/toolbar, run *opcion*."""
        self.tipoJuego = kJugPGN
        self.finExit = False
        self.estado = kFinJuego
        # "our file": the configured autosave PGN, when it exists on disk
        self.nuestroFichero = self.configuracion.salvarFichero
        self.siNuestroFichero = self.nuestroFichero and os.path.isfile(
            self.nuestroFichero
        )
        liOpciones = [k_mainmenu]
        self.muestraInicial = (
            True  # Controls whether a Cancel while choosing a PGN may end this manager
        )
        self.tablero.ponPosicion(self.procesador.posicionInicial)
        self.mostrarIndicador(False)
        self.pantalla.ponToolBar(liOpciones)
        self.pantalla.activaJuego(False, False)
        self.quitaCapturas()
        self.refresh()
        self.procesarAccion(opcion)

    def procesarAccion(self, clave):
        """Dispatch a toolbar/menu action key to its handler."""
        if clave == k_mainmenu:
            self.finPartida()
        elif clave == k_pgnPaste:
            self.paste()
        elif clave == k_pgnFichero:
            self.fichero(siBuscar=True)
        elif clave == k_anterior:
            self.ficheroMostrar(self.dicDB, True, siAnterior=True)
        elif clave == k_siguiente:
            self.ficheroMostrar(self.dicDB, True, siSiguiente=True)
        elif clave == k_pgnNuestroFichero:
            self.fichero(siNuestro=True)
        elif clave == k_trasteros:
            self.trasterosMenu()
        elif clave == k_pgnInformacion:
            self.informacion()
        elif clave == k_configurar:
            self.configurar(siCambioTutor=True)
        elif clave == k_utilidades:
            # Extra entries appended to the standard utilities menu
            liMasOpciones = (
                ("libros", _("Consult a book"), Iconos.Libros()),
                (None, None, None),
                ("bookguide", _("Personal Opening Guide"), Iconos.BookGuide()),
                (None, None, None),
                (
                    "jugarSolo",
                    _X(_('Open in "%1"'), _("Create your own game")),
                    Iconos.JuegaSolo(),
                ),
                (None, None, None),
                ("play", _("Play current position"), Iconos.MoverJugar()),
            )
            resp = self.utilidades(liMasOpciones)
            if resp == "libros":
                self.librosConsulta(False)
            elif resp == "bookguide":
                self.bookGuide()
            elif resp == "jugarSolo":
                self.procesador.jugarSolo(partida=self.partidaCompleta())
            elif resp == "play":
                self.jugarPosicionActual()
        elif clave == k_pgnFicheroRepite:
            self.ficheroRepite()
        elif clave == k_jugadadia:
            self.miniatura()
        elif clave == k_pgnComandoExterno:
            self.comandoExterno()
        else:
            Gestor.Gestor.rutinaAccionDef(self, clave)

    def comandoExterno(self):
        """Open the PGN file given on the command line (external invocation)."""
        self.finExit = True
        fichero = sys.argv[1]
        if os.path.isfile(fichero):
            # Files coming from a "tmp" directory are temporary: hide the path
            siTemporal = os.path.dirname(fichero).lower() == "tmp"
            self.pantalla.soloEdicionPGN(None if siTemporal else fichero)
            self.ficheroTemporal = fichero
            self.fichero(path=fichero)
        else:
            self.finPartida()

    def finPartida(self):
        """Leave PGN mode; in external-command mode clean up and exit."""
        if self.finExit:
            fichero = sys.argv[1]
            # remove the temporary file handed over by the external caller
            if os.path.dirname(fichero).lower() == "tmp":
                Util.borraFichero(fichero)
            self.procesador.procesarAccion(k_terminar)
            self.procesador.pararMotores()
            self.procesador.quitaKibitzers()
            sys.exit(0)
        else:
            self.quitaCapturas()
            self.procesador.inicio()

    def finalX(self):
        """Window-close hook: route through finPartida, veto default close."""
        self.finPartida()
        return False

    def trasterosMenu(self):
        """Show the storerooms menu and open the selected PGN store."""
        menu = QTVarios.LCMenu(self.pantalla)
        icoTras = Iconos.Trastero()
        liTras = self.configuracion.liTrasteros
        for ntras, uno in enumerate(liTras):
            carpeta, trastero = uno
            menu.opcion(ntras, "%s (%s)" % (trastero, carpeta), icoTras)
            menu.separador()
        resp = menu.lanza()
        if resp is not None:
            carpeta, trastero = liTras[resp]
            path = os.path.join(carpeta, trastero)
            self.fichero(path=path)

    def paste(self):
        """Read a PGN game from the clipboard and display it."""
        texto = QTUtil.traePortapapeles()
        if texto:
            pgn = PGN.UnPGN()
            encoding = Util.txt_encoding(texto)
            if encoding != "latin1":
                texto = texto.decode(encoding)  # Python-2 style str -> unicode
            try:
                pgn.leeTexto(texto)
            except:
                pgn.siError = True
            if pgn.siError:
                QTUtil2.mensError(
                    self.pantalla,
                    _(
                        "The text from the clipboard does not contain a chess game in PGN format"
                    ),
                )
                self.finPartida()
                return
            self.pgnPaste = texto
            self.mostrar(pgn, False)

    def miniatura(self):
        """Pick a random miniature from the bundled store and display it."""
        self.pensando(True)
        fichero = "./IntFiles/miniaturas.gm"
        tam = Util.tamFichero(fichero)
        # Seek to a random offset, discard the partial line, read the next one
        pos = random.randint(0, tam - 600)
        with open(fichero) as fm:
            fm.seek(pos)
            fm.readline()
            linea = fm.readline()
        # Record layout: fields separated by "|", movetext (pv) last
        lig = linea.split("|")
        liTags = []
        pv = lig[-1]
        for n in range(len(lig) - 1):
            # NOTE(review): the separator literal below looks garbled /
            # mis-encoded (an empty separator always matches `in` and makes
            # split() raise) -- confirm against the original latin-1 source.
            if "" in lig[n]:
                k, v = lig[n].split("")
                liTags.append((k, v))
        p = Partida.PartidaCompleta(liTags=liTags)
        p.leerPV(pv)
        txt = p.pgn()
        pgn = PGN.UnPGN()
        pgn.leeTexto(txt)
        self.pensando(False)
        if pgn.siError:
            return
        self.pgnPaste = txt
        self.mostrar(pgn, False)

    def mostrar(self, pgn, siRepiteFichero, siBlancas=None):
        """Load *pgn* into the board and panels and rebuild the toolbar.

        siRepiteFichero adds the previous/next/repeat-file toolbar buttons.
        siBlancas forces which side sits at the bottom of the board.
        """
        self.pensando(True)
        self.partida.leeOtra(pgn.partida)
        self.partida.asignaApertura()
        blancas = pgn.variable("WHITE")
        negras = pgn.variable("BLACK")
        resultado = pgn.variable("RESULT")
        if siBlancas is None:
            siBlancas = not self.partida.siEmpiezaConNegras
        self.siJugamosConBlancas = siBlancas
        self.tablero.ponerPiezasAbajo(siBlancas)
        self.pantalla.activaJuego(True, False, siAyudas=False)
        self.quitaAyudas()
        self.ponRotulo1(
            "%s : <b>%s</b><br>%s : <b>%s</b>"
            % (_("White"), blancas, _("Black"), negras)
        )
        self.ponRotulo2("%s : <b>%s</b>" % (_("Result"), resultado))
        self.tablero.desactivaTodas()
        self.ponCapInfoPorDefecto()
        if self.partida.siFenInicial():
            self.ponteAlPrincipio()
        else:
            self.ponteAlPrincipioColor()
        self.pensando(False)
        liOpciones = [
            k_mainmenu,
            k_pgnInformacion,
        ]  # ,k_pgnPaste,k_pgnFichero,k_jugadadia ]
        # if self.configuracion.liTrasteros:
        # liOpciones.append( k_trasteros )
        if siRepiteFichero:
            liOpciones.insert(2, k_pgnFicheroRepite)
            liOpciones.insert(2, k_siguiente)
            liOpciones.insert(2, k_anterior)
        # if self.siNuestroFichero:
        # liOpciones.append( k_pgnNuestroFichero )
        liOpciones.append(k_configurar)
        liOpciones.append(k_utilidades)
        self.pantalla.ponToolBar(liOpciones)
        self.muestraInicial = False

    def fichero(self, siNuestro=False, siBuscar=False, path=""):
        """Open a PGN file: the autosave file, a user-chosen file, or *path*."""
        if path:
            if not os.path.isfile(path):
                return
        self.siFicheroNuestro = siNuestro
        if siNuestro:
            path = self.nuestroFichero
        elif siBuscar:
            # Let the user pick one or more PGN files
            files = QTVarios.select_pgns(self.pantalla)
            if not files:
                if self.muestraInicial:
                    self.finPartida()
                return
            if len(files) == 1:
                path = files[0]
            else:
                # Several files selected: concatenate into a temporary PGN
                path = self.configuracion.ficheroTemporal("pgn")
                with open(path, "wb") as q:
                    for fich in files:
                        with open(fich, "rb") as f:
                            q.write(f.read())
        # ~ else: the path argument already holds the file name
        fpgn = PGN.PGN(self.configuracion)
        dicDB = fpgn.leeFichero(self.pantalla, path)
        if dicDB is None:
            return None
        self.ficheroMostrar(dicDB, False)

    def ficheroRepite(self):
        """Re-open the game chooser on the last PGN database."""
        self.ficheroMostrar(self.dicDB, True)

    def ficheroMostrar(self, dicDB, siRepite, siAnterior=False, siSiguiente=False):
        """Browse the PGN database *dicDB* and display the chosen game.

        siRepite reuses the stored chooser state; siAnterior/siSiguiente
        step to the previous/next game directly without the chooser.
        """
        bd = SQLBase.DBBase(dicDB["PATHDB"])
        if (not siRepite) and self.siFicheroNuestro:
            # autosave file: newest games first
            orden = "ROWID DESC"
        else:
            orden = ""
        dClavesTam = dicDB["DCLAVES"]
        dbf = bd.dbf(
            "GAMES", ",".join(dClavesTam.keys()), orden=orden
        )  # the read is done in the chooser window, so it happens in the same place whether siRepite or not
        estadoWpgn = dicDB["ESTADOWPGN"] if siRepite else None
        if siAnterior or siSiguiente:
            siSeguir = True
            siSeHaBorradoAlgo = False
            dbf.leer()
            recno = estadoWpgn.recno
            if siAnterior:
                if recno > 0:
                    recno -= 1
            elif siSiguiente:
                if recno < dbf.reccount() - 1:
                    recno += 1
            dbf.goto(recno)
            estadoWpgn.recno = recno
        else:
            siSeguir, estadoWpgn, siSeHaBorradoAlgo = PantallaPGN.elegirPGN(
                self.pantalla, dbf, dClavesTam, self, estadoWpgn
            )
        if siSeguir:
            self.pensando(True)
            rid = dbf.rowid(dbf.recno)
            self.dicDB = dicDB
            dicDB["ESTADOWPGN"] = estadoWpgn
            dbf.cerrar()
            # re-read just the selected row, now including the PGN body
            dbf = bd.dbfT(
                "GAMES",
                ",".join(dClavesTam.keys()) + ",PGN",
                condicion="ROWID=%d" % rid,
            )
            dbf.leer()
            dbf.gotop()
            dicDatos = dbf.dicValores()
            self.pgnPaste = dicDatos["PGN"]
            dbf.cerrar()
            pgn = PGN.UnPGN()
            pgn.leeTexto(self.pgnPaste)
            siMostrar = not pgn.siError
            self.pensando(False)
            if not siMostrar:
                QTUtil2.mensError(self.pantalla, _("This is not a valid PGN file"))
        else:
            siMostrar = False
        bd.cerrar()
        if siSeHaBorradoAlgo:
            # games were deleted in the chooser: drop the cached reference
            fpgn = PGN.PGN(self.configuracion)
            fpgn.borraReferenciaA(dicDB["FICHERO"])
        if siMostrar:
            self.mostrar(pgn, True)
        elif self.muestraInicial or self.finExit:
            self.finPartida()

    def informacion(self):
        """Show a menu listing the PGN header tags plus the detected opening."""
        li = self.pgnPaste.split("\n")
        menu = QTVarios.LCMenu(self.pantalla)
        clave = ""
        for linea in li:
            if linea.startswith("["):
                ti = linea.split('"')
                if len(ti) == 3:
                    clave = ti[0][1:].strip()
                    siFecha = clave.upper().endswith("DATE")
                    if clave.upper() == "OPENING":
                        continue
                    clave = clave[0].upper() + clave[1:].lower()
                    valor = ti[1].strip()
                    if siFecha:
                        # strip unknown date parts such as "????.??.??"
                        valor = valor.replace(".??", "").replace(".?", "")
                        valor = valor.strip("?")
                    if valor:
                        menu.opcion(
                            clave, "%s : %s" % (clave, valor), Iconos.PuntoAzul()
                        )
        apertura = self.partida.apertura
        if apertura:
            menu.separador()
            nom = apertura.trNombre
            ape = _("Opening")
            rotulo = nom if ape.upper() in nom.upper() else ("%s : %s" % (ape, nom))
            menu.opcion(clave, rotulo, Iconos.PuntoNaranja())
        menu.lanza()

    def actualPGN(self):
        """Return the current game as PGN text.

        The header is taken from the start of pgnPaste; the movetext is
        regenerated from the current game state.
        """
        txt = self.pgnPaste.strip()
        cab = ""
        result = ""
        for linea in txt.split("\n"):
            if linea.startswith("["):
                if "result" in linea.lower():
                    li = linea.split('"')
                    result = li[1]
                cab += linea.strip() + "\n"
            else:
                break
        return cab + "\n" + self.partida.pgnBase() + " " + result

    def partidaCompleta(self):
        """Build a PartidaCompleta carrying the tags parsed from pgnPaste."""
        txt = self.pgnPaste.strip()
        liTags = []
        for linea in txt.split("\n"):
            if linea.startswith("["):
                ti = linea.split('"')
                if len(ti) == 3:
                    clave = ti[0][1:].strip()
                    valor = ti[1].strip()
                    liTags.append([clave, valor])
            else:
                break
        pc = Partida.PartidaCompleta(liTags=liTags)
        pc.leeOtra(self.partida)
        return pc
|
pynocchio | bookmark_manager_dialog | import logging
from PyQt5 import QtCore, QtGui, QtSql, QtWidgets
from .uic_files import bookmark_manager_dialog_ui
from .utility import file_exist, get_dir_name
# Module-wide logging: configure the root logger at INFO and use a named logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class BookmarkManagerDialog(QtWidgets.QDialog):
    """Dialog that lists the bookmarks stored in the SQLite ``bookmark.db``
    and lets the user preview, open or delete them."""

    # Preview image width as a fraction of the dialog width
    SCALE_RATIO = 0.25

    def __init__(self, controller, parent=None):
        """Open bookmark.db next to the settings file and wire up the table.

        controller -- application controller used to open a selected comic.
        """
        super(BookmarkManagerDialog, self).__init__(parent=parent)
        self.ui = bookmark_manager_dialog_ui.Ui_Bookmark_Dialog()
        self.ui.setupUi(self)
        self.controller = controller
        path = get_dir_name(controller.model.settings_manager.settings.fileName())
        self.db = QtSql.QSqlDatabase().addDatabase("QSQLITE")
        self.db.setDatabaseName(path + "/bookmark.db")
        if self.db.open():
            self.model = QtSql.QSqlTableModel(self, self.db)
            self.model.setTable("Bookmark")
            self.model.setEditStrategy(QtSql.QSqlTableModel.OnManualSubmit)
            self.model.select()
            # Columns (from the hide/header calls below): 2 = name, 3 = page;
            # 0/1/4 are hidden -- 1 holds the comic path, 4 the page image blob
            self.model.setHeaderData(2, QtCore.Qt.Horizontal, "Name")
            self.model.setHeaderData(3, QtCore.Qt.Horizontal, "Page")
            self.ui.bookmark_table.setModel(self.model)
            self.ui.bookmark_table.hideColumn(0)
            self.ui.bookmark_table.hideColumn(1)
            self.ui.bookmark_table.hideColumn(4)
            self.ui.bookmark_table.horizontalHeader().setSectionResizeMode(
                2, QtWidgets.QHeaderView.Stretch
            )
            self.ui.bookmark_table.horizontalHeader().setSectionResizeMode(
                3, QtWidgets.QHeaderView.ResizeToContents
            )
            self.ui.button_remove.clicked.connect(self._remove_table_item)
            self.ui.button_load.clicked.connect(self._get_comic_to_open)
            selection = self.ui.bookmark_table.selectionModel()
            selection.selectionChanged.connect(self.selection_changed)
            # Placeholder pixmap shown while nothing is selected
            self.no_cover_label = self.ui.page_image_label.pixmap().scaledToWidth(
                self.width() * self.SCALE_RATIO, QtCore.Qt.SmoothTransformation
            )
            self.ui.page_image_label.setPixmap(self.no_cover_label)
            if self.model.rowCount():
                self.ui.button_load.setEnabled(True)
                self.ui.button_remove.setEnabled(True)
            logger.info("Database load!")
        else:
            logger.error("Unable to create db file!")

    def selection_changed(self, selected):
        """Update the preview image and path field for the new selection."""
        model_indexes = selected.indexes()
        if model_indexes:
            pixmap = QtGui.QPixmap()
            # column 4 stores the bookmarked page image as a binary blob
            pixmap.loadFromData(model_indexes[4].data())
            pixmap = pixmap.scaledToWidth(
                self.width() * self.SCALE_RATIO, QtCore.Qt.SmoothTransformation
            )
            self.ui.page_image_label.setPixmap(pixmap)
            self.ui.line_edit_path.setText(model_indexes[1].data())
        else:
            self.ui.page_image_label.setPixmap(self.no_cover_label)
            self.ui.line_edit_path.setText("")

    def _remove_table_item(self):
        """Delete the selected bookmarks after user confirmation."""
        option = QtWidgets.QMessageBox().warning(
            self,
            self.tr("Delete bookmarks"),
            self.tr("This action will delete your bookmarks! Proceed?"),
            QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel,
            QtWidgets.QMessageBox.Ok,
        )
        if option == QtWidgets.QMessageBox.Ok:
            for index in self.ui.bookmark_table.selectedIndexes():
                self.model.removeRow(index.row())
            # OnManualSubmit strategy: nothing is written until submitAll()
            self.model.submitAll()
            self.ui.page_image_label.setPixmap(self.no_cover_label)
            if not self.model.rowCount():
                self.ui.button_load.setEnabled(False)
                self.ui.button_remove.setEnabled(False)

    def _get_comic_to_open(self):
        """Open the selected bookmark's comic at its saved page.

        If the file no longer exists, offer to remove the stale bookmark.
        """
        selection_model = self.ui.bookmark_table.selectionModel()
        if selection_model.hasSelection():
            path = selection_model.selectedRows(1)[0].data()
            page = selection_model.selectedRows(3)[0].data()
            if file_exist(path):
                # stored page is 1-based; the controller expects 0-based
                self.controller.open_comics(path, page - 1)
                self.close()
            else:
                option = QtWidgets.QMessageBox().warning(
                    self,
                    self.tr("Comic does not exist"),
                    self.tr(
                        "The selected comic does not exist! Would you "
                        "like to remove it from the bookmark list?"
                    ),
                    QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel,
                    QtWidgets.QMessageBox.Ok,
                )
                if option == QtWidgets.QMessageBox.Ok:
                    for index in self.ui.bookmark_table.selectedIndexes():
                        self.model.removeRow(index.row())
                    self.model.submitAll()
                    self.ui.page_image_label.setPixmap(self.no_cover_label)
                    if not self.model.rowCount():
                        self.ui.button_load.setEnabled(False)
                        self.ui.button_remove.setEnabled(False)

    def close(self):
        """Close the database connection before closing the dialog."""
        self.db.close()
        super(BookmarkManagerDialog, self).close()
|
Import | Init | # FreeCAD init script of the Import module
# (c) 2001 Juergen Riegel
# ***************************************************************************
# * Copyright (c) 2002 Juergen Riegel <juergen.riegel@web.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************/
# Append the open handler
# FreeCAD.addImportType("STEP 214 (*.step *.stp)","ImportGui")
# FreeCAD.addExportType("STEP 214 (*.step *.stp)","ImportGui")
# FreeCAD.addExportType("IGES files (*.iges *.igs)","ImportGui")
# Register the import/export handlers this module provides with FreeCAD's
# central file-type registry (module names are resolved lazily on first use).
FreeCAD.addImportType("PLMXML files (*.plmxml)", "PlmXmlParser")
FreeCAD.addImportType("STEPZ Zip File Type (*.stpZ *.stpz)", "stepZ")
FreeCAD.addImportType("glTF (*.gltf *.glb)", "ImportGui")
FreeCAD.addExportType("STEPZ zip File Type (*.stpZ *.stpz)", "stepZ")
FreeCAD.addExportType("glTF (*.gltf *.glb)", "ImportGui")
|
fallback | aes | import os
import pyaes
from .._aes import AES
__all__ = ["aes"]
class AESBackend:
    """Pure-Python AES backend built on :mod:`pyaes` (fallback used when no
    native crypto library is available)."""

    def _get_algo_cipher_type(self, algo):
        """Validate *algo* (e.g. ``"aes-256-cbc"``) and return its mode name.

        Raises ValueError for anything other than
        aes-{128,192,256}-{cbc,ctr,cfb,ofb}.
        """
        if not algo.startswith("aes-") or algo.count("-") != 2:
            raise ValueError("Unknown cipher algorithm {}".format(algo))
        key_length, cipher_type = algo[4:].split("-")
        if key_length not in ("128", "192", "256"):
            raise ValueError("Unknown cipher algorithm {}".format(algo))
        if cipher_type not in ("cbc", "ctr", "cfb", "ofb"):
            raise ValueError("Unknown cipher algorithm {}".format(algo))
        return cipher_type

    def is_algo_supported(self, algo):
        """Return True if *algo* names a supported AES variant."""
        try:
            self._get_algo_cipher_type(algo)
            return True
        except ValueError:
            return False

    def random(self, length):
        """Return *length* cryptographically secure random bytes."""
        return os.urandom(length)

    def _build_cipher(self, cipher_type, key, iv):
        """Create the pyaes mode-of-operation object for *cipher_type*.

        Shared by encrypt() and decrypt(), which previously duplicated this
        whole ladder (and left ``cipher`` unbound on an unexpected type).
        *cipher_type* must already be validated by _get_algo_cipher_type().
        """
        if cipher_type == "cbc":
            return pyaes.AESModeOfOperationCBC(key, iv=iv)
        if cipher_type == "ctr":
            # The IV is actually a counter, not an IV but it does almost the
            # same. Notice: pyaes always uses 1 as initial counter! Make sure
            # not to call pyaes directly.
            # We kinda do two conversions here: from byte array to int here, and
            # from int to byte array in pyaes internals. It's possible to fix that
            # but I didn't notice any performance changes so I'm keeping clean code.
            iv_int = 0
            for byte in iv:
                iv_int = (iv_int * 256) + byte
            counter = pyaes.Counter(iv_int)
            return pyaes.AESModeOfOperationCTR(key, counter=counter)
        if cipher_type == "cfb":
            # Change segment size from default 8 bytes to 16 bytes for OpenSSL
            # compatibility
            return pyaes.AESModeOfOperationCFB(key, iv, segment_size=16)
        if cipher_type == "ofb":
            return pyaes.AESModeOfOperationOFB(key, iv)
        raise ValueError("Unknown cipher type {}".format(cipher_type))

    def encrypt(self, data, key, algo="aes-256-cbc"):
        """Encrypt *data* with *key*; return ``(ciphertext, iv)``.

        The IV (or initial CTR counter) is freshly generated per call.
        """
        cipher_type = self._get_algo_cipher_type(algo)
        # Generate random IV
        iv = os.urandom(16)
        encrypter = pyaes.Encrypter(self._build_cipher(cipher_type, key, iv))
        ciphertext = encrypter.feed(data)
        ciphertext += encrypter.feed()
        return ciphertext, iv

    def decrypt(self, ciphertext, iv, key, algo="aes-256-cbc"):
        """Decrypt *ciphertext* produced with the same *iv*, *key* and *algo*."""
        cipher_type = self._get_algo_cipher_type(algo)
        decrypter = pyaes.Decrypter(self._build_cipher(cipher_type, key, iv))
        data = decrypter.feed(ciphertext)
        data += decrypter.feed()
        return data

    def get_backend(self):
        """Identify this backend implementation."""
        return "fallback"
aes = AES(AESBackend())
|
imports | import_status | """ import books from another app """
from bookwyrm import models
from bookwyrm.importers import GoodreadsImporter
from bookwyrm.models.import_job import import_item_task
from bookwyrm.settings import PAGE_LENGTH
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.http import require_POST
# pylint: disable= no-self-use
@method_decorator(login_required, name="dispatch")
class ImportStatus(View):
    """status of an existing import"""

    def get(self, request, job_id):
        """status of an import job"""
        job = get_object_or_404(models.ImportJob, id=job_id)
        # only the job's owner may see its status
        if job.user != request.user:
            raise PermissionDenied()
        items = job.items.order_by("index")
        # floored at 1, presumably so progress ratios never divide by zero
        item_count = items.count() or 1
        paginated = Paginator(items, PAGE_LENGTH)
        page = paginated.get_page(request.GET.get("page"))
        # failed items that have a guessed match the user can approve
        manual_review_count = items.filter(
            fail_reason__isnull=False, book_guess__isnull=False, book__isnull=True
        ).count()
        # failed items with nothing left to review
        fail_count = items.filter(
            fail_reason__isnull=False, book_guess__isnull=True
        ).count()
        pending_item_count = job.pending_item_count
        data = {
            "job": job,
            "items": page,
            "manual_review_count": manual_review_count,
            "fail_count": fail_count,
            "page_range": paginated.get_elided_page_range(
                page.number, on_each_side=2, on_ends=1
            ),
            "show_progress": True,
            "item_count": item_count,
            "complete_count": item_count - pending_item_count,
            "percent": job.percent_complete,
            # hours since the last import item update; fixed operand order --
            # the previous (updated_date - now) produced a negative timedelta
            # whose .seconds attribute wraps to a bogus positive remainder
            "inactive_time": (timezone.now() - job.updated_date).total_seconds()
            / 60
            / 60,
            "legacy": not job.mappings,
        }
        return TemplateResponse(request, "import/import_status.html", data)

    def post(self, request, job_id):
        """bring a legacy import into the latest format"""
        job = get_object_or_404(models.ImportJob, id=job_id)
        if job.user != request.user:
            raise PermissionDenied()
        GoodreadsImporter().update_legacy_job(job)
        return redirect("import-status", job_id)
@login_required
@require_POST
def retry_item(request, job_id, item_id):
    """Re-queue a single import item (must belong to the caller's job)."""
    lookup = dict(id=item_id, job__id=job_id, job__user=request.user)
    item = get_object_or_404(models.ImportItem, **lookup)
    import_item_task.delay(item.id)
    return redirect("import-status", job_id)
@login_required
@require_POST
def stop_import(request, job_id):
    """Cancel a running import job owned by the caller."""
    get_object_or_404(models.ImportJob, id=job_id, user=request.user).stop_job()
    return redirect("import-status", job_id)
|
validate | _schemas | from typing import Any, Callable, FrozenSet, List, Literal, Optional, Pattern, Sequence, Set, Tuple, Type, Union
class SchemaContainer:
    """
    A simple schema container.

    Wraps a single arbitrary schema object in the ``schema`` attribute;
    subclasses attach specific validation semantics to it.
    """

    def __init__(self, schema):
        self.schema = schema
class _CollectionSchemaContainer(SchemaContainer):
    """Base for containers whose ``schema`` is a tuple of schemas
    (one per positional argument)."""

    def __init__(self, *schemas):
        super().__init__(schemas)
class AllSchema(_CollectionSchemaContainer):
    """
    Collection of schemas where every schema must be valid (logical AND).
    The last validation result gets returned.
    """
class AnySchema(_CollectionSchemaContainer):
    """
    Collection of schemas where at least one schema must be valid (logical OR).
    The first successful validation result gets returned.
    """
class NoneOrAllSchema(_CollectionSchemaContainer):
    """
    Collection of schemas where every schema must be valid. If the initial
    input is None, all validations will be skipped.
    The last validation result gets returned.
    """
class ListSchema(_CollectionSchemaContainer):
    """
    Collection of schemas where every indexed schema must be valid, as well
    as the input type and length.
    A new list of the validated input gets returned.
    """
class GetItemSchema:
    """
    Get an item from the input.

    Unless ``strict`` is True, ``item`` may be a tuple of items for a
    recursive lookup; when the item is missing from the last object of that
    lookup, ``default`` is returned. Supported inputs are XML elements,
    regex matches and anything implementing ``__getitem__``.
    """

    def __init__(
        self,
        item: Union[Any, Tuple[Any]],
        default: Any = None,
        strict: bool = False,
    ):
        self.strict = strict
        self.default = default
        self.item = item
class RegexSchema:
    """
    A regex pattern that must match using the provided method.
    """

    def __init__(
        self,
        pattern: Pattern,
        method: Literal["search", "match", "fullmatch", "findall", "split", "sub", "subn"] = "search",
    ):
        # the compiled pattern to apply
        self.pattern = pattern
        # name of the re.Pattern method used when validating
        self.method = method
class TransformSchema:
    """
    Transform the input by calling ``func`` with the stored
    positional/keyword arguments.
    """

    def __init__(
        self,
        func: Callable,
        *args,
        **kwargs,
    ):
        self.kwargs = kwargs
        self.args = args
        self.func = func
class OptionalSchema:
    """
    An optional key set in a dict or dict in a :class:`UnionSchema`.
    """

    def __init__(self, key: Any):
        # the dict key that is allowed to be absent
        self.key = key
class AttrSchema(SchemaContainer):
    """
    Validate attributes of an input object.
    """
class XmlElementSchema:
    """
    Validate an XML element.
    """

    # signature is weird because of backwards compatibility
    def __init__(
        self,
        tag: Optional[Any] = None,
        text: Optional[Any] = None,
        attrib: Optional[Any] = None,
        tail: Optional[Any] = None,
    ):
        self.tag = tag
        self.attrib = attrib
        self.text = text
        self.tail = tail
class UnionGetSchema:
    """
    Validate multiple :class:`GetItemSchema` schemas on the same input,
    collecting the results into ``seq`` (tuple by default).
    """

    def __init__(
        self,
        *getters,
        seq: Type[Union[List, FrozenSet, Set, Tuple]] = tuple,
    ):
        self.seq = seq
        self.getters: Sequence[GetItemSchema] = tuple(
            map(GetItemSchema, getters)
        )
class UnionSchema(SchemaContainer):
    """
    Validate multiple schemas on the same input.
    Can be a tuple, list, set, frozenset or dict of schemas.
    """
|
migrations | 0046_event_names_properties_to_team | # Generated by Django 3.0.3 on 2020-04-14 18:47
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
def migrate_event_names_and_properties(apps, schema_editor):
    """Backfill each team's ``event_names`` / ``event_properties`` caches
    from its existing events.

    Bug fix: the distinct property keys were previously assigned to
    ``team.event_keys`` -- an attribute this migration never adds -- so the
    new ``event_properties`` field was saved empty.
    """
    Team = apps.get_model("posthog", "Team")
    Event = apps.get_model("posthog", "Event")

    class JsonKeys(models.Func):
        # Postgres set-returning function: one row per top-level JSONB key
        function = "jsonb_object_keys"

    for team in Team.objects.all():
        events = Event.objects.filter(team=team)
        keys = (
            events.annotate(keys=JsonKeys("properties"))
            .distinct("keys")
            .order_by("keys")
            .values_list("keys", flat=True)
        )
        names = events.distinct("event").values_list("event", flat=True)
        team.event_properties = list(keys)
        team.event_names = list(names)
        team.save()
def noop(apps, schema_editor):
    """Reverse migration: nothing to undo."""
class Migration(migrations.Migration):
    """Add per-team ``event_names``/``event_properties`` JSON caches and
    backfill them from existing events."""

    dependencies = [
        ("posthog", "0045_add_timestamp_index"),
    ]

    operations = [
        migrations.AddField(
            model_name="team",
            name="event_names",
            field=django.contrib.postgres.fields.jsonb.JSONField(default=list),
        ),
        migrations.AddField(
            model_name="team",
            name="event_properties",
            field=django.contrib.postgres.fields.jsonb.JSONField(default=list),
        ),
        # elidable: once squashed, this data backfill can be dropped
        migrations.RunPython(migrate_event_names_and_properties, noop, elidable=True),
    ]
|
forms | endpoint | import json
from urllib.parse import urljoin
from flask import g, jsonify, redirect, render_template, request
from flask_cors import cross_origin
from formspree import settings
from formspree.forms import errors
from formspree.forms.errors import SubmitFormError
from formspree.forms.helpers import (
HASH,
KEYS_EXCLUDED_FROM_EMAIL,
assign_ajax,
get_temp_hostname,
http_form_to_dict,
ordered_storage,
referrer_to_path,
remove_www,
temp_store_hostname,
verify_captcha,
)
from formspree.forms.models import Form
from formspree.stuff import DB
from formspree.utils import IS_VALID_EMAIL, jsonerror, request_wants_json, url_domain
from jinja2.exceptions import TemplateNotFound
def get_host_and_referrer(received_data):
    """
    Looks for stored hostname in redis (from captcha).
    If it doesn't exist, uses the referer header.

    Returns a (host, referrer) tuple; raises SubmitFormError with a rendered
    error page when the stored hostname is unusable.
    """
    try:
        return get_temp_hostname(received_data["_host_nonce"])
    except KeyError:
        # no captcha nonce was submitted: derive host from the Referer header
        return referrer_to_path(request.referrer), request.referrer
    except ValueError as err:
        # a nonce existed but the hostname stored for it was invalid
        g.log.error("Invalid hostname stored on Redis.", err=err)
        raise SubmitFormError(
            (
                render_template(
                    "error.html",
                    title="Unable to submit form",
                    text="<p>We had a problem identifying to whom we should have submitted this form. "
                    "Please try submitting again. If it fails once more, please let us know at {email}</p>".format(
                        email=settings.CONTACT_EMAIL
                    ),
                ),
                500,
            )
        )
def validate_user_form(hashid, host):
    """
    Gets a form from a hashid, created on the dashboard.
    Checks to make sure the submission can be accepted by this form.

    Raises SubmitFormError when the form is unknown, disabled, or the
    submission's host does not match the form's registered host.
    """
    form = Form.get_with_hashid(hashid)
    if not form:
        raise SubmitFormError(errors.bad_hashid_error(hashid))
    # Check if it has been assigned about using AJAX or not
    assign_ajax(form, request_wants_json())
    if form.disabled:
        raise SubmitFormError(errors.disabled_error())
    if not form.host:
        # add the host to the form
        # ALERT: As a side effect, sets the form's host if not already set
        form.host = host
        DB.session.add(form)
        DB.session.commit()
    # it is an error when
    # form is not sitewide, and submission came from a different host
    # form is sitewide, but submission came from a host rooted somewhere else, or
    elif (
        (
            not form.sitewide
            and
            # ending slashes can be safely ignored here:
            form.host.rstrip("/") != host.rstrip("/")
        )
        or (
            form.sitewide
            and not remove_www(  # removing www from both sides makes this a neutral operation:
                host
            ).startswith(remove_www(form.host))
        )
    ):
        raise SubmitFormError(errors.mismatched_host_error(host, form))
    return form
def get_or_create_form(email, host):
    """
    Gets the form if it already exits, otherwise checks to ensure
    that this is a valid new form submission. If so, creates a
    new form.

    Raises SubmitFormError for AJAX creation attempts, SERVICE_URL
    spoofing, or when the (newly created) form is disabled.
    """
    form = Form.query.filter_by(hash=HASH(email, host)).first()
    if not form:
        if request_wants_json():
            # Can't create a new ajax form unless from the dashboard
            ajax_error_str = (
                "To prevent spam, only "
                + settings.UPGRADED_PLAN_NAME
                + " accounts may create AJAX forms."
            )
            raise SubmitFormError(jsonerror(400, {"error": ajax_error_str}))
        if url_domain(settings.SERVICE_URL) in host:
            # Bad user is trying to submit a form spoofing formspree.io
            g.log.info(
                "User attempting to create new form spoofing SERVICE_URL. Ignoring."
            )
            raise SubmitFormError(
                (
                    render_template(
                        "error.html", title="Unable to submit form", text="Sorry"
                    ),
                    400,
                )
            )
        # all good, create form
        form = Form(email, host)
    # Check if it has been assigned using AJAX or not
    assign_ajax(form, request_wants_json())
    if form.disabled:
        raise SubmitFormError(errors.disabled_error())
    return form
def check_captcha(form, email_or_string, received_data, sorted_keys):
    """
    Render a captcha page when one is required for this submission.

    Returns the rendered captcha page — a language-specific template when
    '_language' was submitted and the template exists, the default English
    template otherwise — or None when no captcha is needed.
    """
    captcha_verified = verify_captcha(received_data, request)
    # JSON (AJAX) submissions, already-verified captchas and test runs skip it.
    needs_captcha = not (request_wants_json() or captcha_verified or settings.TESTING)
    # Dashboard users may have disabled the captcha for this form.
    if form.has_feature("dashboard"):
        needs_captcha = needs_captcha and not form.captcha_disabled
    if not needs_captcha:
        return None
    data_copy = received_data.copy()
    # Temporarily store hostname in redis while doing captcha
    nonce = temp_store_hostname(form.host, request.referrer)
    data_copy["_host_nonce"] = nonce
    action = urljoin(settings.API_ROOT, email_or_string)
    if "_language" in received_data:
        # The try only needs to guard the language-specific render; a missing
        # template falls through to the default English page below.
        # (The original had a redundant `pass` after the error log.)
        try:
            return render_template(
                "forms/captcha_lang/{}.html".format(received_data["_language"]),
                data=data_copy,
                sorted_keys=sorted_keys,
                action=action,
                lang=received_data["_language"],
            )
        except TemplateNotFound:
            g.log.error(
                "Requested language not found for reCAPTCHA page, defaulting to English",
                referrer=request.referrer,
                lang=received_data["_language"],
            )
    return render_template(
        "forms/captcha.html",
        data=data_copy,
        sorted_keys=sorted_keys,
        action=action,
        lang=None,
    )
def email_sent_success(status):
    """Respond after a submission email went out: JSON ack or 302 redirect."""
    if not request_wants_json():
        return redirect(status["next"], code=302)
    return jsonify({"success": "email sent", "next": status["next"]})
def no_email_sent_success(status):
    """Respond for archive-only submissions where no email was delivered."""
    if not request_wants_json():
        return redirect(status["next"], code=302)
    message = "no email sent, access submission archive on {} dashboard".format(
        settings.SERVICE_NAME
    )
    return jsonify({"success": message, "next": status["next"]})
def confirmation_sent_success(form, host, status):
    """Respond after a confirmation email was sent (or re-sent)."""
    if request_wants_json():
        return jsonify({"success": "confirmation email sent"})
    # A duplicated confirmation means this is a re-send, not a first send.
    is_resend = status["code"] == Form.STATUS_CONFIRMATION_DUPLICATED
    return render_template(
        "forms/confirmation_sent.html",
        email=form.email,
        host=host,
        resend=is_resend,
    )
def response_for_status(form, host, referrer, status):
    """
    Translate a Form send/confirmation status dict into an HTTP response.

    status is a dict with at least a "code" key holding one of the
    Form.STATUS_* constants.
    """
    code = status["code"]
    if code == Form.STATUS_EMAIL_SENT:
        return email_sent_success(status)
    if code == Form.STATUS_NO_EMAIL:
        return no_email_sent_success(status)
    if code == Form.STATUS_EMAIL_EMPTY:
        return errors.empty_form_error(referrer)
    if code in (Form.STATUS_CONFIRMATION_SENT, Form.STATUS_CONFIRMATION_DUPLICATED):
        return confirmation_sent_success(form, host, status)
    if code == Form.STATUS_OVERLIMIT:
        return errors.over_limit_error()
    if code == Form.STATUS_REPLYTO_ERROR:
        return errors.malformed_replyto_error(status)
    # BUG FIX: previously passed `send` (the Flask view function defined
    # below) to generic_send_error; the status dict is what the fallback
    # error renderer needs, consistent with malformed_replyto_error above.
    return errors.generic_send_error(status)
@cross_origin(
    allow_headers=["Accept", "Content-Type", "X-Requested-With", "Authorization"]
)
@ordered_storage
def send(email_or_string):
    """
    Main endpoint, finds or creates the form row from the database,
    checks validity and state of the form and sends either form data
    or verification to email.

    email_or_string is either a plain email address or a hashid that
    identifies a dashboard-created form (see the IS_VALID_EMAIL branch).
    """
    # Bind the submission target into the structured logger for this request.
    g.log = g.log.bind(target=email_or_string)
    if request.method == "GET":
        return errors.bad_method_error()
    if request.form:
        # Classic form-encoded POST.
        received_data, sorted_keys = http_form_to_dict(request.form)
    else:
        # JSON body (or empty payload).
        received_data = request.get_json() or {}
        sorted_keys = received_data.keys()
    # Control keys (leading-underscore etc.) never appear in the email body.
    sorted_keys = [k for k in sorted_keys if k not in KEYS_EXCLUDED_FROM_EMAIL]
    try:
        # NOTE: host in this function generally refers to the referrer hostname.
        host, referrer = get_host_and_referrer(received_data)
    except SubmitFormError as vfe:
        return vfe.response
    if not host:
        return errors.no_referrer_error()
    g.log = g.log.bind(host=host, wants="json" if request_wants_json() else "html")
    if not IS_VALID_EMAIL(email_or_string):
        # in this case it can be a hashid identifying a
        # form generated from the dashboard
        try:
            form = validate_user_form(email_or_string, host)
        except SubmitFormError as vfe:
            return vfe.response
    else:
        # in this case, it is a normal email
        try:
            form = get_or_create_form(email_or_string.lower(), host)
        except SubmitFormError as vfe:
            return vfe.response
    # If form exists and is confirmed, send email
    # otherwise send a confirmation email
    if form.confirmed:
        # A non-None result means the submitter must solve a captcha first.
        captcha_page = check_captcha(form, email_or_string, received_data, sorted_keys)
        if captcha_page:
            return captcha_page
        status = form.send(received_data, sorted_keys, referrer)
    else:
        # Unconfirmed form: hold the data and ask the owner to confirm.
        status = form.send_confirmation(store_data=received_data)
    return response_for_status(form, host, referrer, status)
|
accounts | PremiumizeMe | # -*- coding: utf-8 -*-
import json
from ..base.multi_account import MultiAccount
class PremiumizeMe(MultiAccount):
    __name__ = "PremiumizeMe"
    __type__ = "account"
    __version__ = "0.32"
    __status__ = "testing"
    __config__ = [
        ("mh_mode", "all;listed;unlisted", "Filter downloaders to use", "all"),
        ("mh_list", "str", "Downloader list (comma separated)", ""),
        ("mh_interval", "int", "Reload interval in hours", 12),
    ]
    __description__ = """Premiumize.me account plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("Florian Franzen", "FlorianFranzen@gmail.com"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    # See https://www.premiumize.me/api
    API_URL = "https://www.premiumize.me/api/"

    def api_respond(self, method, **kwargs):
        """Call the given API endpoint and decode its JSON reply."""
        return json.loads(self.load(self.API_URL + method, get=kwargs))

    def grab_hosters(self, user, password, data):
        """Collect every alias of every direct-download hoster."""
        res = self.api_respond("services/list", apikey=password)
        return [
            alias
            for hoster in res["directdl"]
            for alias in res["aliases"][hoster]
        ]

    def grab_info(self, user, password, data):
        """Report premium flag, expiry timestamp and traffic quota."""
        info = {"validuntil": None, "trafficleft": None, "premium": False}
        res = self.api_respond("account/info", apikey=password)
        if res["status"] == "success":
            info["premium"] = res["premium_until"] is not False
            if info["premium"]:
                info["validuntil"] = res["premium_until"]
                # Premium accounts have no byte-based traffic limit.
                info["trafficleft"] = -1
        return info

    def signin(self, user, password, data):
        """Validate the API token and that it belongs to this customer id."""
        res = self.api_respond("account/info", apikey=password)
        if res["status"] != "success":
            self.log_error(
                self._(
                    "Password for premiumize.me should be the API token - get it from: https://www.premiumize.me/account"
                )
            )
            self.fail_login(res["message"])
        elif res["customer_id"] != user:
            self.log_error(
                self._(
                    "username for premiumize.me should be the Customer ID - get it from: https://www.premiumize.me/account"
                )
            )
            self.fail_login()
|
widgets | lfo_dialog | from sglib.lib.translate import _
from sgui.sgqt import *
from . import _shared
from .control import knob_control
from .playback_widget import playback_widget
def lfo_dialog(a_update_callback, a_save_callback):
    """Generic dialog for doing event transforms that are LFO-like.

    The actual transforms are performed by the caller using the
    event callbacks. The caller should create a list of event
    objects and their original values.

    a_update_callback: called with all current knob values to preview
        the transform whenever a control changes
    a_save_callback:   called to commit the previewed transform

    Returns True when the user accepted with OK, False otherwise.
    """
    def ok_handler():
        # Closing with retval True tells the caller to keep the edit.
        f_dialog.close()
        f_dialog.retval = True

    def update(*args):
        # Gather every knob value; each optional "End" knob falls back to
        # its matching "Start" knob while its checkbox is unchecked.
        f_vals = [x.control.value() for x in f_controls]
        f_vals += [
            x.control.value() if y.isChecked() else z.control.value()
            for x, y, z in f_optional_controls
        ]
        a_update_callback(*f_vals)

    def save(*args):
        a_save_callback()

    def update_and_save(a_val=None):
        update()
        save()

    f_dialog = QDialog()
    f_dialog.setFixedSize(570, 200)
    f_dialog.retval = False
    f_dialog.setWindowTitle(_("LFO Tool"))
    f_vlayout = QVBoxLayout(f_dialog)
    f_layout = QGridLayout()
    f_vlayout.addLayout(f_layout)
    f_knob_size = 48
    f_phase_knob = knob_control(
        f_knob_size,
        _("Phase"),
        0,
        save,
        update,
        0,
        100,
        0,
        _shared.KC_DECIMAL,
        tooltip=(
            "The oscillator phase for the LFO. 0.0 starts at the "
            "beginning, 1.0 at the end"
        ),
    )
    f_phase_knob.add_to_grid_layout(f_layout, 0)
    f_start_freq_knob = knob_control(
        f_knob_size,
        _("Start Freq"),
        0,
        save,
        update,
        10,
        400,
        100,
        _shared.KC_HZ_DECIMAL,
        tooltip=("The frequency of the LFO at the start of the region, in hertz"),
    )
    f_start_freq_knob.add_to_grid_layout(f_layout, 5)
    f_end_freq_knob = knob_control(
        f_knob_size,
        _("End Freq"),
        0,
        save,
        update,
        10,
        400,
        100,
        _shared.KC_HZ_DECIMAL,
        tooltip=("The frequency of the LFO at the end of the region, in hertz"),
    )
    f_end_freq_knob.add_to_grid_layout(f_layout, 10)
    f_end_freq_cbox = QCheckBox()
    f_end_freq_cbox.setToolTip("If checked, the End Freq. knob is enabled")
    f_end_freq_cbox.stateChanged.connect(update_and_save)
    f_layout.addWidget(f_end_freq_cbox, 5, 10)
    f_start_amp_knob = knob_control(
        f_knob_size,
        _("Start Amp"),
        0,
        save,
        update,
        0,
        127,
        64,
        _shared.KC_INTEGER,
        tooltip=("The amplitude of the LFO at the start of the region"),
    )
    f_start_amp_knob.add_to_grid_layout(f_layout, 11)
    f_end_amp_knob = knob_control(
        f_knob_size,
        _("End Amp"),
        0,
        save,
        update,
        0,
        127,
        64,
        _shared.KC_INTEGER,
        tooltip=("The amplitude of the LFO at the end of the region"),
    )
    f_end_amp_knob.add_to_grid_layout(f_layout, 12)
    f_end_amp_cbox = QCheckBox()
    f_end_amp_cbox.setToolTip("If checked, the End Amp. knob is enabled")
    f_end_amp_cbox.stateChanged.connect(update_and_save)
    f_layout.addWidget(f_end_amp_cbox, 5, 12)
    f_start_center_knob = knob_control(
        f_knob_size,
        _("Start Center"),
        0,
        save,
        update,
        0,
        127,
        64,
        _shared.KC_INTEGER,
        tooltip=(
            "Change the center line at the start of the LFO. Amplitude "
            "should be less than full value if using this knob"
        ),
    )
    f_start_center_knob.add_to_grid_layout(f_layout, 15)
    f_end_center_knob = knob_control(
        f_knob_size,
        _("End Center"),
        0,
        save,
        update,
        0,
        127,
        64,
        _shared.KC_INTEGER,
        tooltip=(
            "Change the center line at the end of the LFO. Amplitude "
            "should be less than full value if using this knob"
        ),
    )
    f_end_center_knob.add_to_grid_layout(f_layout, 16)
    f_end_center_cbox = QCheckBox()
    f_end_center_cbox.setToolTip("If checked, the End Center knob is enabled")
    f_end_center_cbox.stateChanged.connect(update_and_save)
    f_layout.addWidget(f_end_center_cbox, 5, 16)

    def start_fade_changed(*args):
        # Keep fade-in strictly before fade-out: pushing the start knob past
        # the end knob drags the end knob forward instead of updating.
        f_start, f_end = (
            int(x.control.value()) for x in (f_start_fade_knob, f_end_fade_knob)
        )
        if f_start >= f_end:
            f_end_fade_knob.control.setValue(f_start + 1)
        else:
            update()

    f_start_fade_knob = knob_control(
        f_knob_size,
        _("Start Fade"),
        0,
        save,
        start_fade_changed,
        0,
        99,
        0,
        _shared.KC_INTEGER,
        tooltip="Fade in the start of the LFO",
    )
    f_start_fade_knob.add_to_grid_layout(f_layout, 20)

    def end_fade_changed(*args):
        # Mirror of start_fade_changed: the end knob pushes the start knob back.
        f_start, f_end = (
            int(x.control.value()) for x in (f_start_fade_knob, f_end_fade_knob)
        )
        if f_end <= f_start:
            f_start_fade_knob.control.setValue(f_end - 1)
        else:
            update()

    f_end_fade_knob = knob_control(
        f_knob_size,
        _("End Fade"),
        0,
        save,
        end_fade_changed,
        1,
        100,
        100,
        _shared.KC_INTEGER,
        tooltip="Fade out the end of the LFO",
    )
    f_end_fade_knob.add_to_grid_layout(f_layout, 25)
    f_playback_widget = playback_widget()
    # Does not work, also there is no longer button styling for it
    # f_layout.addWidget(f_playback_widget.play_button, 1, 30)
    # f_layout.addWidget(f_playback_widget.stop_button, 1, 31)
    # Order matters: these tuples define the argument order passed to
    # a_update_callback by update() above.
    f_controls = (
        f_phase_knob,
        f_start_freq_knob,
        f_start_amp_knob,
        f_start_center_knob,
        f_start_fade_knob,
        f_end_fade_knob,
    )
    # (end knob, enabling checkbox, start knob used as fallback)
    f_optional_controls = (
        (f_end_freq_knob, f_end_freq_cbox, f_start_freq_knob),
        (f_end_amp_knob, f_end_amp_cbox, f_start_amp_knob),
        (f_end_center_knob, f_end_center_cbox, f_start_center_knob),
    )
    ok_cancel_layout = QHBoxLayout()
    f_vlayout.addLayout(ok_cancel_layout)
    f_ok_button = QPushButton(_("OK"))
    ok_cancel_layout.addWidget(f_ok_button)
    f_ok_button.pressed.connect(ok_handler)
    f_cancel_button = QPushButton("Cancel")
    ok_cancel_layout.addWidget(f_cancel_button)
    f_cancel_button.pressed.connect(f_dialog.close)
    # Apply the initial knob values once before showing the dialog.
    update()
    save()
    f_dialog.move(0, 0)
    # NOTE(review): exec(center=False) is a project-specific sgqt extension
    # of QDialog.exec -- confirm against sgui.sgqt.
    f_dialog.exec(center=False)
    return f_dialog.retval
|
unihandecode | jadecoder | # coding:utf-8
__license__ = "GPL 3"
__copyright__ = "2010, Hiroshi Miura <miurahr@linux.com>"
__docformat__ = "restructuredtext en"
"""
Decode unicode text to an ASCII representation of the text for Japanese.
Translate unicode string to ASCII roman string.
API is based on the python unidecode,
which is based on Ruby gem (http://rubyforge.org/projects/unidecode/)
and perl module Text::Unidecode
(http://search.cpan.org/~sburke/Text-Unidecode-0.04/).
This functionality is owned by Kakasi Japanese processing engine.
Copyright (c) 2010 Hiroshi Miura
"""
import re
from calibre.ebooks.unihandecode.jacodepoints import CODEPOINTS as JACODES
from calibre.ebooks.unihandecode.pykakasi.kakasi import kakasi
from calibre.ebooks.unihandecode.unicodepoints import CODEPOINTS
from calibre.ebooks.unihandecode.unidecoder import Unidecoder
class Jadecoder(Unidecoder):
    """Transliterate Japanese unicode text to an ASCII (romaji) form.

    Kanji/kana conversion is delegated to the bundled kakasi engine; any
    characters still outside ASCII afterwards are replaced via the
    Unidecoder codepoint tables.
    """

    kakasi = None
    codepoints = {}

    def __init__(self):
        # BUG FIX: copy before merging. The original did
        # `self.codepoints = CODEPOINTS; self.codepoints.update(JACODES)`,
        # which mutated the shared module-level CODEPOINTS dict in place,
        # leaking the Japanese overrides into every other Unidecoder user.
        self.codepoints = dict(CODEPOINTS)
        self.codepoints.update(JACODES)
        self.kakasi = kakasi()

    def decode(self, text):
        """Return an ASCII transliteration of *text*."""
        try:
            result = self.kakasi.do(text)
        except Exception:
            # kakasi failed; fall back to raw codepoint replacement of the
            # original text. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            result = text
        return re.sub(
            "[^\x00-\x7f]", lambda x: self.replace_point(x.group()), result
        )
|
PackageManager | packman | """
Cocoa GUI for the Package Manager
This is a first generation of the Cocoa GUI, it inherits some of the nasty
features of the current Carbon version:
1. GUI blocks during some operations, such as downloading or installing
2. Checking on GUI packages may crash the application
The first item can only be solved by rewriting parts of the pimp module, the
second part will be solved by running at least some pimp related code in a
separate process.
TODO:
- Make sure 'File -> Open...' actually works
XXX:
- save preferences in the favorites db (for databases that are in there)?
"""
import sys
import threading
import webbrowser
import objc
import pimp
from Cocoa import *
from PyObjCTools import AppHelper
# File type for packman databases
# (passed to openUntitledDocumentOfType_display_ when creating documents)
DB_FILE_TYPE="Python Package Database"
# Extract class information from the NIB files
# - MainMenu: Global application stuff
# - OpenPanel: The 'Open URL...' window
# - PackageDatabase: Document window
def setString(field, value):
    """
    Set an NSTextField to the specified value. Clears the field if 'value'
    is None.
    """
    field.setStringValue_("" if value is None else value)
##
# We break the abstraction of some of the objects in the pimp module. That
# is necessary because we cannot get at the required information using the
# public interfaces :-(
#
def DB_DESCRIPTION(pimpDB):
    """ Return the description of a pimp database (reaches into a private attr) """
    return pimpDB._description
def DB_MAINTAINER(pimpDB):
    """ Return the maintainer of a pimp database (reaches into a private attr) """
    return pimpDB._maintainer
def DB_URL(pimpDB):
    """ Return the primary URL of a pimp database (first entry of its URL list) """
    return pimpDB._urllist[0]
def PKG_HIDDEN(package):
    """ Return True iff the package is a hidden package """
    # Hidden packages are those without a download URL in their metadata.
    return package._dict.get('Download-URL') is None
class PackageDatabase (NSDocument):
    """
    The document class for a package database
    """
    # Interface Builder outlets, connected when the PackageDatabase NIB loads.
    databaseMaintainer = objc.IBOutlet()
    databaseName = objc.IBOutlet()
    installButton = objc.IBOutlet()
    installDependencies = objc.IBOutlet()
    installationLocation = objc.IBOutlet()
    installationLog = objc.IBOutlet()
    installationPanel = objc.IBOutlet()
    installationProgress = objc.IBOutlet()
    installationTitle = objc.IBOutlet()
    itemDescription = objc.IBOutlet()
    itemHome = objc.IBOutlet()
    itemInstalled = objc.IBOutlet()
    itemStatus = objc.IBOutlet()
    overwrite = objc.IBOutlet()
    packageTable = objc.IBOutlet()
    prerequisitesTable = objc.IBOutlet()
    progressOK = objc.IBOutlet()
    showHidden = objc.IBOutlet()
    verbose = objc.IBOutlet()

    def init(self):
        """
        Initialize the document without a database
        """
        # Cocoa-style init: super may return None or a replacement object.
        self = super(PackageDatabase, self).init()
        if self is None: return None
        self.pimp = None
        self._packages = []
        return self

    def initWithContentsOfFile_ofType_(self, path, type):
        """
        Open a local database.
        """
        self = self.init()
        if self is None: return self
        url = NSURL.fileURLWithPath_(path)
        self.openDB(url.absoluteString())
        return self

    def __del__(self):
        """ Clean up after ourselves """
        # The repeating refresh timer retains us; kill it so we can die.
        if hasattr(self, 'timer'):
            self.timer.invalidate()
            del self.timer

    def close(self):
        # Same timer teardown as __del__, for explicit document close.
        if hasattr(self, 'timer'):
            self.timer.invalidate()
            del self.timer
        super(PackageDatabase, self).close()

    def setDB(self, pimpURL, pimpDB):
        # Adopt a (possibly downloaded) pimp database and refresh the UI.
        self.pimp = pimpDB
        self._packages = pimpDB.list()
        self._prerequisites = []
        # Outlets are None until the NIB is loaded; guard each UI touch.
        if self.databaseName is not None:
            self.databaseName.setStringValue_(DB_DESCRIPTION(self.pimp))
            self.databaseMaintainer.setStringValue_(DB_MAINTAINER(self.pimp))
        if self.packageTable is not None:
            self.packageTable.reloadData()
            self.tableViewSelectionDidChange_(None)
        self.setFileName_(pimpURL)
        self.pimpURL = pimpURL
        if hasattr(self, 'timer'):
            self.timer.invalidate()
        # Re-check installed-package state every 10 seconds.
        self.timer = NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
            10.0,
            self,
            self.checkUpdates_,
            None,
            True)

    def openDB(self, dbUrl=None):
        """
        Open a database at the specified URL
        """
        prefs = pimp.PimpPreferences()
        if dbUrl is not None:
            prefs.pimpDatabase = dbUrl
        else:
            prefs.pimpDatabase = pimp.DEFAULT_PIMPDATABASE
        db = pimp.PimpDatabase(prefs)
        db.appendURL(prefs.pimpDatabase)
        self.setDB(dbUrl, db)

    def checkUpdates_(self, sender):
        """
        Refresh the package information, the user may have installed or
        removed a package. This method is called once in a while using a timer.
        """
        if self.packageTable is None: return
        self.sortPackages()
        self.packageTable.reloadData()

    def windowNibName(self):
        """ Return the name of the document NIB """
        return 'PackageDatabase'

    def displayName(self):
        """ Return the document name for inside the window title """
        if self.pimp is None:
            return "Untitled"
        return DB_URL(self.pimp)

    def awakeFromNib(self):
        """
        Initialize the GUI now that the NIB has been loaded.
        """
        if self.pimp is not None:
            self.databaseName.setStringValue_(DB_DESCRIPTION(self.pimp))
            self.databaseMaintainer.setStringValue_(DB_MAINTAINER(self.pimp))
        else:
            self.databaseName.setStringValue_("")
            self.databaseMaintainer.setStringValue_("")
        # Restore checkbox state from the user defaults.
        self.setBoolFromDefaults(self.verbose, 'verbose')
        self.setBoolFromDefaults(
            self.installDependencies, 'installDependencies')
        self.setBoolFromDefaults(self.showHidden, 'showHidden')
        self.setBoolFromDefaults(self.overwrite, 'forceInstallation')
        # Row 0 = system-wide install, row 1 = per-user install.
        b = NSUserDefaults.standardUserDefaults(
            ).boolForKey_('installSystemWide')
        if b:
            self.installationLocation.setState_atRow_column_(NSOnState, 0, 0)
        else:
            self.installationLocation.setState_atRow_column_(NSOnState, 1, 0)
        self.sortPackages()

    def setBoolFromDefaults(self, field, name):
        # Load a boolean preference into a checkbox outlet.
        defaults = NSUserDefaults.standardUserDefaults()
        b = defaults.boolForKey_(name)
        if b:
            field.setState_(NSOnState)
        else:
            field.setState_(NSOffState)

    def saveBoolToDefaults(self, field, name):
        # Persist a checkbox outlet's state as a boolean preference.
        defaults = NSUserDefaults.standardUserDefaults()
        defaults.setBool_forKey_(field.state() == NSOnState, name)
        defaults.synchronize()

    @objc.IBAction
    def savePreferences_(self, sender):
        self.saveBoolToDefaults(self.verbose, 'verbose')
        self.saveBoolToDefaults(self.installDependencies, 'installDependencies')
        self.saveBoolToDefaults(self.showHidden, 'showHidden')
        self.saveBoolToDefaults(self.overwrite, 'forceInstallation')
        self.saveBoolToDefaults(
            self.installationLocation.cellAtRow_column_(0, 0),
            'installSystemWide')

    def packages(self):
        return self._packages

    def selectedPackage(self):
        # Returns the pimp package for the selected table row, or None.
        row = self.packageTable.selectedRow()
        if row == -1: return None
        return self._packages[row]

    def tableViewSelectionDidChange_(self, obj):
        """
        Update the detail view
        """
        package = self.selectedPackage()
        if package is None:
            # No selected package, clear the detail view
            setString(self.itemHome, None)
            setString(self.itemStatus, None)
            setString(self.itemInstalled, None)
            self.itemDescription.setString_("")
            self.installButton.setEnabled_(False)
            self._prerequisites = []
            self.prerequisitesTable.reloadData()
        else:
            # Update the detail view
            setString(self.itemHome, package.homepage())
            # XXX: Could we use ReST for the the description?
            # Recognizing and 'activating' URL's would be fairly easy.
            self.itemDescription.setString_(
                package.description()
            )
            status, msg = package.installed()
            setString(self.itemInstalled, status)
            setString(self.itemStatus, msg)
            self.installButton.setEnabled_(True)
            self._prerequisites = package.prerequisites()
            # XXX: Add the closure of all dependencies
            self.prerequisitesTable.reloadData()

    @objc.IBAction
    def addToFavorites_(self, sender):
        # Reaches into pimp internals (see the module-level note above).
        appdel = NSApplication.sharedApplication().delegate()
        appdel.addFavorite(self.pimp._description, self.pimp._urllist[0])

    #
    # NSTableDataSource implementation, for the package list
    #
    def numberOfRowsInTableView_(self, view):
        if not hasattr(self, 'pimp') or self.pimp is None:
            return 0
        # We are the datasource for both the package table and the
        # prerequisites table; dispatch on the view identity.
        if view is self.packageTable:
            return len(self._packages)
        else:
            return len(self._prerequisites)

    def tableView_objectValueForTableColumn_row_(self, view, col, row):
        colname = col.identifier()
        if view is self.packageTable:
            package = self._packages[row]
            shortdescription = None
        else:
            package, shortdescription = self._prerequisites[row]
        if colname == 'installed':
            # XXX: Nicer formatting
            # installed() returns a (status, message) tuple; show the status.
            return getattr(package, colname)()[0]
        # Column identifiers double as package accessor names.
        return getattr(package, colname)()

    def tableView_sortDescriptorsDidChange_(self, view, oldDescriptors):
        if view is self.packageTable:
            self.sortPackages()

    def sortPackages(self):
        """
        Sort the package list in the order wished for by the user.
        """
        if self.pimp is None:
            return
        if self.packageTable is None:
            return
        sortInfo = [
            (item.key(), item.ascending(), item.selector())
            for item in self.packageTable.sortDescriptors()
        ]
        if self.showHidden.state() == NSOnState:
            self._packages = self.pimp.list()[:]
        else:
            self._packages = [ pkg
                for pkg in self.pimp.list() if not PKG_HIDDEN(pkg) ]
        if not sortInfo:
            self.packageTable.reloadData()
            self.tableViewSelectionDidChange_(None)
            return
        def cmpBySortInfo(l, r):
            # Python 2 comparison function honoring the table's sort
            # descriptors; non-'compare:' selectors are dispatched to the
            # NSString value (e.g. case-insensitive compare).
            for key, ascending, meth in sortInfo:
                if key == 'installed':
                    l_val = getattr(l, key)()[0]
                    r_val = getattr(r, key)()[0]
                else:
                    l_val = getattr(l, key)()
                    r_val = getattr(r, key)()
                if meth == 'compare:':
                    res = cmp(l_val, r_val)
                else:
                    if isinstance(l_val, objc.pyobjc_unicode):
                        l_val = l_val.nsstring()
                    elif isinstance(l_val, (unicode, str)):
                        l_val = NSString.stringWithString_(l_val).nsstring()
                    res = getattr(l_val, meth)(r_val)
                if not ascending:
                    res = -res
                if res != 0:
                    return res
            return 0
        # Python 2 list.sort with a cmp function.
        self._packages.sort(cmpBySortInfo)
        self.packageTable.reloadData()

    @objc.IBAction
    def filterPackages_(self, sender):
        """
        GUI action that is triggered when one of the view options
        changes
        """
        self.sortPackages()

    @objc.IBAction
    def visitHome_(self, sender):
        """
        Open the homepage of the currently selected package in the
        default webbrowser.
        """
        package = self.selectedPackage()
        if package is None:
            return
        home = package.homepage()
        if home is None:
            return
        try:
            webbrowser.open(home)
        except Exception, msg:
            NSBeginAlertSheet(
                'Opening homepage failed',
                'OK', None, None, self.windowForSheet(), None, None, None,
                0, 'Could not open homepage: %s'%(msg,))

    @objc.IBAction
    def installPackage_(self, sender):
        """
        Install the currently selected package
        """
        package = self.selectedPackage()
        if package is None: return
        force = self.overwrite.state() == NSOnState
        recursive = self.installDependencies.state() == NSOnState
        pimpInstaller = pimp.PimpInstaller(self.pimp)
        lst, messages = pimpInstaller.prepareInstall(package, force, recursive)
        if messages:
            NSBeginAlertSheet(
                'Cannot install packages',
                'OK', None, None,
                self.windowForSheet(), None, None, None, 0,
                '\n'.join(messages))
            return
        app = NSApplication.sharedApplication()
        self.installationTitle.setStringValue_(
            'Installing: %s ...'%(package.shortdescription(),))
        self.installationProgress.setHidden_(False)
        self.installationProgress.startAnimation_(self)
        self.progressOK.setEnabled_(False)
        # Clear the log view before streaming installer output into it.
        ts = self.installationLog.textStorage()
        ts.deleteCharactersInRange_((0, ts.length()))
        app.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
            self.installationPanel,
            self.windowForSheet(),
            None, None, 0)
        # I'm not sure if this accidental or not, but prepareInstall() returns
        # a list of package in the order that they should be installed in,
        # and install() installs them in the reverse order :-(
        # XXX: This seems to be a bug in pimp.
        self.runner = InstallerThread(
            self,
            pimpInstaller,
            lst[::-1],
            self.verbose.state() == NSOnState,
            self.installationLog.textStorage()
        )
        self.runner.start()

    @objc.IBAction
    def closeProgress_(self, sender):
        """
        Close the installation progress sheet
        """
        self.installationPanel.close()
        NSApplication.sharedApplication().endSheet_(self.installationPanel)

    @objc.IBAction
    def installationDone_(self, sender):
        """
        The installer thread is ready, close the sheet.
        """
        self.progressOK.setEnabled_(True)
        # NOTE(review): setHidden_(False) here looks like it should be
        # True (hide the spinner once done) -- confirm intended behavior.
        self.installationProgress.setHidden_(False)
        self.installationProgress.stopAnimation_(self)
        messages = self.runner.result
        if messages:
            ts = self.installationLog.textStorage()
            ts.appendAttributedString_(
                NSAttributedString.alloc().initWithString_attributes_(
                    '\n\nCannot install packages\n\n',
                    {
                        NSFontAttributeName: NSFont.boldSystemFontOfSize_(12),
                    }
                ))
            ts.appendAttributedString_(
                NSAttributedString.alloc().initWithString_(
                    '\n'.join(messages) + '\n'))
        self.packageTable.reloadData()
        self.tableViewSelectionDidChange_(None)
class DownloadThread (threading.Thread):
    """
    Thread for downloading a PackageManager database.
    This is used by the application delegate to open databases.
    """
    # NOTE(review): threading.Thread's daemon flag is named 'daemon'
    # (or set via setDaemon()); 'daemon_thread' is likely ignored -- confirm.
    daemon_thread = True

    def __init__(self, master, document, url):
        """
        Initialize the thread.
        master - NSObject implementing dbReceived: and dbProblem:
        document - An PackageDatabase
        url - The PackMan URL (None selects pimp.DEFAULT_PIMPDATABASE)
        """
        threading.Thread.__init__(self)
        self.master = master
        self.document = document
        self.url = url

    def run(self):
        """
        Run the thread. This creates a new pimp.PimpDatabase, tells it to
        download our database and then forwards the database to the
        master. The last step is done on the main thread because of Cocoa
        threading issues.
        """
        pool = NSAutoreleasePool.alloc().init()
        try:
            prefs = pimp.PimpPreferences()
            if self.url is not None:
                prefs.pimpDatabase = self.url
            else:
                prefs.pimpDatabase = pimp.DEFAULT_PIMPDATABASE
            db = pimp.PimpDatabase(prefs)
            db.appendURL(prefs.pimpDatabase)
            self.master.performSelectorOnMainThread_withObject_waitUntilDone_(
                'dbReceived:', (self.document, self.url, db), False)
        except:
            # Bare except is deliberate: any failure raised inside pimp's
            # download code is reported back to the GUI via dbProblem:.
            self.master.performSelectorOnMainThread_withObject_waitUntilDone_(
                'dbProblem:', (self.document, self.url, sys.exc_info()), False)
        del pool
class InstallerThread (threading.Thread):
    """
    A thread for installing packages.

    Like downloading a database, installing (and downloading!) packages is
    a time-consuming task that is better done on a separate thread.
    """
    # NOTE(review): threading.Thread's daemon flag is named 'daemon'
    # (or set via setDaemon()); 'daemon_thread' is likely ignored -- confirm.
    daemon_thread = True

    def __init__(self, document, installer, packages, verbose, textStorage):
        """
        document    - the PackageDatabase document to notify when done
        installer   - a pimp.PimpInstaller that performs the work
        packages    - packages to install, in installation order
        verbose     - when true, stream installer output to textStorage
        textStorage - NSTextStorage receiving progress text
        """
        threading.Thread.__init__(self)
        self.document = document
        self.installer = installer
        self.packages = packages
        self.verbose = verbose
        self.textStorage = textStorage
        self.result = None

    def write(self, data):
        """ Append progress text to the log, on the main thread. """
        self.textStorage.performSelectorOnMainThread_withObject_waitUntilDone_(
            'appendAttributedString:',
            NSAttributedString.alloc().initWithString_(data),
            False)

    def run(self):
        pool = NSAutoreleasePool.alloc().init()
        if self.verbose:
            result = self.installer.install(self.packages, self)
        else:
            result = self.installer.install(self.packages, None)
        # BUG FIX: the install result was previously left in the local
        # variable only, so PackageDatabase.installationDone_ always read
        # self.runner.result == None and never displayed the installer's
        # error messages. Publish it on the instance before notifying.
        self.result = result
        self.write('\nDone.\n')
        self.document.performSelectorOnMainThread_withObject_waitUntilDone_(
            'installationDone:', None, False)
        del pool
class URLOpener (NSObject):
"""
Model/controller for the 'File/Open URL...' panel
"""
okButton = objc.IBOutlet
urlField = objc.IBOutlet()
def __del__(self):
# XXX: I'm doing something wrong, this function is never called!
print "del URLOpener %#x"%(id(self),)
def awakeFromNib(self):
self.urlField.window().makeKeyAndOrderFront_(None)
@objc.IBAction
def doOpenURL_(self, sender):
url = self.urlField.stringValue()
if not url:
return
# Ask the application delegate to open the selected database
NSApplication.sharedApplication().delegate().openDatabase(url)
@objc.IBAction
def controlTextDidChange_(self, sender):
"""
The value of the URL input field changed, enable the OK button
if there is input, disable it otherwise.
"""
if self.urlField.stringValue() != "":
self.okButton.setEnabled_(True)
else:
self.okButton.setEnabled_(False)
class PackageManager (NSObject):
"""
Application controller: application-level callbacks and actions
"""
favoritesPanel = objc.IBOutlet()
favoritesTable = objc.IBOutlet()
favoritesTitle = objc.IBOutlet()
favoritesURL = objc.IBOutlet()
#
# Standard actions
#
def awakeFromNib(self):
"""
We've been restored from the NIB
"""
self.loadFavorites()
#
# Working with favorites
#
# The favorites are stored in the user defaults for the application.
def loadFavorites(self):
"""
Load our favorite database
"""
self.favorites = NSUserDefaults.standardUserDefaults().arrayForKey_(
'favorites')
if self.favorites is None:
self.favorites = []
else:
self.favorites = list(self.favorites)
def saveFavorites(self):
"""
Save the favorites database, must be called whenever self.favorites
is changed.
"""
defaults = NSUserDefaults.standardUserDefaults()
defaults.setObject_forKey_(
self.favorites,
'favorites')
defaults.synchronize()
def addFavorite(self, title, url):
"""
Add a new favorite, and save the database
"""
self.favorites.append({'title':title, 'URL':url})
self.favoritesTable.reloadData()
self.saveFavorites()
def menuNeedsUpdate_(self, menu):
"""
We're the delegate for the Favorites menu
Update the menu: it should list the entries in the favorites database.
"""
menuLen = menu.numberOfItems()
# Remove old items
for i in range(menuLen-1, 2, -1):
menu.removeItemAtIndex_(i)
# Insert new ones
for item in self.favorites:
title = item['title']
url = item['URL']
mi = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
title, self.openFavorite_, "")
mi.setTarget_(self)
mi.setRepresentedObject_(item)
menu.addItem_(mi)
def tableViewSelectionDidChange_(self, obj):
"""
We're the delegate (and datasource) for the favorites list in the
edit pane for the favorites.
Update the input fields to show the current item.
"""
row = self.favoritesTable.selectedRow()
if row == -1:
self.favoritesTitle.setStringValue_('')
self.favoritesURL.setStringValue_('')
self.favoritesTitle.setEnabled_(False)
self.favoritesURL.setEnabled_(False)
else:
self.favoritesTitle.setStringValue_(self.favorites[row]['title'])
self.favoritesURL.setStringValue_(self.favorites[row]['URL'])
self.favoritesTitle.setEnabled_(True)
self.favoritesURL.setEnabled_(True)
def numberOfRowsInTableView_(self, view):
"""
We're the datasource for the favorites list in the Favorites panel
"""
if not hasattr(self, 'favorites'):
return 0
return len(self.favorites)
def tableView_objectValueForTableColumn_row_(self, view, col, row):
"""
We're the datasource for the favorites list in the Favorites panel
"""
return self.favorites[row]['title']
@objc.IBAction
def changeFavoritesTitle_(self, sender):
"""
Update the title of the currently selected favorite item
"""
row = self.favoritesTable.selectedRow()
if row == -1:
return
self.favorites[row]['title'] = self.favoritesTitle.stringValue()
self.saveFavorites()
self.favoritesTable.reloadData()
@objc.IBAction
def changeFavoritesUrl_(self, sender):
"""
Update the URL of the currently selected favorite item
"""
row = self.favoritesTable.selectedRow()
if row == -1:
return
self.favorites[row]['URL'] = self.favoritesURL.stringValue()
self.saveFavorites()
self.favoritesTable.reloadData()
@objc.IBAction
def openFavorite_(self, sender):
"""
Open a favorite database (action for entries in the Favorites menu)
"""
self.openDatabase(sender.representedObject()['URL'])
#
# Global actions/callbacks
#
def openDatabase(self, url):
"""
Create a new NSDocument for the database at the specified URL.
"""
doc = NSDocumentController.sharedDocumentController(
).openUntitledDocumentOfType_display_(DB_FILE_TYPE, False)
try:
downloader = DownloadThread(self, doc, url)
downloader.start()
except:
doc.close()
raise
def dbReceived_(self, (doc, url, db)):
doc.setDB(url, db)
doc.showWindows()
def dbProblem_(self, (doc, url, exc_info)):
NSRunAlertPanel(
"Cannot open database",
"Opening database at %s failed: %s"%(url, exc_info[1]),
"OK", None, None)
doc.close()
@objc.IBAction
def openURL_(self, sender):
"""
The user wants to open a package URL, show the user-interface.
"""
res = NSBundle.loadNibNamed_owner_('OpenPanel', self)
@objc.IBAction
def openStandardDatabase_(self, sender):
"""
Open the standard database.
"""
self.openDatabase(pimp.DEFAULT_PIMPDATABASE)
def applicationShouldOpenUntitledFile_(self, app):
"""
The default window is not an untitled window, but the default
database
"""
return False
def applicationDidFinishLaunching_(self, app):
    """
    The application finished launching, show the default database.
    """
    # XXX: We shouldn't open the standard database if the user explicitly
    # opened another one!
    # None stands in for the (unused) action sender argument.
    self.openStandardDatabase_(None)
#
# Set some sensible defaults
#
# Seed NSUserDefaults with first-run values; anything the user has
# actually set through the defaults system overrides these.
NSUserDefaults.standardUserDefaults().registerDefaults_(
    {
        'verbose': True,
        'installDependencies': True,
        'showHidden': False,
        'forceInstallation': False,
        'installSystemWide': True,
    })
#
# A nasty hack. For some reason sys.prefix is /usr/bin/../../System/..., while
# it is /System/... in Jack's PackageManager.app. At least one package
# manager database relies on sys.prefix being /System/... (Bob's additional
# packages).
#
import os
# Normalize sys.prefix to an absolute, '..'-free path (see note above).
sys.prefix = os.path.abspath(sys.prefix)
# Hand control to the Cocoa event loop; does not return in normal use.
AppHelper.runEventLoop()
|
models | plugin_auth_token | import binascii
from hmac import compare_digest
from typing import Tuple
from apps.auth_token import constants
from apps.auth_token.crypto import (
generate_plugin_token_string,
generate_plugin_token_string_and_salt,
hash_token_string,
)
from apps.auth_token.exceptions import InvalidToken
from apps.auth_token.models import BaseAuthToken
from apps.user_management.models import Organization
from django.db import models
class PluginAuthToken(BaseAuthToken):
    """Auth token used by the Grafana plugin, scoped to one organization.

    The raw token string is derived from the organization's stack/org
    ids plus a random salt; only a hash digest and the salt are
    persisted, so a presented token is validated by re-deriving the
    string and comparing digests.
    """

    objects: models.Manager["PluginAuthToken"]

    # Hex-encoded random salt used to re-derive the token string during
    # validation. Nullable for rows created before salting existed.
    salt = models.CharField(max_length=constants.AUTH_TOKEN_CHARACTER_LENGTH, null=True)
    organization = models.ForeignKey(
        to=Organization,
        on_delete=models.CASCADE,
        related_name="plugin_auth_tokens",
    )

    @classmethod
    def create_auth_token(
        cls, organization: Organization
    ) -> Tuple["PluginAuthToken", str]:
        """Create and persist a fresh token for *organization*.

        Any previous tokens for the organization are removed first, so
        at most one plugin token exists per organization. Returns the
        model instance together with the plaintext token string — the
        only time the plaintext is available.
        """
        # delete() on an empty queryset is a no-op, so the previous
        # exists() pre-check was a redundant extra query; dropped.
        cls.objects.filter(organization=organization).delete()
        token_string, salt = generate_plugin_token_string_and_salt(
            organization.stack_id, organization.org_id
        )
        digest = hash_token_string(token_string)
        auth_token = cls.objects.create(
            # token_key is a short indexable prefix used to narrow the
            # candidate set during validation.
            token_key=token_string[: constants.TOKEN_KEY_LENGTH],
            digest=digest,
            salt=salt,
            organization=organization,
        )
        return auth_token, token_string

    @classmethod
    def validate_token_string(cls, token: str, *args, **kwargs) -> "PluginAuthToken":
        """Validate a presented token; return its row or raise InvalidToken.

        kwargs["context"] must carry "stack_id" and "org_id". The token
        string is re-derived from the stored salt and those ids, and the
        digests are compared in constant time. (The trailing plaintext
        equality check is not constant-time; left as-is deliberately.)
        """
        context = kwargs["context"]
        for auth_token in cls.objects.filter(
            token_key=token[: constants.TOKEN_KEY_LENGTH]
        ):
            try:
                stack_id = int(context["stack_id"])
                org_id = int(context["org_id"])
                salt = binascii.unhexlify(auth_token.salt)
                recreated_token = generate_plugin_token_string(salt, stack_id, org_id)
                digest = hash_token_string(recreated_token)
            except (TypeError, binascii.Error):
                # Malformed context value or a legacy row without a
                # usable salt: treat the token as invalid.
                raise InvalidToken
            if compare_digest(digest, auth_token.digest) and token == recreated_token:
                return auth_token
        raise InvalidToken
|
comictaggerlib | taggerwindow | # coding=utf-8
"""The main window of the ComicTagger app"""
# Copyright 2012-2014 Anthony Beville
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import locale
import os
import pickle
import platform
import pprint
import re
import sys
import webbrowser
import ctversion
import utils
from autotagmatchwindow import AutoTagMatchWindow
from autotagprogresswindow import AutoTagProgressWindow
from autotagstartwindow import AutoTagStartWindow
from cbltransformer import CBLTransformer
from comicarchive import MetaDataStyle
from comicinfoxml import ComicInfoXml
from comictaggerlib.ui.qtutils import (centerWindowOnParent,
reduceWidgetFontSize)
from comicvinetalker import ComicVineTalker, ComicVineTalkerException
from coverimagewidget import CoverImageWidget
from crediteditorwindow import CreditEditorWindow
from exportwindow import ExportConflictOpts, ExportWindow
from filenameparser import FileNameParser
from fileselectionlist import FileSelectionList
from genericmetadata import GenericMetadata
from issueidentifier import IssueIdentifier
from logwindow import LogWindow
from optionalmsgdialog import OptionalMessageDialog
from pagebrowser import PageBrowserWindow
from pagelisteditor import PageListEditor
from PyQt4 import QtCore, QtGui, QtNetwork, uic
from renamewindow import RenameWindow
from settings import ComicTaggerSettings
from settingswindow import SettingsWindow
from versionchecker import VersionChecker
#from comicarchive import ComicArchive
#from pageloader import PageLoader
from volumeselectionwindow import VolumeSelectionWindow
#import signal
#from PyQt4.QtCore import QUrl, pyqtSignal
class OnlineMatchResults():
    """Accumulator for the outcomes of a batch auto-tag run.

    Each attribute is an initially-empty list holding the per-archive
    results for one outcome bucket, so summary windows can report each
    category separately.
    """

    def __init__(self):
        for bucket in ('goodMatches', 'noMatches', 'multipleMatches',
                       'lowConfidenceMatches', 'writeFailures',
                       'fetchDataFailures'):
            setattr(self, bucket, [])
class MultipleMatch():
    """Pairs a comic archive with the candidate online matches found
    for it, for later disambiguation by the user."""

    def __init__(self, ca, match_list):
        self.matches = match_list
        self.ca = ca
class TaggerWindow(QtGui.QMainWindow):
appName = "ComicTagger"
version = ctversion.version
def __init__(self, file_list, settings, parent=None, opts=None):
    """Build the main ComicTagger window.

    file_list -- paths to load at startup (may be empty)
    settings  -- ComicTaggerSettings instance (persisted app state)
    opts      -- parsed command-line options, or None
    """
    super(TaggerWindow, self).__init__(parent)
    uic.loadUi(ComicTaggerSettings.getUIFile('taggerwindow.ui'), self)
    self.settings = settings
    #----------------------------------
    # prevent multiple instances
    # Try to reach an already-running instance via a local socket keyed
    # by install_id; if one answers, hand it our file list and quit.
    socket = QtNetwork.QLocalSocket(self)
    socket.connectToServer(settings.install_id)
    alive = socket.waitForConnected(3000)
    if alive:
        print(
            "Another application with key [{}] is already running".format(
                settings.install_id))
        # send file list to other instance
        if len(file_list) > 0:
            socket.write(pickle.dumps(file_list))
            if not socket.waitForBytesWritten(3000):
                print(socket.errorString().toLatin1())
        socket.disconnectFromServer()
        sys.exit()
    else:
        # listen on a socket to prevent multiple instances
        self.socketServer = QtNetwork.QLocalServer(self)
        self.socketServer.newConnection.connect(
            self.onIncomingSocketConnection)
        ok = self.socketServer.listen(settings.install_id)
        if not ok:
            if self.socketServer.serverError(
            ) == QtNetwork.QAbstractSocket.AddressInUseError:
                #print("Resetting unresponsive socket with key [{}]".format(settings.install_id))
                # A stale server from a crashed instance still holds
                # the name; remove it and listen again.
                self.socketServer.removeServer(settings.install_id)
                ok = self.socketServer.listen(settings.install_id)
            if not ok:
                print(
                    "Cannot start local socket with key [{}]. Reason: %s ".format(
                        settings.install_id,
                        self.socketServer.errorString()))
                sys.exit()
        #print("Registering as single instance with key [{}]".format(settings.install_id))
    #----------------------------------
    # Embed the archive-cover widget, page list editor and file list.
    self.archiveCoverWidget = CoverImageWidget(
        self.coverImageContainer, CoverImageWidget.ArchiveMode)
    gridlayout = QtGui.QGridLayout(self.coverImageContainer)
    gridlayout.addWidget(self.archiveCoverWidget)
    gridlayout.setContentsMargins(0, 0, 0, 0)
    self.pageListEditor = PageListEditor(self.tabPages)
    gridlayout = QtGui.QGridLayout(self.tabPages)
    gridlayout.addWidget(self.pageListEditor)
    #---------------------------
    self.fileSelectionList = FileSelectionList(
        self.widgetListHolder, self.settings)
    gridlayout = QtGui.QGridLayout(self.widgetListHolder)
    gridlayout.addWidget(self.fileSelectionList)
    self.fileSelectionList.selectionChanged.connect(
        self.fileListSelectionChanged)
    self.fileSelectionList.listCleared.connect(self.fileListCleared)
    self.fileSelectionList.setSorting(
        self.settings.last_filelist_sorted_column,
        self.settings.last_filelist_sorted_order)
    # we can't specify relative font sizes in the UI designer, so
    # walk through all the lablels in the main form, and make them
    # a smidge smaller
    for child in self.scrollAreaWidgetContents.children():
        if (isinstance(child, QtGui.QLabel)):
            f = child.font()
            if f.pointSize() > 10:
                f.setPointSize(f.pointSize() - 2)
            f.setItalic(True)
            child.setFont(f)
    self.scrollAreaWidgetContents.adjustSize()
    self.setWindowIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('app.png')))
    if opts is not None and opts.data_style is not None and opts.data_style != MetaDataStyle.COMET:
        # respect the command line option tag type
        settings.last_selected_save_data_style = opts.data_style
        settings.last_selected_load_data_style = opts.data_style
    self.save_data_style = settings.last_selected_save_data_style
    self.load_data_style = settings.last_selected_load_data_style
    self.setAcceptDrops(True)
    self.configMenus()
    self.statusBar()
    self.populateComboBoxes()
    self.page_browser = None
    self.resetApp()
    # set up some basic field validators
    validator = QtGui.QIntValidator(1900, 2099, self)
    self.lePubYear.setValidator(validator)
    validator = QtGui.QIntValidator(1, 12, self)
    self.lePubMonth.setValidator(validator)
    # TODO: for now keep it simple, ideally we should check the full date
    validator = QtGui.QIntValidator(1, 31, self)
    self.lePubDay.setValidator(validator)
    validator = QtGui.QIntValidator(1, 99999, self)
    self.leIssueCount.setValidator(validator)
    self.leVolumeNum.setValidator(validator)
    self.leVolumeCount.setValidator(validator)
    self.leAltIssueNum.setValidator(validator)
    self.leAltIssueCount.setValidator(validator)
    # TODO set up an RE validator for issueNum that allows
    # for all sorts of wacky things
    # tweak some control fonts
    reduceWidgetFontSize(self.lblFilename, 1)
    reduceWidgetFontSize(self.lblArchiveType)
    reduceWidgetFontSize(self.lblTagList)
    reduceWidgetFontSize(self.lblPageCount)
    # make sure some editable comboboxes don't take drop actions
    self.cbFormat.lineEdit().setAcceptDrops(False)
    self.cbMaturityRating.lineEdit().setAcceptDrops(False)
    # hook up the callbacks
    self.cbLoadDataStyle.currentIndexChanged.connect(self.setLoadDataStyle)
    self.cbSaveDataStyle.currentIndexChanged.connect(self.setSaveDataStyle)
    self.btnEditCredit.clicked.connect(self.editCredit)
    self.btnAddCredit.clicked.connect(self.addCredit)
    self.btnRemoveCredit.clicked.connect(self.removeCredit)
    self.twCredits.cellDoubleClicked.connect(self.editCredit)
    self.connectDirtyFlagSignals()
    self.pageListEditor.modified.connect(self.setDirtyFlag)
    self.pageListEditor.firstFrontCoverChanged.connect(
        self.frontCoverChanged)
    self.pageListEditor.listOrderChanged.connect(self.pageListOrderChanged)
    self.tabWidget.currentChanged.connect(self.tabChanged)
    self.updateStyleTweaks()
    self.show()
    self.setAppPosition()
    if self.settings.last_form_side_width != -1:
        self.splitter.setSizes(
            [self.settings.last_form_side_width, self.settings.last_list_side_width])
    self.raise_()
    QtCore.QCoreApplication.processEvents()
    self.resizeEvent(None)
    self.splitter.splitterMoved.connect(self.splitterMovedEvent)
    # Share these actions with the file list's context menu.
    self.fileSelectionList.addAppAction(self.actionAutoIdentify)
    self.fileSelectionList.addAppAction(self.actionAutoTag)
    self.fileSelectionList.addAppAction(self.actionCopyTags)
    self.fileSelectionList.addAppAction(self.actionRename)
    self.fileSelectionList.addAppAction(self.actionRemoveAuto)
    self.fileSelectionList.addAppAction(self.actionRepackage)
    if len(file_list) != 0:
        self.fileSelectionList.addPathList(file_list)
    # One-time dialogs: beta disclaimer, anonymous-stats opt-in, and an
    # online version check — all gated by persisted settings flags.
    if self.settings.show_disclaimer:
        checked = OptionalMessageDialog.msg(self, "Welcome!",
            """
Thanks for trying ComicTagger!<br><br>
Be aware that this is beta-level software, and consider it experimental.
You should use it very carefully when modifying your data files. As the
license says, it's "AS IS!"<br><br>
Also, be aware that writing tags to comic archives will change their file hashes,
which has implications with respect to other software packages. It's best to
use ComicTagger on local copies of your comics.<br><br>
Have fun!
"""
            )
        self.settings.show_disclaimer = not checked
    if self.settings.ask_about_usage_stats:
        reply = QtGui.QMessageBox.question(
            self,
            self.tr("Anonymous Stats"),
            self.tr(
                "Is it okay if ComicTagger occasionally sends some anonymous usage statistics? Nothing nefarious, "
                "just trying to get a better idea of how the app is being used.\n\nThanks for your support!"),
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.Default,
            QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            self.settings.send_usage_stats = True
        self.settings.ask_about_usage_stats = False
    if self.settings.check_for_new_version:
        self.checkLatestVersionOnline()
def sigint_handler(self, *args):
    """SIGINT handler: shut the window down from inside the Qt loop."""
    # defer the actual close in the app loop thread
    QtCore.QTimer.singleShot(200, self.close)
def resetApp(self):
    """Return the UI to its no-archive-loaded state.

    Clears the cover image, the form, the page editors and the info
    box, and refreshes the title/menus accordingly.
    """
    self.archiveCoverWidget.clear()
    self.comic_archive = None
    self.dirtyFlag = False
    self.clearForm()
    self.pageListEditor.resetPage()
    if self.page_browser is not None:
        self.page_browser.reset()
    self.updateAppTitle()
    self.updateMenus()
    self.updateInfoBox()
    self.droppedFile = None
    self.page_loader = None
def updateAppTitle(self):
    """Refresh the window title from the loaded archive and dirty state.

    With no archive loaded the title is just the application name;
    otherwise it is "<app> - <path>" plus " [modified]" and/or
    " [read only]" suffixes as appropriate.
    """
    ca = self.comic_archive
    if ca is None:
        self.setWindowTitle(self.appName)
        return
    suffix = " [modified]" if self.dirtyFlag else ""
    if not ca.isWritable():
        suffix += " [read only]"
    self.setWindowTitle(self.appName + " - " + ca.path + suffix)
def configMenus(self):
    """Attach shortcuts, status tips, icons and handlers to every
    menu/toolbar action. Pure wiring — no state changes beyond Qt
    action configuration.
    """
    # File Menu
    self.actionExit.setShortcut('Ctrl+Q')
    self.actionExit.setStatusTip('Exit application')
    self.actionExit.triggered.connect(self.close)
    self.actionLoad.setShortcut('Ctrl+O')
    self.actionLoad.setStatusTip('Load comic archive')
    self.actionLoad.triggered.connect(self.selectFile)
    self.actionLoadFolder.setShortcut('Ctrl+Shift+O')
    self.actionLoadFolder.setStatusTip('Load folder with comic archives')
    self.actionLoadFolder.triggered.connect(self.selectFolder)
    self.actionWrite_Tags.setShortcut('Ctrl+S')
    self.actionWrite_Tags.setStatusTip('Save tags to comic archive')
    self.actionWrite_Tags.triggered.connect(self.commitMetadata)
    self.actionAutoTag.setShortcut('Ctrl+T')
    self.actionAutoTag.setStatusTip('Auto-tag multiple archives')
    self.actionAutoTag.triggered.connect(self.autoTag)
    self.actionCopyTags.setShortcut('Ctrl+C')
    self.actionCopyTags.setStatusTip('Copy one tag style to another')
    self.actionCopyTags.triggered.connect(self.copyTags)
    self.actionRemoveAuto.setShortcut('Ctrl+D')
    self.actionRemoveAuto.setStatusTip(
        'Remove currently selected modify tag style from the archive')
    self.actionRemoveAuto.triggered.connect(self.removeAuto)
    self.actionRemoveCBLTags.setStatusTip(
        'Remove ComicBookLover tags from comic archive')
    self.actionRemoveCBLTags.triggered.connect(self.removeCBLTags)
    self.actionRemoveCRTags.setStatusTip(
        'Remove ComicRack tags from comic archive')
    self.actionRemoveCRTags.triggered.connect(self.removeCRTags)
    self.actionViewRawCRTags.setStatusTip(
        'View raw ComicRack tag block from file')
    self.actionViewRawCRTags.triggered.connect(self.viewRawCRTags)
    self.actionViewRawCBLTags.setStatusTip(
        'View raw ComicBookLover tag block from file')
    self.actionViewRawCBLTags.triggered.connect(self.viewRawCBLTags)
    self.actionRepackage.setShortcut('Ctrl+E')
    self.actionRepackage.setStatusTip('Re-create archive as CBZ')
    self.actionRepackage.triggered.connect(self.repackageArchive)
    self.actionRename.setShortcut('Ctrl+N')
    self.actionRename.setStatusTip('Rename archive based on tags')
    self.actionRename.triggered.connect(self.renameArchive)
    self.actionSettings.setShortcut('Ctrl+Shift+S')
    self.actionSettings.setStatusTip('Configure ComicTagger')
    self.actionSettings.triggered.connect(self.showSettings)
    # Tag Menu
    self.actionParse_Filename.setShortcut('Ctrl+F')
    self.actionParse_Filename.setStatusTip(
        'Try to extract tags from filename')
    self.actionParse_Filename.triggered.connect(self.useFilename)
    self.actionSearchOnline.setShortcut('Ctrl+W')
    self.actionSearchOnline.setStatusTip('Search online for tags')
    self.actionSearchOnline.triggered.connect(self.queryOnline)
    self.actionAutoIdentify.setShortcut('Ctrl+I')
    self.actionAutoIdentify.triggered.connect(self.autoIdentifySearch)
    self.actionApplyCBLTransform.setShortcut('Ctrl+L')
    self.actionApplyCBLTransform.setStatusTip(
        'Modify tags specifically for CBL format')
    self.actionApplyCBLTransform.triggered.connect(self.applyCBLTransform)
    self.actionClearEntryForm.setShortcut('Ctrl+Shift+C')
    self.actionClearEntryForm.setStatusTip(
        'Clear all the data on the screen')
    self.actionClearEntryForm.triggered.connect(self.clearForm)
    # Window Menu
    self.actionPageBrowser.setShortcut('Ctrl+P')
    self.actionPageBrowser.setStatusTip('Show the page browser')
    self.actionPageBrowser.triggered.connect(self.showPageBrowser)
    # Help Menu
    self.actionAbout.setStatusTip('Show the ' + self.appName + ' info')
    self.actionAbout.triggered.connect(self.aboutApp)
    self.actionWiki.triggered.connect(self.showWiki)
    self.actionReportBug.triggered.connect(self.reportBug)
    self.actionComicTaggerForum.triggered.connect(self.showForum)
    # ToolBar
    self.actionLoad.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('open.png')))
    self.actionLoadFolder.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('longbox.png')))
    self.actionWrite_Tags.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('save.png')))
    self.actionParse_Filename.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('parse.png')))
    self.actionSearchOnline.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('search.png')))
    self.actionAutoIdentify.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('auto.png')))
    self.actionAutoTag.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('autotag.png')))
    self.actionClearEntryForm.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('clear.png')))
    self.actionPageBrowser.setIcon(
        QtGui.QIcon(ComicTaggerSettings.getGraphic('browse.png')))
    self.toolBar.addAction(self.actionLoad)
    self.toolBar.addAction(self.actionLoadFolder)
    self.toolBar.addAction(self.actionWrite_Tags)
    self.toolBar.addAction(self.actionSearchOnline)
    self.toolBar.addAction(self.actionAutoIdentify)
    self.toolBar.addAction(self.actionAutoTag)
    self.toolBar.addAction(self.actionClearEntryForm)
    self.toolBar.addAction(self.actionPageBrowser)
def repackageArchive(self):
    """Export the selected RAR archives as new CBZ (zip) files.

    Confirms with the user, shows a cancelable progress dialog while
    exporting, and finishes with a summary of successes, skips and
    failures. Respects the conflict-handling / add-to-list / delete-
    original options chosen in the ExportWindow dialog.
    """
    ca_list = self.fileSelectionList.getSelectedArchiveList()
    rar_count = 0
    for ca in ca_list:
        if ca.isRar():
            rar_count += 1
    if rar_count == 0:
        QtGui.QMessageBox.information(
            self,
            self.tr("Export as Zip Archive"),
            self.tr("No RAR archives selected!"))
        return
    if not self.dirtyFlagVerification(
            "Export as Zip Archive",
            "If you export archives as Zip now, unsaved data in the form may be lost. Are you sure?"):
        return
    if rar_count != 0:
        dlg = ExportWindow(
            self,
            self.settings,
            self.tr(
                "You have selected {0} archive(s) to export to Zip format. New archives will be created in the same folder as the original.\n\nPlease choose options below, and select OK.\n".format(rar_count)))
        dlg.adjustSize()
        dlg.setModal(True)
        if not dlg.exec_():
            return
        progdialog = QtGui.QProgressDialog(
            "", "Cancel", 0, rar_count, self)
        progdialog.setWindowTitle("Exporting as ZIP")
        progdialog.setWindowModality(QtCore.Qt.ApplicationModal)
        progdialog.show()
        prog_idx = 0
        new_archives_to_add = []
        archives_to_remove = []
        skipped_list = []
        failed_list = []
        success_count = 0
        for ca in ca_list:
            if ca.isRar():
                QtCore.QCoreApplication.processEvents()
                if progdialog.wasCanceled():
                    break
                progdialog.setValue(prog_idx)
                prog_idx += 1
                progdialog.setLabelText(ca.path)
                centerWindowOnParent(progdialog)
                QtCore.QCoreApplication.processEvents()
                original_path = os.path.abspath(ca.path)
                export_name = os.path.splitext(original_path)[0] + ".cbz"
                if os.path.lexists(export_name):
                    # Target exists: resolve per the user's chosen policy
                    # (skip, or pick a unique name).
                    if dlg.fileConflictBehavior == ExportConflictOpts.dontCreate:
                        export_name = None
                        skipped_list.append(ca.path)
                    elif dlg.fileConflictBehavior == ExportConflictOpts.createUnique:
                        export_name = utils.unique_file(export_name)
                if export_name is not None:
                    if ca.exportAsZip(export_name):
                        success_count += 1
                        if dlg.addToList:
                            new_archives_to_add.append(export_name)
                        if dlg.deleteOriginal:
                            archives_to_remove.append(ca)
                            os.unlink(ca.path)
                    else:
                        # last export failed, so remove the zip, if it
                        # exists
                        failed_list.append(ca.path)
                        if os.path.lexists(export_name):
                            os.remove(export_name)
        progdialog.close()
        self.fileSelectionList.addPathList(new_archives_to_add)
        self.fileSelectionList.removeArchiveList(archives_to_remove)
        summary = u"Successfully created {0} Zip archive(s).".format(
            success_count)
        if len(skipped_list) > 0:
            summary += u"\n\nThe following {0} RAR archive(s) were skipped due to file name conflicts:\n".format(
                len(skipped_list))
            for f in skipped_list:
                summary += u"\t{0}\n".format(f)
        if len(failed_list) > 0:
            summary += u"\n\nThe following {0} RAR archive(s) failed to export due to read/write errors:\n".format(
                len(failed_list))
            for f in failed_list:
                summary += u"\t{0}\n".format(f)
        dlg = LogWindow(self)
        dlg.setText(summary)
        dlg.setWindowTitle("Archive Export to Zip Summary")
        dlg.exec_()
def aboutApp(self):
    """Show the About dialog (version, author, website, license)."""
    website = "http://code.google.com/p/comictagger"
    email = "comictagger@gmail.com"
    license_link = "http://www.apache.org/licenses/LICENSE-2.0"
    license_name = "Apache License 2.0"
    msgBox = QtGui.QMessageBox()
    msgBox.setWindowTitle(self.tr("About " + self.appName))
    # Rich text lets the body embed clickable links.
    msgBox.setTextFormat(QtCore.Qt.RichText)
    msgBox.setIconPixmap(
        QtGui.QPixmap(ComicTaggerSettings.getGraphic('about.png')))
    msgBox.setText("<br><br><br>" +
                   self.appName +
                   " v" +
                   self.version +
                   "<br>" +
                   "©2014 Anthony Beville<br><br>" +
                   "<a href='{0}'>{0}</a><br><br>".format(website) +
                   "<a href='mailto:{0}'>{0}</a><br><br>".format(email) +
                   "License: <a href='{0}'>{1}</a>".format(license_link, license_name))
    msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
    msgBox.exec_()
def dragEnterEvent(self, event):
    """Accept a drag only if it carries at least one local file URL.

    Collects the local paths into self.droppedFiles (left as None when
    nothing usable was dragged) for dropEvent() to consume.
    """
    self.droppedFiles = None
    mime = event.mimeData()
    if not mime.hasUrls():
        return
    paths = [url.toLocalFile()
             for url in mime.urls()
             if url.isValid() and url.scheme() == "file"]
    if paths:
        self.droppedFiles = paths
        event.accept()
def dropEvent(self, event):
    """Complete a drag-and-drop: queue the paths gathered by
    dragEnterEvent() for loading, then accept the event."""
    # (A dirty-flag confirmation used to guard this; it was disabled
    # on purpose upstream.)
    self.fileSelectionList.addPathList(self.droppedFiles)
    event.accept()
def actualLoadCurrentArchive(self):
    """Populate the whole UI from self.comic_archive / self.metadata.

    Assumes metadata has already been read from the archive; falls
    back to filename parsing when it is empty, and synthesizes a
    default page list when none is present.
    """
    if self.metadata.isEmpty:
        self.metadata = self.comic_archive.metadataFromFilename(
            self.settings.parse_scan_info)
    if len(self.metadata.pages) == 0:
        self.metadata.setDefaultPageList(
            self.comic_archive.getNumberOfPages())
    self.updateCoverImage()
    if self.page_browser is not None:
        self.page_browser.setComicArchive(self.comic_archive)
        self.page_browser.metadata = self.metadata
    self.metadataToForm()
    self.pageListEditor.setData(self.comic_archive, self.metadata.pages)
    self.clearDirtyFlag()  # also updates the app title
    self.updateInfoBox()
    self.updateMenus()
    self.updateAppTitle()
def updateCoverImage(self):
    """Show the archive's designated cover page in the cover widget."""
    # First entry of the cover index list is the primary cover page.
    cover_idx = self.metadata.getCoverPageIndexList()[0]
    self.archiveCoverWidget.setArchive(self.comic_archive, cover_idx)
def updateMenus(self):
    """Enable/disable archive-dependent actions for the current state.

    Everything that needs an open archive is switched off first, then
    selectively re-enabled based on what the loaded archive supports
    (tag styles present, writability).
    """
    archive_actions = (
        self.actionAutoTag,
        self.actionCopyTags,
        self.actionRemoveAuto,
        self.actionRemoveCRTags,
        self.actionRemoveCBLTags,
        self.actionWrite_Tags,
        self.actionRepackage,
        self.actionViewRawCBLTags,
        self.actionViewRawCRTags,
        self.actionParse_Filename,
        self.actionAutoIdentify,
        self.actionRename,
        self.actionApplyCBLTransform,
    )
    # First just disable all the questionable items
    for action in archive_actions:
        action.setEnabled(False)
    # now, selectively re-enable
    if self.comic_archive is None:
        return
    has_cix = self.comic_archive.hasCIX()
    has_cbi = self.comic_archive.hasCBI()
    for action in (self.actionParse_Filename, self.actionAutoIdentify,
                   self.actionAutoTag, self.actionRename,
                   self.actionApplyCBLTransform, self.actionRepackage,
                   self.actionRemoveAuto, self.actionRemoveCRTags,
                   self.actionRemoveCBLTags, self.actionCopyTags):
        action.setEnabled(True)
    if has_cix:
        self.actionViewRawCRTags.setEnabled(True)
    if has_cbi:
        self.actionViewRawCBLTags.setEnabled(True)
    if self.comic_archive.isWritable():
        self.actionWrite_Tags.setEnabled(True)
def updateInfoBox(self):
    """Refresh the info-panel labels for the current archive.

    Shows a cleaned-up base filename, the container type, the page
    count, and a bulleted list of the tag styles present; clears all
    four labels when no archive is loaded.
    """
    ca = self.comic_archive
    if ca is None:
        for lbl in (self.lblFilename, self.lblArchiveType,
                    self.lblTagList, self.lblPageCount):
            lbl.setText("")
        return
    base = os.path.splitext(os.path.basename(ca.path))[0]
    self.lblFilename.setText(FileNameParser().fixSpaces(base, False))
    if ca.isZip():
        kind = "ZIP archive"
    elif ca.isRar():
        kind = "RAR archive"
    elif ca.isFolder():
        kind = "Folder archive"
    else:
        kind = ""
    self.lblArchiveType.setText(kind)
    self.lblPageCount.setText(" ({0} pages)".format(ca.getNumberOfPages()))
    tags = []
    if ca.hasCIX():
        tags.append(u"• ComicRack tags")
    if ca.hasCBI():
        tags.append(u"• ComicBookLover tags")
    self.lblTagList.setText("\n".join(tags))
def setDirtyFlag(self, param1=None, param2=None, param3=None):
    """Mark the loaded archive as having unsaved form edits.

    The ignored optional parameters let this slot be wired directly to
    Qt signals of varying arity. No-op when already dirty.
    """
    if self.dirtyFlag:
        return
    self.dirtyFlag = True
    self.fileSelectionList.setModifiedFlag(True)
    self.updateAppTitle()
def clearDirtyFlag(self):
    """Mark the form as in sync with the archive (nothing unsaved).

    No-op when already clean; otherwise clears the modified marker in
    the file list and refreshes the window title.
    """
    if not self.dirtyFlag:
        return
    self.dirtyFlag = False
    self.fileSelectionList.setModifiedFlag(False)
    self.updateAppTitle()
def connectDirtyFlagSignals(self):
    """Wire every editable widget in the tab form to setDirtyFlag()."""
    # recursively connect the tab form child slots
    self.connectChildDirtyFlagSignals(self.tabWidget)
def connectChildDirtyFlagSignals(self, widget):
    """Recursively connect *widget* and its children to setDirtyFlag().

    Line edits, text edits, combo boxes and check boxes each hook their
    change signal. The page list editor subtree is skipped: it reports
    changes through its own 'modified' signal instead.
    """
    if (isinstance(widget, QtGui.QLineEdit)):
        widget.textEdited.connect(self.setDirtyFlag)
    if (isinstance(widget, QtGui.QTextEdit)):
        widget.textChanged.connect(self.setDirtyFlag)
    if (isinstance(widget, QtGui.QComboBox)):
        widget.currentIndexChanged.connect(self.setDirtyFlag)
    if (isinstance(widget, QtGui.QCheckBox)):
        widget.stateChanged.connect(self.setDirtyFlag)
    # recursive call on chillun
    for child in widget.children():
        if child != self.pageListEditor:
            self.connectChildDirtyFlagSignals(child)
def clearForm(self):
    """Blank the entire form and start a fresh metadata object."""
    # get a minty fresh metadata object
    self.metadata = GenericMetadata()
    if self.comic_archive is not None:
        self.metadata.setDefaultPageList(
            self.comic_archive.getNumberOfPages())
    # recursively clear the tab form
    self.clearChildren(self.tabWidget)
    # clear the dirty flag, since there is nothing in there now to lose
    self.clearDirtyFlag()
    self.pageListEditor.setData(self.comic_archive, self.metadata.pages)
def clearChildren(self, widget):
    """Recursively blank/reset *widget* and all its descendants.

    Text fields are emptied, combo boxes reset to index 0, check boxes
    unchecked, and table widgets stripped of all rows.
    """
    if (isinstance(widget, QtGui.QLineEdit) or
            isinstance(widget, QtGui.QTextEdit)):
        widget.setText("")
    if (isinstance(widget, QtGui.QComboBox)):
        widget.setCurrentIndex(0)
    if (isinstance(widget, QtGui.QCheckBox)):
        widget.setChecked(False)
    if (isinstance(widget, QtGui.QTableWidget)):
        while widget.rowCount() > 0:
            widget.removeRow(0)
    # recursive call on chillun
    for child in widget.children():
        self.clearChildren(child)
def metadataToForm(self):
    """Copy self.metadata into the form widgets.

    Text fields are only written when the metadata value is not None,
    so absent values leave the (cleared) field untouched. Combo boxes
    fall back to edit text when the value is not in their item list.
    """
    # copy the the metadata object into to the form
    # helper func
    def assignText(field, value):
        if value is not None:
            field.setText(unicode(value))
    md = self.metadata
    assignText(self.leSeries, md.series)
    assignText(self.leIssueNum, md.issue)
    assignText(self.leIssueCount, md.issueCount)
    assignText(self.leVolumeNum, md.volume)
    assignText(self.leVolumeCount, md.volumeCount)
    assignText(self.leTitle, md.title)
    assignText(self.lePublisher, md.publisher)
    assignText(self.lePubMonth, md.month)
    assignText(self.lePubYear, md.year)
    assignText(self.lePubDay, md.day)
    assignText(self.leGenre, md.genre)
    assignText(self.leImprint, md.imprint)
    assignText(self.teComments, md.comments)
    assignText(self.teNotes, md.notes)
    assignText(self.leCriticalRating, md.criticalRating)
    assignText(self.leStoryArc, md.storyArc)
    assignText(self.leScanInfo, md.scanInfo)
    assignText(self.leSeriesGroup, md.seriesGroup)
    assignText(self.leAltSeries, md.alternateSeries)
    assignText(self.leAltIssueNum, md.alternateNumber)
    assignText(self.leAltIssueCount, md.alternateCount)
    assignText(self.leWebLink, md.webLink)
    assignText(self.teCharacters, md.characters)
    assignText(self.teTeams, md.teams)
    assignText(self.teLocations, md.locations)
    if md.format is not None and md.format != "":
        i = self.cbFormat.findText(md.format)
        if i == -1:
            self.cbFormat.setEditText(md.format)
        else:
            self.cbFormat.setCurrentIndex(i)
    if md.maturityRating is not None and md.maturityRating != "":
        i = self.cbMaturityRating.findText(md.maturityRating)
        if i == -1:
            self.cbMaturityRating.setEditText(md.maturityRating)
        else:
            self.cbMaturityRating.setCurrentIndex(i)
    if md.language is not None:
        i = self.cbLanguage.findData(md.language)
        self.cbLanguage.setCurrentIndex(i)
    if md.country is not None:
        i = self.cbCountry.findText(md.country)
        self.cbCountry.setCurrentIndex(i)
    if md.manga is not None:
        i = self.cbManga.findData(md.manga)
        self.cbManga.setCurrentIndex(i)
    if md.blackAndWhite is not None and md.blackAndWhite:
        self.cbBW.setChecked(True)
    assignText(self.teTags, utils.listToString(md.tags))
    # !!! Should we clear the credits table or just avoid duplicates?
    while self.twCredits.rowCount() > 0:
        self.twCredits.removeRow(0)
    if md.credits is not None and len(md.credits) != 0:
        # Sorting is disabled while inserting so row indices stay stable.
        self.twCredits.setSortingEnabled(False)
        row = 0
        for credit in md.credits:
            # if the role-person pair already exists, just skip adding it
            # to the list
            if self.isDupeCredit(credit['role'].title(), credit['person']):
                continue
            self.addNewCreditEntry(
                row,
                credit['role'].title(),
                credit['person'],
                (credit['primary'] if 'primary' in credit else False))
            row += 1
        self.twCredits.setSortingEnabled(True)
    self.updateCreditColors()
def addNewCreditEntry(self, row, role, name, primary_flag=False):
    """Insert one credit row into the credits table.

    Column layout: 0 = primary marker, 1 = role, 2 = person. All items
    are selectable/enabled but not directly editable; tooltips echo
    the full text.
    """
    self.twCredits.insertRow(row)
    item_text = role
    item = QtGui.QTableWidgetItem(item_text)
    item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    item.setData(QtCore.Qt.ToolTipRole, item_text)
    self.twCredits.setItem(row, 1, item)
    item_text = name
    item = QtGui.QTableWidgetItem(item_text)
    item.setData(QtCore.Qt.ToolTipRole, item_text)
    item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    self.twCredits.setItem(row, 2, item)
    item = QtGui.QTableWidgetItem("")
    item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
    self.twCredits.setItem(row, 0, item)
    self.updateCreditPrimaryFlag(row, primary_flag)
def isDupeCredit(self, role, name):
    """Return True if the credits table already holds this role/person pair.

    Columns: 1 holds the role, 2 the person (column 0 is the primary
    marker). The manual while-loop index scan was replaced with an
    idiomatic for/range loop; behavior is unchanged.
    """
    for row in range(self.twCredits.rowCount()):
        if (self.twCredits.item(row, 1).text() == role and
                self.twCredits.item(row, 2).text() == name):
            return True
    return False
def formToMetadata(self):
    """Harvest every form widget value back into self.metadata.

    Empty fields become None; numeric fields are converted with int()
    (the Qt validators installed in __init__ keep them numeric).
    """
    # helper func
    def xlate(data, type_str):
        s = u"{0}".format(data).strip()
        if s == "":
            return None
        elif type_str == "str":
            return s
        else:
            return int(s)
    # copy the data from the form into the metadata
    md = self.metadata
    md.series = xlate(self.leSeries.text(), "str")
    md.issue = xlate(self.leIssueNum.text(), "str")
    md.issueCount = xlate(self.leIssueCount.text(), "int")
    md.volume = xlate(self.leVolumeNum.text(), "int")
    md.volumeCount = xlate(self.leVolumeCount.text(), "int")
    md.title = xlate(self.leTitle.text(), "str")
    md.publisher = xlate(self.lePublisher.text(), "str")
    md.month = xlate(self.lePubMonth.text(), "int")
    md.year = xlate(self.lePubYear.text(), "int")
    md.day = xlate(self.lePubDay.text(), "int")
    md.genre = xlate(self.leGenre.text(), "str")
    md.imprint = xlate(self.leImprint.text(), "str")
    md.comments = xlate(self.teComments.toPlainText(), "str")
    md.notes = xlate(self.teNotes.toPlainText(), "str")
    md.criticalRating = xlate(self.leCriticalRating.text(), "int")
    md.maturityRating = xlate(self.cbMaturityRating.currentText(), "str")
    md.storyArc = xlate(self.leStoryArc.text(), "str")
    md.scanInfo = xlate(self.leScanInfo.text(), "str")
    md.seriesGroup = xlate(self.leSeriesGroup.text(), "str")
    md.alternateSeries = xlate(self.leAltSeries.text(), "str")
    md.alternateNumber = xlate(self.leAltIssueNum.text(), "int")
    md.alternateCount = xlate(self.leAltIssueCount.text(), "int")
    md.webLink = xlate(self.leWebLink.text(), "str")
    md.characters = xlate(self.teCharacters.toPlainText(), "str")
    md.teams = xlate(self.teTeams.toPlainText(), "str")
    md.locations = xlate(self.teLocations.toPlainText(), "str")
    md.format = xlate(self.cbFormat.currentText(), "str")
    md.country = xlate(self.cbCountry.currentText(), "str")
    # Language and manga combos store their codes as item data, not text.
    langiso = self.cbLanguage.itemData(
        self.cbLanguage.currentIndex()).toString()
    md.language = xlate(langiso, "str")
    manga_code = self.cbManga.itemData(
        self.cbManga.currentIndex()).toString()
    md.manga = xlate(manga_code, "str")
    # Make a list from the coma delimited tags string
    tmp = xlate(self.teTags.toPlainText(), "str")
    if tmp is not None:
        def striplist(l):
            return([x.strip() for x in l])
        md.tags = striplist(tmp.split(","))
    if (self.cbBW.isChecked()):
        md.blackAndWhite = True
    else:
        md.blackAndWhite = False
    # get the credits from the table
    md.credits = list()
    row = 0
    while row < self.twCredits.rowCount():
        role = u"{0}".format(self.twCredits.item(row, 1).text())
        name = u"{0}".format(self.twCredits.item(row, 2).text())
        # Column 0 holds a non-empty marker when the credit is primary.
        primary_flag = self.twCredits.item(row, 0).text() != ""
        md.addCredit(name, role, bool(primary_flag))
        row += 1
    md.pages = self.pageListEditor.getPageList()
    def useFilename(self):
        """Parse metadata out of the archive's filename and overlay it
        onto the current form contents. No-op when no archive is loaded."""
        if self.comic_archive is not None:
            # copy the form onto metadata object
            self.formToMetadata()
            new_metadata = self.comic_archive.metadataFromFilename(
                self.settings.parse_scan_info)
            if new_metadata is not None:
                # overlay keeps existing values where the filename had none
                self.metadata.overlay(new_metadata)
                self.metadataToForm()
    def selectFolder(self):
        # same as selectFile, but picks a directory instead of files
        self.selectFile(folder_mode=True)
def selectFile(self, folder_mode=False):
dialog = QtGui.QFileDialog(self)
if folder_mode:
dialog.setFileMode(QtGui.QFileDialog.Directory)
else:
dialog.setFileMode(QtGui.QFileDialog.ExistingFiles)
if self.settings.last_opened_folder is not None:
dialog.setDirectory(self.settings.last_opened_folder)
# dialog.setFileMode(QtGui.QFileDialog.Directory)
if not folder_mode:
if platform.system() != "Windows" and utils.which("unrar") is None:
archive_filter = "Comic archive files (*.cbz *.zip)"
else:
archive_filter = "Comic archive files (*.cbz *.zip *.cbr *.rar)"
filters = [
archive_filter,
"Any files (*)"
]
dialog.setNameFilters(filters)
if (dialog.exec_()):
fileList = dialog.selectedFiles()
# if self.dirtyFlagVerification("Open Archive",
# "If you open a new archive now, data in the form will be lost. Are you sure?"):
self.fileSelectionList.addPathList(fileList)
def autoIdentifySearch(self):
if self.comic_archive is None:
QtGui.QMessageBox.warning(
self,
self.tr("Automatic Identify Search"),
self.tr("You need to load a comic first!"))
return
self.queryOnline(autoselect=True)
def queryOnline(self, autoselect=False):
issue_number = unicode(self.leIssueNum.text()).strip()
if autoselect and issue_number == "":
QtGui.QMessageBox.information(
self,
"Automatic Identify Search",
"Can't auto-identify without an issue number (yet!)")
return
if unicode(self.leSeries.text()).strip() != "":
series_name = unicode(self.leSeries.text()).strip()
else:
QtGui.QMessageBox.information(
self,
self.tr("Online Search"),
self.tr("Need to enter a series name to search."))
return
year = str(self.lePubYear.text()).strip()
if year == "":
year = None
issue_count = str(self.leIssueCount.text()).strip()
if issue_count == "":
issue_count = None
cover_index_list = self.metadata.getCoverPageIndexList()
selector = VolumeSelectionWindow(
self,
series_name,
issue_number,
year,
issue_count,
cover_index_list,
self.comic_archive,
self.settings,
autoselect)
title = "Search: '" + series_name + "' - "
selector.setWindowTitle(title + "Select Series")
selector.setModal(True)
selector.exec_()
if selector.result():
# we should now have a volume ID
QtGui.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.WaitCursor))
# copy the form onto metadata object
self.formToMetadata()
try:
comicVine = ComicVineTalker()
new_metadata = comicVine.fetchIssueData(
selector.volume_id, selector.issue_number, self.settings)
except ComicVineTalkerException as e:
QtGui.QApplication.restoreOverrideCursor()
if e.code == ComicVineTalkerException.RateLimit:
QtGui.QMessageBox.critical(
self,
self.tr("Comic Vine Error"),
ComicVineTalker.getRateLimitMessage())
else:
QtGui.QMessageBox.critical(
self,
self.tr("Network Issue"),
self.tr("Could not connect to Comic Vine to get issue details.!"))
else:
QtGui.QApplication.restoreOverrideCursor()
if new_metadata is not None:
if self.settings.apply_cbl_transform_on_cv_import:
new_metadata = CBLTransformer(
new_metadata, self.settings).apply()
if self.settings.clear_form_before_populating_from_cv:
self.clearForm()
self.metadata.overlay(new_metadata)
# Now push the new combined data into the edit controls
self.metadataToForm()
else:
QtGui.QMessageBox.critical(
self, self.tr("Search"), self.tr(
"Could not find an issue {0} for that series".format(
selector.issue_number)))
    def commitMetadata(self):
        """Write the form contents to the current archive in the save
        style, after user confirmation; refreshes UI state afterwards."""
        if (self.metadata is not None and self.comic_archive is not None):
            reply = QtGui.QMessageBox.question(
                self,
                self.tr("Save Tags"),
                self.tr(
                    "Are you sure you wish to save " +
                    MetaDataStyle.name[
                        self.save_data_style] +
                    " tags to this archive?"),
                QtGui.QMessageBox.Yes,
                QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                QtGui.QApplication.setOverrideCursor(
                    QtGui.QCursor(QtCore.Qt.WaitCursor))
                # sync widgets into self.metadata before writing
                self.formToMetadata()
                success = self.comic_archive.writeMetadata(
                    self.metadata, self.save_data_style)
                # refresh the archive's cached has-tags state for both styles
                self.comic_archive.loadCache(
                    [MetaDataStyle.CBI, MetaDataStyle.CIX])
                QtGui.QApplication.restoreOverrideCursor()
                if not success:
                    QtGui.QMessageBox.warning(
                        self,
                        self.tr("Save failed"),
                        self.tr("The tag save operation seemed to fail!"))
                else:
                    self.clearDirtyFlag()
                    self.updateInfoBox()
                    self.updateMenus()
                    #QtGui.QMessageBox.information(self, self.tr("Yeah!"), self.tr("File written."))
                self.fileSelectionList.updateCurrentRow()
        else:
            QtGui.QMessageBox.information(
                self, self.tr("Whoops!"), self.tr("No data to commit!"))
    def setLoadDataStyle(self, s):
        """Slot: the 'read style' combo changed to index *s*; switch
        styles (reloading the archive) or revert if the form is dirty."""
        if self.dirtyFlagVerification(
                "Change Tag Read Style",
                "If you change read tag style now, data in the form will be lost. Are you sure?"):
            # itemData() returns a QVariant; toInt() yields (value, ok_flag)
            self.load_data_style, b = self.cbLoadDataStyle.itemData(s).toInt()
            self.settings.last_selected_load_data_style = self.load_data_style
            self.updateMenus()
            if self.comic_archive is not None:
                self.loadArchive(self.comic_archive)
        else:
            # user canceled: put the combo back WITHOUT re-entering this slot
            self.cbLoadDataStyle.currentIndexChanged.disconnect(
                self.setLoadDataStyle)
            self.adjustLoadStyleCombo()
            self.cbLoadDataStyle.currentIndexChanged.connect(
                self.setLoadDataStyle)
    def setSaveDataStyle(self, s):
        """Slot: the 'modify style' combo changed to index *s*; remember
        the new style and refresh field enablement and menus."""
        # itemData() returns a QVariant; toInt() yields (value, ok_flag)
        self.save_data_style, b = self.cbSaveDataStyle.itemData(s).toInt()
        self.settings.last_selected_save_data_style = self.save_data_style
        self.updateStyleTweaks()
        self.updateMenus()
def updateCreditColors(self):
inactive_color = QtGui.QColor(255, 170, 150)
active_palette = self.leSeries.palette()
active_color = active_palette.color(QtGui.QPalette.Base)
cix_credits = ComicInfoXml().getParseableCredits()
if self.save_data_style == MetaDataStyle.CIX:
# loop over credit table, mark selected rows
r = 0
while r < self.twCredits.rowCount():
if str(self.twCredits.item(r, 1).text()
).lower() not in cix_credits:
self.twCredits.item(
r, 1).setBackgroundColor(inactive_color)
else:
self.twCredits.item(r, 1).setBackgroundColor(active_color)
# turn off entire primary column
self.twCredits.item(r, 0).setBackgroundColor(inactive_color)
r = r + 1
if self.save_data_style == MetaDataStyle.CBI:
# loop over credit table, make all active color
r = 0
while r < self.twCredits.rowCount():
self.twCredits.item(r, 0).setBackgroundColor(active_color)
self.twCredits.item(r, 1).setBackgroundColor(active_color)
r = r + 1
    def updateStyleTweaks(self):
        """Enable/disable form fields according to the save style: CBI and
        CIX each support different subsets of the metadata fields."""
        # depending on the current data style, certain fields are disabled
        inactive_color = QtGui.QColor(255, 170, 150)
        active_palette = self.leSeries.palette()
        inactive_palette1 = self.leSeries.palette()
        inactive_palette1.setColor(QtGui.QPalette.Base, inactive_color)
        inactive_palette2 = self.leSeries.palette()
        inactive_palette3 = self.leSeries.palette()
        inactive_palette3.setColor(QtGui.QPalette.Base, inactive_color)
        # NOTE(review): the line below duplicates the one above -- harmless,
        # but likely unintentional
        inactive_palette3.setColor(QtGui.QPalette.Base, inactive_color)
        # helper func
        def enableWidget(item, enable):
            # NOTE(review): these mutate the shared palettes on every call,
            # re-keying them to this widget's color roles -- presumably
            # intentional so combo/checkbox roles get the inactive color too;
            # confirm before restructuring
            inactive_palette3.setColor(item.backgroundRole(), inactive_color)
            inactive_palette2.setColor(item.backgroundRole(), inactive_color)
            inactive_palette3.setColor(item.foregroundRole(), inactive_color)
            if enable:
                item.setPalette(active_palette)
                item.setAutoFillBackground(False)
                if isinstance(item, QtGui.QCheckBox):
                    item.setEnabled(True)
                elif isinstance(item, QtGui.QComboBox):
                    item.setEnabled(True)
                else:
                    item.setReadOnly(False)
            else:
                item.setAutoFillBackground(True)
                if isinstance(item, QtGui.QCheckBox):
                    item.setPalette(inactive_palette2)
                    item.setEnabled(False)
                elif isinstance(item, QtGui.QComboBox):
                    item.setPalette(inactive_palette3)
                    item.setEnabled(False)
                else:
                    # line edits / text edits stay visible but read-only
                    item.setReadOnly(True)
                    item.setPalette(inactive_palette1)
        # fields only representable in one style or the other
        cbi_only = [self.leVolumeCount, self.cbCountry,
                    self.leCriticalRating, self.teTags]
        cix_only = [
            self.leImprint, self.teNotes, self.cbBW, self.cbManga,
            self.leStoryArc, self.leScanInfo, self.leSeriesGroup,
            self.leAltSeries, self.leAltIssueNum, self.leAltIssueCount,
            self.leWebLink, self.teCharacters, self.teTeams,
            self.teLocations, self.cbMaturityRating, self.cbFormat
        ]
        if self.save_data_style == MetaDataStyle.CIX:
            for item in cix_only:
                enableWidget(item, True)
            for item in cbi_only:
                enableWidget(item, False)
        if self.save_data_style == MetaDataStyle.CBI:
            for item in cbi_only:
                enableWidget(item, True)
            for item in cix_only:
                enableWidget(item, False)
        self.updateCreditColors()
        self.pageListEditor.setMetadataStyle(self.save_data_style)
    def cellDoubleClicked(self, r, c):
        # double-clicking any credit cell opens the editor for that row
        self.editCredit()
    def addCredit(self):
        # open the credit editor in "add" mode
        self.modifyCredits("add")
    def editCredit(self):
        # edit the selected credit, if any row is selected
        if (self.twCredits.currentRow() > -1):
            self.modifyCredits("edit")
def updateCreditPrimaryFlag(self, row, primary):
# if we're clearing a flagm do it and quit
if not primary:
self.twCredits.item(row, 0).setText("")
return
# otherwise, we need to check for, and clear, other primaries with same
# role
role = str(self.twCredits.item(row, 1).text())
r = 0
while r < self.twCredits.rowCount():
if (self.twCredits.item(r, 0).text() != "" and str(
self.twCredits.item(r, 1).text()).lower() == role.lower()):
self.twCredits.item(r, 0).setText("")
r = r + 1
# Now set our new primary
self.twCredits.item(row, 0).setText("Yes")
def modifyCredits(self, action):
if action == "edit":
row = self.twCredits.currentRow()
role = self.twCredits.item(row, 1).text()
name = self.twCredits.item(row, 2).text()
primary = self.twCredits.item(row, 0).text() != ""
else:
role = ""
name = ""
primary = False
editor = CreditEditorWindow(
self, CreditEditorWindow.ModeEdit, role, name, primary)
editor.setModal(True)
editor.exec_()
if editor.result():
new_role, new_name, new_primary = editor.getCredits()
if new_name == name and new_role == role and new_primary == primary:
# nothing has changed, just quit
return
# name and role is the same, but primary flag changed
if new_name == name and new_role == role:
self.updateCreditPrimaryFlag(row, new_primary)
return
# check for dupes
ok_to_mod = True
if self.isDupeCredit(new_role, new_name):
# delete the dupe credit from list
reply = QtGui.QMessageBox.question(
self,
self.tr("Duplicate Credit!"),
self.tr(
"This will create a duplicate credit entry. Would you like to merge the entries, or create a duplicate?"),
self.tr("Merge"),
self.tr("Duplicate"))
if reply == 0:
# merge
if action == "edit":
# just remove the row that would be same
self.twCredits.removeRow(row)
# TODO -- need to find the row of the dupe, and
# possible change the primary flag
ok_to_mod = False
if ok_to_mod:
# modify it
if action == "edit":
self.twCredits.item(row, 1).setText(new_role)
self.twCredits.item(row, 2).setText(new_name)
self.updateCreditPrimaryFlag(row, new_primary)
else:
# add new entry
row = self.twCredits.rowCount()
self.addNewCreditEntry(
row, new_role, new_name, new_primary)
self.updateCreditColors()
self.setDirtyFlag()
def removeCredit(self):
row = self.twCredits.currentRow()
if row != -1:
self.twCredits.removeRow(row)
self.setDirtyFlag()
    def showSettings(self):
        """Open the modal settings dialog."""
        settingswin = SettingsWindow(self, self.settings)
        settingswin.setModal(True)
        settingswin.exec_()
        if settingswin.result():
            # nothing extra to do on accept; dead branch kept as-is
            pass
def setAppPosition(self):
if self.settings.last_main_window_width != 0:
self.move(
self.settings.last_main_window_x,
self.settings.last_main_window_y)
self.resize(
self.settings.last_main_window_width,
self.settings.last_main_window_height)
else:
screen = QtGui.QDesktopWidget().screenGeometry()
size = self.frameGeometry()
self.move((screen.width() - size.width()) / 2,
(screen.height() - size.height()) / 2)
def adjustLoadStyleCombo(self):
# select the current style
if (self.load_data_style == MetaDataStyle.CBI):
self.cbLoadDataStyle.setCurrentIndex(0)
elif (self.load_data_style == MetaDataStyle.CIX):
self.cbLoadDataStyle.setCurrentIndex(1)
def adjustSaveStyleCombo(self):
# select the current style
if (self.save_data_style == MetaDataStyle.CBI):
self.cbSaveDataStyle.setCurrentIndex(0)
elif (self.save_data_style == MetaDataStyle.CIX):
self.cbSaveDataStyle.setCurrentIndex(1)
self.updateStyleTweaks()
def populateComboBoxes(self):
# Add the entries to the tag style combobox
self.cbLoadDataStyle.addItem("ComicBookLover", MetaDataStyle.CBI)
self.cbLoadDataStyle.addItem("ComicRack", MetaDataStyle.CIX)
self.adjustLoadStyleCombo()
self.cbSaveDataStyle.addItem("ComicBookLover", MetaDataStyle.CBI)
self.cbSaveDataStyle.addItem("ComicRack", MetaDataStyle.CIX)
self.adjustSaveStyleCombo()
# Add the entries to the country combobox
self.cbCountry.addItem("", "")
for c in utils.countries:
self.cbCountry.addItem(c[1], c[0])
# Add the entries to the language combobox
self.cbLanguage.addItem("", "")
lang_dict = utils.getLanguageDict()
for key in sorted(lang_dict, cmp=locale.strcoll, key=lang_dict.get):
self.cbLanguage.addItem(lang_dict[key], key)
# Add the entries to the manga combobox
self.cbManga.addItem("", "")
self.cbManga.addItem("Yes", "Yes")
self.cbManga.addItem("Yes (Right to Left)", "YesAndRightToLeft")
self.cbManga.addItem("No", "No")
# Add the entries to the maturity combobox
self.cbMaturityRating.addItem("", "")
self.cbMaturityRating.addItem("Everyone", "")
self.cbMaturityRating.addItem("G", "")
self.cbMaturityRating.addItem("Early Childhood", "")
self.cbMaturityRating.addItem("Everyone 10+", "")
self.cbMaturityRating.addItem("PG", "")
self.cbMaturityRating.addItem("Kids to Adults", "")
self.cbMaturityRating.addItem("Teen", "")
self.cbMaturityRating.addItem("MA15+", "")
self.cbMaturityRating.addItem("Mature 17+", "")
self.cbMaturityRating.addItem("R18+", "")
self.cbMaturityRating.addItem("X18+", "")
self.cbMaturityRating.addItem("Adults Only 18+", "")
self.cbMaturityRating.addItem("Rating Pending", "")
# Add entries to the format combobox
self.cbFormat.addItem("")
self.cbFormat.addItem(".1")
self.cbFormat.addItem("-1")
self.cbFormat.addItem("1 Shot")
self.cbFormat.addItem("1/2")
self.cbFormat.addItem("1-Shot")
self.cbFormat.addItem("Annotation")
self.cbFormat.addItem("Annotations")
self.cbFormat.addItem("Annual")
self.cbFormat.addItem("Anthology")
self.cbFormat.addItem("B&W")
self.cbFormat.addItem("B/W")
self.cbFormat.addItem("B&&W")
self.cbFormat.addItem("Black & White")
self.cbFormat.addItem("Box Set")
self.cbFormat.addItem("Box-Set")
self.cbFormat.addItem("Crossover")
self.cbFormat.addItem("Director's Cut")
self.cbFormat.addItem("Epilogue")
self.cbFormat.addItem("Event")
self.cbFormat.addItem("FCBD")
self.cbFormat.addItem("Flyer")
self.cbFormat.addItem("Giant")
self.cbFormat.addItem("Giant Size")
self.cbFormat.addItem("Giant-Size")
self.cbFormat.addItem("Graphic Novel")
self.cbFormat.addItem("Hardcover")
self.cbFormat.addItem("Hard-Cover")
self.cbFormat.addItem("King")
self.cbFormat.addItem("King Size")
self.cbFormat.addItem("King-Size")
self.cbFormat.addItem("Limited Series")
self.cbFormat.addItem("Magazine")
self.cbFormat.addItem("-1")
self.cbFormat.addItem("NSFW")
self.cbFormat.addItem("One Shot")
self.cbFormat.addItem("One-Shot")
self.cbFormat.addItem("Point 1")
self.cbFormat.addItem("Preview")
self.cbFormat.addItem("Prologue")
self.cbFormat.addItem("Reference")
self.cbFormat.addItem("Review")
self.cbFormat.addItem("Reviewed")
self.cbFormat.addItem("Scanlation")
self.cbFormat.addItem("Script")
self.cbFormat.addItem("Series")
self.cbFormat.addItem("Sketch")
self.cbFormat.addItem("Special")
self.cbFormat.addItem("TPB")
self.cbFormat.addItem("Trade Paper Back")
self.cbFormat.addItem("WebComic")
self.cbFormat.addItem("Web Comic")
self.cbFormat.addItem("Year 1")
self.cbFormat.addItem("Year One")
    def removeAuto(self):
        # remove tags of the currently selected "modify" style
        self.removeTags(self.save_data_style)
    def removeCBLTags(self):
        # remove ComicBookLover tags
        self.removeTags(MetaDataStyle.CBI)
    def removeCRTags(self):
        # remove ComicRack tags
        self.removeTags(MetaDataStyle.CIX)
    def removeTags(self, style):
        """Remove tags of *style* from every selected archive that has
        them, with confirmation, a progress dialog, and a summary."""
        # remove the indicated tags from the archive
        ca_list = self.fileSelectionList.getSelectedArchiveList()
        has_md_count = 0
        for ca in ca_list:
            if ca.hasMetadata(style):
                has_md_count += 1
        if has_md_count == 0:
            QtGui.QMessageBox.information(
                self, self.tr("Remove Tags"), self.tr(
                    "No archives with {0} tags selected!".format(
                        MetaDataStyle.name[style])))
            return
        if has_md_count != 0 and not self.dirtyFlagVerification(
                "Remove Tags",
                "If you remove tags now, unsaved data in the form will be lost. Are you sure?"):
            return
        if has_md_count != 0:
            reply = QtGui.QMessageBox.question(
                self,
                self.tr("Remove Tags"),
                self.tr(
                    "Are you sure you wish to remove the {0} tags from {1} archive(s)?".format(
                        MetaDataStyle.name[style],
                        has_md_count)),
                QtGui.QMessageBox.Yes,
                QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                progdialog = QtGui.QProgressDialog(
                    "", "Cancel", 0, has_md_count, self)
                progdialog.setWindowTitle("Removing Tags")
                progdialog.setWindowModality(QtCore.Qt.ApplicationModal)
                progdialog.show()
                prog_idx = 0
                failed_list = []
                success_count = 0
                for ca in ca_list:
                    if ca.hasMetadata(style):
                        # processEvents keeps the modal progress dialog
                        # responsive in this single-threaded loop
                        QtCore.QCoreApplication.processEvents()
                        if progdialog.wasCanceled():
                            break
                        progdialog.setValue(prog_idx)
                        prog_idx += 1
                        progdialog.setLabelText(ca.path)
                        centerWindowOnParent(progdialog)
                        QtCore.QCoreApplication.processEvents()
                    if ca.hasMetadata(style) and ca.isWritable():
                        if not ca.removeMetadata(style):
                            failed_list.append(ca.path)
                        else:
                            success_count += 1
                        # refresh the archive's cached has-tags state
                        ca.loadCache([MetaDataStyle.CBI, MetaDataStyle.CIX])
                progdialog.close()
                self.fileSelectionList.updateSelectedRows()
                self.updateInfoBox()
                self.updateMenus()
                summary = u"Successfully removed tags in {0} archive(s).".format(
                    success_count)
                if len(failed_list) > 0:
                    summary += u"\n\nThe remove operation failed in the following {0} archive(s):\n".format(
                        len(failed_list))
                    for f in failed_list:
                        summary += u"\t{0}\n".format(f)
                dlg = LogWindow(self)
                dlg.setText(summary)
                dlg.setWindowTitle("Tag Remove Summary")
                # dlg.adjustSize()
                dlg.exec_()
    def copyTags(self):
        """Copy tags from the read style into the modify style for all
        selected archives, with confirmation, progress, and a summary."""
        # copy the indicated tags in the archive
        ca_list = self.fileSelectionList.getSelectedArchiveList()
        has_src_count = 0
        src_style = self.load_data_style
        dest_style = self.save_data_style
        if src_style == dest_style:
            QtGui.QMessageBox.information(
                self,
                self.tr("Copy Tags"),
                self.tr(
                    "Can't copy tag style onto itself." +
                    " Read style and modify style must be different."))
            return
        for ca in ca_list:
            if ca.hasMetadata(src_style):
                has_src_count += 1
        if has_src_count == 0:
            QtGui.QMessageBox.information(
                self, self.tr("Copy Tags"), self.tr(
                    "No archives with {0} tags selected!".format(
                        MetaDataStyle.name[src_style])))
            return
        if has_src_count != 0 and not self.dirtyFlagVerification(
                "Copy Tags",
                "If you copy tags now, unsaved data in the form may be lost. Are you sure?"):
            return
        if has_src_count != 0:
            reply = QtGui.QMessageBox.question(
                self,
                self.tr("Copy Tags"),
                self.tr(
                    "Are you sure you wish to copy the {0} tags to {1} tags in {2} archive(s)?".format(
                        MetaDataStyle.name[src_style],
                        MetaDataStyle.name[dest_style],
                        has_src_count)),
                QtGui.QMessageBox.Yes,
                QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                progdialog = QtGui.QProgressDialog(
                    "", "Cancel", 0, has_src_count, self)
                progdialog.setWindowTitle("Copying Tags")
                progdialog.setWindowModality(QtCore.Qt.ApplicationModal)
                progdialog.show()
                prog_idx = 0
                failed_list = []
                success_count = 0
                for ca in ca_list:
                    if ca.hasMetadata(src_style):
                        # processEvents keeps the dialog painting/cancelable
                        QtCore.QCoreApplication.processEvents()
                        if progdialog.wasCanceled():
                            break
                        progdialog.setValue(prog_idx)
                        prog_idx += 1
                        progdialog.setLabelText(ca.path)
                        centerWindowOnParent(progdialog)
                        QtCore.QCoreApplication.processEvents()
                    if ca.hasMetadata(src_style) and ca.isWritable():
                        md = ca.readMetadata(src_style)
                        # optional CBL transform when writing CBI tags
                        if dest_style == MetaDataStyle.CBI and self.settings.apply_cbl_transform_on_bulk_operation:
                            md = CBLTransformer(md, self.settings).apply()
                        if not ca.writeMetadata(md, dest_style):
                            failed_list.append(ca.path)
                        else:
                            success_count += 1
                        ca.loadCache([MetaDataStyle.CBI, MetaDataStyle.CIX])
                progdialog.close()
                self.fileSelectionList.updateSelectedRows()
                self.updateInfoBox()
                self.updateMenus()
                summary = u"Successfully copied tags in {0} archive(s).".format(
                    success_count)
                if len(failed_list) > 0:
                    summary += u"\n\nThe copy operation failed in the following {0} archive(s):\n".format(
                        len(failed_list))
                    for f in failed_list:
                        summary += u"\t{0}\n".format(f)
                dlg = LogWindow(self)
                dlg.setText(summary)
                dlg.setWindowTitle("Tag Copy Summary")
                dlg.exec_()
def actualIssueDataFetch(self, match):
# now get the particular issue data
cv_md = None
QtGui.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.WaitCursor))
try:
comicVine = ComicVineTalker()
comicVine.wait_for_rate_limit = self.settings.wait_and_retry_on_rate_limit
cv_md = comicVine.fetchIssueData(
match['volume_id'], match['issue_number'], self.settings)
except ComicVineTalkerException:
print("Network error while getting issue details. Save aborted")
if cv_md is not None:
if self.settings.apply_cbl_transform_on_cv_import:
cv_md = CBLTransformer(cv_md, self.settings).apply()
QtGui.QApplication.restoreOverrideCursor()
return cv_md
    def autoTagLog(self, text):
        """Append *text* to the CLI log and, when the auto-tag progress
        window is open, to its text pane as well."""
        IssueIdentifier.defaultWriteOutput(text)
        if self.atprogdialog is not None:
            self.atprogdialog.textEdit.insertPlainText(text)
            self.atprogdialog.textEdit.ensureCursorVisible()
            # repeated processEvents presumably forces repaints during the
            # long single-threaded auto-tag loop -- confirm before reducing
            QtCore.QCoreApplication.processEvents()
            QtCore.QCoreApplication.processEvents()
            QtCore.QCoreApplication.processEvents()
    def identifyAndTagSingleArchive(self, ca, match_results, dlg):
        """Identify one archive online and write tags on a solid match.

        ca            -- the comic archive to identify and tag
        match_results -- accumulator object; outcome lists are appended to
        dlg           -- the AutoTagStartWindow holding the user's options
        Returns (success, match_results).
        """
        success = False
        ii = IssueIdentifier(ca, self.settings)
        # read in metadata, and parse file name if not there
        md = ca.readMetadata(self.save_data_style)
        if md.isEmpty:
            md = ca.metadataFromFilename(self.settings.parse_scan_info)
            if dlg.ignoreLeadingDigitsInFilename and md.series is not None:
                # remove all leading numbers
                md.series = re.sub("([\d.]*)(.*)", "\\2", md.series)
        # use the dialog specified search string
        if dlg.searchString is not None:
            md.series = dlg.searchString
        if md is None or md.isEmpty:
            print("No metadata given to search online with!")
            return False, match_results
        if dlg.dontUseYear:
            md.year = None
        if dlg.assumeIssueOne and (md.issue is None or md.issue == ""):
            md.issue = "1"
        ii.setAdditionalMetadata(md)
        ii.onlyUseAdditionalMetaData = True
        ii.waitAndRetryOnRateLimit = dlg.waitAndRetryOnRateLimit
        ii.setOutputFunction(self.autoTagLog)
        ii.cover_page_index = md.getCoverPageIndexList()[0]
        ii.setCoverURLCallback(self.atprogdialog.setTestImage)
        ii.setNameLengthDeltaThreshold(dlg.nameLengthMatchTolerance)
        matches = ii.search()
        result = ii.search_result
        # translate the identifier's result code into outcome flags
        found_match = False
        choices = False
        low_confidence = False
        no_match = False  # NOTE(review): initialized but never set/read
        if result == ii.ResultNoMatches:
            pass
        elif result == ii.ResultFoundMatchButBadCoverScore:
            low_confidence = True
            found_match = True
        elif result == ii.ResultFoundMatchButNotFirstPage:
            found_match = True
        elif result == ii.ResultMultipleMatchesWithBadImageScores:
            low_confidence = True
            choices = True
        elif result == ii.ResultOneGoodMatch:
            found_match = True
        elif result == ii.ResultMultipleGoodMatches:
            choices = True
        if choices:
            if low_confidence:
                self.autoTagLog(
                    "Online search: Multiple low-confidence matches. Save aborted\n")
                match_results.lowConfidenceMatches.append(
                    MultipleMatch(ca, matches))
            else:
                self.autoTagLog(
                    "Online search: Multiple matches. Save aborted\n")
                match_results.multipleMatches.append(
                    MultipleMatch(ca, matches))
        elif low_confidence and not dlg.autoSaveOnLow:
            self.autoTagLog(
                "Online search: Low confidence match. Save aborted\n")
            match_results.lowConfidenceMatches.append(
                MultipleMatch(ca, matches))
        elif not found_match:
            self.autoTagLog("Online search: No match found. Save aborted\n")
            match_results.noMatches.append(ca.path)
        else:
            # a single match!
            if low_confidence:
                self.autoTagLog(
                    "Online search: Low confidence match, but saving anyways, as indicated...\n")
            # now get the particular issue data
            cv_md = self.actualIssueDataFetch(matches[0])
            if cv_md is None:
                match_results.fetchDataFailures.append(ca.path)
            if cv_md is not None:
                # overlay the fetched data onto what we already had
                md.overlay(cv_md)
                if not ca.writeMetadata(md, self.save_data_style):
                    match_results.writeFailures.append(ca.path)
                    self.autoTagLog("Save failed ;-(\n")
                else:
                    match_results.goodMatches.append(ca.path)
                    success = True
                    self.autoTagLog("Save complete!\n")
                # refresh the archive's cached has-tags state
                ca.loadCache([MetaDataStyle.CBI, MetaDataStyle.CIX])
        return success, match_results
    def autoTag(self):
        """Batch auto-tag every selected archive: show the options dialog,
        identify/tag each archive with a progress window, then present a
        summary and optionally a manual-match dialog for ambiguous ones."""
        ca_list = self.fileSelectionList.getSelectedArchiveList()
        style = self.save_data_style
        if len(ca_list) == 0:
            QtGui.QMessageBox.information(
                self, self.tr("Auto-Tag"), self.tr("No archives selected!"))
            return
        if not self.dirtyFlagVerification(
                "Auto-Tag",
                "If you auto-tag now, unsaved data in the form will be lost. Are you sure?"):
            return
        atstartdlg = AutoTagStartWindow(
            self,
            self.settings,
            self.tr(
                "You have selected {0} archive(s) to automatically identify and write {1} tags to.\n\n".format(
                    len(ca_list),
                    MetaDataStyle.name[style]) +
                "Please choose options below, and select OK to Auto-Tag.\n"))
        atstartdlg.adjustSize()
        atstartdlg.setModal(True)
        if not atstartdlg.exec_():
            return
        self.atprogdialog = AutoTagProgressWindow(self)
        self.atprogdialog.setModal(True)
        self.atprogdialog.show()
        self.atprogdialog.progressBar.setMaximum(len(ca_list))
        self.atprogdialog.setWindowTitle("Auto-Tagging")
        self.autoTagLog(
            u"==========================================================================\n")
        self.autoTagLog(
            u"Auto-Tagging Started for {0} items\n".format(len(ca_list)))
        prog_idx = 0
        match_results = OnlineMatchResults()
        archives_to_remove = []
        for ca in ca_list:
            self.autoTagLog(
                u"==========================================================================\n")
            self.autoTagLog(
                u"Auto-Tagging {0} of {1}\n".format(prog_idx + 1, len(ca_list)))
            self.autoTagLog(u"{0}\n".format(ca.path))
            cover_idx = ca.readMetadata(style).getCoverPageIndexList()[0]
            image_data = ca.getPage(cover_idx)
            self.atprogdialog.setArchiveImage(image_data)
            self.atprogdialog.setTestImage(None)
            # keep the progress window responsive; user may hit cancel
            QtCore.QCoreApplication.processEvents()
            if self.atprogdialog.isdone:
                break
            self.atprogdialog.progressBar.setValue(prog_idx)
            prog_idx += 1
            self.atprogdialog.label.setText(ca.path)
            centerWindowOnParent(self.atprogdialog)
            QtCore.QCoreApplication.processEvents()
            if ca.isWritable():
                success, match_results = self.identifyAndTagSingleArchive(
                    ca, match_results, atstartdlg)
                if success and atstartdlg.removeAfterSuccess:
                    archives_to_remove.append(ca)
        self.atprogdialog.close()
        if atstartdlg.removeAfterSuccess:
            self.fileSelectionList.removeArchiveList(archives_to_remove)
        self.fileSelectionList.updateSelectedRows()
        self.loadArchive(self.fileSelectionList.getCurrentArchive())
        self.atprogdialog = None
        # build the human-readable results summary
        summary = u""
        summary += u"Successfully tagged archives: {0}\n".format(
            len(match_results.goodMatches))
        if len(match_results.multipleMatches) > 0:
            summary += u"Archives with multiple matches: {0}\n".format(
                len(match_results.multipleMatches))
        if len(match_results.lowConfidenceMatches) > 0:
            summary += u"Archives with one or more low-confidence matches: {0}\n".format(
                len(match_results.lowConfidenceMatches))
        if len(match_results.noMatches) > 0:
            summary += u"Archives with no matches: {0}\n".format(
                len(match_results.noMatches))
        if len(match_results.fetchDataFailures) > 0:
            summary += u"Archives that failed due to data fetch errors: {0}\n".format(
                len(match_results.fetchDataFailures))
        if len(match_results.writeFailures) > 0:
            summary += u"Archives that failed due to file writing errors: {0}\n".format(
                len(match_results.writeFailures))
        self.autoTagLog(summary)
        sum_selectable = len(
            match_results.multipleMatches) + len(match_results.lowConfidenceMatches)
        if sum_selectable > 0:
            # offer manual resolution for ambiguous/low-confidence archives
            summary += u"\n\nDo you want to manually select the ones with multiple matches and/or low-confidence matches now?"
            reply = QtGui.QMessageBox.question(
                self,
                self.tr(u"Auto-Tag Summary"),
                self.tr(summary),
                QtGui.QMessageBox.Yes,
                QtGui.QMessageBox.No)
            match_results.multipleMatches.extend(
                match_results.lowConfidenceMatches)
            if reply == QtGui.QMessageBox.Yes:
                matchdlg = AutoTagMatchWindow(
                    self,
                    match_results.multipleMatches,
                    style,
                    self.actualIssueDataFetch)
                matchdlg.setModal(True)
                matchdlg.exec_()
                self.fileSelectionList.updateSelectedRows()
                self.loadArchive(self.fileSelectionList.getCurrentArchive())
        else:
            QtGui.QMessageBox.information(
                self, self.tr("Auto-Tag Summary"), self.tr(summary))
def dirtyFlagVerification(self, title, desc):
if self.dirtyFlag:
reply = QtGui.QMessageBox.question(
self,
self.tr(title),
self.tr(desc),
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if reply != QtGui.QMessageBox.Yes:
return False
return True
    def closeEvent(self, event):
        """Qt close handler: confirm when the form is dirty, then persist
        window geometry and settings before allowing the app to exit."""
        if self.dirtyFlagVerification(
                "Exit " +
                self.appName,
                "If you quit now, data in the form will be lost. Are you sure?"):
            # remember geometry and layout for the next launch
            appsize = self.size()
            self.settings.last_main_window_width = appsize.width()
            self.settings.last_main_window_height = appsize.height()
            self.settings.last_main_window_x = self.x()
            self.settings.last_main_window_y = self.y()
            self.settings.last_form_side_width = self.splitter.sizes()[0]
            self.settings.last_list_side_width = self.splitter.sizes()[1]
            self.settings.last_filelist_sorted_column, self.settings.last_filelist_sorted_order = self.fileSelectionList.getSorting()
            self.settings.save()
            event.accept()
        else:
            # user canceled: veto the close
            event.ignore()
    def showPageBrowser(self):
        """Open the page browser window for the current metadata; only one
        instance is kept at a time."""
        if self.page_browser is None:
            self.page_browser = PageBrowserWindow(self, self.metadata)
            if self.comic_archive is not None:
                self.page_browser.setComicArchive(self.comic_archive)
            self.page_browser.finished.connect(self.pageBrowserClosed)
    def pageBrowserClosed(self):
        # window closed: drop our reference so it can be re-created
        self.page_browser = None
    def viewRawCRTags(self):
        """Show the raw ComicRack (CIX) tag data from the archive, if any."""
        if self.comic_archive is not None and self.comic_archive.hasCIX():
            dlg = LogWindow(self)
            dlg.setText(self.comic_archive.readRawCIX())
            dlg.setWindowTitle("Raw ComicRack Tag View")
            dlg.exec_()
    def viewRawCBLTags(self):
        """Show the raw ComicBookLover (CBI) tag data, pretty-printed as
        parsed JSON."""
        if self.comic_archive is not None and self.comic_archive.hasCBI():
            dlg = LogWindow(self)
            text = pprint.pformat(
                json.loads(self.comic_archive.readRawCBI()), indent=4)
            dlg.setText(text)
            dlg.setWindowTitle("Raw ComicBookLover Tag View")
            dlg.exec_()
    def showWiki(self):
        # open the project wiki in the default browser
        webbrowser.open("http://code.google.com/p/comictagger/wiki/Home?tm=6")
    def reportBug(self):
        # open the project issue tracker
        webbrowser.open("http://code.google.com/p/comictagger/issues/list")
    def showForum(self):
        # open the user forum
        webbrowser.open("http://comictagger.forumotion.com/")
def frontCoverChanged(self, int):
self.metadata.pages = self.pageListEditor.getPageList()
self.updateCoverImage()
    def pageListOrderChanged(self):
        # keep the metadata's page list in sync with the editor's ordering
        self.metadata.pages = self.pageListEditor.getPageList()
    def applyCBLTransform(self):
        """Run the ComicBookLover transform over the current form contents
        and push the result back into the widgets."""
        self.formToMetadata()
        self.metadata = CBLTransformer(self.metadata, self.settings).apply()
        self.metadataToForm()
    def renameArchive(self):
        """Open the rename dialog for the selected archives (names are
        derived from the read-style tags); reload afterwards."""
        ca_list = self.fileSelectionList.getSelectedArchiveList()
        if len(ca_list) == 0:
            QtGui.QMessageBox.information(
                self, self.tr("Rename"), self.tr("No archives selected!"))
            return
        if self.dirtyFlagVerification(
                "File Rename",
                "If you rename files now, unsaved data in the form will be lost. Are you sure?"):
            dlg = RenameWindow(
                self, ca_list, self.load_data_style, self.settings)
            dlg.setModal(True)
            if dlg.exec_():
                # paths changed on disk; refresh list and current archive
                self.fileSelectionList.updateSelectedRows()
                self.loadArchive(self.comic_archive)
    def fileListSelectionChanged(self, qvarFI):
        # qvarFI is a QVariant wrapping the selected file item; unwrap it
        # and load its comic archive
        fi = qvarFI.toPyObject()
        self.loadArchive(fi.ca)
    def loadArchive(self, comic_archive):
        """Make *comic_archive* the current archive: clear the form,
        remember its folder, and load its read-style metadata."""
        self.comic_archive = None
        self.clearForm()
        self.settings.last_opened_folder = os.path.abspath(
            os.path.split(comic_archive.path)[0])
        self.comic_archive = comic_archive
        self.metadata = self.comic_archive.readMetadata(self.load_data_style)
        if self.metadata is None:
            # archive has no tags of this style; start from empty metadata
            self.metadata = GenericMetadata()
        self.actualLoadCurrentArchive()
def fileListCleared(self):
self.resetApp()
def splitterMovedEvent(self, w1, w2):
scrollbar_w = 0
if self.scrollArea.verticalScrollBar().isVisible():
scrollbar_w = self.scrollArea.verticalScrollBar().width()
new_w = self.scrollArea.width() - scrollbar_w - 5
self.scrollAreaWidgetContents.resize(
new_w, self.scrollAreaWidgetContents.height())
def resizeEvent(self, ev):
    # Reuse the splitter handler to re-fit the form after a window resize;
    # its pane-size arguments are ignored, so zeros are fine.
    self.splitterMovedEvent(0, 0)
def tabChanged(self, idx):
    """Re-fit the form when the first tab becomes the active one."""
    if idx != 0:
        return
    self.splitterMovedEvent(0, 0)
def checkLatestVersionOnline(self):
    """Start an asynchronous check for a newer ComicTagger release."""
    self.versionChecker = VersionChecker()
    # versionCheckComplete is invoked with the version string once the
    # request finishes.
    self.versionChecker.versionRequestComplete.connect(
        self.versionCheckComplete)
    self.versionChecker.asyncGetLatestVersion(
        self.settings.install_id, self.settings.send_usage_stats)
def versionCheckComplete(self, new_version):
    """Prompt to upgrade unless the user muted this particular version."""
    if (new_version != self.version and
            new_version != self.settings.dont_notify_about_this_version):
        website = "http://code.google.com/p/comictagger"
        # The dialog returns the state of its "don't tell me again"
        # checkbox.
        checked = OptionalMessageDialog.msg(
            self,
            "New version available!",
            "New version ({0}) available!<br>(You are currently running {1})<br><br>".format(
                new_version,
                self.version) +
            "Visit <a href='{0}'>{0}</a> for more info.<br><br>".format(website),
            QtCore.Qt.Unchecked,
            "Don't tell me about this version again")
        if checked:
            # Remember the muted version so we don't nag about it again.
            self.settings.dont_notify_about_this_version = new_version
def onIncomingSocketConnection(self):
    """Handle a local-socket connection from another app instance.

    The peer may send a pickled list of file paths; if so, they are added
    to our file list.  Either way this window is raised to the front.
    """
    # accept connection from other instance.
    # read in the file list if they're giving it,
    # and add to our own list
    localSocket = self.socketServer.nextPendingConnection()
    if localSocket.waitForReadyRead(3000):
        byteArray = localSocket.readAll()
        if len(byteArray) > 0:
            # NOTE(review): pickle.loads on socket data can execute
            # arbitrary code if a hostile local process connects;
            # presumably only other ComicTagger instances connect here --
            # confirm before trusting this channel.
            obj = pickle.loads(byteArray)
            localSocket.disconnectFromServer()
            if isinstance(obj, list):
                self.fileSelectionList.addPathList(obj)
    else:
        # print(localSocket.errorString().toLatin1())
        pass
    self.bringToTop()
def bringToTop(self):
    """Raise and focus this window, using platform-specific tricks."""
    if platform.system() == "Windows":
        self.showNormal()
        self.raise_()
        self.activateWindow()
        try:
            # pywin32 is optional; if unavailable the generic calls above
            # have already done their best.
            import win32con
            import win32gui
            hwnd = self.effectiveWinId()
            rect = win32gui.GetWindowRect(hwnd)
            x = rect[0]
            y = rect[1]
            w = rect[2] - x
            h = rect[3] - y
            # mark it "always on top", just for a moment, to force it to
            # the top
            win32gui.SetWindowPos(
                hwnd, win32con.HWND_TOPMOST, x, y, w, h, 0)
            win32gui.SetWindowPos(
                hwnd, win32con.HWND_NOTOPMOST, x, y, w, h, 0)
        except Exception as e:
            print "Whoops", e
    elif platform.system() == "Darwin":
        self.raise_()
        self.showNormal()
        self.activateWindow()
    else:
        # X11: temporarily bypass the window manager so the raise isn't
        # ignored, then restore the original flags and re-show.
        flags = self.windowFlags()
        self.setWindowFlags(
            flags | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.X11BypassWindowManagerHint)
        QtCore.QCoreApplication.processEvents()
        # self.show()
        self.setWindowFlags(flags)
        self.show()
|
src | DFFileExportAndUploadManager | # Copyright (c) 2022 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import json
import threading
from json import JSONDecodeError
from typing import Any, Callable, Dict, List, Optional, Union
from cura.CuraApplication import CuraApplication
from PyQt6.QtCore import QUrl
from PyQt6.QtGui import QDesktopServices
from PyQt6.QtNetwork import QNetworkReply
from UM.FileHandler.FileHandler import FileHandler
from UM.Logger import Logger
from UM.Message import Message
from UM.Scene.SceneNode import SceneNode
from .BackwardsCompatibleMessage import getBackwardsCompatibleMessage
from .DFLibraryFileUploadRequest import DFLibraryFileUploadRequest
from .DFLibraryFileUploadResponse import DFLibraryFileUploadResponse
from .DFPrintJobUploadRequest import DFPrintJobUploadRequest
from .DFPrintJobUploadResponse import DFPrintJobUploadResponse
from .DigitalFactoryApiClient import DigitalFactoryApiClient
from .ExportFileJob import ExportFileJob
class DFFileExportAndUploadManager:
    """
    Class responsible for exporting the scene and uploading the exported data to the Digital Factory Library. Since 3mf
    and UFP files may need to be uploaded at the same time, this class keeps a single progress and success message for
    both files and updates those messages according to the progress of both the file job uploads.
    """

    def __init__(
        self,
        file_handlers: Dict[str, FileHandler],
        nodes: List[SceneNode],
        library_project_id: str,
        library_project_name: str,
        file_name: str,
        formats: List[str],
        on_upload_error: Callable[[], Any],
        on_upload_success: Callable[[], Any],
        on_upload_finished: Callable[[], Any],
        on_upload_progress: Callable[[int], Any],
    ) -> None:
        """
        :param file_handlers: Writers per format key ("3mf", "ufp").
        :param nodes: Scene nodes to export.
        :param library_project_id: Target Digital Library project id.
        :param library_project_name: Human-readable project name (messages).
        :param file_name: Base file name, without extension.
        :param formats: Which of the supported formats to export/upload.
        :param on_upload_error/success/finished/progress: Parent callbacks.
        """
        self._file_handlers: Dict[str, FileHandler] = file_handlers
        self._nodes: List[SceneNode] = nodes
        self._library_project_id: str = library_project_id
        self._library_project_name: str = library_project_name
        self._file_name: str = file_name
        self._upload_jobs: List[ExportFileJob] = []
        self._formats: List[str] = formats
        self._api = DigitalFactoryApiClient(
            application=CuraApplication.getInstance(),
            on_error=lambda error: Logger.log("e", str(error)),
        )
        # The 3MF file id returned by the API; used as source_file_id when
        # uploading the UFP print file.
        self._source_file_id: Optional[str] = None
        # Functions of the parent class that should be called based on the upload process output
        self._on_upload_error = on_upload_error
        self._on_upload_success = on_upload_success
        self._on_upload_finished = on_upload_finished
        self._on_upload_progress = on_upload_progress
        # Lock used for updating the progress message (since the progress is changed by two parallel upload jobs) or
        # show the success message (once both upload jobs are done)
        self._message_lock = threading.Lock()
        self._file_upload_job_metadata: Dict[
            str, Dict[str, Any]
        ] = self.initializeFileUploadJobMetadata()
        self.progress_message = Message(
            title="Uploading...",
            text="Uploading files to '{}'".format(self._library_project_name),
            progress=-1,
            lifetime=0,
            dismissable=False,
            use_inactivity_timer=False,
        )
        self._generic_success_message = getBackwardsCompatibleMessage(
            text="Your {} uploaded to '{}'.".format(
                "file was"
                if len(self._file_upload_job_metadata) <= 1
                else "files were",
                self._library_project_name,
            ),
            title="Upload successful",
            lifetime=30,
            message_type_str="POSITIVE",
        )
        self._generic_success_message.addAction(
            "open_df_project",
            "Open project",
            "open-folder",
            "Open the project containing the file in Digital Library",
        )
        self._generic_success_message.actionTriggered.connect(
            self._onMessageActionTriggered
        )

    def _onCuraProjectFileExported(self, job: ExportFileJob) -> None:
        """Handler for when the DF Library workspace file (3MF) has been created locally.
        It can now be sent over the Digital Factory API.
        """
        if not job.getOutput():
            self._onJobExportError(job.getFileName())
            return
        self._file_upload_job_metadata[job.getFileName()][
            "export_job_output"
        ] = job.getOutput()
        request = DFLibraryFileUploadRequest(
            content_type=job.getMimeType(),
            file_name=job.getFileName(),
            file_size=len(job.getOutput()),
            library_project_id=self._library_project_id,
        )
        self._api.requestUpload3MF(
            request,
            on_finished=self._uploadFileData,
            on_error=self._onRequestUploadCuraProjectFileFailed,
        )

    def _onPrintFileExported(self, job: ExportFileJob) -> None:
        """Handler for when the DF Library print job file (UFP) has been created locally.
        It can now be sent over the Digital Factory API.
        """
        if not job.getOutput():
            self._onJobExportError(job.getFileName())
            return
        self._file_upload_job_metadata[job.getFileName()][
            "export_job_output"
        ] = job.getOutput()
        request = DFPrintJobUploadRequest(
            content_type=job.getMimeType(),
            job_name=job.getFileName(),
            file_size=len(job.getOutput()),
            library_project_id=self._library_project_id,
            source_file_id=self._source_file_id,
        )
        self._api.requestUploadUFP(
            request,
            on_finished=self._uploadFileData,
            on_error=self._onRequestUploadPrintFileFailed,
        )

    def _uploadFileData(
        self,
        file_upload_response: Union[
            DFLibraryFileUploadResponse, DFPrintJobUploadResponse
        ],
    ) -> None:
        """Uploads the exported file data after the file or print job upload has been registered at the Digital Factory
        Library API.
        :param file_upload_response: The response received from the Digital Factory Library API.
        """
        if isinstance(file_upload_response, DFLibraryFileUploadResponse):
            file_name = file_upload_response.file_name
            # store the `file_id` so it can be as `source_file_id` when uploading the print file
            self._source_file_id = file_upload_response.file_id
        elif isinstance(file_upload_response, DFPrintJobUploadResponse):
            file_name = (
                file_upload_response.job_name
                if file_upload_response.job_name is not None
                else ""
            )
        else:
            Logger.log(
                "e",
                "Wrong response type received. Aborting uploading file to the Digital Library",
            )
            # BUG FIX: this branch previously interpolated `file_name`,
            # which is unassigned here (NameError), and passed the long
            # description as `title` and the short one as `text` --
            # opposite of every other message in this class.
            getBackwardsCompatibleMessage(
                text="Failed to upload file. Received unexpected response from server.",
                title="Upload error",
                message_type_str="ERROR",
                lifetime=0,
            ).show()
            return
        if file_name not in self._file_upload_job_metadata:
            Logger.error(
                f"API response for uploading doesn't match the file name we just uploaded: {file_name} was never uploaded."
            )
            # BUG FIX: title/text were swapped here as well.
            getBackwardsCompatibleMessage(
                text=f"Failed to upload {file_name}. Name doesn't match the one sent back in confirmation.",
                title="Upload error",
                message_type_str="ERROR",
                lifetime=0,
            ).show()
            return
        with self._message_lock:
            self.progress_message.show()
            self._file_upload_job_metadata[file_name][
                "file_upload_response"
            ] = file_upload_response
        job_output = self._file_upload_job_metadata[file_name]["export_job_output"]
        with self._message_lock:
            self._file_upload_job_metadata[file_name]["upload_status"] = "uploading"
        self._api.uploadExportedFileData(
            file_upload_response,
            job_output,
            on_finished=self._onFileUploadFinished,
            on_success=self._onUploadSuccess,
            on_progress=self._onUploadProgress,
            on_error=self._onUploadError,
        )
        # The previous job is fully registered; start the next export job,
        # if any.
        self._handleNextUploadJob()

    def _onUploadProgress(self, filename: str, progress: int) -> None:
        """
        Updates the progress message according to the total progress of the two files and displays it to the user. It is
        made thread-safe with a lock, since the progress can be updated by two separate upload jobs
        :param filename: The name of the file for which we have progress (including the extension).
        :param progress: The progress percentage
        """
        with self._message_lock:
            self._file_upload_job_metadata[filename]["upload_progress"] = progress
            self._file_upload_job_metadata[filename]["upload_status"] = "uploading"
            total_progress = self.getTotalProgress()
            self.progress_message.setProgress(total_progress)
            self.progress_message.show()
        self._on_upload_progress(progress)

    def _onUploadSuccess(self, filename: str) -> None:
        """
        Sets the upload status to success and the progress of the file with the given filename to 100%. This function is
        should be called only if the file has uploaded all of its data successfully (i.e. no error occurred during the
        upload process).
        :param filename: The name of the file that was uploaded successfully (including the extension).
        """
        with self._message_lock:
            self._file_upload_job_metadata[filename]["upload_status"] = "success"
            self._file_upload_job_metadata[filename]["upload_progress"] = 100
        self._on_upload_success()

    def _onFileUploadFinished(self, filename: str) -> None:
        """
        Callback that makes sure the correct messages are displayed according to the statuses of the individual jobs.
        This function is called whenever an upload job has finished, regardless if it had errors or was successful.
        Both jobs have to have finished for the messages to show.
        :param filename: The name of the file that has finished uploading (including the extension).
        """
        with self._message_lock:
            # All files have finished their uploading process
            if all(
                [
                    (
                        file_upload_job["upload_progress"] == 100
                        and file_upload_job["upload_status"] != "uploading"
                    )
                    for file_upload_job in self._file_upload_job_metadata.values()
                ]
            ):
                # Reset and hide the progress message
                self.progress_message.setProgress(-1)
                self.progress_message.hide()
                # All files were successfully uploaded.
                if all(
                    [
                        (file_upload_job["upload_status"] == "success")
                        for file_upload_job in self._file_upload_job_metadata.values()
                    ]
                ):
                    # Show a single generic success message for all files
                    self._generic_success_message.show()
                else:  # One or more files failed to upload.
                    # Show individual messages for each file, according to their statuses
                    # (loop variable renamed so it no longer shadows the
                    # `filename` parameter)
                    for (
                        job_filename,
                        upload_job_metadata,
                    ) in self._file_upload_job_metadata.items():
                        if upload_job_metadata["upload_status"] == "success":
                            upload_job_metadata["file_upload_success_message"].show()
                        else:
                            upload_job_metadata["file_upload_failed_message"].show()
                # Call the parent's finished function
                self._on_upload_finished()

    def _onJobExportError(self, filename: str) -> None:
        """
        Displays an appropriate message when the process to export a file fails.
        :param filename: The name of the file that failed to be exported (including the extension).
        """
        Logger.log("d", "Error while exporting file '{}'".format(filename))
        with self._message_lock:
            # Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
            self._file_upload_job_metadata[filename]["upload_status"] = "failed"
            self._file_upload_job_metadata[filename]["upload_progress"] = 100
            self._file_upload_job_metadata[filename][
                "file_upload_failed_message"
            ] = getBackwardsCompatibleMessage(
                text="Failed to export the file '{}'. The upload process is aborted.".format(
                    filename
                ),
                title="Export error",
                message_type_str="ERROR",
                lifetime=30,
            )
        self._on_upload_error()
        self._onFileUploadFinished(filename)

    def _onRequestUploadCuraProjectFileFailed(
        self, reply: "QNetworkReply", network_error: "QNetworkReply.NetworkError"
    ) -> None:
        """
        Displays an appropriate message when the request to upload the Cura project file (.3mf) to the Digital Library fails.
        This means that something went wrong with the initial request to create a "file" entry in the digital library.
        """
        reply_string = bytes(reply.readAll()).decode()
        filename_3mf = self._file_name + ".3mf"
        Logger.log(
            "d",
            "An error occurred while uploading the Cura project file '{}' to the Digital Library project '{}': {}".format(
                filename_3mf, self._library_project_id, reply_string
            ),
        )
        with self._message_lock:
            # Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
            self._file_upload_job_metadata[filename_3mf]["upload_status"] = "failed"
            self._file_upload_job_metadata[filename_3mf]["upload_progress"] = 100
            human_readable_error = self.extractErrorTitle(reply_string)
            self._file_upload_job_metadata[filename_3mf][
                "file_upload_failed_message"
            ] = getBackwardsCompatibleMessage(
                text="Failed to upload the file '{}' to '{}'. {}".format(
                    filename_3mf, self._library_project_name, human_readable_error
                ),
                title="File upload error",
                message_type_str="ERROR",
                lifetime=30,
            )
        self._on_upload_error()
        self._onFileUploadFinished(filename_3mf)

    def _onRequestUploadPrintFileFailed(
        self, reply: "QNetworkReply", network_error: "QNetworkReply.NetworkError"
    ) -> None:
        """
        Displays an appropriate message when the request to upload the print file (.ufp) to the Digital Library fails.
        This means that something went wrong with the initial request to create a "file" entry in the digital library.
        """
        reply_string = bytes(reply.readAll()).decode()
        filename_ufp = self._file_name + ".ufp"
        Logger.log(
            "d",
            "An error occurred while uploading the print job file '{}' to the Digital Library project '{}': {}".format(
                filename_ufp, self._library_project_id, reply_string
            ),
        )
        with self._message_lock:
            # Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
            self._file_upload_job_metadata[filename_ufp]["upload_status"] = "failed"
            self._file_upload_job_metadata[filename_ufp]["upload_progress"] = 100
            human_readable_error = self.extractErrorTitle(reply_string)
            self._file_upload_job_metadata[filename_ufp][
                "file_upload_failed_message"
            ] = getBackwardsCompatibleMessage(
                title="File upload error",
                text="Failed to upload the file '{}' to '{}'. {}".format(
                    filename_ufp, self._library_project_name, human_readable_error
                ),
                message_type_str="ERROR",
                lifetime=30,
            )
        self._on_upload_error()
        self._onFileUploadFinished(filename_ufp)

    @staticmethod
    def extractErrorTitle(reply_body: Optional[str]) -> str:
        """Extract the first error title from a JSON error reply body.

        Returns "" when the body is empty, not JSON, or has no
        errors[0].title entry.
        """
        error_title = ""
        if reply_body:
            try:
                reply_dict = json.loads(reply_body)
            except JSONDecodeError:
                Logger.logException("w", "Unable to extract title from reply body")
                return error_title
            if (
                "errors" in reply_dict
                and len(reply_dict["errors"]) >= 1
                and "title" in reply_dict["errors"][0]
            ):
                error_title = reply_dict["errors"][0]["title"]
        return error_title

    def _onUploadError(
        self, filename: str, reply: "QNetworkReply", error: "QNetworkReply.NetworkError"
    ) -> None:
        """
        Displays the given message if uploading the mesh has failed due to a generic error (i.e. lost connection).
        If one of the two files fail, this error function will set its progress as finished, to make sure that the
        progress message doesn't get stuck.
        :param filename: The name of the file that failed to upload (including the extension).
        """
        reply_string = bytes(reply.readAll()).decode()
        Logger.log(
            "d",
            "Error while uploading '{}' to the Digital Library project '{}'. Reply: {}".format(
                filename, self._library_project_id, reply_string
            ),
        )
        with self._message_lock:
            # Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
            self._file_upload_job_metadata[filename]["upload_status"] = "failed"
            self._file_upload_job_metadata[filename]["upload_progress"] = 100
            human_readable_error = self.extractErrorTitle(reply_string)
            self._file_upload_job_metadata[filename][
                "file_upload_failed_message"
            ] = getBackwardsCompatibleMessage(
                title="File upload error",
                # BUG FIX: the message previously showed self._file_name
                # (the base name, without extension) while the log line and
                # the metadata bookkeeping use `filename`; use `filename`
                # consistently.
                text="Failed to upload the file '{}' to '{}'. {}".format(
                    filename, self._library_project_name, human_readable_error
                ),
                message_type_str="ERROR",
                lifetime=30,
            )
        self._on_upload_error()

    def getTotalProgress(self) -> int:
        """
        Returns the total upload progress of all the upload jobs
        :return: The average progress percentage
        """
        return int(
            sum(
                [
                    file_upload_job["upload_progress"]
                    for file_upload_job in self._file_upload_job_metadata.values()
                ]
            )
            / len(self._file_upload_job_metadata.values())
        )

    def _onMessageActionTriggered(self, message, action):
        # "Open project" action on the success message: open the library
        # project page in the system browser and dismiss the message.
        if action == "open_df_project":
            project_url = "{}/app/library/project/{}?wait_for_new_files=true&utm_source=cura&utm_medium=software&utm_campaign=saved-library-file-message".format(
                CuraApplication.getInstance().ultimakerDigitalFactoryUrl,
                self._library_project_id,
            )
            QDesktopServices.openUrl(QUrl(project_url))
            message.hide()

    def start(self) -> None:
        """Start the export/upload pipeline with the first queued job."""
        self._handleNextUploadJob()

    def _handleNextUploadJob(self):
        # Jobs are run one at a time; the next is started once the previous
        # one's upload has been registered (see _uploadFileData).
        try:
            job = self._upload_jobs.pop(0)
            job.start()
        except IndexError:
            pass  # Empty list, do nothing.

    def initializeFileUploadJobMetadata(self) -> Dict[str, Any]:
        """Build per-file bookkeeping entries and queue the export jobs.

        :return: {filename: {export output, progress, status, messages}}
        """
        metadata = {}
        self._upload_jobs = []
        if (
            "3mf" in self._formats
            and "3mf" in self._file_handlers
            and self._file_handlers["3mf"]
        ):
            filename_3mf = self._file_name + ".3mf"
            metadata[filename_3mf] = {
                "export_job_output": None,
                "upload_progress": -1,
                "upload_status": "",
                "file_upload_response": None,
                "file_upload_success_message": getBackwardsCompatibleMessage(
                    text="'{}' was uploaded to '{}'.".format(
                        filename_3mf, self._library_project_name
                    ),
                    title="Upload successful",
                    message_type_str="POSITIVE",
                    lifetime=30,
                ),
                "file_upload_failed_message": getBackwardsCompatibleMessage(
                    text="Failed to upload the file '{}' to '{}'.".format(
                        filename_3mf, self._library_project_name
                    ),
                    title="File upload error",
                    message_type_str="ERROR",
                    lifetime=30,
                ),
            }
            job_3mf = ExportFileJob(
                self._file_handlers["3mf"], self._nodes, self._file_name, "3mf"
            )
            job_3mf.finished.connect(self._onCuraProjectFileExported)
            self._upload_jobs.append(job_3mf)
        if (
            "ufp" in self._formats
            and "ufp" in self._file_handlers
            and self._file_handlers["ufp"]
        ):
            filename_ufp = self._file_name + ".ufp"
            metadata[filename_ufp] = {
                "export_job_output": None,
                "upload_progress": -1,
                "upload_status": "",
                "file_upload_response": None,
                "file_upload_success_message": getBackwardsCompatibleMessage(
                    text="'{}' was uploaded to '{}'.".format(
                        filename_ufp, self._library_project_name
                    ),
                    title="Upload successful",
                    message_type_str="POSITIVE",
                    lifetime=30,
                ),
                "file_upload_failed_message": getBackwardsCompatibleMessage(
                    text="Failed to upload the file '{}' to '{}'.".format(
                        filename_ufp, self._library_project_name
                    ),
                    title="File upload error",
                    message_type_str="ERROR",
                    lifetime=30,
                ),
            }
            job_ufp = ExportFileJob(
                self._file_handlers["ufp"], self._nodes, self._file_name, "ufp"
            )
            job_ufp.finished.connect(self._onPrintFileExported)
            self._upload_jobs.append(job_ufp)
        return metadata
|
models | promo_metrics | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
from itertools import product
from pycassa.types import IntegerType
from r2.lib.db import tdb_cassandra
from r2.lib.utils import tup
class PromoMetrics(tdb_cassandra.View):
    """Cassandra store for promotion metrics, used for inventory prediction.

    Usage:
        # set the metric value for many subreddits at once
        PromoMetrics.set('min_daily_pageviews.GET_listing',
                         {'funny': 63432, 'pics': 48829, 'books': 4})
        # get the metric value for one subreddit -> {'funny': 1234}
        PromoMetrics.get('min_daily_pageviews.GET_listing', 'funny')
        # get the metric value for many subreddits -> {'funny': 1234, ...}
        PromoMetrics.get('min_daily_pageviews.GET_listing',
                         ['funny', 'pics'])
        # get metric values for all subreddits
        PromoMetrics.get('min_daily_pageviews.GET_listing')
    """
    _use_db = True
    _value_type = "int"
    _fetch_all_columns = True

    @classmethod
    def get(cls, metric_name, sr_names=None):
        """Return {sr_name: value} for the named metric; {} when absent."""
        wanted = tup(sr_names)
        try:
            row = cls._byID(metric_name, properties=wanted)
            return row._values()  # might have additional values
        except tdb_cassandra.NotFound:
            return {}

    @classmethod
    def set(cls, metric_name, values_by_sr):
        """Store values for many subreddits under one metric row."""
        cls._set_values(metric_name, values_by_sr)
class LocationPromoMetrics(tdb_cassandra.View):
    """Per-location promotion impression counts, keyed by subreddit."""
    _use_db = True
    _write_consistency_level = tdb_cassandra.CL.QUORUM
    _read_consistency_level = tdb_cassandra.CL.ONE
    _extra_schema_creation_args = {
        "default_validation_class": IntegerType(),
    }

    @classmethod
    def _rowkey(cls, location):
        # Row key is "country-region-metro", with empty strings standing
        # in for missing components.
        parts = (location.country, location.region, location.metro)
        return "-".join(part or "" for part in parts)

    @classmethod
    def _column_name(cls, sr):
        return sr.name

    @classmethod
    def get(cls, srs, locations):
        """Return impressions keyed by (sr, location).

        When exactly one sr and one location are given, return the bare
        count instead of a dict.
        """
        srs, single_sr = tup(srs, ret_is_single=True)
        locations, single_location = tup(locations, ret_is_single=True)
        return_single = single_sr and single_location
        rowkey_by_location = {loc: cls._rowkey(loc) for loc in locations}
        column_by_sr = {sr: cls._column_name(sr) for sr in srs}
        metrics = cls._cf.multiget(
            rowkey_by_location.values(),
            column_by_sr.values(),
            read_consistency_level=cls._read_consistency_level,
        )
        ret = {}
        for sr, location in product(srs, locations):
            row = metrics.get(rowkey_by_location[location], {})
            ret[(sr, location)] = row.get(column_by_sr[sr], 0)
        if return_single:
            return ret.values()[0]
        return ret

    @classmethod
    def set(cls, metrics):
        """Batch-write (location, sr, impressions) triples."""
        wcl = cls._write_consistency_level
        with cls._cf.batch(write_consistency_level=wcl) as b:
            for location, sr, impressions in metrics:
                b.insert(cls._rowkey(location),
                         {cls._column_name(sr): impressions})
|
markovify | chain | # markovify
# Copyright (c) 2015, Jeremy Singer-Vine
# Origin: https://github.com/jsvine/markovify
# MIT License: https://github.com/jsvine/markovify/blob/master/LICENSE.txt
import bisect
import json
import operator
import random
# Python3 compatibility
try: # pragma: no cover
basestring
except NameError: # pragma: no cover
basestring = str
BEGIN = "___BEGIN__"
END = "___END__"
def accumulate(iterable, func=operator.add):
    """
    Cumulative calculations. (Summation, by default.)
    Via: https://docs.python.org/3/library/itertools.html#itertools.accumulate

    Yields nothing for an empty iterable, matching itertools.accumulate.
    """
    it = iter(iterable)
    try:
        total = next(it)
    except StopIteration:
        # BUG FIX: letting next() raise StopIteration out of a generator
        # is a RuntimeError under PEP 479 (Python 3.7+); an empty input
        # should simply produce an empty accumulation.
        return
    yield total
    for element in it:
        total = func(total, element)
        yield total
class Chain(object):
    """
    A Markov chain for processes that have explicit beginnings and ends --
    sentences, for example.
    """

    def __init__(self, corpus, state_size, model=None):
        """
        `corpus`: A list of runs, where each run is a list of steps (e.g.
        a sentence as a list of words). A single very long run approximates
        an infinite process.
        `state_size`: The number of items that make up one model state;
        2 or 3 is typical for text generation.
        `model`: Optional prebuilt transition table; when given, `corpus`
        is not consulted.
        """
        self.state_size = state_size
        self.model = model or self.build(corpus, self.state_size)
        self.precompute_begin_state()

    def build(self, corpus, state_size):
        """
        Build the transition table: a dict mapping each state tuple to an
        inner dict of {next item: occurrence count}.
        """
        # Plain dicts instead of a DefaultDict: more convenient is not
        # worth the far higher memory usage here.
        model = {}
        for run in corpus:
            padded = ([BEGIN] * state_size) + run + [END]
            for start in range(len(run) + 1):
                state = tuple(padded[start:start + state_size])
                follow = padded[start + state_size]
                followers = model.setdefault(state, {})
                followers[follow] = followers.get(follow, 0) + 1
        return model

    def precompute_begin_state(self):
        """
        Caches the summation calculation and available choices for BEGIN * state_size.
        Significantly speeds up chain generation on large corpuses. Thanks, @schollz!
        """
        begin_state = tuple([BEGIN] * self.state_size)
        choices, weights = zip(*self.model[begin_state].items())
        self.begin_cumdist = list(accumulate(weights))
        self.begin_choices = choices

    def move(self, state):
        """
        Given a state, choose the next item at random, weighted by the
        observed follow counts.
        """
        if state == tuple([BEGIN] * self.state_size):
            choices, cumdist = self.begin_choices, self.begin_cumdist
        else:
            choices, weights = zip(*self.model[state].items())
            cumdist = list(accumulate(weights))
        r = random.random() * cumdist[-1]
        return choices[bisect.bisect(cumdist, r)]

    def gen(self, init_state=None):
        """
        Yield successive items starting from `init_state` (or the naive
        all-BEGIN state) until the chain reaches END.
        """
        state = init_state or (BEGIN,) * self.state_size
        while True:
            step = self.move(state)
            if step == END:
                break
            yield step
            state = tuple(state[1:]) + (step,)

    def walk(self, init_state=None):
        """
        Return one complete run of the model as a list, starting either
        from a naive BEGIN state or from `init_state` (as a tuple).
        """
        return list(self.gen(init_state))

    def to_json(self):
        """
        Dump the model as a JSON string, for loading later.
        """
        return json.dumps(list(self.model.items()))

    @classmethod
    def from_json(cls, json_thing):
        """
        Rebuild a markovify.Chain from the output of `to_json` (either the
        JSON string itself or the already-parsed object).
        """
        if isinstance(json_thing, basestring):
            obj = json.loads(json_thing)
        else:
            obj = json_thing
        if isinstance(obj, list):
            rehydrated = {tuple(pair[0]): pair[1] for pair in obj}
        elif isinstance(obj, dict):
            rehydrated = obj
        else:
            raise ValueError("Object should be dict or list")
        state_size = len(list(rehydrated.keys())[0])
        return cls(None, state_size, rehydrated)
|
builtinAdditionPanes | cargoView | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import gui.display as d
import gui.fitCommands as cmd
import gui.globalEvents as GE
# noinspection PyPackageRequirements
import wx
from gui.builtinMarketBrowser.events import ITEM_SELECTED, ItemSelected
from gui.contextMenu import ContextMenu
from gui.utils.staticHelpers import DragDropHelper
from service.fit import Fit
from service.market import Market
class CargoViewDrop(wx.DropTarget):
    """Drop target that forwards a dragged "source:payload" string to
    `dropFn` as a split list."""

    def __init__(self, dropFn, *args, **kwargs):
        super(CargoViewDrop, self).__init__(*args, **kwargs)
        self.dropFn = dropFn
        # this is really transferring an EVE itemID
        self.dropData = wx.TextDataObject()
        self.SetDataObject(self.dropData)

    def OnData(self, x, y, t):
        if self.GetData():
            # The actual payload travels through DragDropHelper rather
            # than the wx data object.
            payload = DragDropHelper.data.split(":")
            self.dropFn(x, y, payload)
        return t
# @todo: Was copied form another class and modified. Look through entire file, refine
class CargoView(d.Display):
DEFAULT_COLS = ["Base Icon", "Base Name", "attr:volume", "Price"]
def __init__(self, parent):
    d.Display.__init__(self, parent, style=wx.BORDER_NONE)
    # Fit shown on the previous refresh; lets fitChanged detect switches.
    self.lastFitId = None
    # Fit/market events drive the cargo list contents.
    self.mainFrame.Bind(GE.FIT_CHANGED, self.fitChanged)
    self.mainFrame.Bind(ITEM_SELECTED, self.addItem)
    self.Bind(wx.EVT_LEFT_DCLICK, self.onLeftDoubleClick)
    self.Bind(wx.EVT_KEY_UP, self.kbEvent)
    # Accept drops (itemIDs) from the market browser and fitting view.
    self.SetDropTarget(CargoViewDrop(self.handleListDrag))
    self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.startDrag)
    self.Bind(wx.EVT_CONTEXT_MENU, self.spawnMenu)
def addItem(self, event):
    """Add the market-selected item to the active fit's cargo.

    Keyboard modifiers scale the stack: Ctrl x10, Alt x100, both x1000.
    """
    item = Market.getInstance().getItem(event.itemID, eager="group")
    if item is None or not (item.isCharge or item.isCommodity):
        event.Skip()
        return
    fitID = self.mainFrame.getActiveFit()
    fit = Fit.getInstance().getFit(fitID)
    if not fit:
        event.Skip()
        return
    amount_by_modifier = {
        wx.MOD_CONTROL: 10,
        wx.MOD_ALT: 100,
        wx.MOD_CONTROL | wx.MOD_ALT: 1000,
    }
    amount = amount_by_modifier.get(wx.GetMouseState().GetModifiers(), 1)
    self.mainFrame.command.Submit(
        cmd.GuiAddCargoCommand(fitID=fitID, itemID=item.ID, amount=amount)
    )
    self.mainFrame.additionsPane.select("Cargo")
    event.Skip()
def handleListDrag(self, x, y, data):
    """
    Handles dragging of items from various pyfa displays which support it
    data is list with two indices:
    data[0] is hard-coded str of originating source
    data[1] is typeID or index of data we want to manipulate
    """
    if data[0] == "fitting":
        # A fitted module dropped here: swap it into cargo.
        self.swapModule(x, y, int(data[1]))
    elif data[0] == "market":
        # Market browser drop: add one unit of the dragged item type.
        fitID = self.mainFrame.getActiveFit()
        if fitID:
            self.mainFrame.command.Submit(
                cmd.GuiAddCargoCommand(fitID=fitID, itemID=int(data[1]), amount=1)
            )
def startDrag(self, event):
    """Begin dragging the clicked cargo row as a "cargo:<itemID>" string."""
    row = event.GetIndex()
    if row != -1:
        data = wx.TextDataObject()
        try:
            dataStr = "cargo:{}".format(self.cargo[row].itemID)
        except IndexError:
            # Row vanished (list changed underneath us); abort the drag.
            return
        data.SetText(dataStr)
        # Make the dragged row the sole selection for the drag operation.
        self.unselectAll()
        self.Select(row, True)
        dropSource = wx.DropSource(self)
        dropSource.SetData(data)
        # Mirror the payload: CargoViewDrop.OnData reads it from
        # DragDropHelper.data rather than the wx data object.
        DragDropHelper.data = dataStr
        dropSource.DoDragDrop()
def kbEvent(self, event):
    """Keyboard shortcuts: Esc clears the selection, Ctrl+A selects all,
    Delete removes the selected cargo rows."""
    keycode = event.GetKeyCode()
    modifiers = event.GetModifiers()
    if modifiers == wx.MOD_NONE and keycode == wx.WXK_ESCAPE:
        self.unselectAll()
    elif modifiers == wx.MOD_CONTROL and keycode == ord("A"):
        self.selectAll()
    elif modifiers == wx.MOD_NONE and keycode in (
            wx.WXK_DELETE, wx.WXK_NUMPAD_DELETE):
        self.removeCargos(self.getSelectedCargos())
    event.Skip()
def swapModule(self, x, y, modIdx):
    """Swap a module from fitting window with cargo.

    Submits a command moving (or copying, when Ctrl is held) the fitted
    module at position *modIdx* into cargo; if it was dropped onto an
    existing cargo row, that row's item ID is passed along so the command
    can swap it into the freed slot.

    Fix: removed the unused ``sFit``/``fit`` locals the original computed
    and never read.
    """
    dstRow, _ = self.HitTest((x, y))
    # Determine which cargo row (if any) the module was dropped onto.
    if dstRow > -1:
        try:
            dstCargoItemID = getattr(self.cargo[dstRow], "itemID", None)
        except IndexError:
            dstCargoItemID = None
    else:
        dstCargoItemID = None
    self.mainFrame.command.Submit(
        cmd.GuiLocalModuleToCargoCommand(
            fitID=self.mainFrame.getActiveFit(),
            modPosition=modIdx,
            cargoItemID=dstCargoItemID,
            copy=wx.GetMouseState().GetModifiers() == wx.MOD_CONTROL,
        )
    )
def fitChanged(self, event):
    """Refresh the cargo list when a fit-changed event arrives.

    Ignores events that do not concern the active fit, keeps an unsorted
    reference to the fit's real cargo list alongside a sorted display
    copy, and resets scroll/selection state when the active fit switched.
    """
    event.Skip()
    activeFitID = self.mainFrame.getActiveFit()
    # Only react to events that include the currently shown fit.
    if activeFitID is not None and activeFitID not in event.fitIDs:
        return
    sFit = Fit.getInstance()
    fit = sFit.getFit(activeFitID)
    # self.Parent.Parent.DisablePage(self, not fit or fit.isStructure)
    # Clear list and get out if current fitId is None
    if activeFitID is None and self.lastFitId is not None:
        self.DeleteAllItems()
        self.lastFitId = None
        return
    # self.original stays the fit's own list (used for membership tests);
    # self.cargo is a sorted copy used purely for display.
    self.original = fit.cargo if fit is not None else None
    self.cargo = fit.cargo[:] if fit is not None else None
    if self.cargo is not None:
        # Sort by category, then group, then item name.
        self.cargo.sort(
            key=lambda c: (
                c.item.group.category.name,
                c.item.group.name,
                c.item.name,
            )
        )
    if activeFitID != self.lastFitId:
        # Active fit switched: scroll back to the top and drop selection.
        self.lastFitId = activeFitID
        item = self.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_DONTCARE)
        if item != -1:
            self.EnsureVisible(item)
        self.unselectAll()
    self.populate(self.cargo)
    self.refresh(self.cargo)
def onLeftDoubleClick(self, event):
    """Double-clicking a cargo row removes that cargo item from the fit."""
    clickedRow, _ = self.HitTest(event.Position)
    if clickedRow == -1:
        return
    try:
        clicked = self.cargo[clickedRow]
    except IndexError:
        return
    self.removeCargos([clicked])
def removeCargos(self, cargos):
    """Submit a command removing *cargos* from the active fit.

    Only entries that are actually present in the fit's real cargo list
    (``self.original``) are forwarded.
    """
    activeFitID = self.mainFrame.getActiveFit()
    itemIDs = [cargo.itemID for cargo in cargos if cargo in self.original]
    self.mainFrame.command.Submit(
        cmd.GuiRemoveCargosCommand(fitID=activeFitID, itemIDs=itemIDs)
    )
def spawnMenu(self, event):
    """Show the context menu for the cargo row under the cursor."""
    clickedRow = self.getRowByAbs(event.Position)
    self.ensureSelection(clickedRow)
    selectedCargos = self.getSelectedCargos()
    # The "main" cargo is the clicked row, provided it maps to a real
    # entry in the fit's cargo list.
    mainCargo = None
    if clickedRow != -1:
        try:
            candidate = self.cargo[clickedRow]
        except IndexError:
            candidate = None
        if candidate is not None and candidate in self.original:
            mainCargo = candidate
    if mainCargo is not None:
        itemContext = Market.getInstance().getCategoryByItem(mainCargo.item).displayName
    else:
        itemContext = None
    menu = ContextMenu.getMenu(
        self,
        mainCargo,
        selectedCargos,
        ("cargoItem", itemContext),
        ("cargoItemMisc", itemContext),
    )
    if menu:
        self.PopupMenu(menu)
def getSelectedCargos(self):
    """Return the cargo objects backing the currently selected rows.

    Rows whose index no longer maps into the cargo list (stale
    selection) are silently skipped.
    """
    selected = []
    for rowIndex in self.getSelectedRows():
        try:
            selected.append(self.cargo[rowIndex])
        except IndexError:
            pass
    return selected
def getTabExtraText(self):
    """Return the cargo-count suffix for the additions tab label, or None."""
    activeFitID = self.mainFrame.getActiveFit()
    if activeFitID is None:
        return None
    sFit = Fit.getInstance()
    currentFit = sFit.getFit(activeFitID)
    if currentFit is None:
        return None
    labelOption = sFit.serviceFittingOptions["additionsLabels"]
    # Label options 1 and 2 both show the total number of cargo entries.
    if labelOption not in (1, 2):
        return None
    cargoCount = len(currentFit.cargo)
    return " ({})".format(cargoCount) if cargoCount else None
|
bitmessageqt | bitmessageui | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bitmessageui.ui'
#
# Created: Mon Mar 23 22:18:07 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
import settingsmixin
from blacklist import Blacklist
from bmconfigparser import config
from foldertree import AddressBookCompleter
from messagecompose import MessageCompose
from messageview import MessageView
from networkstatus import NetworkStatus
from PyQt4 import QtCore, QtGui
# QString.fromUtf8 exists under PyQt4 built against Qt4 API v1; newer
# bindings drop QString entirely, in which case Python strings are already
# unicode and can be passed through unchanged.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString available: identity fallback.
        return s
# Pick a _translate() implementation matching the installed Qt bindings:
# older PyQt4 builds expose QApplication.UnicodeUTF8 and accept an explicit
# encoding argument to translate(); newer ones dropped it.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(
        context, text, disambig, encoding=QtCore.QCoreApplication.CodecForTr, n=None
    ):
        # NOTE: the generated `encoding` parameter is accepted but never
        # used; _encoding / CodecForTr is passed instead.  *n* is the
        # plural-form count and is only forwarded when supplied.
        if n is None:
            return QtGui.QApplication.translate(context, text, disambig, _encoding)
        else:
            return QtGui.QApplication.translate(context, text, disambig, _encoding, n)
except AttributeError:
    def _translate(
        context, text, disambig, encoding=QtCore.QCoreApplication.CodecForTr, n=None
    ):
        if n is None:
            return QtGui.QApplication.translate(context, text, disambig)
        else:
            return QtGui.QApplication.translate(
                context, text, disambig, QtCore.QCoreApplication.CodecForTr, n
            )
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(885, 580)
icon = QtGui.QIcon()
icon.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/can-icon-24px.png")),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
MainWindow.setWindowIcon(icon)
MainWindow.setTabShape(QtGui.QTabWidget.Rounded)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout_10 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setMinimumSize(QtCore.QSize(0, 0))
self.tabWidget.setBaseSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(9)
self.tabWidget.setFont(font)
self.tabWidget.setTabPosition(QtGui.QTabWidget.North)
self.tabWidget.setTabShape(QtGui.QTabWidget.Rounded)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.inbox = QtGui.QWidget()
self.inbox.setObjectName(_fromUtf8("inbox"))
self.gridLayout = QtGui.QGridLayout(self.inbox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.horizontalSplitter_3 = settingsmixin.SSplitter()
self.horizontalSplitter_3.setObjectName(_fromUtf8("horizontalSplitter_3"))
self.verticalSplitter_12 = settingsmixin.SSplitter()
self.verticalSplitter_12.setObjectName(_fromUtf8("verticalSplitter_12"))
self.verticalSplitter_12.setOrientation(QtCore.Qt.Vertical)
self.treeWidgetYourIdentities = settingsmixin.STreeWidget(self.inbox)
self.treeWidgetYourIdentities.setObjectName(
_fromUtf8("treeWidgetYourIdentities")
)
self.treeWidgetYourIdentities.resize(
200, self.treeWidgetYourIdentities.height()
)
icon1 = QtGui.QIcon()
icon1.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/identities.png")),
QtGui.QIcon.Selected,
QtGui.QIcon.Off,
)
self.treeWidgetYourIdentities.headerItem().setIcon(0, icon1)
self.verticalSplitter_12.addWidget(self.treeWidgetYourIdentities)
self.pushButtonNewAddress = QtGui.QPushButton(self.inbox)
self.pushButtonNewAddress.setObjectName(_fromUtf8("pushButtonNewAddress"))
self.pushButtonNewAddress.resize(200, self.pushButtonNewAddress.height())
self.verticalSplitter_12.addWidget(self.pushButtonNewAddress)
self.verticalSplitter_12.setStretchFactor(0, 1)
self.verticalSplitter_12.setStretchFactor(1, 0)
self.verticalSplitter_12.setCollapsible(0, False)
self.verticalSplitter_12.setCollapsible(1, False)
self.verticalSplitter_12.handle(1).setEnabled(False)
self.horizontalSplitter_3.addWidget(self.verticalSplitter_12)
self.verticalSplitter_7 = settingsmixin.SSplitter()
self.verticalSplitter_7.setObjectName(_fromUtf8("verticalSplitter_7"))
self.verticalSplitter_7.setOrientation(QtCore.Qt.Vertical)
self.horizontalSplitterSearch = QtGui.QSplitter()
self.horizontalSplitterSearch.setObjectName(
_fromUtf8("horizontalSplitterSearch")
)
self.inboxSearchLineEdit = QtGui.QLineEdit(self.inbox)
self.inboxSearchLineEdit.setObjectName(_fromUtf8("inboxSearchLineEdit"))
self.horizontalSplitterSearch.addWidget(self.inboxSearchLineEdit)
self.inboxSearchOption = QtGui.QComboBox(self.inbox)
self.inboxSearchOption.setObjectName(_fromUtf8("inboxSearchOption"))
self.inboxSearchOption.addItem(_fromUtf8(""))
self.inboxSearchOption.addItem(_fromUtf8(""))
self.inboxSearchOption.addItem(_fromUtf8(""))
self.inboxSearchOption.addItem(_fromUtf8(""))
self.inboxSearchOption.addItem(_fromUtf8(""))
self.inboxSearchOption.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
self.inboxSearchOption.setCurrentIndex(3)
self.horizontalSplitterSearch.addWidget(self.inboxSearchOption)
self.horizontalSplitterSearch.handle(1).setEnabled(False)
self.horizontalSplitterSearch.setStretchFactor(0, 1)
self.horizontalSplitterSearch.setStretchFactor(1, 0)
self.verticalSplitter_7.addWidget(self.horizontalSplitterSearch)
self.tableWidgetInbox = settingsmixin.STableWidget(self.inbox)
self.tableWidgetInbox.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidgetInbox.setAlternatingRowColors(True)
self.tableWidgetInbox.setSelectionMode(
QtGui.QAbstractItemView.ExtendedSelection
)
self.tableWidgetInbox.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.tableWidgetInbox.setWordWrap(False)
self.tableWidgetInbox.setObjectName(_fromUtf8("tableWidgetInbox"))
self.tableWidgetInbox.setColumnCount(4)
self.tableWidgetInbox.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetInbox.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInbox.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInbox.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInbox.setHorizontalHeaderItem(3, item)
self.tableWidgetInbox.horizontalHeader().setCascadingSectionResizes(True)
self.tableWidgetInbox.horizontalHeader().setDefaultSectionSize(200)
self.tableWidgetInbox.horizontalHeader().setHighlightSections(False)
self.tableWidgetInbox.horizontalHeader().setMinimumSectionSize(27)
self.tableWidgetInbox.horizontalHeader().setSortIndicatorShown(False)
self.tableWidgetInbox.horizontalHeader().setStretchLastSection(True)
self.tableWidgetInbox.verticalHeader().setVisible(False)
self.tableWidgetInbox.verticalHeader().setDefaultSectionSize(26)
self.verticalSplitter_7.addWidget(self.tableWidgetInbox)
self.textEditInboxMessage = MessageView(self.inbox)
self.textEditInboxMessage.setBaseSize(QtCore.QSize(0, 500))
self.textEditInboxMessage.setReadOnly(True)
self.textEditInboxMessage.setObjectName(_fromUtf8("textEditInboxMessage"))
self.verticalSplitter_7.addWidget(self.textEditInboxMessage)
self.verticalSplitter_7.setStretchFactor(0, 0)
self.verticalSplitter_7.setStretchFactor(1, 1)
self.verticalSplitter_7.setStretchFactor(2, 2)
self.verticalSplitter_7.setCollapsible(0, False)
self.verticalSplitter_7.setCollapsible(1, False)
self.verticalSplitter_7.setCollapsible(2, False)
self.verticalSplitter_7.handle(1).setEnabled(False)
self.horizontalSplitter_3.addWidget(self.verticalSplitter_7)
self.horizontalSplitter_3.setStretchFactor(0, 0)
self.horizontalSplitter_3.setStretchFactor(1, 1)
self.horizontalSplitter_3.setCollapsible(0, False)
self.horizontalSplitter_3.setCollapsible(1, False)
self.gridLayout.addWidget(self.horizontalSplitter_3)
icon2 = QtGui.QIcon()
icon2.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/inbox.png")),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.tabWidget.addTab(self.inbox, icon2, _fromUtf8(""))
self.send = QtGui.QWidget()
self.send.setObjectName(_fromUtf8("send"))
self.gridLayout_7 = QtGui.QGridLayout(self.send)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.horizontalSplitter = settingsmixin.SSplitter()
self.horizontalSplitter.setObjectName(_fromUtf8("horizontalSplitter"))
self.verticalSplitter_2 = settingsmixin.SSplitter()
self.verticalSplitter_2.setObjectName(_fromUtf8("verticalSplitter_2"))
self.verticalSplitter_2.setOrientation(QtCore.Qt.Vertical)
self.tableWidgetAddressBook = settingsmixin.STableWidget(self.send)
self.tableWidgetAddressBook.setAlternatingRowColors(True)
self.tableWidgetAddressBook.setSelectionMode(
QtGui.QAbstractItemView.ExtendedSelection
)
self.tableWidgetAddressBook.setSelectionBehavior(
QtGui.QAbstractItemView.SelectRows
)
self.tableWidgetAddressBook.setObjectName(_fromUtf8("tableWidgetAddressBook"))
self.tableWidgetAddressBook.setColumnCount(2)
self.tableWidgetAddressBook.setRowCount(0)
self.tableWidgetAddressBook.resize(200, self.tableWidgetAddressBook.height())
item = QtGui.QTableWidgetItem()
icon3 = QtGui.QIcon()
icon3.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/addressbook.png")),
QtGui.QIcon.Selected,
QtGui.QIcon.Off,
)
item.setIcon(icon3)
self.tableWidgetAddressBook.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetAddressBook.setHorizontalHeaderItem(1, item)
self.tableWidgetAddressBook.horizontalHeader().setCascadingSectionResizes(True)
self.tableWidgetAddressBook.horizontalHeader().setDefaultSectionSize(200)
self.tableWidgetAddressBook.horizontalHeader().setHighlightSections(False)
self.tableWidgetAddressBook.horizontalHeader().setStretchLastSection(True)
self.tableWidgetAddressBook.verticalHeader().setVisible(False)
self.verticalSplitter_2.addWidget(self.tableWidgetAddressBook)
self.addressBookCompleter = AddressBookCompleter()
self.addressBookCompleter.setCompletionMode(QtGui.QCompleter.PopupCompletion)
self.addressBookCompleter.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.addressBookCompleterModel = QtGui.QStringListModel()
self.addressBookCompleter.setModel(self.addressBookCompleterModel)
self.pushButtonAddAddressBook = QtGui.QPushButton(self.send)
self.pushButtonAddAddressBook.setObjectName(
_fromUtf8("pushButtonAddAddressBook")
)
self.pushButtonAddAddressBook.resize(
200, self.pushButtonAddAddressBook.height()
)
self.verticalSplitter_2.addWidget(self.pushButtonAddAddressBook)
self.pushButtonFetchNamecoinID = QtGui.QPushButton(self.send)
self.pushButtonFetchNamecoinID.resize(
200, self.pushButtonFetchNamecoinID.height()
)
self.pushButtonFetchNamecoinID.setObjectName(
_fromUtf8("pushButtonFetchNamecoinID")
)
self.verticalSplitter_2.addWidget(self.pushButtonFetchNamecoinID)
self.verticalSplitter_2.setStretchFactor(0, 1)
self.verticalSplitter_2.setStretchFactor(1, 0)
self.verticalSplitter_2.setStretchFactor(2, 0)
self.verticalSplitter_2.setCollapsible(0, False)
self.verticalSplitter_2.setCollapsible(1, False)
self.verticalSplitter_2.setCollapsible(2, False)
self.verticalSplitter_2.handle(1).setEnabled(False)
self.verticalSplitter_2.handle(2).setEnabled(False)
self.horizontalSplitter.addWidget(self.verticalSplitter_2)
self.verticalSplitter = settingsmixin.SSplitter()
self.verticalSplitter.setObjectName(_fromUtf8("verticalSplitter"))
self.verticalSplitter.setOrientation(QtCore.Qt.Vertical)
self.tabWidgetSend = QtGui.QTabWidget(self.send)
self.tabWidgetSend.setObjectName(_fromUtf8("tabWidgetSend"))
self.sendDirect = QtGui.QWidget()
self.sendDirect.setObjectName(_fromUtf8("sendDirect"))
self.gridLayout_8 = QtGui.QGridLayout(self.sendDirect)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.verticalSplitter_5 = settingsmixin.SSplitter()
self.verticalSplitter_5.setObjectName(_fromUtf8("verticalSplitter_5"))
self.verticalSplitter_5.setOrientation(QtCore.Qt.Vertical)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_3 = QtGui.QLabel(self.sendDirect)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 2, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.sendDirect)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)
self.lineEditSubject = QtGui.QLineEdit(self.sendDirect)
self.lineEditSubject.setText(_fromUtf8(""))
self.lineEditSubject.setObjectName(_fromUtf8("lineEditSubject"))
self.gridLayout_2.addWidget(self.lineEditSubject, 2, 1, 1, 1)
self.label = QtGui.QLabel(self.sendDirect)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 1, 0, 1, 1)
self.comboBoxSendFrom = QtGui.QComboBox(self.sendDirect)
self.comboBoxSendFrom.setMinimumSize(QtCore.QSize(300, 0))
self.comboBoxSendFrom.setObjectName(_fromUtf8("comboBoxSendFrom"))
self.gridLayout_2.addWidget(self.comboBoxSendFrom, 0, 1, 1, 1)
self.lineEditTo = QtGui.QLineEdit(self.sendDirect)
self.lineEditTo.setObjectName(_fromUtf8("lineEditTo"))
self.gridLayout_2.addWidget(self.lineEditTo, 1, 1, 1, 1)
self.lineEditTo.setCompleter(self.addressBookCompleter)
self.gridLayout_2_Widget = QtGui.QWidget()
self.gridLayout_2_Widget.setLayout(self.gridLayout_2)
self.verticalSplitter_5.addWidget(self.gridLayout_2_Widget)
self.textEditMessage = MessageCompose(self.sendDirect)
self.textEditMessage.setObjectName(_fromUtf8("textEditMessage"))
self.verticalSplitter_5.addWidget(self.textEditMessage)
self.verticalSplitter_5.setStretchFactor(0, 0)
self.verticalSplitter_5.setStretchFactor(1, 1)
self.verticalSplitter_5.setCollapsible(0, False)
self.verticalSplitter_5.setCollapsible(1, False)
self.verticalSplitter_5.handle(1).setEnabled(False)
self.gridLayout_8.addWidget(self.verticalSplitter_5, 0, 0, 1, 1)
self.tabWidgetSend.addTab(self.sendDirect, _fromUtf8(""))
self.sendBroadcast = QtGui.QWidget()
self.sendBroadcast.setObjectName(_fromUtf8("sendBroadcast"))
self.gridLayout_9 = QtGui.QGridLayout(self.sendBroadcast)
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.verticalSplitter_6 = settingsmixin.SSplitter()
self.verticalSplitter_6.setObjectName(_fromUtf8("verticalSplitter_6"))
self.verticalSplitter_6.setOrientation(QtCore.Qt.Vertical)
self.gridLayout_5 = QtGui.QGridLayout()
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.label_8 = QtGui.QLabel(self.sendBroadcast)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_5.addWidget(self.label_8, 0, 0, 1, 1)
self.lineEditSubjectBroadcast = QtGui.QLineEdit(self.sendBroadcast)
self.lineEditSubjectBroadcast.setText(_fromUtf8(""))
self.lineEditSubjectBroadcast.setObjectName(
_fromUtf8("lineEditSubjectBroadcast")
)
self.gridLayout_5.addWidget(self.lineEditSubjectBroadcast, 1, 1, 1, 1)
self.label_7 = QtGui.QLabel(self.sendBroadcast)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_5.addWidget(self.label_7, 1, 0, 1, 1)
self.comboBoxSendFromBroadcast = QtGui.QComboBox(self.sendBroadcast)
self.comboBoxSendFromBroadcast.setMinimumSize(QtCore.QSize(300, 0))
self.comboBoxSendFromBroadcast.setObjectName(
_fromUtf8("comboBoxSendFromBroadcast")
)
self.gridLayout_5.addWidget(self.comboBoxSendFromBroadcast, 0, 1, 1, 1)
self.gridLayout_5_Widget = QtGui.QWidget()
self.gridLayout_5_Widget.setLayout(self.gridLayout_5)
self.verticalSplitter_6.addWidget(self.gridLayout_5_Widget)
self.textEditMessageBroadcast = MessageCompose(self.sendBroadcast)
self.textEditMessageBroadcast.setObjectName(
_fromUtf8("textEditMessageBroadcast")
)
self.verticalSplitter_6.addWidget(self.textEditMessageBroadcast)
self.verticalSplitter_6.setStretchFactor(0, 0)
self.verticalSplitter_6.setStretchFactor(1, 1)
self.verticalSplitter_6.setCollapsible(0, False)
self.verticalSplitter_6.setCollapsible(1, False)
self.verticalSplitter_6.handle(1).setEnabled(False)
self.gridLayout_9.addWidget(self.verticalSplitter_6, 0, 0, 1, 1)
self.tabWidgetSend.addTab(self.sendBroadcast, _fromUtf8(""))
self.verticalSplitter.addWidget(self.tabWidgetSend)
self.tTLContainer = QtGui.QWidget()
self.tTLContainer.setSizePolicy(
QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed
)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.tTLContainer.setLayout(self.horizontalLayout_5)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.pushButtonTTL = QtGui.QPushButton(self.send)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.pushButtonTTL.sizePolicy().hasHeightForWidth()
)
self.pushButtonTTL.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.pushButtonTTL.setPalette(palette)
font = QtGui.QFont()
font.setUnderline(True)
self.pushButtonTTL.setFont(font)
self.pushButtonTTL.setFlat(True)
self.pushButtonTTL.setObjectName(_fromUtf8("pushButtonTTL"))
self.horizontalLayout_5.addWidget(self.pushButtonTTL, 0, QtCore.Qt.AlignRight)
self.horizontalSliderTTL = QtGui.QSlider(self.send)
self.horizontalSliderTTL.setMinimumSize(QtCore.QSize(70, 0))
self.horizontalSliderTTL.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSliderTTL.setInvertedAppearance(False)
self.horizontalSliderTTL.setInvertedControls(False)
self.horizontalSliderTTL.setObjectName(_fromUtf8("horizontalSliderTTL"))
self.horizontalLayout_5.addWidget(
self.horizontalSliderTTL, 0, QtCore.Qt.AlignLeft
)
self.labelHumanFriendlyTTLDescription = QtGui.QLabel(self.send)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.labelHumanFriendlyTTLDescription.sizePolicy().hasHeightForWidth()
)
self.labelHumanFriendlyTTLDescription.setSizePolicy(sizePolicy)
self.labelHumanFriendlyTTLDescription.setMinimumSize(QtCore.QSize(45, 0))
self.labelHumanFriendlyTTLDescription.setObjectName(
_fromUtf8("labelHumanFriendlyTTLDescription")
)
self.horizontalLayout_5.addWidget(
self.labelHumanFriendlyTTLDescription, 1, QtCore.Qt.AlignLeft
)
self.pushButtonClear = QtGui.QPushButton(self.send)
self.pushButtonClear.setObjectName(_fromUtf8("pushButtonClear"))
self.horizontalLayout_5.addWidget(self.pushButtonClear, 0, QtCore.Qt.AlignRight)
self.pushButtonSend = QtGui.QPushButton(self.send)
self.pushButtonSend.setObjectName(_fromUtf8("pushButtonSend"))
self.horizontalLayout_5.addWidget(self.pushButtonSend, 0, QtCore.Qt.AlignRight)
self.horizontalSliderTTL.setMaximumSize(
QtCore.QSize(105, self.pushButtonSend.height())
)
self.verticalSplitter.addWidget(self.tTLContainer)
self.tTLContainer.adjustSize()
self.verticalSplitter.setStretchFactor(1, 0)
self.verticalSplitter.setStretchFactor(0, 1)
self.verticalSplitter.setCollapsible(0, False)
self.verticalSplitter.setCollapsible(1, False)
self.verticalSplitter.handle(1).setEnabled(False)
self.horizontalSplitter.addWidget(self.verticalSplitter)
self.horizontalSplitter.setStretchFactor(0, 0)
self.horizontalSplitter.setStretchFactor(1, 1)
self.horizontalSplitter.setCollapsible(0, False)
self.horizontalSplitter.setCollapsible(1, False)
self.gridLayout_7.addWidget(self.horizontalSplitter, 0, 0, 1, 1)
icon4 = QtGui.QIcon()
icon4.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/send.png")),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.tabWidget.addTab(self.send, icon4, _fromUtf8(""))
self.subscriptions = QtGui.QWidget()
self.subscriptions.setObjectName(_fromUtf8("subscriptions"))
self.gridLayout_3 = QtGui.QGridLayout(self.subscriptions)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.horizontalSplitter_4 = settingsmixin.SSplitter()
self.horizontalSplitter_4.setObjectName(_fromUtf8("horizontalSplitter_4"))
self.verticalSplitter_3 = settingsmixin.SSplitter()
self.verticalSplitter_3.setObjectName(_fromUtf8("verticalSplitter_3"))
self.verticalSplitter_3.setOrientation(QtCore.Qt.Vertical)
self.treeWidgetSubscriptions = settingsmixin.STreeWidget(self.subscriptions)
self.treeWidgetSubscriptions.setAlternatingRowColors(True)
self.treeWidgetSubscriptions.setSelectionMode(
QtGui.QAbstractItemView.SingleSelection
)
self.treeWidgetSubscriptions.setSelectionBehavior(
QtGui.QAbstractItemView.SelectRows
)
self.treeWidgetSubscriptions.setObjectName(_fromUtf8("treeWidgetSubscriptions"))
self.treeWidgetSubscriptions.resize(200, self.treeWidgetSubscriptions.height())
icon5 = QtGui.QIcon()
icon5.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/subscriptions.png")),
QtGui.QIcon.Selected,
QtGui.QIcon.Off,
)
self.treeWidgetSubscriptions.headerItem().setIcon(0, icon5)
self.verticalSplitter_3.addWidget(self.treeWidgetSubscriptions)
self.pushButtonAddSubscription = QtGui.QPushButton(self.subscriptions)
self.pushButtonAddSubscription.setObjectName(
_fromUtf8("pushButtonAddSubscription")
)
self.pushButtonAddSubscription.resize(
200, self.pushButtonAddSubscription.height()
)
self.verticalSplitter_3.addWidget(self.pushButtonAddSubscription)
self.verticalSplitter_3.setStretchFactor(0, 1)
self.verticalSplitter_3.setStretchFactor(1, 0)
self.verticalSplitter_3.setCollapsible(0, False)
self.verticalSplitter_3.setCollapsible(1, False)
self.verticalSplitter_3.handle(1).setEnabled(False)
self.horizontalSplitter_4.addWidget(self.verticalSplitter_3)
self.verticalSplitter_4 = settingsmixin.SSplitter()
self.verticalSplitter_4.setObjectName(_fromUtf8("verticalSplitter_4"))
self.verticalSplitter_4.setOrientation(QtCore.Qt.Vertical)
self.horizontalSplitter_2 = QtGui.QSplitter()
self.horizontalSplitter_2.setObjectName(_fromUtf8("horizontalSplitter_2"))
self.inboxSearchLineEditSubscriptions = QtGui.QLineEdit(self.subscriptions)
self.inboxSearchLineEditSubscriptions.setObjectName(
_fromUtf8("inboxSearchLineEditSubscriptions")
)
self.horizontalSplitter_2.addWidget(self.inboxSearchLineEditSubscriptions)
self.inboxSearchOptionSubscriptions = QtGui.QComboBox(self.subscriptions)
self.inboxSearchOptionSubscriptions.setObjectName(
_fromUtf8("inboxSearchOptionSubscriptions")
)
self.inboxSearchOptionSubscriptions.addItem(_fromUtf8(""))
self.inboxSearchOptionSubscriptions.addItem(_fromUtf8(""))
self.inboxSearchOptionSubscriptions.addItem(_fromUtf8(""))
self.inboxSearchOptionSubscriptions.addItem(_fromUtf8(""))
self.inboxSearchOptionSubscriptions.setSizeAdjustPolicy(
QtGui.QComboBox.AdjustToContents
)
self.inboxSearchOptionSubscriptions.setCurrentIndex(2)
self.horizontalSplitter_2.addWidget(self.inboxSearchOptionSubscriptions)
self.horizontalSplitter_2.handle(1).setEnabled(False)
self.horizontalSplitter_2.setStretchFactor(0, 1)
self.horizontalSplitter_2.setStretchFactor(1, 0)
self.verticalSplitter_4.addWidget(self.horizontalSplitter_2)
self.tableWidgetInboxSubscriptions = settingsmixin.STableWidget(
self.subscriptions
)
self.tableWidgetInboxSubscriptions.setEditTriggers(
QtGui.QAbstractItemView.NoEditTriggers
)
self.tableWidgetInboxSubscriptions.setAlternatingRowColors(True)
self.tableWidgetInboxSubscriptions.setSelectionMode(
QtGui.QAbstractItemView.ExtendedSelection
)
self.tableWidgetInboxSubscriptions.setSelectionBehavior(
QtGui.QAbstractItemView.SelectRows
)
self.tableWidgetInboxSubscriptions.setWordWrap(False)
self.tableWidgetInboxSubscriptions.setObjectName(
_fromUtf8("tableWidgetInboxSubscriptions")
)
self.tableWidgetInboxSubscriptions.setColumnCount(4)
self.tableWidgetInboxSubscriptions.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxSubscriptions.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxSubscriptions.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxSubscriptions.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxSubscriptions.setHorizontalHeaderItem(3, item)
self.tableWidgetInboxSubscriptions.horizontalHeader().setCascadingSectionResizes(
True
)
self.tableWidgetInboxSubscriptions.horizontalHeader().setDefaultSectionSize(200)
self.tableWidgetInboxSubscriptions.horizontalHeader().setHighlightSections(
False
)
self.tableWidgetInboxSubscriptions.horizontalHeader().setMinimumSectionSize(27)
self.tableWidgetInboxSubscriptions.horizontalHeader().setSortIndicatorShown(
False
)
self.tableWidgetInboxSubscriptions.horizontalHeader().setStretchLastSection(
True
)
self.tableWidgetInboxSubscriptions.verticalHeader().setVisible(False)
self.tableWidgetInboxSubscriptions.verticalHeader().setDefaultSectionSize(26)
self.verticalSplitter_4.addWidget(self.tableWidgetInboxSubscriptions)
self.textEditInboxMessageSubscriptions = MessageView(self.subscriptions)
self.textEditInboxMessageSubscriptions.setBaseSize(QtCore.QSize(0, 500))
self.textEditInboxMessageSubscriptions.setReadOnly(True)
self.textEditInboxMessageSubscriptions.setObjectName(
_fromUtf8("textEditInboxMessageSubscriptions")
)
self.verticalSplitter_4.addWidget(self.textEditInboxMessageSubscriptions)
self.verticalSplitter_4.setStretchFactor(0, 0)
self.verticalSplitter_4.setStretchFactor(1, 1)
self.verticalSplitter_4.setStretchFactor(2, 2)
self.verticalSplitter_4.setCollapsible(0, False)
self.verticalSplitter_4.setCollapsible(1, False)
self.verticalSplitter_4.setCollapsible(2, False)
self.verticalSplitter_4.handle(1).setEnabled(False)
self.horizontalSplitter_4.addWidget(self.verticalSplitter_4)
self.horizontalSplitter_4.setStretchFactor(0, 0)
self.horizontalSplitter_4.setStretchFactor(1, 1)
self.horizontalSplitter_4.setCollapsible(0, False)
self.horizontalSplitter_4.setCollapsible(1, False)
self.gridLayout_3.addWidget(self.horizontalSplitter_4, 0, 0, 1, 1)
icon6 = QtGui.QIcon()
icon6.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/subscriptions.png")),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.tabWidget.addTab(self.subscriptions, icon6, _fromUtf8(""))
self.chans = QtGui.QWidget()
self.chans.setObjectName(_fromUtf8("chans"))
self.gridLayout_4 = QtGui.QGridLayout(self.chans)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.horizontalSplitter_7 = settingsmixin.SSplitter()
self.horizontalSplitter_7.setObjectName(_fromUtf8("horizontalSplitter_7"))
self.verticalSplitter_17 = settingsmixin.SSplitter()
self.verticalSplitter_17.setObjectName(_fromUtf8("verticalSplitter_17"))
self.verticalSplitter_17.setOrientation(QtCore.Qt.Vertical)
self.treeWidgetChans = settingsmixin.STreeWidget(self.chans)
self.treeWidgetChans.setFrameShadow(QtGui.QFrame.Sunken)
self.treeWidgetChans.setLineWidth(1)
self.treeWidgetChans.setAlternatingRowColors(True)
self.treeWidgetChans.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.treeWidgetChans.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.treeWidgetChans.setObjectName(_fromUtf8("treeWidgetChans"))
self.treeWidgetChans.resize(200, self.treeWidgetChans.height())
icon7 = QtGui.QIcon()
icon7.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/can-icon-16px.png")),
QtGui.QIcon.Selected,
QtGui.QIcon.Off,
)
self.treeWidgetChans.headerItem().setIcon(0, icon7)
self.verticalSplitter_17.addWidget(self.treeWidgetChans)
self.pushButtonAddChan = QtGui.QPushButton(self.chans)
self.pushButtonAddChan.setObjectName(_fromUtf8("pushButtonAddChan"))
self.pushButtonAddChan.resize(200, self.pushButtonAddChan.height())
self.verticalSplitter_17.addWidget(self.pushButtonAddChan)
self.verticalSplitter_17.setStretchFactor(0, 1)
self.verticalSplitter_17.setStretchFactor(1, 0)
self.verticalSplitter_17.setCollapsible(0, False)
self.verticalSplitter_17.setCollapsible(1, False)
self.verticalSplitter_17.handle(1).setEnabled(False)
self.horizontalSplitter_7.addWidget(self.verticalSplitter_17)
self.verticalSplitter_8 = settingsmixin.SSplitter()
self.verticalSplitter_8.setObjectName(_fromUtf8("verticalSplitter_8"))
self.verticalSplitter_8.setOrientation(QtCore.Qt.Vertical)
self.horizontalSplitter_6 = QtGui.QSplitter()
self.horizontalSplitter_6.setObjectName(_fromUtf8("horizontalSplitter_6"))
self.inboxSearchLineEditChans = QtGui.QLineEdit(self.chans)
self.inboxSearchLineEditChans.setObjectName(
_fromUtf8("inboxSearchLineEditChans")
)
self.horizontalSplitter_6.addWidget(self.inboxSearchLineEditChans)
self.inboxSearchOptionChans = QtGui.QComboBox(self.chans)
self.inboxSearchOptionChans.setObjectName(_fromUtf8("inboxSearchOptionChans"))
self.inboxSearchOptionChans.addItem(_fromUtf8(""))
self.inboxSearchOptionChans.addItem(_fromUtf8(""))
self.inboxSearchOptionChans.addItem(_fromUtf8(""))
self.inboxSearchOptionChans.addItem(_fromUtf8(""))
self.inboxSearchOptionChans.addItem(_fromUtf8(""))
self.inboxSearchOptionChans.setSizeAdjustPolicy(
QtGui.QComboBox.AdjustToContents
)
self.inboxSearchOptionChans.setCurrentIndex(3)
self.horizontalSplitter_6.addWidget(self.inboxSearchOptionChans)
self.horizontalSplitter_6.handle(1).setEnabled(False)
self.horizontalSplitter_6.setStretchFactor(0, 1)
self.horizontalSplitter_6.setStretchFactor(1, 0)
self.verticalSplitter_8.addWidget(self.horizontalSplitter_6)
self.tableWidgetInboxChans = settingsmixin.STableWidget(self.chans)
self.tableWidgetInboxChans.setEditTriggers(
QtGui.QAbstractItemView.NoEditTriggers
)
self.tableWidgetInboxChans.setAlternatingRowColors(True)
self.tableWidgetInboxChans.setSelectionMode(
QtGui.QAbstractItemView.ExtendedSelection
)
self.tableWidgetInboxChans.setSelectionBehavior(
QtGui.QAbstractItemView.SelectRows
)
self.tableWidgetInboxChans.setWordWrap(False)
self.tableWidgetInboxChans.setObjectName(_fromUtf8("tableWidgetInboxChans"))
self.tableWidgetInboxChans.setColumnCount(4)
self.tableWidgetInboxChans.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxChans.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxChans.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxChans.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetInboxChans.setHorizontalHeaderItem(3, item)
self.tableWidgetInboxChans.horizontalHeader().setCascadingSectionResizes(True)
self.tableWidgetInboxChans.horizontalHeader().setDefaultSectionSize(200)
self.tableWidgetInboxChans.horizontalHeader().setHighlightSections(False)
self.tableWidgetInboxChans.horizontalHeader().setMinimumSectionSize(27)
self.tableWidgetInboxChans.horizontalHeader().setSortIndicatorShown(False)
self.tableWidgetInboxChans.horizontalHeader().setStretchLastSection(True)
self.tableWidgetInboxChans.verticalHeader().setVisible(False)
self.tableWidgetInboxChans.verticalHeader().setDefaultSectionSize(26)
self.verticalSplitter_8.addWidget(self.tableWidgetInboxChans)
self.textEditInboxMessageChans = MessageView(self.chans)
self.textEditInboxMessageChans.setBaseSize(QtCore.QSize(0, 500))
self.textEditInboxMessageChans.setReadOnly(True)
self.textEditInboxMessageChans.setObjectName(
_fromUtf8("textEditInboxMessageChans")
)
self.verticalSplitter_8.addWidget(self.textEditInboxMessageChans)
self.verticalSplitter_8.setStretchFactor(0, 0)
self.verticalSplitter_8.setStretchFactor(1, 1)
self.verticalSplitter_8.setStretchFactor(2, 2)
self.verticalSplitter_8.setCollapsible(0, False)
self.verticalSplitter_8.setCollapsible(1, False)
self.verticalSplitter_8.setCollapsible(2, False)
self.verticalSplitter_8.handle(1).setEnabled(False)
self.horizontalSplitter_7.addWidget(self.verticalSplitter_8)
self.horizontalSplitter_7.setStretchFactor(0, 0)
self.horizontalSplitter_7.setStretchFactor(1, 1)
self.horizontalSplitter_7.setCollapsible(0, False)
self.horizontalSplitter_7.setCollapsible(1, False)
self.gridLayout_4.addWidget(self.horizontalSplitter_7, 0, 0, 1, 1)
icon8 = QtGui.QIcon()
icon8.addPixmap(
QtGui.QPixmap(_fromUtf8(":/newPrefix/images/can-icon-16px.png")),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.tabWidget.addTab(self.chans, icon8, _fromUtf8(""))
self.blackwhitelist = Blacklist()
self.tabWidget.addTab(
self.blackwhitelist, QtGui.QIcon(":/newPrefix/images/blacklist.png"), ""
)
# Initialize the Blacklist or Whitelist
if config.get("bitmessagesettings", "blackwhitelist") == "white":
self.blackwhitelist.radioButtonWhitelist.click()
self.blackwhitelist.rerenderBlackWhiteList()
self.networkstatus = NetworkStatus()
self.tabWidget.addTab(
self.networkstatus, QtGui.QIcon(":/newPrefix/images/networkstatus.png"), ""
)
self.gridLayout_10.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 885, 27))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuSettings = QtGui.QMenu(self.menubar)
self.menuSettings.setObjectName(_fromUtf8("menuSettings"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setMaximumSize(QtCore.QSize(16777215, 22))
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionImport_keys = QtGui.QAction(MainWindow)
self.actionImport_keys.setObjectName(_fromUtf8("actionImport_keys"))
self.actionManageKeys = QtGui.QAction(MainWindow)
self.actionManageKeys.setCheckable(False)
self.actionManageKeys.setEnabled(True)
icon = QtGui.QIcon.fromTheme(_fromUtf8("dialog-password"))
self.actionManageKeys.setIcon(icon)
self.actionManageKeys.setObjectName(_fromUtf8("actionManageKeys"))
self.actionNetworkSwitch = QtGui.QAction(MainWindow)
self.actionNetworkSwitch.setObjectName(_fromUtf8("actionNetworkSwitch"))
self.actionExit = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("application-exit"))
self.actionExit.setIcon(icon)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionHelp = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("help-contents"))
self.actionHelp.setIcon(icon)
self.actionHelp.setObjectName(_fromUtf8("actionHelp"))
self.actionSupport = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("help-support"))
self.actionSupport.setIcon(icon)
self.actionSupport.setObjectName(_fromUtf8("actionSupport"))
self.actionAbout = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("help-about"))
self.actionAbout.setIcon(icon)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionSettings = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("document-properties"))
self.actionSettings.setIcon(icon)
self.actionSettings.setObjectName(_fromUtf8("actionSettings"))
self.actionRegenerateDeterministicAddresses = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("view-refresh"))
self.actionRegenerateDeterministicAddresses.setIcon(icon)
self.actionRegenerateDeterministicAddresses.setObjectName(
_fromUtf8("actionRegenerateDeterministicAddresses")
)
self.actionDeleteAllTrashedMessages = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("user-trash"))
self.actionDeleteAllTrashedMessages.setIcon(icon)
self.actionDeleteAllTrashedMessages.setObjectName(
_fromUtf8("actionDeleteAllTrashedMessages")
)
self.actionJoinChan = QtGui.QAction(MainWindow)
icon = QtGui.QIcon.fromTheme(_fromUtf8("contact-new"))
self.actionJoinChan.setIcon(icon)
self.actionJoinChan.setObjectName(_fromUtf8("actionJoinChan"))
self.menuFile.addAction(self.actionManageKeys)
self.menuFile.addAction(self.actionDeleteAllTrashedMessages)
self.menuFile.addAction(self.actionRegenerateDeterministicAddresses)
self.menuFile.addAction(self.actionNetworkSwitch)
self.menuFile.addAction(self.actionExit)
self.menuSettings.addAction(self.actionSettings)
self.menuHelp.addAction(self.actionHelp)
self.menuHelp.addAction(self.actionSupport)
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuSettings.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(self.tabWidget.indexOf(self.inbox))
self.tabWidgetSend.setCurrentIndex(self.tabWidgetSend.indexOf(self.sendDirect))
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.tableWidgetInbox, self.textEditInboxMessage)
MainWindow.setTabOrder(self.textEditInboxMessage, self.comboBoxSendFrom)
MainWindow.setTabOrder(self.comboBoxSendFrom, self.lineEditTo)
MainWindow.setTabOrder(self.lineEditTo, self.lineEditSubject)
MainWindow.setTabOrder(self.lineEditSubject, self.textEditMessage)
MainWindow.setTabOrder(self.textEditMessage, self.pushButtonAddSubscription)
# Popup menu actions container for the Sent page
# pylint: disable=attribute-defined-outside-init
self.sentContextMenuToolbar = QtGui.QToolBar()
# Popup menu actions container for chans tree
self.addressContextMenuToolbar = QtGui.QToolBar()
# Popup menu actions container for subscriptions tree
self.subscriptionsContextMenuToolbar = QtGui.QToolBar()
def updateNetworkSwitchMenuLabel(self, dontconnect=None):
if dontconnect is None:
dontconnect = config.safeGetBoolean("bitmessagesettings", "dontconnect")
self.actionNetworkSwitch.setText(
_translate("MainWindow", "Go online", None)
if dontconnect
else _translate("MainWindow", "Go offline", None)
)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Bitmessage", None))
self.treeWidgetYourIdentities.headerItem().setText(
0, _translate("MainWindow", "Identities", None)
)
self.pushButtonNewAddress.setText(
_translate("MainWindow", "New Identity", None)
)
self.inboxSearchLineEdit.setPlaceholderText(
_translate("MainWindow", "Search", None)
)
self.inboxSearchOption.setItemText(0, _translate("MainWindow", "All", None))
self.inboxSearchOption.setItemText(1, _translate("MainWindow", "To", None))
self.inboxSearchOption.setItemText(2, _translate("MainWindow", "From", None))
self.inboxSearchOption.setItemText(3, _translate("MainWindow", "Subject", None))
self.inboxSearchOption.setItemText(4, _translate("MainWindow", "Message", None))
self.tableWidgetInbox.setSortingEnabled(True)
item = self.tableWidgetInbox.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "To", None))
item = self.tableWidgetInbox.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "From", None))
item = self.tableWidgetInbox.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Subject", None))
item = self.tableWidgetInbox.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Received", None))
self.tabWidget.setTabText(
self.tabWidget.indexOf(self.inbox),
_translate("MainWindow", "Messages", None),
)
self.tableWidgetAddressBook.setSortingEnabled(True)
item = self.tableWidgetAddressBook.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Address book", None))
item = self.tableWidgetAddressBook.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Address", None))
self.pushButtonAddAddressBook.setText(
_translate("MainWindow", "Add Contact", None)
)
self.pushButtonFetchNamecoinID.setText(
_translate("MainWindow", "Fetch Namecoin ID", None)
)
self.label_3.setText(_translate("MainWindow", "Subject:", None))
self.label_2.setText(_translate("MainWindow", "From:", None))
self.label.setText(_translate("MainWindow", "To:", None))
self.tabWidgetSend.setTabText(
self.tabWidgetSend.indexOf(self.sendDirect),
_translate("MainWindow", "Send ordinary Message", None),
)
self.label_8.setText(_translate("MainWindow", "From:", None))
self.label_7.setText(_translate("MainWindow", "Subject:", None))
self.tabWidgetSend.setTabText(
self.tabWidgetSend.indexOf(self.sendBroadcast),
_translate("MainWindow", "Send Message to your Subscribers", None),
)
self.pushButtonTTL.setText(_translate("MainWindow", "TTL:", None))
hours = 48
try:
hours = int(config.getint("bitmessagesettings", "ttl") / 60 / 60)
except:
pass
self.labelHumanFriendlyTTLDescription.setText(
_translate(
"MainWindow",
"%n hour(s)",
None,
QtCore.QCoreApplication.CodecForTr,
hours,
)
)
self.pushButtonClear.setText(_translate("MainWindow", "Clear", None))
self.pushButtonSend.setText(_translate("MainWindow", "Send", None))
self.tabWidget.setTabText(
self.tabWidget.indexOf(self.send), _translate("MainWindow", "Send", None)
)
self.treeWidgetSubscriptions.headerItem().setText(
0, _translate("MainWindow", "Subscriptions", None)
)
self.pushButtonAddSubscription.setText(
_translate("MainWindow", "Add new Subscription", None)
)
self.inboxSearchLineEditSubscriptions.setPlaceholderText(
_translate("MainWindow", "Search", None)
)
self.inboxSearchOptionSubscriptions.setItemText(
0, _translate("MainWindow", "All", None)
)
self.inboxSearchOptionSubscriptions.setItemText(
1, _translate("MainWindow", "From", None)
)
self.inboxSearchOptionSubscriptions.setItemText(
2, _translate("MainWindow", "Subject", None)
)
self.inboxSearchOptionSubscriptions.setItemText(
3, _translate("MainWindow", "Message", None)
)
self.tableWidgetInboxSubscriptions.setSortingEnabled(True)
item = self.tableWidgetInboxSubscriptions.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "To", None))
item = self.tableWidgetInboxSubscriptions.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "From", None))
item = self.tableWidgetInboxSubscriptions.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Subject", None))
item = self.tableWidgetInboxSubscriptions.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Received", None))
self.tabWidget.setTabText(
self.tabWidget.indexOf(self.subscriptions),
_translate("MainWindow", "Subscriptions", None),
)
self.treeWidgetChans.headerItem().setText(
0, _translate("MainWindow", "Chans", None)
)
self.pushButtonAddChan.setText(_translate("MainWindow", "Add Chan", None))
self.inboxSearchLineEditChans.setPlaceholderText(
_translate("MainWindow", "Search", None)
)
self.inboxSearchOptionChans.setItemText(
0, _translate("MainWindow", "All", None)
)
self.inboxSearchOptionChans.setItemText(1, _translate("MainWindow", "To", None))
self.inboxSearchOptionChans.setItemText(
2, _translate("MainWindow", "From", None)
)
self.inboxSearchOptionChans.setItemText(
3, _translate("MainWindow", "Subject", None)
)
self.inboxSearchOptionChans.setItemText(
4, _translate("MainWindow", "Message", None)
)
self.tableWidgetInboxChans.setSortingEnabled(True)
item = self.tableWidgetInboxChans.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "To", None))
item = self.tableWidgetInboxChans.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "From", None))
item = self.tableWidgetInboxChans.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Subject", None))
item = self.tableWidgetInboxChans.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Received", None))
self.tabWidget.setTabText(
self.tabWidget.indexOf(self.chans), _translate("MainWindow", "Chans", None)
)
self.blackwhitelist.retranslateUi()
self.tabWidget.setTabText(
self.tabWidget.indexOf(self.blackwhitelist),
_translate("blacklist", "Blacklist", None),
)
self.networkstatus.retranslateUi()
self.tabWidget.setTabText(
self.tabWidget.indexOf(self.networkstatus),
_translate("networkstatus", "Network Status", None),
)
self.menuFile.setTitle(_translate("MainWindow", "File", None))
self.menuSettings.setTitle(_translate("MainWindow", "Settings", None))
self.menuHelp.setTitle(_translate("MainWindow", "Help", None))
self.actionImport_keys.setText(_translate("MainWindow", "Import keys", None))
self.actionManageKeys.setText(_translate("MainWindow", "Manage keys", None))
self.actionExit.setText(_translate("MainWindow", "Quit", None))
self.actionExit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
self.actionHelp.setText(_translate("MainWindow", "Help", None))
self.actionHelp.setShortcut(_translate("MainWindow", "F1", None))
self.actionSupport.setText(_translate("MainWindow", "Contact support", None))
self.actionAbout.setText(_translate("MainWindow", "About", None))
self.actionSettings.setText(_translate("MainWindow", "Settings", None))
self.actionRegenerateDeterministicAddresses.setText(
_translate("MainWindow", "Regenerate deterministic addresses", None)
)
self.actionDeleteAllTrashedMessages.setText(
_translate("MainWindow", "Delete all trashed messages", None)
)
self.actionJoinChan.setText(
_translate("MainWindow", "Join / Create chan", None)
)
self.updateNetworkSwitchMenuLabel()
if __name__ == "__main__":
    # Standalone preview of the generated UI: create the Qt application,
    # build the main window from this Ui_MainWindow layout, show it and
    # run the event loop until the window is closed.
    import sys

    app = QtGui.QApplication(sys.argv)
    MainWindow = settingsmixin.SMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # exec_() blocks until the last window closes; its return code becomes
    # the process exit status.
    sys.exit(app.exec_())
|
papers | orcid | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import logging
import requests
from django.conf import settings
from django.utils.functional import cached_property
from papers.baremodels import BareName
from papers.bibtex import parse_bibtex
from papers.doi import to_doi
from papers.errors import MetadataSourceException
from papers.name import most_similar_author, normalize_name_words, parse_comma_name
from papers.utils import jpath, parse_int, try_date, urlize
logger = logging.getLogger("dissemin." + __name__)
# Mapping from normalized ORCID work types (lowercase, dash-separated) to
# Dissemin publication types; anything absent maps to "other".
orcid_type_to_pubtype = {
    "book": "book",
    "book-chapter": "book-chapter",
    "book-review": "other",
    "dictionary-entry": "reference-entry",
    "dissertation": "thesis",
    "encyclopedia-entry": "reference-entry",
    "edited-book": "book",
    "journal-article": "journal-article",
    "journal-issue": "journal-issue",
    "magazine-article": "other",
    "manual": "other",
    "online-resource": "dataset",
    "newsletter-article": "other",
    "newspaper-article": "other",
    "report": "report",
    "research-tool": "other",
    "supervised-student-publication": "other",
    "test": "other",
    "translation": "other",
    "website": "other",
    "working-paper": "preprint",
    "conference-abstract": "other",
    "conference-paper": "proceedings-article",
    "conference-poster": "poster",
    # Intellectual property section: skipped (-> 'other')
    "data-set": "dataset",
}


def orcid_to_doctype(typ):
    """Translate an ORCID work type into a Dissemin document type.

    The input is normalized (lowercased, underscores and spaces turned
    into dashes) before the lookup; unknown types yield "other".
    """
    normalized = typ.lower().replace("_", "-").replace(" ", "-")
    return orcid_type_to_pubtype.get(normalized, "other")
def affiliate_author_with_orcid(ref_name, orcid, authors, initial_orcids=None):
    """
    Given a reference name and an ORCiD for a researcher, find out which
    author in the list is the most likely to be that author. This function
    is run on author lists of papers listed in the ORCiD record so we expect
    that one of the authors should be the same person as the ORCiD holder.

    This just finds the most similar name and returns the appropriate orcids
    list (None everywhere except for the most similar name where it is the ORCiD).

    :param ref_name: (first, last) name pair of the ORCiD holder
    :param orcid: the ORCiD identifier to assign
    :param authors: list of author names to match against
    :param initial_orcids: optional pre-existing orcids list; only used when
        its length matches *authors*. It is copied, never mutated in place.
    :returns: a fresh list of ORCiD ids (or None), aligned with *authors*
    """
    max_sim_idx = most_similar_author(ref_name, authors)
    if initial_orcids and len(initial_orcids) == len(authors):
        # Copy so the caller's list is not mutated as a side effect
        # (the original implementation wrote into initial_orcids directly).
        orcids = list(initial_orcids)
    else:
        orcids = [None] * len(authors)
    if max_sim_idx is not None:
        orcids[max_sim_idx] = orcid
    return orcids
class OrcidProfile(object):
    """
    An orcid profile as returned by the ORCID public API (in JSON).

    Acts as a thin dict-like wrapper around the parsed JSON payload,
    with convenience properties for names, homepage, institution and works.
    """

    def __init__(self, orcid_id=None, json=None, instance=settings.ORCID_BASE_DOMAIN):
        """
        Create a profile by ORCID ID or by providing directly the parsed JSON payload.

        :param orcid_id: ORCID identifier; when given without *json*, the
            record is fetched from the public API immediately.
        :param json: already-parsed JSON payload, skipping the fetch.
        :param instance: either "orcid.org" or "sandbox.orcid.org".
        :raises ValueError: if *instance* is not one of the two known domains.
        """
        self.json = json
        self.id = orcid_id
        self.instance = instance
        if self.instance not in ["orcid.org", "sandbox.orcid.org"]:
            raise ValueError("Unexpected instance")
        if orcid_id is not None and not json:
            self.fetch()

    # Dict-like access delegates to the underlying JSON payload.
    def __getitem__(self, key):
        return self.json[key]

    def __iter__(self):
        return self.json.__iter__()

    def __contains__(self, key):
        return self.json.__contains__(key)

    def get(self, *args, **kwargs):
        return self.json.get(*args, **kwargs)

    def __repr__(self):
        return "<OrcidProfile for {orcid}>".format(orcid=self.id)

    @property
    def api_uri(self):
        """
        URI of the profile in the ORCid API
        """
        return "https://pub.{instance}/v2.1/{orcid}/".format(
            instance=self.instance, orcid=self.id
        )

    def request_element(self, path):
        """
        Perform a GET request on *path*, relative to the profile's API URI,
        and return the parsed JSON response.
        """
        headers = {"Accept": "application/orcid+json"}
        url = self.api_uri + path
        return requests.get(url, headers=headers).json()

    def fetch(self):
        """
        Fetches the profile by id using the public API.
        This only fetches the summaries, subsequent requests will be made for works.

        :raises MetadataSourceException: if the ORCiD cannot be resolved on
            either the production or the sandbox instance, or if the response
            is not valid JSON.
        """
        try:
            parsed = self.request_element("")
            if parsed.get("orcid-identifier") is None:
                # TEMPORARY: also check from the sandbox
                # (retries once against sandbox.orcid.org before giving up).
                if self.instance == "orcid.org":
                    self.instance = "sandbox.orcid.org"
                    return self.fetch()
                raise ValueError
            self.json = parsed
        except (requests.exceptions.HTTPError, ValueError):
            raise MetadataSourceException(
                "The ORCiD {id} could not be found from {instance}".format(
                    id=self.id, instance=self.instance
                )
            )
        except TypeError:
            raise MetadataSourceException(
                "The ORCiD {id} returned invalid JSON.".format(id=self.id)
            )

    @cached_property
    def work_summaries(self):
        """
        These represent stripped-down versions of the works in the 2.0 API.
        Fetched once per profile and cached on the instance.
        """
        return list(self._work_summaries_generator())

    def _work_summaries_generator(self):
        # The "works" endpoint groups summaries by work; flatten the
        # two-level structure into a stream of OrcidWorkSummary objects.
        works_summary = self.request_element("works")
        for group in works_summary.get("group") or []:
            for summary in group.get("work-summary") or []:
                yield OrcidWorkSummary(summary)

    @property
    def homepage(self):
        """
        Extract an URL for that researcher (if any).

        Prefers a researcher-url whose name contains "home" or "personal";
        otherwise falls back to the first listed URL, or None.
        """
        lst = jpath("person/researcher-urls/researcher-url", self.json, default=[])
        for url in lst:
            val = jpath("url/value", url)
            name = jpath("url-name", url)
            if name is not None and (
                "home" in name.lower() or "personal" in name.lower()
            ):
                return urlize(val)
        if len(lst):
            return urlize(jpath("url/value", lst[0])) or None

    @property
    def institution(self):
        """
        The name and identifier of the latest institution associated
        with this researcher.

        Scans employments first, then educations, returning the first
        affiliation that has both a name and a country.
        """
        lst = jpath(
            "activities-summary/employments/employment-summary", self.json, default=[]
        )
        lst += jpath(
            "activities-summary/educations/education-summary", self.json, default=[]
        )
        for affiliation in lst:
            disamb = jpath(
                "organization/disambiguated-organization", affiliation, default={}
            )
            source = disamb.get("disambiguation-source")
            inst_id = disamb.get("disambiguated-organization-identifier")
            name = jpath("organization/name", affiliation)
            country = jpath("organization/address/country", affiliation)
            identifier = None
            # we skip ringgold identifiers, because they suck:
            # https://github.com/ORCID/ORCID-Source/issues/3297
            if source and inst_id and source.lower() != "ringgold":
                identifier = str(source).lower() + "-" + str(inst_id)
            if name and country:
                return {
                    "identifier": identifier,
                    "name": name,
                    "country": country,
                }
        return None

    @property
    def email(self):
        # TODO: emails are not extracted from the record yet.
        return None

    @property
    def name(self):
        """
        Returns a parsed version of the "credit name" in the ORCID profile.
        If there is no such name, returns the given and family names on the profile
        (they should exist)
        """
        name_item = jpath("person/name", self.json)
        name = jpath("credit-name/value", name_item)
        if name:
            return parse_comma_name(name)
        return (
            normalize_name_words(jpath("given-names/value", name_item, "")),
            normalize_name_words(jpath("family-name/value", name_item, "")),
        )

    @property
    def other_names(self):
        """
        Returns the list of other names listed on the ORCiD profile.
        This includes the (given,family) name if a credit name was defined.
        """
        person = jpath("person", self.json)
        names = []
        credit_name = jpath("name/credit-name/value", person)
        if credit_name is not None:
            # The structured given/family name only counts as an "other"
            # name when a credit name overrides it as the primary name.
            names.append(
                (
                    normalize_name_words(jpath("name/given-names/value", person, "")),
                    normalize_name_words(jpath("name/family-name/value", person, "")),
                )
            )
        other_names = jpath("other-names/other-name", person, default=[])
        for name in other_names:
            val = name.get("content")
            if val is not None:
                names.append(parse_comma_name(val))
        return names

    def fetch_works(self, put_codes):
        """
        Retrieves the full metadata of the given works in this profile.

        Requests are batched (25 put-codes per API call) and results are
        yielded lazily as OrcidWork instances.
        """
        batch_size = 25
        # TODO
        i = 0
        while i < len(put_codes):
            batch = put_codes[i : (i + batch_size)]
            i += batch_size
            works_meta = self.request_element(
                "works/" + ",".join([str(c) for c in batch])
            )
            for work in works_meta.get("bulk") or []:
                yield OrcidWork(self, work)
class OrcidWorkSummary(object):
    """
    In the 2.0 API ORCID returns "summaries" of publications, where not all
    the metadata is included: this class wraps one such summary.
    """

    def __init__(self, json):
        """
        :param json: the JSON representation of the summary
        """
        self.json = json

    @property
    def doi(self):
        """
        Returns the DOI of this publication, if any.
        """
        for ext_id in jpath("external-ids/external-id", self.json, []):
            is_own_doi = (
                ext_id.get("external-id-type") == "doi"
                and ext_id.get("external-id-relationship") == "SELF"
                and ext_id.get("external-id-value")
            )
            if not is_own_doi:
                continue
            doi = to_doi(ext_id.get("external-id-value"))
            if doi:
                return doi
        return None

    @property
    def title(self):
        """
        Returns the title of this publication (always provided)
        """
        return jpath("title/title/value", self.json)

    @property
    def put_code(self):
        # ORCID's internal identifier for this work within the profile.
        return self.json.get("put-code")

    def __str__(self):
        return self.title or "(no title)"

    def __repr__(self):
        display = self.title or "(no title)"
        return '<OrcidWorkSummary for "{title}">'.format(title=display)
class SkippedPaper(Exception):
    """Raised when a work lacks metadata required to import it (title,
    authors or publication date); the reason code is in ``e.args[0]``."""

    pass
class OrcidWork(object):
    """
    Full metadata of a single work from an ORCID profile, as returned by
    the bulk "works/<put-codes>" endpoint.

    On construction, validation runs immediately: works without a title,
    authors or a usable publication date are flagged via ``skipped`` and
    ``skip_reason`` instead of raising to the caller.
    """

    def __init__(self, orcid_profile, json_representation):
        """
        :param orcid_profile: the OrcidProfile this work belongs to
        :param json_representation: parsed JSON payload for the work
        """
        self.profile = orcid_profile
        self.json = json_representation
        self.id = orcid_profile.id
        self.skipped = False
        self.skip_reason = None
        try:
            self.throw_skipped()
        except SkippedPaper as e:
            self.skipped = True
            (self.skip_reason,) = e.args

    @property
    def title(self):
        return self.j("work/title/title/value")

    @property
    def pubtype(self):
        """Dissemin document type, mapped from the ORCID work type."""
        return orcid_to_doctype(self.j("work/type", "other"))

    @property
    def contributors(self):
        """List of {'orcid', 'name'} dicts from the work's contributors."""

        def get_contrib(js):
            return {
                "orcid": jpath("contributor-orcid", js),
                "name": jpath("credit-name/value", js),
            }

        return list(map(get_contrib, self.j("work/contributors/contributor", [])))

    @property
    def authors_from_contributors(self):
        author_names = [c["name"] for c in self.contributors if c["name"] is not None]
        return list(map(parse_comma_name, author_names))

    @property
    def authors(self):
        """
        This provides the list of authors, determined from (in order of priority):
        - the "contributors" field
        - the BibTeX record
        - using the researcher represented by the profile as single author

        :returns: a list of names represented as string pairs
        """
        return (
            self.authors_from_contributors
            or self.authors_from_bibtex
            or [self.profile.name]
        )

    @property
    def pubdate(self):
        """
        The publication date, with missing month/day defaulting to 1.

        :raises SkippedPaper: if no valid date can be built (caught in
            __init__ and recorded as skip_reason).
        """
        # Pubdate
        # Remark(RaitoBezarius): we don't want to put 01 ; it could be
        # interpreted as octal 1.
        year = parse_int(self.j("work/publication-date/year/value"), 1970)
        month = parse_int(self.j("work/publication-date/month/value"), 1)
        day = parse_int(self.j("work/publication-date/day/value"), 1)
        pubdate = (
            try_date(year, month, day)
            or try_date(year, month, 1)
            or try_date(year, 1, 1)
        )
        if pubdate is None:
            logger.info("Invalid publication date in ORCID publication, skipping")
            raise SkippedPaper("INVALID_PUB_DATE")
        else:
            return pubdate

    @property
    def put_code(self):
        """
        ORCiD internal id for the work
        """
        return self.j("work/put-code")

    @property
    def api_uri(self):
        """
        URI version of the above
        """
        return self.profile.api_uri + "work/{put_code}".format(put_code=self.put_code)

    def orcids(self, authors, initial_orcids):
        """Align ORCID ids with *authors*, assigning this profile's id
        to the author most similar to the profile holder's name."""
        return affiliate_author_with_orcid(
            self.profile.name, self.id, authors, initial_orcids=initial_orcids
        )

    @property
    def citation_format(self):
        return self.j("work/citation/citation-type")

    @property
    def bibtex(self):
        return self.j("work/citation/citation-value")

    @property
    def authors_from_bibtex(self):
        """Author name pairs parsed from the embedded BibTeX record, if any."""
        if self.bibtex is not None:
            try:
                entry = parse_bibtex(self.bibtex)
                if "author" not in entry or len(entry["author"]) == 0:
                    return []
                else:
                    return entry["author"]
            except ValueError:
                return []
        else:
            return []

    @property
    def authors_and_orcids(self):
        """
        :returns: two lists of equal length, the first with BareName objects
                representing authors, the second with ORCID ids (or None) for
                each of these authors
        """
        authors = self.authors
        orcids = affiliate_author_with_orcid(self.profile.name, self.id, authors)
        # Reuse the local `authors` instead of recomputing the property
        # (the original re-evaluated self.authors here).
        names = [BareName.create_bare(first, last) for first, last in authors]
        names_and_orcids = list(zip(names, orcids))
        filtered = [(n, o) for n, o in names_and_orcids if n is not None]
        final_names = [n for n, o in filtered]
        final_orcids = [o for n, o in filtered]
        return final_names, final_orcids

    def j(self, path, default=None):
        """Shorthand for a jpath lookup into this work's JSON."""
        return jpath(path, self.json, default)

    def throw_skipped(self):
        """Raise SkippedPaper if a required field is missing."""
        if not self.title:
            raise SkippedPaper("NO_TITLE")
        if not self.authors:
            raise SkippedPaper("NO_AUTHOR")
        if not self.pubdate:
            raise SkippedPaper("NO_PUBDATE")

    def __repr__(self):
        # Authors are (first, last) pairs: render them as strings before
        # joining -- joining the tuples directly raised a TypeError.
        # (Also fixes the "OrcidWord" typo of the original.)
        rendered_authors = ", ".join(
            " ".join(part for part in author if part) for author in self.authors
        )
        return "<OrcidWork %s written by %s>" % (
            self.title or "(no title)",
            rendered_authors,
        )

    def __str__(self):
        # __str__ must return a str; fall back when the title is missing
        # (previously returned None, which raises a TypeError).
        return self.title or "(no title)"

    @property
    def splash_url(self):
        return "https://{}/{}".format(settings.ORCID_BASE_DOMAIN, self.id)

    def as_dict(self):
        return {
            "json": self.json,
            "skipped": self.skipped,
            "skip_reason": self.skip_reason,
        }
|
frescobaldi-app | lydocinfo | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2013 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Harvest information from a ly.document.DocumentBase instance.
This extends the ly.docinfo.DocInfo class with some behaviour specific to
Frescobaldi, such as variables.
With this module, information extracted from tokenized LilyPond source is
available to both text documents on disk and loaded Frescobaldi documents.
"""
import re
import ly.docinfo
class DocInfo(ly.docinfo.DocInfo):
    """Add Frescobaldi-specific stuff to ly.docinfo.DocInfo."""

    def __init__(self, doc, variables):
        """Initialize with ly.document instance and variables dictionary."""
        super().__init__(doc)
        self.variables = variables

    @ly.docinfo._cache
    def version_string(self):
        """Return the version, but also looks in the variables and comments."""
        version = super().version_string() or self.variables.get("version")
        if version:
            return version
        # Last resort: scan the raw text for a \version statement that the
        # tokenizer did not pick up (e.g. inside a non-lilypond comment).
        match = re.search(
            r'\\version\s*"(\d+\.\d+(\.\d+)*)"', self.document.plaintext()
        )
        if match:
            return match.group(1)
|
pyelliptic | hash | """
Wrappers for hash functions from OpenSSL.
"""
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
# See LICENSE for details.
from .openssl import OpenSSL
# For python3
def _equals_bytes(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= x ^ y
return result == 0
def _equals_str(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def equals(a, b):
    """Compare two strings or bytearrays"""
    # Dispatch on the type of the first argument: text goes through the
    # ord()-based comparison, everything else through the byte comparison.
    comparator = _equals_str if isinstance(a, str) else _equals_bytes
    return comparator(a, b)
def hmac_sha256(k, m):
    """
    Compute the HMAC-SHA256 digest of message ``m`` under key ``k``.

    Returns the raw 32-byte digest.
    """
    key = OpenSSL.malloc(k, len(k))
    d = OpenSSL.malloc(m, len(m))
    # Output buffer: a SHA256 digest is 32 bytes.
    md = OpenSSL.malloc(0, 32)
    # Receives the digest length written by OpenSSL; the value is unused.
    i = OpenSSL.pointer(OpenSSL.c_int(0))
    OpenSSL.HMAC(OpenSSL.EVP_sha256(), key, len(k), d, len(m), md, i)
    return md.raw
def hmac_sha512(k, m):
    """
    Compute the HMAC-SHA512 digest of message ``m`` under key ``k``.

    Returns the raw 64-byte digest.
    """
    key = OpenSSL.malloc(k, len(k))
    d = OpenSSL.malloc(m, len(m))
    # Output buffer: a SHA512 digest is 64 bytes.
    md = OpenSSL.malloc(0, 64)
    # Receives the digest length written by OpenSSL; the value is unused.
    i = OpenSSL.pointer(OpenSSL.c_int(0))
    OpenSSL.HMAC(OpenSSL.EVP_sha512(), key, len(k), d, len(m), md, i)
    return md.raw
def pbkdf2(password, salt=None, i=10000, keylen=64):
    """Derive a key from *password* with PBKDF2-HMAC-SHA256.

    Args:
        password: secret to stretch (bytes).
        salt: optional salt; a random 8-byte salt is generated when None.
        i: iteration count.
        keylen: desired key length in bytes.

    Returns:
        Tuple ``(salt, derived_key)`` so callers can persist the salt.
    """
    if salt is None:
        salt = OpenSSL.rand(8)
    p_password = OpenSSL.malloc(password, len(password))
    p_salt = OpenSSL.malloc(salt, len(salt))
    output = OpenSSL.malloc(0, keylen)
    OpenSSL.PKCS5_PBKDF2_HMAC(
        p_password,
        len(password),
        p_salt,
        # Fix: pass the salt's own length (mirroring len(password) above)
        # rather than the length of the ctypes buffer wrapping it.
        len(salt),
        i,
        OpenSSL.EVP_sha256(),
        keylen,
        output,
    )
    return salt, output.raw
|
generated | compound | #!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
import sys
from xml.dom import Node, minidom
from . import compoundsuper as supermod
from .compoundsuper import MixedContainer
class DoxygenTypeSub(supermod.DoxygenType):
    """Generated subclass of supermod.DoxygenType adding recursive lookup."""

    def __init__(self, version=None, compounddef=None):
        supermod.DoxygenType.__init__(self, version, compounddef)

    def find(self, details):
        # Delegate the refid search to the single compounddef child.
        return self.compounddef.find(details)


supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class compounddefTypeSub(supermod.compounddefType):
    """Generated subclass of supermod.compounddefType adding recursive find()."""

    def __init__(
        self,
        kind=None,
        prot=None,
        id=None,
        compoundname="",
        title="",
        basecompoundref=None,
        derivedcompoundref=None,
        includes=None,
        includedby=None,
        incdepgraph=None,
        invincdepgraph=None,
        innerdir=None,
        innerfile=None,
        innerclass=None,
        innernamespace=None,
        innerpage=None,
        innergroup=None,
        templateparamlist=None,
        sectiondef=None,
        briefdescription=None,
        detaileddescription=None,
        inheritancegraph=None,
        collaborationgraph=None,
        programlisting=None,
        location=None,
        listofallmembers=None,
    ):
        # Pure pass-through to the generated superclass constructor.
        supermod.compounddefType.__init__(
            self,
            kind,
            prot,
            id,
            compoundname,
            title,
            basecompoundref,
            derivedcompoundref,
            includes,
            includedby,
            incdepgraph,
            invincdepgraph,
            innerdir,
            innerfile,
            innerclass,
            innernamespace,
            innerpage,
            innergroup,
            templateparamlist,
            sectiondef,
            briefdescription,
            detaileddescription,
            inheritancegraph,
            collaborationgraph,
            programlisting,
            location,
            listofallmembers,
        )

    def find(self, details):
        """Return self or the first sectiondef member matching details.refid.

        Falls off the end (implicit None) when nothing matches.
        """
        if self.id == details.refid:
            return self
        for sectiondef in self.sectiondef:
            result = sectiondef.find(details)
            if result:
                return result


supermod.compounddefType.subclass = compounddefTypeSub
# end class compounddefTypeSub
class listofallmembersTypeSub(supermod.listofallmembersType):
    """Generated pass-through subclass of supermod.listofallmembersType."""

    def __init__(self, member=None):
        supermod.listofallmembersType.__init__(self, member)


supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
    """Generated pass-through subclass of supermod.memberRefType."""

    def __init__(
        self, virt=None, prot=None, refid=None, ambiguityscope=None, scope="", name=""
    ):
        supermod.memberRefType.__init__(
            self, virt, prot, refid, ambiguityscope, scope, name
        )


supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
class compoundRefTypeSub(supermod.compoundRefType):
    """Generated subclass of supermod.compoundRefType.

    Only mixedclass_/content_ are forwarded; the remaining parameters
    exist for generator signature compatibility.
    """

    def __init__(
        self,
        virt=None,
        prot=None,
        refid=None,
        valueOf_="",
        mixedclass_=None,
        content_=None,
    ):
        supermod.compoundRefType.__init__(self, mixedclass_, content_)


supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
class reimplementTypeSub(supermod.reimplementType):
    """Generated subclass of supermod.reimplementType; extra args unused."""

    def __init__(self, refid=None, valueOf_="", mixedclass_=None, content_=None):
        supermod.reimplementType.__init__(self, mixedclass_, content_)


supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
class incTypeSub(supermod.incType):
    """Generated subclass of supermod.incType; extra args unused."""

    def __init__(
        self, local=None, refid=None, valueOf_="", mixedclass_=None, content_=None
    ):
        supermod.incType.__init__(self, mixedclass_, content_)


supermod.incType.subclass = incTypeSub
# end class incTypeSub
class refTypeSub(supermod.refType):
    """Generated subclass of supermod.refType; extra args unused."""

    def __init__(
        self, prot=None, refid=None, valueOf_="", mixedclass_=None, content_=None
    ):
        supermod.refType.__init__(self, mixedclass_, content_)


supermod.refType.subclass = refTypeSub
# end class refTypeSub
class refTextTypeSub(supermod.refTextType):
    """Generated subclass of supermod.refTextType; extra args unused."""

    def __init__(
        self,
        refid=None,
        kindref=None,
        external=None,
        valueOf_="",
        mixedclass_=None,
        content_=None,
    ):
        supermod.refTextType.__init__(self, mixedclass_, content_)


supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
class sectiondefTypeSub(supermod.sectiondefType):
    """Generated subclass of supermod.sectiondefType with member lookup."""

    def __init__(self, kind=None, header="", description=None, memberdef=None):
        supermod.sectiondefType.__init__(self, kind, header, description, memberdef)

    def find(self, details):
        """Return the memberdef whose id equals details.refid, or None."""
        return next(
            (member for member in self.memberdef if member.id == details.refid),
            None,
        )


supermod.sectiondefType.subclass = sectiondefTypeSub
# end class sectiondefTypeSub
class memberdefTypeSub(supermod.memberdefType):
    """Generated pass-through subclass of supermod.memberdefType."""

    def __init__(
        self,
        initonly=None,
        kind=None,
        volatile=None,
        const=None,
        raise_=None,
        virt=None,
        readable=None,
        prot=None,
        explicit=None,
        new=None,
        final=None,
        writable=None,
        add=None,
        static=None,
        remove=None,
        sealed=None,
        mutable=None,
        gettable=None,
        inline=None,
        settable=None,
        id=None,
        templateparamlist=None,
        type_=None,
        definition="",
        argsstring="",
        name="",
        read="",
        write="",
        bitfield="",
        reimplements=None,
        reimplementedby=None,
        param=None,
        enumvalue=None,
        initializer=None,
        exceptions=None,
        briefdescription=None,
        detaileddescription=None,
        inbodydescription=None,
        location=None,
        references=None,
        referencedby=None,
    ):
        # Pure pass-through to the generated superclass constructor.
        supermod.memberdefType.__init__(
            self,
            initonly,
            kind,
            volatile,
            const,
            raise_,
            virt,
            readable,
            prot,
            explicit,
            new,
            final,
            writable,
            add,
            static,
            remove,
            sealed,
            mutable,
            gettable,
            inline,
            settable,
            id,
            templateparamlist,
            type_,
            definition,
            argsstring,
            name,
            read,
            write,
            bitfield,
            reimplements,
            reimplementedby,
            param,
            enumvalue,
            initializer,
            exceptions,
            briefdescription,
            detaileddescription,
            inbodydescription,
            location,
            references,
            referencedby,
        )


supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
class descriptionTypeSub(supermod.descriptionType):
    """Generated subclass of supermod.descriptionType; extra args unused."""

    def __init__(
        self,
        title="",
        para=None,
        sect1=None,
        internal=None,
        mixedclass_=None,
        content_=None,
    ):
        supermod.descriptionType.__init__(self, mixedclass_, content_)


supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
class enumvalueTypeSub(supermod.enumvalueType):
    """Generated subclass of supermod.enumvalueType; extra args unused."""

    def __init__(
        self,
        prot=None,
        id=None,
        name="",
        initializer=None,
        briefdescription=None,
        detaileddescription=None,
        mixedclass_=None,
        content_=None,
    ):
        supermod.enumvalueType.__init__(self, mixedclass_, content_)


supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
class templateparamlistTypeSub(supermod.templateparamlistType):
    """Generated pass-through subclass of supermod.templateparamlistType."""

    def __init__(self, param=None):
        supermod.templateparamlistType.__init__(self, param)


supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
    """Generated pass-through subclass of supermod.paramType."""

    def __init__(
        self,
        type_=None,
        declname="",
        defname="",
        array="",
        defval=None,
        briefdescription=None,
    ):
        supermod.paramType.__init__(
            self, type_, declname, defname, array, defval, briefdescription
        )


supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
class linkedTextTypeSub(supermod.linkedTextType):
    """Generated subclass of supermod.linkedTextType; ref arg unused."""

    def __init__(self, ref=None, mixedclass_=None, content_=None):
        supermod.linkedTextType.__init__(self, mixedclass_, content_)


supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
class graphTypeSub(supermod.graphType):
    """Generated pass-through subclass of supermod.graphType."""

    def __init__(self, node=None):
        supermod.graphType.__init__(self, node)


supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
class nodeTypeSub(supermod.nodeType):
    """Generated pass-through subclass of supermod.nodeType."""

    def __init__(self, id=None, label="", link=None, childnode=None):
        supermod.nodeType.__init__(self, id, label, link, childnode)


supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
class childnodeTypeSub(supermod.childnodeType):
    """Generated pass-through subclass of supermod.childnodeType."""

    def __init__(self, relation=None, refid=None, edgelabel=None):
        supermod.childnodeType.__init__(self, relation, refid, edgelabel)


supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
class linkTypeSub(supermod.linkType):
    """Generated subclass of supermod.linkType; valueOf_ unused."""

    def __init__(self, refid=None, external=None, valueOf_=""):
        supermod.linkType.__init__(self, refid, external)


supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
class listingTypeSub(supermod.listingType):
    """Generated pass-through subclass of supermod.listingType."""

    def __init__(self, codeline=None):
        supermod.listingType.__init__(self, codeline)


supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
    """Generated pass-through subclass of supermod.codelineType."""

    def __init__(
        self, external=None, lineno=None, refkind=None, refid=None, highlight=None
    ):
        supermod.codelineType.__init__(
            self, external, lineno, refkind, refid, highlight
        )


supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
class highlightTypeSub(supermod.highlightType):
    """Generated subclass of supermod.highlightType; extra args unused."""

    def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
        supermod.highlightType.__init__(self, mixedclass_, content_)


supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
class referenceTypeSub(supermod.referenceType):
    """Generated subclass of supermod.referenceType; extra args unused."""

    def __init__(
        self,
        endline=None,
        startline=None,
        refid=None,
        compoundref=None,
        valueOf_="",
        mixedclass_=None,
        content_=None,
    ):
        supermod.referenceType.__init__(self, mixedclass_, content_)


supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
class locationTypeSub(supermod.locationType):
    """Generated subclass of supermod.locationType; valueOf_ unused."""

    def __init__(
        self,
        bodystart=None,
        line=None,
        bodyend=None,
        bodyfile=None,
        file=None,
        valueOf_="",
    ):
        supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)


supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
class docSect1TypeSub(supermod.docSect1Type):
    """Generated subclass of supermod.docSect1Type; extra args unused."""

    def __init__(
        self,
        id=None,
        title="",
        para=None,
        sect2=None,
        internal=None,
        mixedclass_=None,
        content_=None,
    ):
        supermod.docSect1Type.__init__(self, mixedclass_, content_)


supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
class docSect2TypeSub(supermod.docSect2Type):
    """Generated subclass of supermod.docSect2Type; extra args unused."""

    def __init__(
        self,
        id=None,
        title="",
        para=None,
        sect3=None,
        internal=None,
        mixedclass_=None,
        content_=None,
    ):
        supermod.docSect2Type.__init__(self, mixedclass_, content_)


supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
class docSect3TypeSub(supermod.docSect3Type):
    """Generated subclass of supermod.docSect3Type; extra args unused."""

    def __init__(
        self,
        id=None,
        title="",
        para=None,
        sect4=None,
        internal=None,
        mixedclass_=None,
        content_=None,
    ):
        supermod.docSect3Type.__init__(self, mixedclass_, content_)


supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
class docSect4TypeSub(supermod.docSect4Type):
    """Generated subclass of supermod.docSect4Type; extra args unused."""

    def __init__(
        self,
        id=None,
        title="",
        para=None,
        internal=None,
        mixedclass_=None,
        content_=None,
    ):
        supermod.docSect4Type.__init__(self, mixedclass_, content_)


supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
class docInternalTypeSub(supermod.docInternalType):
    """Generated subclass of supermod.docInternalType; extra args unused."""

    def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
        supermod.docInternalType.__init__(self, mixedclass_, content_)


supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
class docInternalS1TypeSub(supermod.docInternalS1Type):
    """Generated subclass of supermod.docInternalS1Type; extra args unused."""

    def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
        supermod.docInternalS1Type.__init__(self, mixedclass_, content_)


supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
class docInternalS2TypeSub(supermod.docInternalS2Type):
    """Generated subclass of supermod.docInternalS2Type; extra args unused."""

    def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
        supermod.docInternalS2Type.__init__(self, mixedclass_, content_)


supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
class docInternalS3TypeSub(supermod.docInternalS3Type):
    """Generated subclass of supermod.docInternalS3Type; extra args unused.

    NOTE(review): the generator emitted ``sect3`` here where ``sect4``
    would be expected by the S1/S2 pattern — harmless since it is unused.
    """

    def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
        supermod.docInternalS3Type.__init__(self, mixedclass_, content_)


supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
class docInternalS4TypeSub(supermod.docInternalS4Type):
    """Generated pass-through subclass of supermod.docInternalS4Type."""

    def __init__(self, para=None, mixedclass_=None, content_=None):
        supermod.docInternalS4Type.__init__(self, mixedclass_, content_)


supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
class docURLLinkSub(supermod.docURLLink):
    """Generated subclass of supermod.docURLLink; extra args unused."""

    def __init__(self, url=None, valueOf_="", mixedclass_=None, content_=None):
        supermod.docURLLink.__init__(self, mixedclass_, content_)


supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
class docAnchorTypeSub(supermod.docAnchorType):
    """Generated subclass of supermod.docAnchorType; extra args unused."""

    def __init__(self, id=None, valueOf_="", mixedclass_=None, content_=None):
        supermod.docAnchorType.__init__(self, mixedclass_, content_)


supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
class docFormulaTypeSub(supermod.docFormulaType):
    """Generated subclass of supermod.docFormulaType; extra args unused."""

    def __init__(self, id=None, valueOf_="", mixedclass_=None, content_=None):
        supermod.docFormulaType.__init__(self, mixedclass_, content_)


supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
class docIndexEntryTypeSub(supermod.docIndexEntryType):
    """Generated pass-through subclass of supermod.docIndexEntryType."""

    def __init__(self, primaryie="", secondaryie=""):
        supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)


supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
class docListTypeSub(supermod.docListType):
    """Generated pass-through subclass of supermod.docListType."""

    def __init__(self, listitem=None):
        supermod.docListType.__init__(self, listitem)


supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
class docListItemTypeSub(supermod.docListItemType):
    """Generated pass-through subclass of supermod.docListItemType."""

    def __init__(self, para=None):
        supermod.docListItemType.__init__(self, para)


supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
class docSimpleSectTypeSub(supermod.docSimpleSectType):
    """Generated pass-through subclass of supermod.docSimpleSectType."""

    def __init__(self, kind=None, title=None, para=None):
        supermod.docSimpleSectType.__init__(self, kind, title, para)


supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
class docVarListEntryTypeSub(supermod.docVarListEntryType):
    """Generated pass-through subclass of supermod.docVarListEntryType."""

    def __init__(self, term=None):
        supermod.docVarListEntryType.__init__(self, term)


supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
class docRefTextTypeSub(supermod.docRefTextType):
    """Generated subclass of supermod.docRefTextType; extra args unused."""

    def __init__(
        self,
        refid=None,
        kindref=None,
        external=None,
        valueOf_="",
        mixedclass_=None,
        content_=None,
    ):
        supermod.docRefTextType.__init__(self, mixedclass_, content_)


supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
class docTableTypeSub(supermod.docTableType):
    """Generated pass-through subclass of supermod.docTableType."""

    def __init__(self, rows=None, cols=None, row=None, caption=None):
        supermod.docTableType.__init__(self, rows, cols, row, caption)


supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
class docRowTypeSub(supermod.docRowType):
    """Generated pass-through subclass of supermod.docRowType."""

    def __init__(self, entry=None):
        supermod.docRowType.__init__(self, entry)


supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
class docEntryTypeSub(supermod.docEntryType):
    """Generated pass-through subclass of supermod.docEntryType."""

    def __init__(self, thead=None, para=None):
        supermod.docEntryType.__init__(self, thead, para)


supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
class docHeadingTypeSub(supermod.docHeadingType):
    """Generated subclass of supermod.docHeadingType; extra args unused."""

    def __init__(self, level=None, valueOf_="", mixedclass_=None, content_=None):
        supermod.docHeadingType.__init__(self, mixedclass_, content_)


supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
class docImageTypeSub(supermod.docImageType):
    """Generated subclass of supermod.docImageType; extra args unused."""

    def __init__(
        self,
        width=None,
        type_=None,
        name=None,
        height=None,
        valueOf_="",
        mixedclass_=None,
        content_=None,
    ):
        supermod.docImageType.__init__(self, mixedclass_, content_)


supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
class docDotFileTypeSub(supermod.docDotFileType):
    """Generated subclass of supermod.docDotFileType; extra args unused."""

    def __init__(self, name=None, valueOf_="", mixedclass_=None, content_=None):
        supermod.docDotFileType.__init__(self, mixedclass_, content_)


supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
class docTocItemTypeSub(supermod.docTocItemType):
    """Generated subclass of supermod.docTocItemType; extra args unused."""

    def __init__(self, id=None, valueOf_="", mixedclass_=None, content_=None):
        supermod.docTocItemType.__init__(self, mixedclass_, content_)


supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
class docTocListTypeSub(supermod.docTocListType):
    """Generated pass-through subclass of supermod.docTocListType."""

    def __init__(self, tocitem=None):
        supermod.docTocListType.__init__(self, tocitem)


supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
class docLanguageTypeSub(supermod.docLanguageType):
    """Generated pass-through subclass of supermod.docLanguageType."""

    def __init__(self, langid=None, para=None):
        supermod.docLanguageType.__init__(self, langid, para)


supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
class docParamListTypeSub(supermod.docParamListType):
    """Generated pass-through subclass of supermod.docParamListType."""

    def __init__(self, kind=None, parameteritem=None):
        supermod.docParamListType.__init__(self, kind, parameteritem)


supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
    """Generated pass-through subclass of supermod.docParamListItem."""

    def __init__(self, parameternamelist=None, parameterdescription=None):
        supermod.docParamListItem.__init__(
            self, parameternamelist, parameterdescription
        )


supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
class docParamNameListSub(supermod.docParamNameList):
    """Generated pass-through subclass of supermod.docParamNameList."""

    def __init__(self, parametername=None):
        supermod.docParamNameList.__init__(self, parametername)


supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
class docParamNameSub(supermod.docParamName):
    """Generated subclass of supermod.docParamName; extra args unused."""

    def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
        supermod.docParamName.__init__(self, mixedclass_, content_)


supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
class docXRefSectTypeSub(supermod.docXRefSectType):
    """Generated pass-through subclass of supermod.docXRefSectType."""

    def __init__(self, id=None, xreftitle=None, xrefdescription=None):
        supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)


supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
class docCopyTypeSub(supermod.docCopyType):
    """Generated pass-through subclass of supermod.docCopyType."""

    def __init__(self, link=None, para=None, sect1=None, internal=None):
        supermod.docCopyType.__init__(self, link, para, sect1, internal)


supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
class docCharTypeSub(supermod.docCharType):
    """Generated subclass of supermod.docCharType; valueOf_ unused."""

    def __init__(self, char=None, valueOf_=""):
        supermod.docCharType.__init__(self, char)


supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
class docParaTypeSub(supermod.docParaType):
    """Generated subclass of supermod.docParaType.

    Sorts interesting children (parameter lists, simplesects, refs/text)
    into dedicated lists while the DOM tree is being built.
    """

    def __init__(self, char=None, valueOf_=""):
        # valueOf_ is accepted for generator signature compatibility but
        # not forwarded; content is accumulated by buildChildren().
        supermod.docParaType.__init__(self, char)
        self.parameterlist = []
        self.simplesects = []
        self.content = []

    def buildChildren(self, child_, nodeName_):
        """Dispatch one parsed DOM child node into the matching bucket."""
        supermod.docParaType.buildChildren(self, child_, nodeName_)
        if child_.nodeType == Node.TEXT_NODE:
            # Plain text is wrapped in a MixedContainer text entry.
            obj_ = self.mixedclass_(
                MixedContainer.CategoryText,
                MixedContainer.TypeNone,
                "",
                child_.nodeValue,
            )
            self.content.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "ref":
            obj_ = supermod.docRefTextType.factory()
            obj_.build(child_)
            self.content.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "parameterlist":
            obj_ = supermod.docParamListType.factory()
            obj_.build(child_)
            self.parameterlist.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "simplesect":
            obj_ = supermod.docSimpleSectType.factory()
            obj_.build(child_)
            self.simplesects.append(obj_)


supermod.docParaType.subclass = docParaTypeSub
# end class docParaTypeSub
def parse(inFilename):
    """Parse a Doxygen XML file and return the populated root object."""
    dom = minidom.parse(inFilename)
    root_obj = supermod.DoxygenType.factory()
    root_obj.build(dom.documentElement)
    return root_obj
|
backends | ripgrep | import re
from subprocess import PIPE, run
from typing import Generator, List
from archivebox.config import ARCHIVE_DIR, RIPGREP_VERSION, SEARCH_BACKEND_TIMEOUT
from archivebox.util import enforce_types
# Extensions whose files should be excluded from search results.
RG_IGNORE_EXTENSIONS = ("css", "js", "orig", "svg")
# Defines a custom ripgrep file type named "ignore" covering the extensions
# above; the capital T in RG_DEFAULT_ARGUMENTS excludes that type.
RG_ADD_TYPE = "--type-add"
RG_IGNORE_ARGUMENTS = f"ignore:*.{{{','.join(RG_IGNORE_EXTENSIONS)}}}"
RG_DEFAULT_ARGUMENTS = "-ilTignore"  # Case insensitive(i), matching files results(l)
RG_REGEX_ARGUMENT = "-e"
# Snapshot directories are named by a float timestamp, e.g. /1612345678.0/.
TIMESTAMP_REGEX = r"\/([\d]+\.[\d]+)\/"
ts_regex = re.compile(TIMESTAMP_REGEX)
@enforce_types
def index(snapshot_id: str, texts: List[str]):
    """No-op: the ripgrep backend searches archive files directly on disk."""
    return
@enforce_types
def flush(snapshot_ids: Generator[str, None, None]):
    """No-op: there is no separate index to flush for this backend."""
    return
@enforce_types
def search(text: str) -> List[str]:
    """Return primary keys of Snapshots whose archived files match *text*."""
    if not RIPGREP_VERSION:
        raise Exception(
            "ripgrep binary not found, install ripgrep to use this search backend"
        )
    # Imported lazily to avoid loading Django models at module import time.
    from core.models import Snapshot

    rg_args = [
        "rg",
        RG_ADD_TYPE,
        RG_IGNORE_ARGUMENTS,
        RG_DEFAULT_ARGUMENTS,
        RG_REGEX_ARGUMENT,
        text,
        str(ARCHIVE_DIR),
    ]
    proc = run(rg_args, stdout=PIPE, stderr=PIPE, timeout=SEARCH_BACKEND_TIMEOUT)

    # Each matching file path contains the snapshot's timestamp directory;
    # collect the distinct timestamps seen in the output.
    timestamps = set()
    for raw_path in proc.stdout.splitlines():
        found = ts_regex.findall(raw_path.decode())
        if found:
            timestamps.add(found[0])

    matching_pks = Snapshot.objects.filter(timestamp__in=timestamps).values_list(
        "pk", flat=True
    )
    return [str(pk) for pk in matching_pks]
|
lib | websockets | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
"""Utilities for interfacing with the WebSocket server Sutro."""
import datetime
import json
import urllib
import urlparse
from baseplate.crypto import MessageSigner
from pylons import app_globals as g
from r2.lib import amqp
from r2.lib.filters import websafe_json
# AMQP exchange that the sutro WebSocket server consumes from.
_WEBSOCKET_EXCHANGE = "sutro"
def send_broadcast(namespace, type, payload):
    """Broadcast an object to all WebSocket listeners in a namespace.

    The message type differentiates kinds of payloads; the payload is
    JSON-encoded before being sent to the client.
    """
    message = json.dumps({"type": type, "payload": payload})
    amqp.add_item(
        routing_key=namespace,
        body=message,
        exchange=_WEBSOCKET_EXCHANGE,
    )
def make_url(namespace, max_age):
    """Return a signed "wss" URL for the client to use for websockets.

    The namespace selects which messages the client receives; max_age is
    the number of seconds the signature stays valid.
    """
    signer = MessageSigner(g.secrets["websocket"])
    mac = signer.make_signature(
        namespace, max_age=datetime.timedelta(seconds=max_age)
    )
    query_string = urllib.urlencode({"m": mac})
    # (scheme, netloc, path, params, query, fragment)
    return urlparse.urlunparse(
        ("wss", g.websocket_host, namespace, None, query_string, None)
    )
|
gui | preferences_window | from __future__ import absolute_import
from gi.repository import Gdk, GObject, Gtk
from sunflower.gui.preferences.accelerators import AcceleratorOptions
from sunflower.gui.preferences.associations import AssociationsOptions
from sunflower.gui.preferences.bookmarks import BookmarksOptions
from sunflower.gui.preferences.commands import CommandsOptions
from sunflower.gui.preferences.display import DisplayOptions
from sunflower.gui.preferences.item_list import ItemListOptions
from sunflower.gui.preferences.operation import OperationOptions
from sunflower.gui.preferences.plugins import PluginsOptions
from sunflower.gui.preferences.terminal import TerminalOptions
from sunflower.gui.preferences.toolbar import ToolbarOptions
from sunflower.gui.preferences.view_and_edit import ViewEditOptions
class Column:
    # Tree-model column indices; presumably consumed by the option pages'
    # list stores — not referenced elsewhere in this file (verify callers).
    NAME = 0
    WIDGET = 1
class PreferencesWindow(Gtk.Window):
    """Container class for options editors"""

    def __init__(self, parent):
        # parent is the main application window; option pages use it to
        # read and apply configuration (see _save_options).
        GObject.GObject.__init__(self, type=Gtk.WindowType.TOPLEVEL)
        self._parent = parent
        # configure window
        self.set_title(_("Preferences"))
        self.set_default_size(750, 500)
        self.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
        self.set_modal(True)
        self.set_skip_taskbar_hint(True)
        self.set_transient_for(parent)
        self.set_wmclass("Sunflower", "Sunflower")
        # closing the window only hides it so it can be reshown (_hide)
        self.connect("delete_event", self._hide)
        self.connect("key-press-event", self._handle_key_press)
        # create user interface
        header_bar = Gtk.HeaderBar.new()
        header_bar.set_show_close_button(True)
        header_bar.set_title(_("Preferences"))
        self.set_titlebar(header_bar)
        hbox = Gtk.HBox.new(False, 0)
        # create tab stack and switcher
        self._tabs = Gtk.Stack.new()
        self._labels = Gtk.StackSidebar.new()
        self._labels.set_stack(self._tabs)
        self._labels.set_size_request(150, -1)
        # each options page registers itself with this window via add_tab()
        # in its constructor, so the instances need not be stored here
        DisplayOptions(self, parent)
        OperationOptions(self, parent)
        ItemListOptions(self, parent)
        TerminalOptions(self, parent)
        ViewEditOptions(self, parent)
        ToolbarOptions(self, parent)
        BookmarksOptions(self, parent)
        CommandsOptions(self, parent)
        PluginsOptions(self, parent)
        AcceleratorOptions(self, parent)
        AssociationsOptions(self, parent)
        # create buttons
        self._button_save = Gtk.Button.new_with_label(_("Save"))
        self._button_save.connect("clicked", self._save_options)
        self._button_save.get_style_context().add_class("suggested-action")
        self._button_revert = Gtk.Button.new_with_label(_("Revert"))
        self._button_revert.connect("clicked", self._load_options)
        # restart label
        self._label_restart = Gtk.Label(
            label="<i>{0}</i>".format(_("Program restart required!"))
        )
        self._label_restart.set_alignment(0.5, 0.5)
        self._label_restart.set_use_markup(True)
        # hidden by default; shown by enable_save(show_restart=True)
        self._label_restart.set_property("no-show-all", True)
        # pack buttons
        hbox.pack_start(self._labels, False, False, 0)
        hbox.pack_start(self._tabs, True, True, 0)
        header_bar.pack_start(self._label_restart)
        header_bar.pack_end(self._button_save)
        header_bar.pack_end(self._button_revert)
        self.add(hbox)

    def show(self, widget, tab_name=None):
        """Show dialog, focusing requested page, and reload options."""
        self._load_options()
        self.show_all()
        if tab_name:
            self._tabs.set_visible_child_name(tab_name)
        return True

    def _hide(self, widget=None, data=None):
        """Hide dialog"""
        should_close = True
        # prompt to save when changes are pending; the save button is only
        # sensitive while there are unsaved changes (see enable_save)
        if self._button_save.get_sensitive():
            dialog = Gtk.MessageDialog(
                self,
                Gtk.DialogFlags.DESTROY_WITH_PARENT,
                Gtk.MessageType.QUESTION,
                Gtk.ButtonsType.NONE,
                _("There are unsaved changes.\nDo you want to save them?"),
            )
            dialog.add_buttons(
                Gtk.STOCK_CANCEL,
                Gtk.ResponseType.CANCEL,
                Gtk.STOCK_NO,
                Gtk.ResponseType.NO,
                Gtk.STOCK_YES,
                Gtk.ResponseType.YES,
            )
            dialog.set_default_response(Gtk.ResponseType.YES)
            result = dialog.run()
            dialog.destroy()
            if result == Gtk.ResponseType.YES:
                self._save_options()
            elif result == Gtk.ResponseType.CANCEL:
                should_close = False
        if should_close:
            self.hide()
        return True  # avoid destroying components

    def _load_options(self, widget=None, data=None):
        """Change interface to present current state of configuration"""
        # call all tabs to load their options
        pages = filter(
            lambda page: hasattr(page, "_load_options"), self._tabs.get_children()
        )
        list(map(lambda page: page._load_options(), pages))
        # disable save button and hide label
        self._button_save.set_sensitive(False)
        self._button_revert.set_sensitive(False)
        self._label_restart.hide()

    def _save_options(self, widget=None, data=None):
        """Save options"""
        # call all tabs to save their options
        pages = filter(
            lambda page: hasattr(page, "_save_options"), self._tabs.get_children()
        )
        list(map(lambda page: page._save_options(), pages))
        # disable save button
        self._button_save.set_sensitive(False)
        self._button_revert.set_sensitive(False)
        # call main window to propagate new settings
        self._parent.apply_settings()
        # write changes to configuration file
        self._parent.save_config()

    def _handle_key_press(self, widget, event, data=None):
        """Handle pressing keys"""
        # Escape behaves like closing the window
        if event.keyval == Gdk.KEY_Escape:
            self._hide()

    def enable_save(self, widget=None, show_restart=None):
        """Enable save button"""
        self._button_save.set_sensitive(True)
        self._button_revert.set_sensitive(True)
        # show label with message
        if show_restart is not None and show_restart:
            self._label_restart.show()

    def add_tab(self, name, label, tab):
        """Add new tab to preferences window

        If you are using SettingsPage class there's no need to call this
        method manually, class constructor will do it automatically for you!
        """
        self._tabs.add_titled(tab, name, label)
|
extractor | academicearth | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class AcademicEarthCourseIE(InfoExtractor):
    """Extractor for Academic Earth course playlist pages."""

    _VALID_URL = r"^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)"
    IE_NAME = "AcademicEarth:Course"
    _TEST = {
        "url": "http://academicearth.org/playlists/laws-of-nature/",
        "info_dict": {
            "id": "laws-of-nature",
            "title": "Laws of Nature",
            "description": "Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.",
        },
        "playlist_count": 3,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        # Scrape playlist metadata from the page markup.
        title = self._html_search_regex(
            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, "title"
        )
        description = self._html_search_regex(
            r'<p class="excerpt"[^>]*?>(.*?)</p>', webpage, "description", fatal=False
        )
        lecture_urls = re.findall(
            r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
            webpage,
        )

        return {
            "_type": "playlist",
            "id": playlist_id,
            "title": title,
            "description": description,
            "entries": [self.url_result(lecture_url) for lecture_url in lecture_urls],
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.