section stringlengths 2 30 | filename stringlengths 1 82 | text stringlengths 783 28M |
|---|---|---|
snippet | snippets | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Accessing the snippets data.
"""
import collections
import functools
import itertools
import random
import re
import app
import icons
import symbols
textvars = collections.namedtuple("textvars", "text variables")
# cache parsed snippets
_cache = {}
# match variables in a '-*- ' line
_variables_re = re.compile(r"\s*?([a-z]+(?:-[a-z]+)*)(?::[ \t]*(.*?))?;")
# match expansions $$, $NAME or ${text} (the latter may contain escaped right brace: '\}')
_expansions_re = re.compile(
r"\$(?P<_bracket_>\{)?((?(_bracket_)(?:\\\}|[^\}])*|(?:\$|[A-Z]+(?:_[A-Z]+)*)))(?(_bracket_)\})"
)
# builtin snippets
from .builtin import builtin_snippets
def memoize(f):
    """Decorator caching the result of f per snippet name.

    Results live in the module-level _cache, keyed first by name and
    then by the decorated function, so unmemoize() can drop all cached
    data for one name at once.
    """
    @functools.wraps(f)
    def wrapper(name):
        per_name = _cache.get(name)
        if per_name is not None and f in per_name:
            return per_name[f]
        value = f(name)
        _cache.setdefault(name, {})[f] = value
        return value
    return wrapper
def unmemoize(f):
    """Decorator dropping all memoized information for a name.

    The wrapped function invalidates the _cache entry for its first
    argument before running, so mutating operations (save/delete) never
    leave stale parsed data behind.
    """
    @functools.wraps(f)
    def wrapper(name, *args, **kwargs):
        # pop with a default is a no-op when nothing was cached
        _cache.pop(name, None)
        return f(name, *args, **kwargs)
    return wrapper
def settings():
    """Return the application settings object for the "snippets" group."""
    return app.settings("snippets")
def names():
    """Return the set of names of all available snippets.

    Combines the builtin snippet names with user-defined ones from the
    settings, leaving out any name marked as deleted.
    """
    s = settings()
    return {
        name
        for name in itertools.chain(builtin_snippets, s.childGroups())
        if not s.value(name + "/deleted")
    }
def title(name, fallback=True):
    """Return the title of the specified snippet.

    A user-stored title takes precedence over the builtin one (which is
    a callable returning a translated string).  When neither exists and
    fallback is True, a shortened display of the snippet text is
    returned; with fallback False the result is None in that case.
    """
    s = settings()
    stored = s.value(name + "/title")
    if stored:
        return stored
    if name in builtin_snippets:
        snippet = builtin_snippets[name]
        if snippet.title:
            return snippet.title()  # call to translate
    if fallback:
        # no title found, send shorttext instead
        return shorttext(name)
def text(name):
    """Return the full snippet text for name, or the empty string.

    User-stored text overrides the builtin snippet text.
    """
    stored = settings().value(name + "/text")
    if stored:
        return stored
    if name in builtin_snippets:
        return builtin_snippets[name].text
    return ""
@memoize
def shorttext(name):
    """Returns the abridged text, in most cases usable for display or matching."""
    # get(name).text is the template with '-*- ' variable lines stripped;
    # the result is cached per name by @memoize.
    return maketitle(get(name).text)
def maketitle(text):
    """Return the text abridged to a single title-like line.

    Expansions ($$, $NAME, ${...}) are replaced by ' ... ', and only the
    first and last non-blank lines are kept, joined with ' ... ' when
    they differ.
    """
    lines = _expansions_re.sub(" ... ", text).splitlines()
    if not lines:
        return ""
    first = 0
    last = len(lines) - 1
    # skip leading and trailing blank (or whitespace-only) lines
    while first < last and not lines[first].strip():
        first += 1
    while last > first and not lines[last].strip():
        last -= 1
    if first == last:
        return lines[first]
    return lines[first] + " ... " + lines[last]
@memoize
def get(name):
    """Returns a tuple (text, variables) for the specified name.
    Equivalent to parse(text(name)). See parse().
    """
    # Cached per name by @memoize; save()/delete() invalidate via @unmemoize.
    return parse(text(name))
def parse(text):
    """Parse snippet source text into a named tuple (text, variables).

    Leading lines starting with '-*- ' are variable declarations of the
    form ``-*- name: value; name2;`` and are stripped from the returned
    text; a name given without a value gets the value True.
    """
    lines = text.split("\n")
    header_count = 0
    while header_count < len(lines) and lines[header_count].startswith("-*- "):
        header_count += 1
    variables = {}
    for line in lines[:header_count]:
        for match in _variables_re.finditer(line):
            # groups(True) turns a missing value group into True
            key, value = match.groups(True)
            variables[key] = value
    return textvars("\n".join(lines[header_count:]), variables)
def icon(name):
    """Return an icon for the snippet if one is defined, else None.

    The 'icon' variable (themed icon) takes precedence over 'symbol'.
    """
    variables = get(name).variables
    for key, loader in (("icon", icons.get), ("symbol", symbols.icon)):
        value = variables.get(key)
        if value:
            return loader(value)
@unmemoize
def delete(name):
    """Deletes a snippet. For builtins, name/deleted is set to true."""
    # @unmemoize drops any cached parse results for this name first.
    s = settings()
    s.remove(name)
    if name in builtin_snippets:
        # builtins can't really be removed; mark them deleted instead
        s.setValue(name + "/deleted", True)
def name(names):
    """Return a fresh name for a new snippet.

    names is a collection of existing names; the returned name (the
    letter 'n' followed by six random digits) is guaranteed not to occur
    in it.
    """
    candidate = f"n{random.random() * 1000000:06.0f}"
    while candidate in names:
        candidate = f"n{random.random() * 1000000:06.0f}"
    return candidate
@unmemoize
def save(name, text, title=None):
    """Store a snippet under name.

    For builtin snippets only the parts that differ from the builtin are
    stored; if nothing differs the stored override is removed entirely.
    """
    if name in builtin_snippets:
        builtin = builtin_snippets[name]
        # discard values identical to the builtin definition
        if not title or (builtin.title and title == builtin.title()):
            title = None
        if text == builtin.text:
            text = None
    s = settings()
    if not title and not text:
        # the snippet exactly matches the builtin, no saving needed
        s.remove(name)
        return
    # NOTE(review): the group opened here is never explicitly ended;
    # confirm the settings wrapper tolerates that, as the original did
    # the same.
    s.beginGroup(name)
    if text:
        s.setValue("text", text)
    else:
        s.remove("text")
    if title:
        s.setValue("title", title)
    else:
        s.remove("title")
def isoriginal(name):
    """Returns True if the built-in snippet is not changed or deleted."""
    # Any user customization (text, title or deleted flag) creates a child
    # settings group named after the snippet.
    return name in builtin_snippets and name not in settings().childGroups()
def expand(text):
    r"""Yield (text, expansion) pairs for the template text.

    Recognizes '$$', '$VAR_NAME' (uppercase, single underscores allowed)
    and '${other text}' (a right brace may be escaped as '\}' and is
    unescaped here).  Either element of a yielded pair may be the empty
    string; plain trailing text is yielded with an empty expansion.
    """
    position = 0
    for match in _expansions_re.finditer(text):
        if match.group(1):
            # braced form: unescape '\}' inside the braces
            expansion = match.group(2).replace("\\}", "}")
        else:
            expansion = match.group(2)
        yield text[position:match.start()], expansion
        position = match.end()
    remainder = text[position:]
    if remainder:
        yield remainder, ""
|
sabnzbd | emailer | #!/usr/bin/python3 -OO
# Copyright 2007-2023 The SABnzbd-Team (sabnzbd.org)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.emailer - Send notification emails
"""
import glob
import logging
import os
import re
import smtplib
import time
from email.message import EmailMessage
import sabnzbd
import sabnzbd.cfg as cfg
from Cheetah.Template import Template
from sabnzbd.constants import CHEETAH_DIRECTIVES, DEF_EMAIL_TMPL
from sabnzbd.misc import split_host, time_format, to_units
from sabnzbd.notifier import check_cat
RE_HEADER = re.compile(r"^([^:]+):(.*)")
def errormsg(msg):
    """Log msg at ERROR level and return it, so callers can log-and-return."""
    logging.error(msg)
    return msg
def get_email_date():
    """Return an un-localized, RFC-2822 style UTC date string for Date:."""
    # asctime(gmtime()) is locale independent: "Sun May 22 20:15:12 2011"
    weekday, month, monthday, clock, year = time.asctime(time.gmtime()).split()
    return f"{weekday}, {monthday} {month} {year} {clock} +0000"
def send_email(message, email_to, test=None):
    """Send message if message non-empty and email-parms are set.

    message: full header+body text, parsed into MIME by _prepare_message().
    email_to: recipient address(es), passed straight to SMTP.sendmail.
    test: optional dict of server values from the UI overriding stored config.
    Returns a (translated) status string; failures are also logged.
    """
    # we should not use CFG if we are testing. we should use values
    # from UI instead.
    # email_to is replaced at send_with_template, since it can be an array
    if test:
        email_server = test.get("email_server")
        email_from = test.get("email_from")
        email_account = test.get("email_account")
        email_pwd = test.get("email_pwd")
        if email_pwd and not email_pwd.replace("*", ""):
            # If all stars, get stored password instead
            email_pwd = cfg.email_pwd()
    else:
        email_server = cfg.email_server()
        email_from = cfg.email_from()
        email_account = cfg.email_account()
        email_pwd = cfg.email_pwd()
    if not message.strip("\n\r\t "):
        return "Skipped empty message"
    # Prepare the email
    email_message = _prepare_message(message)
    if email_server and email_to and email_from:
        server, port = split_host(email_server)
        if not port:
            port = 25
        logging.debug("Connecting to server %s:%s", server, port)
        # Try implicit SSL first, fall back to a plain connection.
        # The bare excepts are deliberate best-effort handling here.
        try:
            mailconn = smtplib.SMTP_SSL(server, port)
            mailconn.ehlo()
            logging.debug("Connected to server %s:%s", server, port)
        except:
            # Non SSL mail server
            logging.debug(
                "Non-SSL mail server detected reconnecting to server %s:%s",
                server,
                port,
            )
            try:
                mailconn = smtplib.SMTP(server, port)
                mailconn.ehlo()
            except:
                logging.info("Traceback: ", exc_info=True)
                return errormsg(T("Failed to connect to mail server"))
        # TLS support
        if mailconn.ehlo_resp and re.search(
            b"STARTTLS", mailconn.ehlo_resp, re.IGNORECASE
        ):
            logging.debug("TLS mail server detected")
            try:
                mailconn.starttls()
                mailconn.ehlo()
            except:
                logging.info("Traceback: ", exc_info=True)
                return errormsg(T("Failed to initiate TLS connection"))
        # Authentication (only when both account and password are configured)
        if (email_account != "") and (email_pwd != ""):
            try:
                mailconn.login(email_account, email_pwd)
            except smtplib.SMTPHeloError:
                return errormsg(
                    T("The server didn't reply properly to the helo greeting")
                )
            except smtplib.SMTPAuthenticationError:
                return errormsg(T("Failed to authenticate to mail server"))
            except smtplib.SMTPException:
                return errormsg(T("No suitable authentication method was found"))
            except:
                logging.info("Traceback: ", exc_info=True)
                return errormsg(T("Unknown authentication failure in mail server"))
        # Actual send; msg stays None on success
        try:
            mailconn.sendmail(email_from, email_to, email_message)
            msg = None
        except smtplib.SMTPHeloError:
            msg = errormsg("The server didn't reply properly to the helo greeting.")
        except smtplib.SMTPRecipientsRefused:
            msg = errormsg("The server rejected ALL recipients (no mail was sent).")
        except smtplib.SMTPSenderRefused:
            msg = errormsg("The server didn't accept the from_addr.")
        except smtplib.SMTPDataError:
            msg = errormsg(
                "The server replied with an unexpected error code (other than a refusal of a recipient)."
            )
        except:
            logging.info("Traceback: ", exc_info=True)
            msg = errormsg(T("Failed to send e-mail"))
        # Close the connection regardless of the send outcome
        try:
            mailconn.close()
        except:
            logging.info("Traceback: ", exc_info=True)
            errormsg(T("Failed to close mail connection"))
        if msg:
            return msg
        else:
            logging.info("Notification e-mail successfully sent")
            return T("Email succeeded")
    else:
        return T("Cannot send, missing required data")
def send_with_template(prefix, parm, test=None):
    """Send an email using template.

    prefix selects the template family ('email', 'rss', 'badfetch');
    parm is the dict of values exposed to the Cheetah template; test,
    when given, supplies the recipient from the UI instead of config.
    Returns a status string describing the outcome.
    """
    parm["from"] = cfg.email_from()
    parm["date"] = get_email_date()
    ret = None
    email_templates = []
    path = cfg.email_dir.get_path()
    if path and os.path.exists(path):
        # User-supplied template directory: use all matching templates
        try:
            email_templates = glob.glob(os.path.join(path, "%s-*.tmpl" % prefix))
        except:
            logging.error(T("Cannot find email templates in %s"), path)
    else:
        # Fall back to the bundled templates, preferring the UI language
        path = os.path.join(sabnzbd.DIR_PROG, DEF_EMAIL_TMPL)
        tpath = os.path.join(path, "%s-%s.tmpl" % (prefix, cfg.language()))
        if os.path.exists(tpath):
            email_templates = [tpath]
        else:
            email_templates = [os.path.join(path, "%s-en.tmpl" % prefix)]
    for template_file in email_templates:
        logging.debug("Trying to send email using template %s", template_file)
        if os.access(template_file, os.R_OK):
            if test:
                recipients = [test.get("email_to")]
            else:
                recipients = cfg.email_to()
            if len(recipients):
                # One message per recipient so ${to} renders correctly
                for recipient in recipients:
                    # Force-open as UTF-8, otherwise Cheetah breaks it
                    with open(template_file, "r", encoding="utf-8") as template_fp:
                        parm["to"] = recipient
                        message = Template(
                            file=template_fp,
                            searchList=[parm],
                            compilerSettings=CHEETAH_DIRECTIVES,
                        )
                        ret = send_email(message.respond(), recipient, test)
            else:
                ret = T("No recipients given, no email sent")
        else:
            # Can't open or read file, stop
            return errormsg(T("Cannot read %s") % template_file)
    # Did we send any emails at all?
    if not ret:
        ret = T("No email templates found")
    return ret
def endjob(
    filename,
    cat,
    status,
    path,
    bytes_downloaded,
    fail_msg,
    stages,
    script,
    script_output,
    script_ret,
    test=None,
):
    """Send end-of-job email.

    Builds the template parameter dict from the job results and delegates
    to send_with_template().  Returns None when the category settings
    disallow email notifications (unless this is a test send).
    """
    # Is it allowed?
    if not check_cat("misc", cat, keyword="email") and not test:
        return None
    # Translate the stage names
    tr = sabnzbd.api.Ttemplate
    if not status and fail_msg:
        # Failed job: only report the failure stage with its message
        xstages = {tr("stage-fail"): (fail_msg,)}
    else:
        xstages = {}
        for stage in stages:
            lines = []
            for line in stages[stage]:
                # Normalize embedded newlines and HTML breaks to one line
                # per list entry so templates can iterate them
                if "\n" in line or "<br/>" in line:
                    lines.extend(line.replace("<br/>", "\n").split("\n"))
                else:
                    lines.append(line)
            xstages[tr("stage-" + stage.lower())] = lines
    parm = {}
    parm["status"] = status
    parm["name"] = filename
    parm["path"] = path
    parm["msgid"] = ""
    parm["stages"] = xstages
    parm["script"] = script
    parm["script_output"] = script_output
    parm["script_ret"] = script_ret
    parm["cat"] = cat
    parm["size"] = "%sB" % to_units(bytes_downloaded)
    parm["end_time"] = time.strftime(time_format("%Y-%m-%d %H:%M:%S"))
    return send_with_template("email", parm, test)
def rss_mail(feed, jobs):
    """Send notification email containing list of files.

    feed is the RSS feed name; jobs the list of matched job titles.
    """
    parm = {"amount": len(jobs), "feed": feed, "jobs": jobs}
    return send_with_template("rss", parm)
def badfetch_mail(msg, url):
    """Send notification email about failed NZB fetch.

    msg is the failure description; url the NZB location that failed.
    """
    parm = {"url": url, "msg": msg}
    return send_with_template("badfetch", parm)
def diskfull_mail():
    """Send email about disk full, no templates.

    Uses a hard-coded translated message instead of the template system;
    returns "" when disk-full notifications are disabled.
    """
    if cfg.email_full():
        return send_email(
            T(
                """To: %s
From: %s
Date: %s
Subject: SABnzbd reports Disk Full
Hi,
SABnzbd has stopped downloading, because the disk is almost full.
Please make room and resume SABnzbd manually.
"""
            )
            % (cfg.email_to.get_string(), cfg.email_from(), get_email_date()),
            cfg.email_to(),
        )
    else:
        return ""
def _prepare_message(txt):
    """Parse the headers in the template text into a real MIME message.

    Lines matching 'Name: value' before the first blank line (after at
    least one header) become headers; everything from that blank line on
    is the body.  Returns the message as bytes with CRLF line endings.
    """
    msg = EmailMessage()
    body_lines = []
    seen_header = False
    in_body = False
    for line in txt.split("\n"):
        # the first empty line after a header starts the body
        if seen_header and not line:
            in_body = True
        if in_body:
            body_lines.append(line)
        else:
            match = RE_HEADER.search(line)
            if match:
                seen_header = True
                msg[match.group(1).strip()] = match.group(2).strip()
    msg.set_content("\n".join(body_lines))
    return msg.as_bytes(policy=msg.policy.clone(linesep="\r\n"))
|
migrate | vote_details_ip_backfill | import json
from collections import defaultdict
from datetime import datetime, timedelta
from pylons import app_globals as g
from r2.lib.db.sorts import epoch_seconds
from r2.lib.db.tdb_cassandra import write_consistency_level
from r2.lib.utils import in_chunks
from r2.models.vote import (VoteDetailsByComment, VoteDetailsByLink,
VoterIPByThing)
def backfill_vote_details(cls):
    """Move voter IPs out of a vote-details column family into VoterIPByThing.

    cls is VoteDetailsByComment or VoteDetailsByLink.  For every vote detail
    that carries an IP, the IP is written to VoterIPByThing with a TTL so it
    expires 90 days after the vote was cast, and the detail row is rewritten
    with the IP redacted.
    """
    ninety_days = timedelta(days=90).total_seconds()
    for chunk in in_chunks(cls._all(), size=100):
        detail_chunk = defaultdict(dict)
        try:
            with VoterIPByThing._cf.batch(write_consistency_level=cls._write_consistency_level) as b:
                for vote_list in chunk:
                    thing_id36 = vote_list._id
                    thing_fullname = vote_list.votee_fullname
                    details = vote_list.decode_details()
                    for detail in details:
                        voter_id36 = detail["voter_id"]
                        if "ip" in detail and detail["ip"]:
                            ip = detail["ip"]
                            redacted = dict(detail)
                            del redacted["ip"]
                            cast = detail["date"]
                            now = epoch_seconds(datetime.utcnow().replace(tzinfo=g.tz))
                            ttl = ninety_days - (now - cast)
                            oneweek = ""
                            if ttl < 3600 * 24 * 7:
                                oneweek = "(<= one week left)"
                            print "Inserting %s with IP ttl %d %s" % (redacted, ttl, oneweek)
                            # NOTE(review): the redacted detail is recorded even
                            # when ttl <= 0 below; only the IP insert is skipped
                            # in that case — confirm that is intended.
                            detail_chunk[thing_id36][voter_id36] = json.dumps(redacted)
                            if ttl <= 0:
                                print "Skipping bogus ttl for %s: %d" % (redacted, ttl)
                                continue
                            b.insert(thing_fullname, {voter_id36: ip}, ttl=ttl)
        except Exception:
            # Getting some really weird spurious errors here; complaints about negative
            # TTLs even though they can't possibly be negative, errors from cass
            # that have an explanation of "(why=')"
            # Just going to brute-force this through. We might lose 100 here and there
            # but mostly it'll be intact.
            pass
        # Write back the redacted details even if the IP batch failed above.
        for votee_id36, valuedict in detail_chunk.iteritems():
            cls._set_values(votee_id36, valuedict)
def main():
    """Run the IP backfill for both vote-details column families."""
    cfs = [VoteDetailsByComment, VoteDetailsByLink]
    for cf in cfs:
        backfill_vote_details(cf)
# NOTE(review): '__builtin__' (not '__main__') appears deliberate: these
# migration scripts are executed via "paster run", which execs them with
# __name__ set to '__builtin__' — confirm before "fixing" this guard.
if __name__ == '__builtin__':
    main()
|
widgets | nested_combobox | from typing import Dict, Optional, Tuple
from sglib.lib.translate import _
from sgui.sgqt import QAction, QMenu, QPushButton
class NestedComboBox(QPushButton):
    """A combobox-like button whose dropdown is a (possibly nested) QMenu.

    Mimics part of the QComboBox API (currentIndex, currentText,
    setCurrentIndex) so it can replace a flat combobox when entries need
    to be grouped into submenus.
    """
    def __init__(
        self,
        lookup: Dict[str, Tuple[int, Optional[str]]],
        tooltip=None,
    ):
        """
        lookup:
            A dictionary of str: (int, str) that maps names to UIDs
            and tooltips
        tooltip: A tooltip for the button.
        """
        # callbacks registered via currentIndexChanged_connect()
        self._callbacks = []
        self.lookup = lookup
        # UID -> name; the assert enforces that UIDs are unique
        self.reverse_lookup = {v[0]: k for k, v in lookup.items()}
        assert len(lookup) == len(self.reverse_lookup), (
            len(lookup),
            len(self.reverse_lookup),
            lookup,
            self.reverse_lookup,
        )
        QPushButton.__init__(self, _("None"))
        self.setObjectName("nested_combobox")
        self.menu = QMenu(self)
        self.setMenu(self.menu)
        self._index = 0
        self.menu.triggered.connect(self.action_triggered)
        # NOTE(review): tooltip may be None here; confirm the sgqt wrapper
        # accepts None for setToolTip.
        self.setToolTip(tooltip)
    def currentIndex(self):
        """Return the UID of the currently selected entry."""
        return self._index
    def currentIndexChanged_connect(self, callback):
        """Register callback(index) to run whenever the selection changes."""
        self._callbacks.append(callback)
    def _emit_currentIndexChanged(self, index):
        # Notify all registered listeners of the new UID
        for callback in self._callbacks:
            callback(index)
    def currentText(self):
        """Return the display name of the currently selected entry."""
        return self.reverse_lookup[self._index]
    def setCurrentIndex(self, a_index):
        """Select the entry with UID a_index and fire change callbacks."""
        a_index = int(a_index)
        self._index = a_index
        self.setText(self.reverse_lookup[a_index])
        self._emit_currentIndexChanged(a_index)
    def action_triggered(self, a_val):
        """Handle a menu action: update index/text and fire callbacks."""
        # plugin_name is attached to each QAction in addItems()
        a_val = a_val.plugin_name
        self._index = self.lookup[a_val][0]
        self.setText(a_val)
        self._emit_currentIndexChanged(self._index)
    def addItems(self, items):
        """Add entries to the dropdown
        items: [("Submenu Name" ["EntryName1", "EntryName2"])]

        Plain strings become top-level actions; (name, [entries]) pairs
        become submenus.  Every entry must exist as a key in self.lookup.
        """
        for v in items:
            if isinstance(v, str):
                action = QAction(v, self.menu)
                self.menu.addAction(action)
                tooltip = self.lookup[v][1]
                action.setToolTip(tooltip)
                action.plugin_name = v
            else:
                k, v = v
                menu = self.menu.addMenu(k)
                for name in v:
                    action = QAction(name, menu)
                    menu.addAction(action)
                    tooltip = self.lookup[name][1]
                    action.setToolTip(tooltip)
                    action.plugin_name = name
|
Code | Apertura | import random
from Code import Books, Partida, Util, VarGen
class EtiApertura:
    """A node in the opening tree: name, ECO code, move list and PGN."""

    def __init__(self, nombre, eco, a1h8, pgn):
        self.nombre = nombre
        self.eco = eco
        # a1h8 is a space-separated move string; keep it as a list
        self.a1h8 = a1h8.split(" ")
        self.pgn = pgn
        # child openings, filled via hijo()
        self.liHijos = []

    def hijo(self, ea):
        """Append a child opening node."""
        self.liHijos.append(ea)
class AperturaPol:
    """Opening-book player backed by a Polyglot book file.

    maxNivel is the number of full moves to stay in book (stored
    internally as plies); when elo is given, the easier book is used for
    players below 1700.
    """
    def __init__(self, maxNivel, elo=None):
        if elo:
            siPTZ = elo < 1700
        else:
            siPTZ = 1 <= maxNivel <= 2
        self.fichero = VarGen.tbookPTZ if siPTZ else VarGen.tbook
        self.book = Books.Polyglot()
        # NOTE(review): looks like an integrity/anti-tamper check on the
        # bundled book file sizes; the program exits silently if it fails.
        # Confirm the two magic constants before touching this.
        if not (
            (Util.tamFichero(self.fichero) / (len(self.fichero) - 9)) in (75876, 802116)
        ):
            import sys
            sys.exit()
        self.activa = True
        self.maxNivel = maxNivel * 2  # full moves -> plies
        self.nivelActual = 0
        self.siObligatoria = False
    def marcaEstado(self):
        """Snapshot the book state into a dict (e.g. for saving a game)."""
        dic = {
            "ACTIVA": self.activa,
            "NIVELACTUAL": self.nivelActual,
            "SIOBLIGATORIA": self.siObligatoria,
        }
        return dic
    def recuperaEstado(self, dic):
        """Restore state previously saved by marcaEstado()."""
        self.activa = dic["ACTIVA"]
        self.nivelActual = dic["NIVELACTUAL"]
        self.siObligatoria = dic["SIOBLIGATORIA"]
    def leeRandom(self, fen):
        """Return a random book entry for fen, weighted by entry.weight,
        or None when the position is not in the book."""
        li = self.book.lista(self.fichero, fen)
        if not li:
            return None
        liNum = []
        for nentry, entry in enumerate(li):
            liNum.extend([nentry] * (entry.weight + 1))  # Always entry.weight+1> 0
        return li[random.choice(liNum)]
    def isActive(self, fen):
        """Return True if the book has at least one move for fen."""
        x = self.leeRandom(fen)
        return x is not None
    def juegaMotor(self, fen):
        """Play a book move for the engine side.

        Returns (True, from, to, promotion); once out of book or past
        maxNivel, deactivates and returns (False, None, None, None).
        """
        self.nivelActual += 1
        if self.nivelActual > self.maxNivel:
            self.activa = False
            return False, None, None, None
        if not self.activa:
            return False, None, None, None
        entry = self.leeRandom(fen)
        if entry is None:
            self.activa = False
            return False, None, None, None
        pv = entry.pv()  # UCI-style move string, e.g. "e2e4" or "e7e8q"
        return True, pv[:2], pv[2:4], pv[4:]
    def compruebaHumano(self, fen, desde, hasta):
        """Return True if the human move desde-hasta is any book move at fen."""
        if not self.activa:
            return False
        li = self.book.lista(self.fichero, fen)
        if not li:
            return False
        for entry in li:
            pv = entry.pv()
            if pv[:2] == desde and pv[2:4] == hasta:
                return True
        return False
class JuegaApertura:
    """Plays out a fixed opening line given as a PV string (a1h8)."""

    def __init__(self, a1h8):
        p = Partida.Partida()
        p.leerPV(a1h8)
        # Map each position (FEN) reached in the line to the move played from it.
        self.dicFEN = {}
        for jg in p.liJugadas:
            self.dicFEN[jg.posicionBase.fen()] = jg
        self.activa = True

    def juegaMotor(self, fen):
        """Return (True, from, to, promotion) for the prepared move at fen.

        Deactivates the line and returns (False, None, None, None) once
        the position is no longer part of it.
        """
        try:
            jg = self.dicFEN[fen]
        except KeyError:
            # Was a bare 'except:'; only a missing FEN is expected here,
            # anything else should propagate instead of being swallowed.
            self.activa = False
            return False, None, None, None
        return True, jg.desde, jg.hasta, jg.coronacion

    def compruebaHumano(self, fen, desde, hasta):
        """Return True if the human move desde-hasta follows the line at fen."""
        if fen in self.dicFEN:
            jg = self.dicFEN[fen]
            return desde == jg.desde and hasta == jg.hasta
        self.activa = False
        return False

    def desdeHastaActual(self, fen):
        """Return (from, to) of the prepared move at fen, or (None, None)."""
        if fen in self.dicFEN:
            jg = self.dicFEN[fen]
            return jg.desde, jg.hasta
        self.activa = False
        return None, None
|
plugin-base | find_extension | from gi.repository import Gtk
class FindExtension:
    """Base class for extending find files tool.

    Use this class to provide find files tool with additional
    options. Objects are created every time tool is created!
    """

    def __init__(self, parent, always_on=False):
        self._parent = parent
        # always_on extensions start active and their switch is disabled
        self._active = always_on
        # create and configure title widget
        self.title = TitleRow(self, always_on)
        self.title.check.connect("state-set", self.__handle_state_set)
        # create and configure container
        self.container = Gtk.VBox.new(False, 5)
        self.container.set_border_width(10)
        self.container.extension = self

    def __handle_state_set(self, widget, state):
        """Update extension active flag from the switch widget."""
        self._active = state

    def __get_active(self):
        """Get state of the extension."""
        return self._active

    def __set_active(self, value):
        """Set state of the extension and sync the switch widget."""
        self._active = value
        self.title.set_active(value)

    def get_title(self):
        """Return name of the extension.

        Subclasses should override; the base class has no title.
        """
        return None

    def get_title_widget(self):
        """Return title widget for extension."""
        return self.title

    def get_container(self):
        """Return widget container."""
        return self.container

    def is_path_ok(self, provider, path):
        """Check whether the specified path fits the criteria.

        Subclasses override this to filter results; base accepts all.
        """
        return True

    # property so assigning 'active' also updates the switch widget
    active = property(__get_active, __set_active)
class TitleRow(Gtk.ListBoxRow):
    """List box row representing extension.

    Shows the extension title with a switch to toggle it; the switch is
    disabled for always-on extensions.
    """

    def __init__(self, extension, always_on):
        Gtk.ListBoxRow.__init__(self)
        self._extension = extension
        self.set_selectable(True)
        self.set_activatable(True)
        self.set_focus_on_click(True)
        # create interface
        hbox = Gtk.HBox.new(False, 10)
        hbox.set_border_width(5)
        self.add(hbox)
        label = Gtk.Label.new(extension.get_title())
        label.set_alignment(0, 0.5)
        hbox.pack_start(label, True, True, 0)
        self.check = Gtk.Switch.new()
        # always-on extensions cannot be toggled off
        self.check.set_sensitive(not always_on)
        hbox.pack_start(self.check, False, False, 0)

    def get_extension(self):
        """Return parent extension."""
        return self._extension

    def set_active(self, value):
        """Set state of the extension switch."""
        self.check.set_active(value)
|
cli | main | # -*- coding: utf-8 -*-
"""
flaskbb.cli.commands
~~~~~~~~~~~~~~~~~~~~
This module contains the main commands.
:copyright: (c) 2016 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import binascii
import logging
import os
import sys
import time
import traceback
from datetime import datetime
import click
from flask import current_app
from flask.cli import FlaskGroup, ScriptInfo, with_appcontext
from flask_alembic import alembic_click
from flaskbb import create_app
from flaskbb.cli.utils import (
EmailType,
FlaskBBCLIError,
get_version,
prompt_config_path,
prompt_save_user,
write_config,
)
from flaskbb.extensions import alembic, celery, db, whooshee
from flaskbb.utils.populate import (
create_default_groups,
create_default_settings,
create_latest_db,
create_test_data,
create_welcome_forum,
insert_bulk_data,
run_plugin_migrations,
update_settings_from_fixture,
)
from flaskbb.utils.translations import compile_translations
from jinja2 import Environment, FileSystemLoader
from sqlalchemy_utils.functions import database_exists
from werkzeug.utils import import_string
logger = logging.getLogger(__name__)
class FlaskBBGroup(FlaskGroup):
    """FlaskGroup that loads FlaskBB CLI plugins before resolving commands."""

    def __init__(self, *args, **kwargs):
        super(FlaskBBGroup, self).__init__(*args, **kwargs)
        # guard so the plugin hooks run at most once per process
        self._loaded_flaskbb_plugins = False

    def _load_flaskbb_plugins(self, ctx):
        """Let plugins register CLI commands and shell context processors."""
        if self._loaded_flaskbb_plugins:
            return
        try:
            app = ctx.ensure_object(ScriptInfo).load_app()
            app.pluggy.hook.flaskbb_cli(cli=self, app=app)
            self._loaded_flaskbb_plugins = True
        except Exception:
            # logger.exception logs the active traceback at ERROR level.
            # The previous exc_info=traceback.format_exc() passed a string
            # where logging expects a bool/exception — it only worked via
            # truthiness; this is the idiomatic form.
            logger.exception("Error while loading CLI Plugins")
        else:
            shell_context_processors = app.pluggy.hook.flaskbb_shell_context()
            for p in shell_context_processors:
                app.shell_context_processor(p)

    def get_command(self, ctx, name):
        self._load_flaskbb_plugins(ctx)
        return super(FlaskBBGroup, self).get_command(ctx, name)

    def list_commands(self, ctx):
        self._load_flaskbb_plugins(ctx)
        return super(FlaskBBGroup, self).list_commands(ctx)
def make_app():
    """App factory for FlaskBBGroup.

    Reads the config file and instance path off the click context's
    ScriptInfo object (filled in by the --config/--instance callbacks).
    """
    ctx = click.get_current_context(silent=True)
    script_info = None
    if ctx is not None:
        script_info = ctx.obj
    config_file = getattr(script_info, "config_file", None)
    instance_path = getattr(script_info, "instance_path", None)
    return create_app(config_file, instance_path)
def set_config(ctx, param, value):
    """This will pass the config file to the create_app function."""
    # eager click callback: stash the value on ScriptInfo for make_app()
    ctx.ensure_object(ScriptInfo).config_file = value
def set_instance(ctx, param, value):
    """This will pass the instance path on the script info which can then
    be used in 'make_app'."""
    ctx.ensure_object(ScriptInfo).instance_path = value
@click.group(
    cls=FlaskBBGroup,
    create_app=make_app,
    add_version_option=False,
    invoke_without_command=True,
)
@click.option(
    "--config",
    expose_value=False,
    callback=set_config,
    required=False,
    is_flag=False,
    is_eager=True,
    metavar="CONFIG",
    help="Specify the config to use either in dotted module "
    "notation e.g. 'flaskbb.configs.default.DefaultConfig' "
    "or by using a path like '/path/to/flaskbb.cfg'",
)
@click.option(
    "--instance",
    expose_value=False,
    callback=set_instance,
    required=False,
    is_flag=False,
    is_eager=True,
    metavar="PATH",
    help="Specify the instance path to use. By default the folder "
    "'instance' next to the package or module is assumed to "
    "be the instance path.",
)
@click.option(
    "--version",
    expose_value=False,
    callback=get_version,
    is_flag=True,
    is_eager=True,
    help="Show the FlaskBB version.",
)
@click.pass_context
def flaskbb(ctx):
    """This is the commandline interface for flaskbb."""
    if ctx.invoked_subcommand is None:
        # show the help text instead of an error
        # when just '--config' option has been provided
        click.echo(ctx.get_help())
flaskbb.add_command(alembic_click, "db")
@flaskbb.command()
# NOTE(review): a plain is_flag option with default=True cannot be turned
# off from the command line ('-w' still yields True), yet the help text
# says "Disable" — this likely wants a '--welcome/--no-welcome' pair.
# Left unchanged to preserve the CLI interface; confirm upstream.
@click.option(
    "--welcome", "-w", default=True, is_flag=True, help="Disable the welcome forum."
)
@click.option(
    "--force", "-f", default=False, is_flag=True, help="Doesn't ask for confirmation."
)
@click.option("--username", "-u", help="The username of the user.")
@click.option("--email", "-e", type=EmailType(), help="The email address of the user.")
@click.option("--password", "-p", help="The password of the user.")
@click.option(
    "--no-plugins",
    "-n",
    default=False,
    is_flag=True,
    help="Don't run the migrations for the default plugins.",
)
@with_appcontext
def install(welcome, force, username, email, password, no_plugins):
    """Installs flaskbb. If no arguments are used, an interactive setup
    will be run.
    """
    # A generated config file is required so the install survives restarts.
    if not current_app.config["CONFIG_PATH"]:
        click.secho(
            "[!] No 'flaskbb.cfg' config found. "
            "You can generate a configuration file with 'flaskbb makeconfig'.",
            fg="red",
        )
        sys.exit(1)
    click.secho("[+] Installing FlaskBB...", fg="cyan")
    if database_exists(db.engine.url):
        if force or click.confirm(
            click.style(
                "Existing database found. Do you want to delete the old one and "
                "create a new one?",
                fg="magenta",
            )
        ):
            db.drop_all()
        else:
            sys.exit(0)
    # creating database from scratch and 'stamping it'
    create_latest_db()
    click.secho("[+] Creating default settings...", fg="cyan")
    create_default_groups()
    create_default_settings()
    click.secho("[+] Creating admin user...", fg="cyan")
    prompt_save_user(username, email, password, "admin")
    if welcome:
        click.secho("[+] Creating welcome forum...", fg="cyan")
        create_welcome_forum()
    if not no_plugins:
        click.secho("[+] Installing default plugins...", fg="cyan")
        run_plugin_migrations()
    click.secho("[+] Compiling translations...", fg="cyan")
    compile_translations()
    click.secho("[+] FlaskBB has been successfully installed!", fg="green", bold=True)
@flaskbb.command()
@click.option(
    "--test-data", "-t", default=False, is_flag=True, help="Adds some test data."
)
@click.option(
    "--bulk-data", "-b", default=False, is_flag=True, help="Adds a lot of data."
)
@click.option(
    "--posts",
    default=100,
    help="Number of posts to create in each topic (default: 100).",
)
@click.option(
    "--topics", default=100, help="Number of topics to create (default: 100)."
)
@click.option(
    "--force", "-f", is_flag=True, help="Will delete the database before populating it."
)
@click.option(
    "--initdb",
    "-i",
    is_flag=True,
    help="Initializes the database before populating it.",
)
def populate(bulk_data, test_data, posts, topics, force, initdb):
    """Creates the necessary tables and groups for FlaskBB."""
    if force:
        click.secho("[+] Recreating database...", fg="cyan")
        db.drop_all()
        # do not initialize the db if -i is passed
        if not initdb:
            create_latest_db()
    if initdb:
        click.secho("[+] Initializing database...", fg="cyan")
        create_latest_db()
        run_plugin_migrations()
    if test_data:
        click.secho("[+] Adding some test data...", fg="cyan")
        create_test_data()
    if bulk_data:
        click.secho("[+] Adding a lot of test data...", fg="cyan")
        timer = time.time()
        rv = insert_bulk_data(int(topics), int(posts))
        # bulk insert needs the base test data to exist; create it on demand
        if not rv and not test_data:
            create_test_data()
            rv = insert_bulk_data(int(topics), int(posts))
        elapsed = time.time() - timer
        click.secho(
            "[+] It took {:.2f} seconds to create {} topics and {} " "posts.".format(
                elapsed, rv[0], rv[1]
            ),
            fg="cyan",
        )
    # this just makes the most sense for the command name; use -i to
    # init the db as well
    if not test_data and not bulk_data:
        click.secho("[+] Populating the database with some defaults...", fg="cyan")
        create_default_groups()
        create_default_settings()
@flaskbb.command()
def reindex():
    """Reindexes the search index."""
    click.secho("[+] Reindexing search index...", fg="cyan")
    whooshee.reindex()
@flaskbb.command()
@click.option(
    "all_latest",
    "--all",
    "-a",
    default=False,
    is_flag=True,
    help="Upgrades migrations AND fixtures to the latest version.",
)
@click.option(
    # was "--fixture/": the trailing slash is click's on/off boolean-flag
    # separator and is invalid for a value-taking option
    "--fixture",
    "-f",
    default=None,
    help="The fixture which should be upgraded or installed.",
)
@click.option(
    "--force", default=False, is_flag=True, help="Forcefully upgrades the fixtures."
)
def upgrade(all_latest, fixture, force):
    """Updates the migrations and fixtures.

    With --all, migrations are upgraded to head first; with --fixture NAME,
    the settings fixture 'flaskbb.fixtures.NAME' is applied, overwriting
    existing groups/settings only when --force is given.
    """
    if all_latest:
        click.secho("[+] Upgrading migrations to the latest version...", fg="cyan")
        alembic.upgrade()
    if fixture or all_latest:
        try:
            settings = import_string("flaskbb.fixtures.{}".format(fixture))
            settings = settings.fixture
        except ImportError:
            raise FlaskBBCLIError(
                "{} fixture is not available".format(fixture), fg="red"
            )
        click.secho("[+] Updating fixtures...", fg="cyan")
        count = update_settings_from_fixture(
            fixture=settings, overwrite_group=force, overwrite_setting=force
        )
        click.secho(
            "[+] {settings} settings in {groups} setting groups " "updated.".format(
                groups=len(count),
                settings=sum(len(settings) for settings in count.values()),
            ),
            fg="green",
        )
@flaskbb.command(
    "celery",
    add_help_option=False,
    context_settings={"ignore_unknown_options": True, "allow_extra_args": True},
)
@click.pass_context
@with_appcontext
def start_celery(ctx):
    """Preconfigured wrapper around the 'celery' command."""
    # extra args are passed through untouched to celery
    celery.start(ctx.args)
@flaskbb.command("shell", short_help="Runs a shell in the app context.")
@with_appcontext
def shell_command():
    """Runs an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
    This code snippet is taken from Flask's cli module and modified to
    run IPython and falls back to the normal shell if IPython is not
    available.
    """
    import code
    banner = "Python %s on %s\nInstance Path: %s" % (
        sys.version,
        sys.platform,
        current_app.instance_path,
    )
    ctx = {"db": db}
    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get("PYTHONSTARTUP")
    if startup and os.path.isfile(startup):
        with open(startup, "r") as f:
            eval(compile(f.read(), startup, "exec"), ctx)
    ctx.update(current_app.make_shell_context())
    try:
        import IPython
        from traitlets.config import get_config
        c = get_config()
        # This makes the prompt to use colors again
        c.InteractiveShellEmbed.colors = "Linux"
        IPython.embed(config=c, banner1=banner, user_ns=ctx)
    except ImportError:
        # IPython not installed: fall back to the stdlib REPL
        code.interact(banner=banner, local=ctx)
@flaskbb.command("urls", short_help="Show routes for the app.")
@click.option(
"--route", "-r", "order_by", flag_value="rule", default=True, help="Order by route"
)
@click.option(
"--endpoint", "-e", "order_by", flag_value="endpoint", help="Order by endpoint"
)
@click.option(
"--methods", "-m", "order_by", flag_value="methods", help="Order by methods"
)
@with_appcontext
def list_urls(order_by):
"""Lists all available routes."""
from flask import current_app
rules = sorted(
current_app.url_map.iter_rules(), key=lambda rule: getattr(rule, order_by)
)
max_rule_len = max(len(rule.rule) for rule in rules)
max_rule_len = max(max_rule_len, len("Route"))
max_endpoint_len = max(len(rule.endpoint) for rule in rules)
max_endpoint_len = max(max_endpoint_len, len("Endpoint"))
max_method_len = max(len(", ".join(rule.methods)) for rule in rules)
max_method_len = max(max_method_len, len("Methods"))
column_header_len = max_rule_len + max_endpoint_len + max_method_len + 4
column_template = "{:<%s} {:<%s} {:<%s}" % (
max_rule_len,
max_endpoint_len,
max_method_len,
)
click.secho(
column_template.format("Route", "Endpoint", "Methods"), fg="blue", bold=True
)
click.secho("=" * column_header_len, bold=True)
for rule in rules:
methods = ", ".join(rule.methods)
click.echo(column_template.format(rule.rule, rule.endpoint, methods))
@flaskbb.command("makeconfig")
@click.option(
"--development",
"-d",
default=False,
is_flag=True,
help="Creates a development config with DEBUG set to True.",
)
@click.option(
"--output",
"-o",
required=False,
help="The path where the config file will be saved at. "
"Defaults to the flaskbb's root folder.",
)
@click.option(
"--force",
"-f",
default=False,
is_flag=True,
help="Overwrite any existing config file if one exists.",
)
def generate_config(development, output, force):
"""Generates a FlaskBB configuration file."""
config_env = Environment(
loader=FileSystemLoader(os.path.join(current_app.root_path, "configs"))
)
config_template = config_env.get_template("config.cfg.template")
if output:
config_path = os.path.abspath(output)
else:
config_path = os.path.dirname(current_app.root_path)
if os.path.exists(config_path) and not os.path.isfile(config_path):
config_path = os.path.join(config_path, "flaskbb.cfg")
# An override to handle database location paths on Windows environments
database_path = "sqlite:///" + os.path.join(
os.path.dirname(current_app.instance_path), "flaskbb.sqlite"
)
if os.name == "nt":
database_path = database_path.replace("\\", r"\\")
default_conf = {
"is_debug": False,
"server_name": "example.org",
"use_https": True,
"database_uri": database_path,
"redis_enabled": False,
"redis_uri": "redis://localhost:6379",
"mail_server": "localhost",
"mail_port": 25,
"mail_use_tls": False,
"mail_use_ssl": False,
"mail_username": "",
"mail_password": "",
"mail_sender_name": "FlaskBB Mailer",
"mail_sender_address": "noreply@yourdomain",
"mail_admin_address": "admin@yourdomain",
"secret_key": binascii.hexlify(os.urandom(24)).decode(),
"csrf_secret_key": binascii.hexlify(os.urandom(24)).decode(),
"timestamp": datetime.utcnow().strftime("%A, %d. %B %Y at %H:%M"),
"log_config_path": "",
"deprecation_level": "default",
}
if not force:
config_path = prompt_config_path(config_path)
if force and os.path.exists(config_path):
click.secho(
"Overwriting existing config file: {}".format(config_path), fg="yellow"
)
if development:
default_conf["is_debug"] = True
default_conf["use_https"] = False
default_conf["server_name"] = "localhost:5000"
write_config(default_conf, config_template, config_path)
sys.exit(0)
# SERVER_NAME
click.secho(
"The name and port number of the exposed server.\n"
"If FlaskBB is accesible on port 80 you can just omit the "
"port.\n For example, if FlaskBB is accessible via "
"example.org:8080 than this is also what you would set here.",
fg="cyan",
)
default_conf["server_name"] = click.prompt(
click.style("Server Name", fg="magenta"),
type=str,
default=default_conf.get("server_name"),
)
# HTTPS or HTTP
click.secho("Is HTTPS (recommended) or HTTP used for to serve FlaskBB?", fg="cyan")
default_conf["use_https"] = click.confirm(
click.style("Use HTTPS?", fg="magenta"), default=default_conf.get("use_https")
)
# SQLALCHEMY_DATABASE_URI
click.secho(
"For Postgres use:\n"
" postgresql://flaskbb@localhost:5432/flaskbb\n"
"For more options see the SQLAlchemy docs:\n"
" http://docs.sqlalchemy.org/en/latest/core/engines.html",
fg="cyan",
)
default_conf["database_uri"] = click.prompt(
click.style("Database URI", fg="magenta"),
default=default_conf.get("database_uri"),
)
# REDIS_ENABLED
click.secho(
"Redis will be used for things such as the task queue, "
"caching and rate limiting.",
fg="cyan",
)
default_conf["redis_enabled"] = click.confirm(
click.style("Would you like to use redis?", fg="magenta"), default=True
) # default_conf.get("redis_enabled") is False
# REDIS_URI
if default_conf.get("redis_enabled", False):
default_conf["redis_uri"] = click.prompt(
click.style("Redis URI", fg="magenta"),
default=default_conf.get("redis_uri"),
)
else:
default_conf["redis_uri"] = ""
# MAIL_SERVER
click.secho(
"To use 'localhost' make sure that you have sendmail or\n"
"something similar installed. Gmail is also supprted.",
fg="cyan",
)
default_conf["mail_server"] = click.prompt(
click.style("Mail Server", fg="magenta"),
default=default_conf.get("mail_server"),
)
# MAIL_PORT
click.secho("The port on which the SMTP server is listening on.", fg="cyan")
default_conf["mail_port"] = click.prompt(
click.style("Mail Server SMTP Port", fg="magenta"),
default=default_conf.get("mail_port"),
)
# MAIL_USE_TLS
click.secho(
"If you are using a local SMTP server like sendmail this is "
"not needed. For external servers it is required.",
fg="cyan",
)
default_conf["mail_use_tls"] = click.confirm(
click.style("Use TLS for sending mails?", fg="magenta"),
default=default_conf.get("mail_use_tls"),
)
# MAIL_USE_SSL
click.secho("Same as above. TLS is the successor to SSL.", fg="cyan")
default_conf["mail_use_ssl"] = click.confirm(
click.style("Use SSL for sending mails?", fg="magenta"),
default=default_conf.get("mail_use_ssl"),
)
# MAIL_USERNAME
click.secho(
"Not needed if you are using a local smtp server.\nFor gmail "
"you have to put in your email address here.",
fg="cyan",
)
default_conf["mail_username"] = click.prompt(
click.style("Mail Username", fg="magenta"),
default=default_conf.get("mail_username"),
)
# MAIL_PASSWORD
click.secho(
"Not needed if you are using a local smtp server.\nFor gmail "
"you have to put in your gmail password here.",
fg="cyan",
)
default_conf["mail_password"] = click.prompt(
click.style("Mail Password", fg="magenta"),
default=default_conf.get("mail_password"),
)
# MAIL_DEFAULT_SENDER
click.secho(
"The name of the sender. You probably want to change it to "
"something like '<your_community> Mailer'.",
fg="cyan",
)
default_conf["mail_sender_name"] = click.prompt(
click.style("Mail Sender Name", fg="magenta"),
default=default_conf.get("mail_sender_name"),
)
click.secho(
"On localhost you want to use a noreply address here. "
"Use your email address for gmail here.",
fg="cyan",
)
default_conf["mail_sender_address"] = click.prompt(
click.style("Mail Sender Address", fg="magenta"),
default=default_conf.get("mail_sender_address"),
)
# ADMINS
click.secho(
"Logs and important system messages are sent to this address. "
"Use your email address for gmail here.",
fg="cyan",
)
default_conf["mail_admin_address"] = click.prompt(
click.style("Mail Admin Email", fg="magenta"),
default=default_conf.get("mail_admin_address"),
)
click.secho(
"Optional filepath to load a logging configuration file from. "
"See the Python logging documentation for more detail.\n"
"\thttps://docs.python.org/library/logging.config.html#logging-config-fileformat", # noqa
fg="cyan",
)
default_conf["log_config_path"] = click.prompt(
click.style("Logging Config Path", fg="magenta"),
default=default_conf.get("log_config_path"),
)
deprecation_mesg = (
"Warning level for deprecations. options are: \n"
"\terror\tturns deprecation warnings into exceptions\n"
"\tignore\tnever warns about deprecations\n"
"\talways\talways warns about deprecations even if the warning has been issued\n" # noqa
"\tdefault\tshows deprecation warning once per usage\n"
"\tmodule\tshows deprecation warning once per module\n"
"\tonce\tonly shows deprecation warning once regardless of location\n"
"If you are unsure, select default\n"
"for more details see: https://docs.python.org/3/library/warnings.html#the-warnings-filter" # noqa
)
click.secho(deprecation_mesg, fg="cyan")
default_conf["deprecation_level"] = click.prompt(
click.style("Deperecation warning level", fg="magenta"),
default=default_conf.get("deprecation_level"),
)
write_config(default_conf, config_template, config_path)
# Finished
click.secho(
"The configuration file has been saved to:\n{cfg}\n"
"Feel free to adjust it as needed.".format(cfg=config_path),
fg="blue",
bold=True,
)
click.secho(
"Usage: \nflaskbb --config {cfg} run".format(cfg=config_path), fg="green"
)
|
extractor | noz | # coding: utf-8
from __future__ import unicode_literals
from ..compat import compat_urllib_parse_unquote, compat_xpath
from ..utils import find_xpath_attr, int_or_none, update_url_query, xpath_text
from .common import InfoExtractor
class NozIE(InfoExtractor):
    """Extractor for videos hosted on noz.de (Neue Osnabrücker Zeitung)."""

    _VALID_URL = r"https?://(?:www\.)?noz\.de/video/(?P<id>[0-9]+)/"
    _TESTS = [
        {
            "url": "http://www.noz.de/video/25151/32-Deutschland-gewinnt-Badminton-Lnderspiel-in-Melle",
            "info_dict": {
                "id": "25151",
                "ext": "mp4",
                "duration": 215,
                "title": "3:2 - Deutschland gewinnt Badminton-Länderspiel in Melle",
                "description": "Vor rund 370 Zuschauern gewinnt die deutsche Badminton-Nationalmannschaft am Donnerstag ein EM-Vorbereitungsspiel gegen Frankreich in Melle. Video Moritz Frankenberg.",
                "thumbnail": r"re:^http://.*\.jpg",
            },
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        description = self._og_search_description(webpage)

        # The embedded player script URL ("videojs_*") leads to a page that
        # contains the (URL-encoded) location of the XML video configuration.
        edge_url = self._html_search_regex(
            r'<script\s+(?:type="text/javascript"\s+)?src="(.*?/videojs_.*?)"',
            webpage,
            "edge URL",
        )
        edge_content = self._download_webpage(edge_url, "meta configuration")
        config_url_encoded = self._search_regex(
            r'so\.addVariable\("config_url","[^,]*,(.*?)"', edge_content, "config URL"
        )
        config_url = compat_urllib_parse_unquote(config_url_encoded)

        doc = self._download_xml(config_url, "video configuration")
        title = xpath_text(doc, ".//title")
        thumbnail = xpath_text(doc, ".//article/thumbnail/url")
        duration = int_or_none(xpath_text(doc, ".//article/movie/file/duration"))
        formats = []
        for qnode in doc.findall(compat_xpath(".//article/movie/file/qualities/qual")):
            # Prefer a progressive MP4 URL for this quality level when present.
            http_url_ele = find_xpath_attr(
                qnode, "./html_urls/video_url", "format", "video/mp4"
            )
            http_url = http_url_ele.text if http_url_ele is not None else None
            if http_url:
                formats.append(
                    {
                        "url": http_url,
                        "format_name": xpath_text(qnode, "./name"),
                        "format_id": "%s-%s" % ("http", xpath_text(qnode, "./id")),
                        "height": int_or_none(xpath_text(qnode, "./height")),
                        "width": int_or_none(xpath_text(qnode, "./width")),
                        "tbr": int_or_none(xpath_text(qnode, "./bitrate"), scale=1000),
                    }
                )
            else:
                # Otherwise fall back to streaming variants: HDS (f4m) and
                # HLS (m3u8); both are non-fatal so one bad URL is skipped.
                f4m_url = xpath_text(qnode, "url_hd2")
                if f4m_url:
                    formats.extend(
                        self._extract_f4m_formats(
                            update_url_query(f4m_url, {"hdcore": "3.4.0"}),
                            video_id,
                            f4m_id="hds",
                            fatal=False,
                        )
                    )
                m3u8_url_ele = find_xpath_attr(
                    qnode,
                    "./html_urls/video_url",
                    "format",
                    "application/vnd.apple.mpegurl",
                )
                m3u8_url = m3u8_url_ele.text if m3u8_url_ele is not None else None
                if m3u8_url:
                    formats.extend(
                        self._extract_m3u8_formats(
                            m3u8_url,
                            video_id,
                            "mp4",
                            "m3u8_native",
                            m3u8_id="hls",
                            fatal=False,
                        )
                    )
        self._sort_formats(formats)

        return {
            "id": video_id,
            "formats": formats,
            "title": title,
            "duration": duration,
            "description": description,
            "thumbnail": thumbnail,
        }
|
gstreamer | compressor | # Copyright 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import GObject, Gst, Gtk
from quodlibet import _, config, qltk
from quodlibet.plugins import PluginImportException
from quodlibet.plugins.gstelement import GStreamerPlugin
from quodlibet.qltk.util import GSignals
_PLUGIN_ID = "compressor"
_SETTINGS = {
"threshold": [_("_Threshold:"), _("Threshold until the filter is activated"), 1.0],
"ratio": [_("R_atio:"), _("Compression ratio"), 1.0],
}
def get_cfg(option):
    """Return the configured float value for *option*.

    Falls back to the default declared in _SETTINGS. Raises KeyError for
    options not declared in _SETTINGS.
    """
    cfg_option = "%s_%s" % (_PLUGIN_ID, option)
    default = _SETTINGS[option][2]
    # Both declared options ("threshold" and "ratio") are floats, so a single
    # getfloat call replaces the former duplicated if/elif branches.
    return config.getfloat("plugins", cfg_option, default)
def set_cfg(option, value):
    """Persist *value* for *option* in the plugin config."""
    cfg_option = "%s_%s" % (_PLUGIN_ID, option)
    # Only write when the value actually changed.
    if get_cfg(option) != value:
        config.set("plugins", cfg_option, value)
class Preferences(Gtk.VBox):
    """Preferences pane with two sliders (threshold, ratio).

    Emits "changed" whenever either value is modified so the plugin can
    push the new settings to the GStreamer element.
    """

    __gsignals__: GSignals = {
        "changed": (GObject.SignalFlags.RUN_LAST, None, tuple()),
    }

    def __init__(self):
        super().__init__(spacing=12)

        table = Gtk.Table(n_rows=2, n_columns=2)
        table.set_col_spacings(6)
        table.set_row_spacings(6)

        # Build one mnemonic label per setting; kept so the labels can be
        # bound to their scales below.
        labels = {}
        for idx, key in enumerate(["threshold", "ratio"]):
            text, tooltip = _SETTINGS[key][:2]
            label = Gtk.Label(label=text)
            labels[key] = label
            label.set_tooltip_text(tooltip)
            label.set_alignment(0.0, 0.5)
            label.set_padding(0, 6)
            label.set_use_underline(True)
            table.attach(
                label,
                0,
                1,
                idx,
                idx + 1,
                xoptions=Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK,
            )

        # Threshold slider: 0..1 range, shown as a percentage.
        threshold_scale = Gtk.HScale(
            adjustment=Gtk.Adjustment.new(0, 0, 1, 0.01, 0.1, 0)
        )
        threshold_scale.set_digits(2)
        labels["threshold"].set_mnemonic_widget(threshold_scale)
        threshold_scale.set_value_pos(Gtk.PositionType.RIGHT)

        def format_perc(scale, value):
            return _("%d %%") % (value * 100)

        threshold_scale.connect("format-value", format_perc)
        table.attach(threshold_scale, 1, 2, 0, 1)

        def threshold_changed(scale):
            value = scale.get_value()
            set_cfg("threshold", value)
            self.emit("changed")

        threshold_scale.connect("value-changed", threshold_changed)
        threshold_scale.set_value(get_cfg("threshold"))

        # Ratio slider: same 0..1 range, shown as a plain number.
        ratio_scale = Gtk.HScale(adjustment=Gtk.Adjustment.new(0, 0, 1, 0.01, 0.1, 0))
        ratio_scale.set_digits(2)
        labels["ratio"].set_mnemonic_widget(ratio_scale)
        ratio_scale.set_value_pos(Gtk.PositionType.RIGHT)
        table.attach(ratio_scale, 1, 2, 1, 2)

        def ratio_changed(scale):
            value = scale.get_value()
            set_cfg("ratio", value)
            self.emit("changed")

        ratio_scale.connect("value-changed", ratio_changed)
        ratio_scale.set_value(get_cfg("ratio"))

        self.pack_start(qltk.Frame(_("Preferences"), child=table), True, True, 0)
class Compressor(GStreamerPlugin):
    """GStreamer pipeline plugin wrapping the 'audiodynamic' element."""

    PLUGIN_ID = _PLUGIN_ID
    PLUGIN_NAME = _("Audio Compressor")
    PLUGIN_DESC = _(
        "Changes the amplitude of all samples above a specific "
        "threshold with a specific ratio."
    )

    @classmethod
    def setup_element(cls):
        """Create the GStreamer element; returns None if it is unavailable."""
        return Gst.ElementFactory.make("audiodynamic", cls.PLUGIN_ID)

    @classmethod
    def update_element(cls, element):
        """Push the configured settings onto the element."""
        # Fixed compressor mode with a soft-knee characteristic; only ratio
        # and threshold are user-configurable.
        element.set_property("characteristics", "soft-knee")
        element.set_property("mode", "compressor")
        element.set_property("ratio", get_cfg("ratio"))
        element.set_property("threshold", get_cfg("threshold"))

    @classmethod
    def PluginPreferences(cls, window):
        """Build the preferences widget and re-apply settings on change."""
        prefs = Preferences()
        prefs.connect("changed", lambda *x: cls.queue_update())
        return prefs
# Fail the plugin import early when the required GStreamer element cannot be
# created, so the plugin is reported as unavailable instead of erroring later.
if not Compressor.setup_element():
    raise PluginImportException(
        "GStreamer element 'audiodynamic' missing (gst-plugins-good)"
    )
|
config | terminalfilters | __license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2022 The OctoPrint Project - Released under terms of the AGPLv3 License"
from octoprint.schema import BaseModel
from octoprint.vendor.with_attrs_docs import with_attrs_docs
@with_attrs_docs
class TerminalFilterEntry(BaseModel):
    """A named regular expression used to filter terminal output lines."""

    name: str
    """The name of the filter."""

    regex: str
    """The regular expression to match. Use [JavaScript regular expressions](https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions)."""
# Default set of terminal filters: suppress the noisy polling traffic
# (temperature, SD status, position, wait/busy responses). The regexes use
# JavaScript syntax (see TerminalFilterEntry.regex).
DEFAULT_TERMINAL_FILTERS = [
    TerminalFilterEntry(
        name="Suppress temperature messages",
        regex=r"(Send: (N\d+\s+)?M105)|(Recv:\s+(ok\s+([PBN]\d+\s+)*)?([BCLPR]|T\d*):-?\d+)",
    ),
    TerminalFilterEntry(
        name="Suppress SD status messages",
        regex=r"(Send: (N\d+\s+)?M27)|(Recv: SD printing byte)|(Recv: Not SD printing)",
    ),
    TerminalFilterEntry(
        name="Suppress position messages",
        regex=r"(Send:\s+(N\d+\s+)?M114)|(Recv:\s+(ok\s+)?X:[+-]?([0-9]*[.])?[0-9]+\s+Y:[+-]?([0-9]*[.])?[0-9]+\s+Z:[+-]?([0-9]*[.])?[0-9]+\s+E\d*:[+-]?([0-9]*[.])?[0-9]+).*",
    ),
    TerminalFilterEntry(name="Suppress wait responses", regex=r"Recv: wait"),
    TerminalFilterEntry(
        name="Suppress processing responses",
        regex=r"Recv: (echo:\s*)?busy:\s*processing",
    ),
]
|
utils | localization | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import
__license__ = "GPL v3"
__copyright__ = "2009, Kovid Goyal <kovid@kovidgoyal.net>"
__docformat__ = "restructuredtext en"
import locale
import os
import re
from gettext import GNUTranslations, NullTranslations
import cPickle
import cStringIO
_available_translations = None
def available_translations():
    """Return the list of language codes that have usable translations.

    Lazily loads localization/stats.pickle; the result is cached in the
    module global _available_translations.
    """
    global _available_translations
    if _available_translations is None:
        stats = P("localization/stats.pickle", allow_user_override=False)
        if os.path.exists(stats):
            stats = cPickle.load(open(stats, "rb"))
        else:
            stats = {}
        # Only offer languages whose coverage stat exceeds 0.1 (10%).
        _available_translations = [x for x in stats if stats[x] > 0.1]
    return _available_translations
def get_system_locale():
    """Best-effort detection of the OS locale string (e.g. "en_US").

    Platform-specific lookups (Windows API, OS X usbobserver plugin) are
    tried first, then environment-based fallbacks. Returns None when no
    locale can be determined.
    """
    from calibre.constants import isosx, iswindows, plugins
    lang = None
    if iswindows:
        try:
            from calibre.constants import get_windows_user_locale_name
            lang = get_windows_user_locale_name()
            lang = lang.strip()
            if not lang:
                lang = None
        except:
            pass  # Windows XP does not have the GetUserDefaultLocaleName fn
    elif isosx:
        try:
            lang = plugins["usbobserver"][0].user_locale() or None
        except:
            # Fallback to environment vars if something bad happened
            import traceback
            traceback.print_exc()
    if lang is None:
        try:
            lang = locale.getdefaultlocale(
                ["LANGUAGE", "LC_ALL", "LC_CTYPE", "LC_MESSAGES", "LANG"]
            )[0]
        except:
            pass  # This happens on Ubuntu apparently
    if lang is None and "LANG" in os.environ:  # Needed for OS X
        try:
            lang = os.environ["LANG"]
        except:
            pass
    if lang:
        # Normalize "en-US" -> "en_US" and keep at most language + territory
        # (drops any encoding/modifier suffix).
        lang = lang.replace("-", "_")
        lang = "_".join(lang.split("_")[:2])
    return lang
def get_lang():
    "Try to figure out what language to display the interface in"
    from calibre.utils.config_base import prefs
    # Explicit user preference wins, but can itself be overridden via the
    # CALIBRE_OVERRIDE_LANG environment variable.
    lang = prefs["language"]
    lang = os.environ.get("CALIBRE_OVERRIDE_LANG", lang)
    if lang:
        return lang
    try:
        lang = get_system_locale()
    except:
        import traceback
        traceback.print_exc()
        lang = None
    if lang:
        # Keep only a well-formed "ll"/"lll" or "ll_CC" prefix.
        match = re.match("[a-z]{2,3}(_[A-Z]{2}){0,1}", lang)
        if match:
            lang = match.group()
    if lang == "zh":
        lang = "zh_CN"
    if not lang:
        lang = "en"
    return lang
def is_rtl():
    """Whether the current interface language is written right-to-left."""
    lang_prefix = get_lang()[:2].lower()
    return lang_prefix in ("he", "ar")
def get_lc_messages_path(lang):
    """Return the locale directory name for *lang* inside locales.zip.

    The exact code is tried first, then the bare language part; None is
    returned when no translation matches or the zip file is missing.
    """
    if not zf_exists():
        return None
    candidates = available_translations()
    if lang in candidates:
        return lang
    base = lang.split("_")[0].lower()
    if base in candidates:
        return base
    return None
def zf_exists():
    """Whether the bundled locales.zip translation archive is present."""
    return os.path.exists(P("localization/locales.zip", allow_user_override=False))
_lang_trans = None
def get_all_translators():
    """Yield (lang, GNUTranslations) pairs for every available translation."""
    from zipfile import ZipFile
    with ZipFile(P("localization/locales.zip", allow_user_override=False), "r") as zf:
        for lang in available_translations():
            mpath = get_lc_messages_path(lang)
            if mpath is not None:
                buf = cStringIO.StringIO(zf.read(mpath + "/messages.mo"))
                yield lang, GNUTranslations(buf)
def get_single_translator(mpath):
    """Load the GNUTranslations object for one locale directory *mpath*."""
    from zipfile import ZipFile
    with ZipFile(P("localization/locales.zip", allow_user_override=False), "r") as zf:
        buf = cStringIO.StringIO(zf.read(mpath + "/messages.mo"))
        return GNUTranslations(buf)
def get_translator(bcp_47_code):
    """Return (found, lang, translator) for a BCP 47 language code.

    found is False when no matching translation exists and English had to
    be used as the fallback.
    """
    # Normalize "ll-CC" / "ll_cc" to "ll_CC" with an ISO 639-1 language part.
    parts = bcp_47_code.replace("-", "_").split("_")[:2]
    parts[0] = lang_as_iso639_1(parts[0].lower()) or "en"
    if len(parts) > 1:
        parts[1] = parts[1].upper()
    lang = "_".join(parts)
    # Map bare codes to their most widely used regional variant.
    lang = {"pt": "pt_BR", "zh": "zh_CN"}.get(lang, lang)
    available = available_translations()
    found = True
    if lang not in available:
        # Retry with just the language part before falling back entirely.
        lang = {"pt": "pt_BR", "zh": "zh_CN"}.get(parts[0], parts[0])
        if lang not in available:
            lang = get_lang()
            if lang not in available:
                lang = "en"
                found = False
    if lang == "en":
        return found, lang, NullTranslations()
    return found, lang, get_single_translator(lang)
# Fallback US-English locale data (day/month names, date/time formats,
# numeric separators). Overridden per-language by a bundled lcdata.pickle
# when one is found in set_translators().
lcdata = {
    "abday": ("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"),
    "abmon": (
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
    ),
    "d_fmt": "%m/%d/%Y",
    "d_t_fmt": "%a %d %b %Y %r %Z",
    "day": (
        "Sunday",
        "Monday",
        "Tuesday",
        "Wednesday",
        "Thursday",
        "Friday",
        "Saturday",
    ),
    "mon": (
        "January",
        "February",
        "March",
        "April",
        "May",
        "June",
        "July",
        "August",
        "September",
        "October",
        "November",
        "December",
    ),
    "noexpr": "^[nN].*",
    "radixchar": ".",
    "t_fmt": "%r",
    "t_fmt_ampm": "%I:%M:%S %p",
    "thousep": ",",
    "yesexpr": "^[yY].*",
}
def load_po(path):
    """Compile the .po file at *path* into an in-memory .mo buffer.

    Returns a StringIO with the compiled catalog, or None when compilation
    fails (a warning is printed and the file is ignored).
    """
    from calibre.translations.msgfmt import make
    buf = cStringIO.StringIO()
    try:
        make(path, buf)
    except Exception:
        print(("Failed to compile translations file: %s, ignoring") % path)
        buf = None
    else:
        # Rewind into a fresh read buffer for GNUTranslations.
        buf = cStringIO.StringIO(buf.getvalue())
    return buf
def set_translators():
    """Install the gettext translator for the UI language into builtins.

    Also loads the per-language iso639 name catalog and locale data
    (lcdata) from the bundled locales.zip when available.
    """
    global _lang_trans, lcdata
    # To test different translations invoke as
    # CALIBRE_OVERRIDE_LANG=de_DE.utf8 program
    lang = get_lang()
    t = buf = iso639 = None
    # CALIBRE_TEST_TRANSLATION points at a loose .po file that overrides
    # the bundled catalog (development aid).
    if "CALIBRE_TEST_TRANSLATION" in os.environ:
        buf = load_po(os.path.expanduser(os.environ["CALIBRE_TEST_TRANSLATION"]))
    if lang:
        mpath = get_lc_messages_path(lang)
        # A readable <mpath>.po file also takes precedence over the zip.
        if buf is None and mpath and os.access(mpath + ".po", os.R_OK):
            buf = load_po(mpath + ".po")
        if mpath is not None:
            from zipfile import ZipFile
            with ZipFile(
                P("localization/locales.zip", allow_user_override=False), "r"
            ) as zf:
                if buf is None:
                    buf = cStringIO.StringIO(zf.read(mpath + "/messages.mo"))
                # "nds" has no iso639/lcdata of its own; reuse the "de" files.
                if mpath == "nds":
                    mpath = "de"
                isof = mpath + "/iso639.mo"
                try:
                    iso639 = cStringIO.StringIO(zf.read(isof))
                except:
                    pass  # No iso639 translations for this lang
                if buf is not None:
                    try:
                        lcdata = cPickle.loads(zf.read(mpath + "/lcdata.pickle"))
                    except:
                        pass  # No lcdata
    if buf is not None:
        t = GNUTranslations(buf)
        if iso639 is not None:
            # Language-name lookups fall back to the iso639 catalog.
            iso639 = _lang_trans = GNUTranslations(iso639)
            t.add_fallback(iso639)
    if t is None:
        t = NullTranslations()
    try:
        set_translators.lang = t.info().get("language")
    except Exception:
        pass
    t.install(unicode=True, names=("ngettext",))
    # Now that we have installed a translator, we have to retranslate the help
    # for the global prefs object as it was instantiated in get_lang(), before
    # the translator was installed.
    from calibre.utils.config_base import prefs
    prefs.retranslate_help()
set_translators.lang = None
_iso639 = None
# Language/territory combinations that are not covered by the iso639 data,
# mapped to their display names. The _() calls run before the translator is
# installed, so get_language() re-translates these values on lookup.
_extra_lang_codes = {
    "pt_BR": _("Brazilian Portuguese"),
    "en_GB": _("English (UK)"),
    "zh_CN": _("Simplified Chinese"),
    "zh_TW": _("Traditional Chinese"),
    "en": _("English"),
    "en_US": _("English (United States)"),
    "en_AR": _("English (Argentina)"),
    "en_AU": _("English (Australia)"),
    "en_JP": _("English (Japan)"),
    "en_DE": _("English (Germany)"),
    "en_BG": _("English (Bulgaria)"),
    "en_EG": _("English (Egypt)"),
    "en_NZ": _("English (New Zealand)"),
    "en_CA": _("English (Canada)"),
    "en_GR": _("English (Greece)"),
    "en_IN": _("English (India)"),
    "en_NP": _("English (Nepal)"),
    "en_TH": _("English (Thailand)"),
    "en_TR": _("English (Turkey)"),
    "en_CY": _("English (Cyprus)"),
    "en_CZ": _("English (Czech Republic)"),
    "en_PH": _("English (Philippines)"),
    "en_PK": _("English (Pakistan)"),
    "en_PL": _("English (Poland)"),
    "en_HR": _("English (Croatia)"),
    "en_HU": _("English (Hungary)"),
    "en_ID": _("English (Indonesia)"),
    "en_IL": _("English (Israel)"),
    "en_RU": _("English (Russia)"),
    "en_SG": _("English (Singapore)"),
    "en_YE": _("English (Yemen)"),
    "en_IE": _("English (Ireland)"),
    "en_CN": _("English (China)"),
    "en_TW": _("English (Taiwan)"),
    "en_ZA": _("English (South Africa)"),
    "es_PY": _("Spanish (Paraguay)"),
    "es_UY": _("Spanish (Uruguay)"),
    "es_AR": _("Spanish (Argentina)"),
    "es_CR": _("Spanish (Costa Rica)"),
    "es_MX": _("Spanish (Mexico)"),
    "es_CU": _("Spanish (Cuba)"),
    "es_CL": _("Spanish (Chile)"),
    "es_EC": _("Spanish (Ecuador)"),
    "es_HN": _("Spanish (Honduras)"),
    "es_VE": _("Spanish (Venezuela)"),
    "es_BO": _("Spanish (Bolivia)"),
    "es_NI": _("Spanish (Nicaragua)"),
    "es_CO": _("Spanish (Colombia)"),
    "de_AT": _("German (AT)"),
    "fr_BE": _("French (BE)"),
    "nl": _("Dutch (NL)"),
    "nl_BE": _("Dutch (BE)"),
    "und": _("Unknown"),
}
# Never executed at runtime: these _() calls exist (presumably) only so the
# translation-string extractor picks up Qt's built-in UI strings -- TODO confirm.
if False:
    # Extra strings needed for Qt

    # NOTE: Ante Meridian (i.e. like 10:00 AM)
    _("AM")
    # NOTE: Post Meridian (i.e. like 10:00 PM)
    _("PM")
    # NOTE: Ante Meridian (i.e. like 10:00 am)
    _("am")
    # NOTE: Post Meridian (i.e. like 10:00 pm)
    _("pm")
    _("&Copy")
    _("Select All")
    _("Copy Link")
    _("&Select All")
    _("Copy &Link location")
    _("&Undo")
    _("&Redo")
    _("Cu&t")
    _("&Paste")
    _("Paste and Match Style")
    _("Directions")
    _("Left to Right")
    _("Right to Left")
    _("Fonts")
    _("&Step up")
    _("Step &down")
    _("Close without Saving")
# Case-insensitive lookup table mapping lowercased codes back to the
# canonical keys of _extra_lang_codes (dict comprehension replaces the
# former manual accumulation loop; works on Python 2.7+).
_lcase_map = {k.lower(): k for k in _extra_lang_codes}
def _load_iso639():
    """Lazily load and cache the iso639 language-data pickle."""
    global _iso639
    if _iso639 is None:
        ip = P("localization/iso639.pickle", allow_user_override=False)
        with open(ip, "rb") as f:
            _iso639 = cPickle.load(f)
    return _iso639
def get_language(lang):
    """Return the localized display name for language code *lang*."""
    translate = _
    # Normalize case via the canonical-key map first.
    lang = _lcase_map.get(lang, lang)
    if lang in _extra_lang_codes:
        # The translator was not active when _extra_lang_codes was defined, so
        # re-translate
        return translate(_extra_lang_codes[lang])
    iso639 = _load_iso639()
    ans = lang
    # Look up the bare language part in the 2-letter or 3-letter tables.
    lang = lang.split("_")[0].lower()
    if len(lang) == 2:
        ans = iso639["by_2"].get(lang, ans)
    elif len(lang) == 3:
        if lang in iso639["by_3b"]:
            ans = iso639["by_3b"][lang]
        else:
            ans = iso639["by_3t"].get(lang, ans)
    try:
        # Prefer the dedicated iso639 catalog; _lang_trans may be None.
        return _lang_trans.ugettext(ans)
    except AttributeError:
        return translate(ans)
def calibre_langcode_to_name(lc, localize=True):
    """Map a 3-letter (ISO 639 terminology) code to its (localized) name.

    Falls back to returning the code itself when it is unknown.
    """
    iso639 = _load_iso639()
    translate = _ if localize else lambda x: x
    try:
        return translate(iso639["by_3t"][lc])
    except:
        pass
    return lc
def canonicalize_lang(raw):
    """Normalize *raw* (a code or language name) to a 3-letter ISO 639 code.

    Returns None when the input is empty or unrecognized.
    """
    if not raw:
        return None
    if not isinstance(raw, unicode):
        raw = raw.decode("utf-8", "ignore")
    raw = raw.lower().strip()
    if not raw:
        return None
    # Keep only the primary language subtag ("en-US" / "en_US" -> "en").
    raw = raw.replace("_", "-").partition("-")[0].strip()
    if not raw:
        return None
    iso639 = _load_iso639()
    m2to3 = iso639["2to3"]
    if len(raw) == 2:
        ans = m2to3.get(raw, None)
        if ans is not None:
            return ans
    elif len(raw) == 3:
        # Already terminology code, or bibliographic -> terminology.
        if raw in iso639["by_3t"]:
            return raw
        if raw in iso639["3bto3t"]:
            return iso639["3bto3t"][raw]
    # Last resort: treat the input as an English language name.
    return iso639["name_map"].get(raw, None)
_lang_map = None
def lang_map():
    "Return mapping of ISO 639 3 letter codes to localized language names"
    iso639 = _load_iso639()
    translate = _
    global _lang_map
    # Built once and cached; assumes the translator does not change afterwards.
    if _lang_map is None:
        _lang_map = {k: translate(v) for k, v in iso639["by_3t"].iteritems()}
    return _lang_map
def langnames_to_langcodes(names):
    """
    Given a list of localized language names return a mapping of the names to 3
    letter ISO 639 language codes. If a name is not recognized, it is mapped to
    None.
    """
    iso639 = _load_iso639()
    translate = _
    ans = {}
    names = set(names)
    for k, v in iso639["by_3t"].iteritems():
        tv = translate(v)
        if tv in names:
            names.remove(tv)
            ans[tv] = k
        if not names:
            # All requested names resolved; stop scanning the table.
            break
    # Anything left over was not recognized.
    for x in names:
        ans[x] = None
    return ans
def lang_as_iso639_1(name_or_code):
    """Convert a language name/code to its 2-letter ISO 639-1 code, or None."""
    code = canonicalize_lang(name_or_code)
    if code is not None:
        iso639 = _load_iso639()
        return iso639["3to2"].get(code, None)
_udc = None
def get_udc():
    """Return the cached Unihandecoder for the current UI language."""
    global _udc
    if _udc is None:
        from calibre.ebooks.unihandecode import Unihandecoder
        _udc = Unihandecoder(lang=get_lang())
    return _udc
def localize_user_manual_link(url):
    """Rewrite a user-manual URL to point at the translated manual, when the
    translation for the current UI language is sufficiently complete."""
    lc = lang_as_iso639_1(get_lang())
    if lc == "en":
        return url
    import json
    try:
        stats = json.loads(
            P(
                "user-manual-translation-stats.json",
                allow_user_override=False,
                data=True,
            )
        )
    except EnvironmentError:
        return url
    # Only link to translations that are at least 30% complete.
    if stats.get(lc, 0) < 0.3:
        return url
    from urlparse import urlparse, urlunparse
    parts = urlparse(url)
    # Insert the language code into the path ("/generated/<lc>/" and "/<lc>/...").
    path = re.sub(r"/generated/[a-z]+/", "/generated/%s/" % lc, parts.path or "")
    path = "/%s%s" % (lc, path)
    parts = list(parts)
    parts[2] = path
    return urlunparse(parts)
|
posthog | errors | import re
from dataclasses import dataclass
from typing import Dict
from clickhouse_driver.errors import ServerException
from posthog.exceptions import EstimatedQueryExecutionTimeTooLong
class InternalCHQueryError(ServerException):
    """ClickHouse query error that is NOT safe to surface to end users."""

    # Lowercased symbolic ClickHouse error name, e.g. "unknown_exception"
    # (set by wrap_query_error from ErrorCodeMeta.name).
    code_name: str

    def __init__(self, message, *, code=None, nested=None, code_name):
        self.code_name = code_name
        super().__init__(message, code, nested)
class ExposedCHQueryError(InternalCHQueryError):
    """ClickHouse query error whose message is safe to show to the user.

    ``__str__`` extracts the human-readable part between "DB::Exception:"
    and "Stack trace:". Previously it used ``str.index``, which raises
    ValueError when either marker is missing; ``str.find`` with fallbacks
    degrades gracefully to (the relevant part of) the full message instead.
    """

    def __str__(self) -> str:
        message: str = self.message
        marker = "DB::Exception:"
        start_index = message.find(marker)
        # Missing "DB::Exception:" -> start at the beginning of the message.
        start_index = 0 if start_index == -1 else start_index + len(marker)
        end_index = message.find("Stack trace:", start_index)
        # Missing "Stack trace:" -> keep everything to the end.
        if end_index == -1:
            end_index = len(message)
        return message[start_index:end_index].strip()
@dataclass
class ErrorCodeMeta:
    """Static metadata about one ClickHouse numeric error code."""

    # Symbolic ClickHouse error name, e.g. "UNKNOWN_EXCEPTION".
    name: str
    user_safe: bool = False  # Whether this error code is safe to show to the user and couldn't be caught at HogQL level
def wrap_query_error(err: Exception) -> Exception:
    """Beautifies clickhouse client errors, using custom error classes for every code.

    Non-ServerException errors are returned unchanged.
    """
    if not isinstance(err, ServerException):
        return err

    # Return a 512 error for queries which would time out
    match = re.search(
        r"Estimated query execution time \(.* seconds\) is too long.", err.message
    )
    if match:
        return EstimatedQueryExecutionTimeTooLong(detail=match.group(0))

    # :TRICKY: Return a custom class for every code by looking up the short name and creating a class dynamically.
    if hasattr(err, "code"):
        meta = look_up_error_code_meta(err)
        # e.g. "UNKNOWN_EXCEPTION" -> "CHQueryErrorUnknownException".
        name = f"CHQueryError{meta.name.replace('_', ' ').title().replace(' ', '')}"
        # User-safe codes get the exposed class so their message may be shown.
        processed_error_class = (
            ExposedCHQueryError if meta.user_safe else InternalCHQueryError
        )
        return type(name, (processed_error_class,), {})(
            err.message, code=err.code, code_name=meta.name.lower()
        )
    return err
def look_up_error_code_meta(error: ServerException) -> ErrorCodeMeta:
    """Map an exception's numeric ClickHouse code to its ErrorCodeMeta.

    Falls back to CLICKHOUSE_UNKNOWN_EXCEPTION when the exception carries no
    code or the code is not present in the lookup table.
    """
    code = getattr(error, "code", None)
    if code is not None:
        meta = CLICKHOUSE_ERROR_CODE_LOOKUP.get(code)
        if meta is not None:
            return meta
    return CLICKHOUSE_UNKNOWN_EXCEPTION
#
# From https://github.com/ClickHouse/ClickHouse/blob/22.3/src/Common/ErrorCodes.cpp#L16-L622
#
# Please keep this list up to date at each ClickHouse upgrade.
#
# You can fetch and print an updated list of error codes with something like:
#
# import json
# import re
# import requests
# output = {}
# resp = requests.get('https://raw.githubusercontent.com/ClickHouse/ClickHouse/22.3/src/Common/ErrorCodes.cpp')
# for line in resp.text.split("\n"):
# result = re.search(r"^M\(([0-9]+), (\S+)\).*$", line.strip())
# if result is not None:
# output[int(result.group(1))] = result.group(2)
# print(json.dumps(output, sort_keys=True, indent=4))
#
CLICKHOUSE_UNKNOWN_EXCEPTION = ErrorCodeMeta("UNKNOWN_EXCEPTION")
CLICKHOUSE_ERROR_CODE_LOOKUP: Dict[int, ErrorCodeMeta] = {
0: ErrorCodeMeta("OK"),
1: ErrorCodeMeta("UNSUPPORTED_METHOD"),
2: ErrorCodeMeta("UNSUPPORTED_PARAMETER"),
3: ErrorCodeMeta("UNEXPECTED_END_OF_FILE"),
4: ErrorCodeMeta("EXPECTED_END_OF_FILE"),
6: ErrorCodeMeta("CANNOT_PARSE_TEXT"),
7: ErrorCodeMeta("INCORRECT_NUMBER_OF_COLUMNS"),
8: ErrorCodeMeta("THERE_IS_NO_COLUMN"),
9: ErrorCodeMeta("SIZES_OF_COLUMNS_DOESNT_MATCH"),
10: ErrorCodeMeta("NOT_FOUND_COLUMN_IN_BLOCK"),
11: ErrorCodeMeta("POSITION_OUT_OF_BOUND"),
12: ErrorCodeMeta("PARAMETER_OUT_OF_BOUND"),
13: ErrorCodeMeta("SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH"),
15: ErrorCodeMeta("DUPLICATE_COLUMN"),
16: ErrorCodeMeta("NO_SUCH_COLUMN_IN_TABLE"),
17: ErrorCodeMeta("DELIMITER_IN_STRING_LITERAL_DOESNT_MATCH"),
18: ErrorCodeMeta("CANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMN"),
19: ErrorCodeMeta("SIZE_OF_FIXED_STRING_DOESNT_MATCH"),
20: ErrorCodeMeta("NUMBER_OF_COLUMNS_DOESNT_MATCH"),
21: ErrorCodeMeta("CANNOT_READ_ALL_DATA_FROM_TAB_SEPARATED_INPUT"),
22: ErrorCodeMeta("CANNOT_PARSE_ALL_VALUE_FROM_TAB_SEPARATED_INPUT"),
23: ErrorCodeMeta("CANNOT_READ_FROM_ISTREAM"),
24: ErrorCodeMeta("CANNOT_WRITE_TO_OSTREAM"),
25: ErrorCodeMeta("CANNOT_PARSE_ESCAPE_SEQUENCE"),
26: ErrorCodeMeta("CANNOT_PARSE_QUOTED_STRING"),
27: ErrorCodeMeta("CANNOT_PARSE_INPUT_ASSERTION_FAILED"),
28: ErrorCodeMeta("CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER"),
29: ErrorCodeMeta("CANNOT_PRINT_INTEGER"),
30: ErrorCodeMeta("CANNOT_READ_SIZE_OF_COMPRESSED_CHUNK"),
31: ErrorCodeMeta("CANNOT_READ_COMPRESSED_CHUNK"),
32: ErrorCodeMeta("ATTEMPT_TO_READ_AFTER_EOF"),
33: ErrorCodeMeta("CANNOT_READ_ALL_DATA"),
34: ErrorCodeMeta("TOO_MANY_ARGUMENTS_FOR_FUNCTION"),
35: ErrorCodeMeta("TOO_FEW_ARGUMENTS_FOR_FUNCTION"),
36: ErrorCodeMeta("BAD_ARGUMENTS"),
37: ErrorCodeMeta("UNKNOWN_ELEMENT_IN_AST"),
38: ErrorCodeMeta("CANNOT_PARSE_DATE", user_safe=True),
39: ErrorCodeMeta("TOO_LARGE_SIZE_COMPRESSED"),
40: ErrorCodeMeta("CHECKSUM_DOESNT_MATCH"),
41: ErrorCodeMeta("CANNOT_PARSE_DATETIME", user_safe=True),
42: ErrorCodeMeta("NUMBER_OF_ARGUMENTS_DOESNT_MATCH"),
43: ErrorCodeMeta("ILLEGAL_TYPE_OF_ARGUMENT", user_safe=True),
44: ErrorCodeMeta("ILLEGAL_COLUMN"),
45: ErrorCodeMeta("ILLEGAL_NUMBER_OF_RESULT_COLUMNS"),
46: ErrorCodeMeta("UNKNOWN_FUNCTION", user_safe=True),
47: ErrorCodeMeta("UNKNOWN_IDENTIFIER"),
48: ErrorCodeMeta("NOT_IMPLEMENTED"),
49: ErrorCodeMeta("LOGICAL_ERROR"),
50: ErrorCodeMeta("UNKNOWN_TYPE"),
51: ErrorCodeMeta("EMPTY_LIST_OF_COLUMNS_QUERIED"),
52: ErrorCodeMeta("COLUMN_QUERIED_MORE_THAN_ONCE"),
53: ErrorCodeMeta("TYPE_MISMATCH", user_safe=True),
54: ErrorCodeMeta("STORAGE_DOESNT_ALLOW_PARAMETERS"),
55: ErrorCodeMeta("STORAGE_REQUIRES_PARAMETER"),
56: ErrorCodeMeta("UNKNOWN_STORAGE"),
57: ErrorCodeMeta("TABLE_ALREADY_EXISTS"),
58: ErrorCodeMeta("TABLE_METADATA_ALREADY_EXISTS"),
59: ErrorCodeMeta("ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER"),
60: ErrorCodeMeta("UNKNOWN_TABLE"),
61: ErrorCodeMeta("ONLY_FILTER_COLUMN_IN_BLOCK"),
62: ErrorCodeMeta("SYNTAX_ERROR"),
63: ErrorCodeMeta("UNKNOWN_AGGREGATE_FUNCTION"),
64: ErrorCodeMeta("CANNOT_READ_AGGREGATE_FUNCTION_FROM_TEXT"),
65: ErrorCodeMeta("CANNOT_WRITE_AGGREGATE_FUNCTION_AS_TEXT"),
66: ErrorCodeMeta("NOT_A_COLUMN"),
67: ErrorCodeMeta("ILLEGAL_KEY_OF_AGGREGATION"),
68: ErrorCodeMeta("CANNOT_GET_SIZE_OF_FIELD"),
69: ErrorCodeMeta("ARGUMENT_OUT_OF_BOUND"),
70: ErrorCodeMeta("CANNOT_CONVERT_TYPE"),
71: ErrorCodeMeta("CANNOT_WRITE_AFTER_END_OF_BUFFER"),
72: ErrorCodeMeta("CANNOT_PARSE_NUMBER"),
73: ErrorCodeMeta("UNKNOWN_FORMAT"),
74: ErrorCodeMeta("CANNOT_READ_FROM_FILE_DESCRIPTOR"),
75: ErrorCodeMeta("CANNOT_WRITE_TO_FILE_DESCRIPTOR"),
76: ErrorCodeMeta("CANNOT_OPEN_FILE"),
77: ErrorCodeMeta("CANNOT_CLOSE_FILE"),
78: ErrorCodeMeta("UNKNOWN_TYPE_OF_QUERY"),
79: ErrorCodeMeta("INCORRECT_FILE_NAME"),
80: ErrorCodeMeta("INCORRECT_QUERY"),
81: ErrorCodeMeta("UNKNOWN_DATABASE"),
82: ErrorCodeMeta("DATABASE_ALREADY_EXISTS"),
83: ErrorCodeMeta("DIRECTORY_DOESNT_EXIST"),
84: ErrorCodeMeta("DIRECTORY_ALREADY_EXISTS"),
85: ErrorCodeMeta("FORMAT_IS_NOT_SUITABLE_FOR_INPUT"),
86: ErrorCodeMeta("RECEIVED_ERROR_FROM_REMOTE_IO_SERVER"),
87: ErrorCodeMeta("CANNOT_SEEK_THROUGH_FILE"),
88: ErrorCodeMeta("CANNOT_TRUNCATE_FILE"),
89: ErrorCodeMeta("UNKNOWN_COMPRESSION_METHOD"),
90: ErrorCodeMeta("EMPTY_LIST_OF_COLUMNS_PASSED"),
91: ErrorCodeMeta("SIZES_OF_MARKS_FILES_ARE_INCONSISTENT"),
92: ErrorCodeMeta("EMPTY_DATA_PASSED"),
93: ErrorCodeMeta("UNKNOWN_AGGREGATED_DATA_VARIANT"),
94: ErrorCodeMeta("CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS"),
95: ErrorCodeMeta("CANNOT_READ_FROM_SOCKET"),
96: ErrorCodeMeta("CANNOT_WRITE_TO_SOCKET"),
97: ErrorCodeMeta("CANNOT_READ_ALL_DATA_FROM_CHUNKED_INPUT"),
98: ErrorCodeMeta("CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM"),
99: ErrorCodeMeta("UNKNOWN_PACKET_FROM_CLIENT"),
100: ErrorCodeMeta("UNKNOWN_PACKET_FROM_SERVER"),
101: ErrorCodeMeta("UNEXPECTED_PACKET_FROM_CLIENT"),
102: ErrorCodeMeta("UNEXPECTED_PACKET_FROM_SERVER"),
103: ErrorCodeMeta("RECEIVED_DATA_FOR_WRONG_QUERY_ID"),
104: ErrorCodeMeta("TOO_SMALL_BUFFER_SIZE"),
105: ErrorCodeMeta("CANNOT_READ_HISTORY"),
106: ErrorCodeMeta("CANNOT_APPEND_HISTORY"),
107: ErrorCodeMeta("FILE_DOESNT_EXIST"),
108: ErrorCodeMeta("NO_DATA_TO_INSERT"),
109: ErrorCodeMeta("CANNOT_BLOCK_SIGNAL"),
110: ErrorCodeMeta("CANNOT_UNBLOCK_SIGNAL"),
111: ErrorCodeMeta("CANNOT_MANIPULATE_SIGSET"),
112: ErrorCodeMeta("CANNOT_WAIT_FOR_SIGNAL"),
113: ErrorCodeMeta("THERE_IS_NO_SESSION"),
114: ErrorCodeMeta("CANNOT_CLOCK_GETTIME"),
115: ErrorCodeMeta("UNKNOWN_SETTING"),
116: ErrorCodeMeta("THERE_IS_NO_DEFAULT_VALUE"),
117: ErrorCodeMeta("INCORRECT_DATA"),
119: ErrorCodeMeta("ENGINE_REQUIRED"),
120: ErrorCodeMeta("CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE"),
121: ErrorCodeMeta("UNSUPPORTED_JOIN_KEYS"),
122: ErrorCodeMeta("INCOMPATIBLE_COLUMNS"),
123: ErrorCodeMeta("UNKNOWN_TYPE_OF_AST_NODE"),
124: ErrorCodeMeta("INCORRECT_ELEMENT_OF_SET"),
125: ErrorCodeMeta("INCORRECT_RESULT_OF_SCALAR_SUBQUERY"),
126: ErrorCodeMeta("CANNOT_GET_RETURN_TYPE"),
127: ErrorCodeMeta("ILLEGAL_INDEX"),
128: ErrorCodeMeta("TOO_LARGE_ARRAY_SIZE"),
129: ErrorCodeMeta("FUNCTION_IS_SPECIAL"),
130: ErrorCodeMeta("CANNOT_READ_ARRAY_FROM_TEXT"),
131: ErrorCodeMeta("TOO_LARGE_STRING_SIZE"),
133: ErrorCodeMeta("AGGREGATE_FUNCTION_DOESNT_ALLOW_PARAMETERS"),
134: ErrorCodeMeta("PARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALS"),
135: ErrorCodeMeta("ZERO_ARRAY_OR_TUPLE_INDEX", user_safe=True),
137: ErrorCodeMeta("UNKNOWN_ELEMENT_IN_CONFIG"),
138: ErrorCodeMeta("EXCESSIVE_ELEMENT_IN_CONFIG"),
139: ErrorCodeMeta("NO_ELEMENTS_IN_CONFIG"),
140: ErrorCodeMeta("ALL_REQUESTED_COLUMNS_ARE_MISSING"),
141: ErrorCodeMeta("SAMPLING_NOT_SUPPORTED"),
142: ErrorCodeMeta("NOT_FOUND_NODE"),
143: ErrorCodeMeta("FOUND_MORE_THAN_ONE_NODE"),
144: ErrorCodeMeta("FIRST_DATE_IS_BIGGER_THAN_LAST_DATE"),
145: ErrorCodeMeta("UNKNOWN_OVERFLOW_MODE"),
146: ErrorCodeMeta("QUERY_SECTION_DOESNT_MAKE_SENSE"),
147: ErrorCodeMeta("NOT_FOUND_FUNCTION_ELEMENT_FOR_AGGREGATE"),
148: ErrorCodeMeta("NOT_FOUND_RELATION_ELEMENT_FOR_CONDITION"),
149: ErrorCodeMeta("NOT_FOUND_RHS_ELEMENT_FOR_CONDITION"),
150: ErrorCodeMeta("EMPTY_LIST_OF_ATTRIBUTES_PASSED"),
151: ErrorCodeMeta("INDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGE"),
152: ErrorCodeMeta("UNKNOWN_DIRECTION_OF_SORTING"),
153: ErrorCodeMeta("ILLEGAL_DIVISION", user_safe=True),
154: ErrorCodeMeta("AGGREGATE_FUNCTION_NOT_APPLICABLE"),
155: ErrorCodeMeta("UNKNOWN_RELATION"),
156: ErrorCodeMeta("DICTIONARIES_WAS_NOT_LOADED"),
157: ErrorCodeMeta("ILLEGAL_OVERFLOW_MODE"),
158: ErrorCodeMeta("TOO_MANY_ROWS"),
159: ErrorCodeMeta("TIMEOUT_EXCEEDED"),
160: ErrorCodeMeta("TOO_SLOW"),
161: ErrorCodeMeta("TOO_MANY_COLUMNS"),
162: ErrorCodeMeta("TOO_DEEP_SUBQUERIES"),
163: ErrorCodeMeta("TOO_DEEP_PIPELINE"),
164: ErrorCodeMeta("READONLY"),
165: ErrorCodeMeta("TOO_MANY_TEMPORARY_COLUMNS"),
166: ErrorCodeMeta("TOO_MANY_TEMPORARY_NON_CONST_COLUMNS"),
167: ErrorCodeMeta("TOO_DEEP_AST"),
168: ErrorCodeMeta("TOO_BIG_AST"),
169: ErrorCodeMeta("BAD_TYPE_OF_FIELD"),
170: ErrorCodeMeta("BAD_GET"),
172: ErrorCodeMeta("CANNOT_CREATE_DIRECTORY"),
173: ErrorCodeMeta("CANNOT_ALLOCATE_MEMORY"),
174: ErrorCodeMeta("CYCLIC_ALIASES"),
176: ErrorCodeMeta("CHUNK_NOT_FOUND"),
177: ErrorCodeMeta("DUPLICATE_CHUNK_NAME"),
178: ErrorCodeMeta("MULTIPLE_ALIASES_FOR_EXPRESSION"),
179: ErrorCodeMeta("MULTIPLE_EXPRESSIONS_FOR_ALIAS"),
180: ErrorCodeMeta("THERE_IS_NO_PROFILE"),
181: ErrorCodeMeta("ILLEGAL_FINAL"),
182: ErrorCodeMeta("ILLEGAL_PREWHERE"),
183: ErrorCodeMeta("UNEXPECTED_EXPRESSION"),
184: ErrorCodeMeta("ILLEGAL_AGGREGATION", user_safe=True),
185: ErrorCodeMeta("UNSUPPORTED_MYISAM_BLOCK_TYPE"),
186: ErrorCodeMeta("UNSUPPORTED_COLLATION_LOCALE"),
187: ErrorCodeMeta("COLLATION_COMPARISON_FAILED"),
188: ErrorCodeMeta("UNKNOWN_ACTION"),
189: ErrorCodeMeta("TABLE_MUST_NOT_BE_CREATED_MANUALLY"),
190: ErrorCodeMeta("SIZES_OF_ARRAYS_DOESNT_MATCH"),
191: ErrorCodeMeta("SET_SIZE_LIMIT_EXCEEDED"),
192: ErrorCodeMeta("UNKNOWN_USER"),
193: ErrorCodeMeta("WRONG_PASSWORD"),
194: ErrorCodeMeta("REQUIRED_PASSWORD"),
195: ErrorCodeMeta("IP_ADDRESS_NOT_ALLOWED"),
196: ErrorCodeMeta("UNKNOWN_ADDRESS_PATTERN_TYPE"),
197: ErrorCodeMeta("SERVER_REVISION_IS_TOO_OLD"),
198: ErrorCodeMeta("DNS_ERROR"),
199: ErrorCodeMeta("UNKNOWN_QUOTA"),
200: ErrorCodeMeta("QUOTA_DOESNT_ALLOW_KEYS"),
201: ErrorCodeMeta("QUOTA_EXPIRED"),
202: ErrorCodeMeta("TOO_MANY_SIMULTANEOUS_QUERIES"),
203: ErrorCodeMeta("NO_FREE_CONNECTION"),
204: ErrorCodeMeta("CANNOT_FSYNC"),
205: ErrorCodeMeta("NESTED_TYPE_TOO_DEEP"),
206: ErrorCodeMeta("ALIAS_REQUIRED"),
207: ErrorCodeMeta("AMBIGUOUS_IDENTIFIER"),
208: ErrorCodeMeta("EMPTY_NESTED_TABLE"),
209: ErrorCodeMeta("SOCKET_TIMEOUT"),
210: ErrorCodeMeta("NETWORK_ERROR"),
211: ErrorCodeMeta("EMPTY_QUERY"),
212: ErrorCodeMeta("UNKNOWN_LOAD_BALANCING"),
213: ErrorCodeMeta("UNKNOWN_TOTALS_MODE"),
214: ErrorCodeMeta("CANNOT_STATVFS"),
215: ErrorCodeMeta("NOT_AN_AGGREGATE", user_safe=True),
216: ErrorCodeMeta("QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING"),
217: ErrorCodeMeta("CLIENT_HAS_CONNECTED_TO_WRONG_PORT"),
218: ErrorCodeMeta("TABLE_IS_DROPPED"),
219: ErrorCodeMeta("DATABASE_NOT_EMPTY"),
220: ErrorCodeMeta("DUPLICATE_INTERSERVER_IO_ENDPOINT"),
221: ErrorCodeMeta("NO_SUCH_INTERSERVER_IO_ENDPOINT"),
222: ErrorCodeMeta("ADDING_REPLICA_TO_NON_EMPTY_TABLE"),
223: ErrorCodeMeta("UNEXPECTED_AST_STRUCTURE"),
224: ErrorCodeMeta("REPLICA_IS_ALREADY_ACTIVE"),
225: ErrorCodeMeta("NO_ZOOKEEPER"),
226: ErrorCodeMeta("NO_FILE_IN_DATA_PART"),
227: ErrorCodeMeta("UNEXPECTED_FILE_IN_DATA_PART"),
228: ErrorCodeMeta("BAD_SIZE_OF_FILE_IN_DATA_PART"),
229: ErrorCodeMeta("QUERY_IS_TOO_LARGE"),
230: ErrorCodeMeta("NOT_FOUND_EXPECTED_DATA_PART"),
231: ErrorCodeMeta("TOO_MANY_UNEXPECTED_DATA_PARTS"),
232: ErrorCodeMeta("NO_SUCH_DATA_PART"),
233: ErrorCodeMeta("BAD_DATA_PART_NAME"),
234: ErrorCodeMeta("NO_REPLICA_HAS_PART"),
235: ErrorCodeMeta("DUPLICATE_DATA_PART"),
236: ErrorCodeMeta("ABORTED"),
237: ErrorCodeMeta("NO_REPLICA_NAME_GIVEN"),
238: ErrorCodeMeta("FORMAT_VERSION_TOO_OLD"),
239: ErrorCodeMeta("CANNOT_MUNMAP"),
240: ErrorCodeMeta("CANNOT_MREMAP"),
241: ErrorCodeMeta("MEMORY_LIMIT_EXCEEDED"),
242: ErrorCodeMeta("TABLE_IS_READ_ONLY"),
243: ErrorCodeMeta("NOT_ENOUGH_SPACE"),
244: ErrorCodeMeta("UNEXPECTED_ZOOKEEPER_ERROR"),
246: ErrorCodeMeta("CORRUPTED_DATA"),
247: ErrorCodeMeta("INCORRECT_MARK"),
248: ErrorCodeMeta("INVALID_PARTITION_VALUE"),
250: ErrorCodeMeta("NOT_ENOUGH_BLOCK_NUMBERS"),
251: ErrorCodeMeta("NO_SUCH_REPLICA"),
252: ErrorCodeMeta("TOO_MANY_PARTS"),
253: ErrorCodeMeta("REPLICA_IS_ALREADY_EXIST"),
254: ErrorCodeMeta("NO_ACTIVE_REPLICAS"),
255: ErrorCodeMeta("TOO_MANY_RETRIES_TO_FETCH_PARTS"),
256: ErrorCodeMeta("PARTITION_ALREADY_EXISTS"),
257: ErrorCodeMeta("PARTITION_DOESNT_EXIST"),
258: ErrorCodeMeta("UNION_ALL_RESULT_STRUCTURES_MISMATCH"),
260: ErrorCodeMeta("CLIENT_OUTPUT_FORMAT_SPECIFIED"),
261: ErrorCodeMeta("UNKNOWN_BLOCK_INFO_FIELD"),
262: ErrorCodeMeta("BAD_COLLATION"),
263: ErrorCodeMeta("CANNOT_COMPILE_CODE"),
264: ErrorCodeMeta("INCOMPATIBLE_TYPE_OF_JOIN"),
265: ErrorCodeMeta("NO_AVAILABLE_REPLICA"),
266: ErrorCodeMeta("MISMATCH_REPLICAS_DATA_SOURCES"),
267: ErrorCodeMeta("STORAGE_DOESNT_SUPPORT_PARALLEL_REPLICAS"),
268: ErrorCodeMeta("CPUID_ERROR"),
269: ErrorCodeMeta("INFINITE_LOOP"),
270: ErrorCodeMeta("CANNOT_COMPRESS"),
271: ErrorCodeMeta("CANNOT_DECOMPRESS"),
272: ErrorCodeMeta("CANNOT_IO_SUBMIT"),
273: ErrorCodeMeta("CANNOT_IO_GETEVENTS"),
274: ErrorCodeMeta("AIO_READ_ERROR"),
275: ErrorCodeMeta("AIO_WRITE_ERROR"),
277: ErrorCodeMeta("INDEX_NOT_USED"),
279: ErrorCodeMeta("ALL_CONNECTION_TRIES_FAILED"),
280: ErrorCodeMeta("NO_AVAILABLE_DATA"),
281: ErrorCodeMeta("DICTIONARY_IS_EMPTY"),
282: ErrorCodeMeta("INCORRECT_INDEX"),
283: ErrorCodeMeta("UNKNOWN_DISTRIBUTED_PRODUCT_MODE"),
284: ErrorCodeMeta("WRONG_GLOBAL_SUBQUERY"),
285: ErrorCodeMeta("TOO_FEW_LIVE_REPLICAS"),
286: ErrorCodeMeta("UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE"),
287: ErrorCodeMeta("UNKNOWN_FORMAT_VERSION"),
288: ErrorCodeMeta("DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED"),
289: ErrorCodeMeta("REPLICA_IS_NOT_IN_QUORUM"),
290: ErrorCodeMeta("LIMIT_EXCEEDED"),
291: ErrorCodeMeta("DATABASE_ACCESS_DENIED"),
293: ErrorCodeMeta("MONGODB_CANNOT_AUTHENTICATE"),
294: ErrorCodeMeta("INVALID_BLOCK_EXTRA_INFO"),
295: ErrorCodeMeta("RECEIVED_EMPTY_DATA"),
296: ErrorCodeMeta("NO_REMOTE_SHARD_FOUND"),
297: ErrorCodeMeta("SHARD_HAS_NO_CONNECTIONS"),
298: ErrorCodeMeta("CANNOT_PIPE"),
299: ErrorCodeMeta("CANNOT_FORK"),
300: ErrorCodeMeta("CANNOT_DLSYM"),
301: ErrorCodeMeta("CANNOT_CREATE_CHILD_PROCESS"),
302: ErrorCodeMeta("CHILD_WAS_NOT_EXITED_NORMALLY"),
303: ErrorCodeMeta("CANNOT_SELECT"),
304: ErrorCodeMeta("CANNOT_WAITPID"),
305: ErrorCodeMeta("TABLE_WAS_NOT_DROPPED"),
306: ErrorCodeMeta("TOO_DEEP_RECURSION"),
307: ErrorCodeMeta("TOO_MANY_BYTES"),
308: ErrorCodeMeta("UNEXPECTED_NODE_IN_ZOOKEEPER"),
309: ErrorCodeMeta("FUNCTION_CANNOT_HAVE_PARAMETERS"),
317: ErrorCodeMeta("INVALID_SHARD_WEIGHT"),
318: ErrorCodeMeta("INVALID_CONFIG_PARAMETER"),
319: ErrorCodeMeta("UNKNOWN_STATUS_OF_INSERT"),
321: ErrorCodeMeta("VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE", user_safe=True),
335: ErrorCodeMeta("BARRIER_TIMEOUT"),
336: ErrorCodeMeta("UNKNOWN_DATABASE_ENGINE"),
337: ErrorCodeMeta("DDL_GUARD_IS_ACTIVE"),
341: ErrorCodeMeta("UNFINISHED"),
342: ErrorCodeMeta("METADATA_MISMATCH"),
344: ErrorCodeMeta("SUPPORT_IS_DISABLED"),
345: ErrorCodeMeta("TABLE_DIFFERS_TOO_MUCH"),
346: ErrorCodeMeta("CANNOT_CONVERT_CHARSET"),
347: ErrorCodeMeta("CANNOT_LOAD_CONFIG"),
349: ErrorCodeMeta("CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN"),
350: ErrorCodeMeta("INCOMPATIBLE_SOURCE_TABLES"),
351: ErrorCodeMeta("AMBIGUOUS_TABLE_NAME"),
352: ErrorCodeMeta("AMBIGUOUS_COLUMN_NAME"),
353: ErrorCodeMeta("INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE", user_safe=True),
354: ErrorCodeMeta("ZLIB_INFLATE_FAILED"),
355: ErrorCodeMeta("ZLIB_DEFLATE_FAILED"),
356: ErrorCodeMeta("BAD_LAMBDA"),
357: ErrorCodeMeta("RESERVED_IDENTIFIER_NAME"),
358: ErrorCodeMeta("INTO_OUTFILE_NOT_ALLOWED"),
359: ErrorCodeMeta("TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT"),
360: ErrorCodeMeta("CANNOT_CREATE_CHARSET_CONVERTER"),
361: ErrorCodeMeta("SEEK_POSITION_OUT_OF_BOUND"),
362: ErrorCodeMeta("CURRENT_WRITE_BUFFER_IS_EXHAUSTED"),
363: ErrorCodeMeta("CANNOT_CREATE_IO_BUFFER"),
364: ErrorCodeMeta("RECEIVED_ERROR_TOO_MANY_REQUESTS"),
366: ErrorCodeMeta("SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT"),
367: ErrorCodeMeta("TOO_MANY_FETCHES"),
369: ErrorCodeMeta("ALL_REPLICAS_ARE_STALE"),
370: ErrorCodeMeta("DATA_TYPE_CANNOT_BE_USED_IN_TABLES"),
371: ErrorCodeMeta("INCONSISTENT_CLUSTER_DEFINITION"),
372: ErrorCodeMeta("SESSION_NOT_FOUND"),
373: ErrorCodeMeta("SESSION_IS_LOCKED"),
374: ErrorCodeMeta("INVALID_SESSION_TIMEOUT"),
375: ErrorCodeMeta("CANNOT_DLOPEN"),
376: ErrorCodeMeta("CANNOT_PARSE_UUID"),
377: ErrorCodeMeta("ILLEGAL_SYNTAX_FOR_DATA_TYPE"),
378: ErrorCodeMeta("DATA_TYPE_CANNOT_HAVE_ARGUMENTS"),
379: ErrorCodeMeta("UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK"),
380: ErrorCodeMeta("CANNOT_KILL"),
381: ErrorCodeMeta("HTTP_LENGTH_REQUIRED"),
382: ErrorCodeMeta("CANNOT_LOAD_CATBOOST_MODEL"),
383: ErrorCodeMeta("CANNOT_APPLY_CATBOOST_MODEL"),
384: ErrorCodeMeta("PART_IS_TEMPORARILY_LOCKED"),
385: ErrorCodeMeta("MULTIPLE_STREAMS_REQUIRED"),
386: ErrorCodeMeta("NO_COMMON_TYPE"),
387: ErrorCodeMeta("DICTIONARY_ALREADY_EXISTS"),
388: ErrorCodeMeta("CANNOT_ASSIGN_OPTIMIZE"),
389: ErrorCodeMeta("INSERT_WAS_DEDUPLICATED"),
390: ErrorCodeMeta("CANNOT_GET_CREATE_TABLE_QUERY"),
391: ErrorCodeMeta("EXTERNAL_LIBRARY_ERROR"),
392: ErrorCodeMeta("QUERY_IS_PROHIBITED"),
393: ErrorCodeMeta("THERE_IS_NO_QUERY"),
394: ErrorCodeMeta("QUERY_WAS_CANCELLED"),
395: ErrorCodeMeta("FUNCTION_THROW_IF_VALUE_IS_NON_ZERO"),
396: ErrorCodeMeta("TOO_MANY_ROWS_OR_BYTES"),
397: ErrorCodeMeta("QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW"),
398: ErrorCodeMeta("UNKNOWN_MUTATION_COMMAND"),
399: ErrorCodeMeta("FORMAT_IS_NOT_SUITABLE_FOR_OUTPUT"),
400: ErrorCodeMeta("CANNOT_STAT"),
401: ErrorCodeMeta("FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME"),
402: ErrorCodeMeta("CANNOT_IOSETUP"),
403: ErrorCodeMeta("INVALID_JOIN_ON_EXPRESSION"),
404: ErrorCodeMeta("BAD_ODBC_CONNECTION_STRING"),
405: ErrorCodeMeta("PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT"),
406: ErrorCodeMeta("TOP_AND_LIMIT_TOGETHER"),
407: ErrorCodeMeta("DECIMAL_OVERFLOW"),
408: ErrorCodeMeta("BAD_REQUEST_PARAMETER"),
409: ErrorCodeMeta("EXTERNAL_EXECUTABLE_NOT_FOUND"),
410: ErrorCodeMeta("EXTERNAL_SERVER_IS_NOT_RESPONDING"),
411: ErrorCodeMeta("PTHREAD_ERROR"),
412: ErrorCodeMeta("NETLINK_ERROR"),
413: ErrorCodeMeta("CANNOT_SET_SIGNAL_HANDLER"),
415: ErrorCodeMeta("ALL_REPLICAS_LOST"),
416: ErrorCodeMeta("REPLICA_STATUS_CHANGED"),
417: ErrorCodeMeta("EXPECTED_ALL_OR_ANY"),
418: ErrorCodeMeta("UNKNOWN_JOIN"),
419: ErrorCodeMeta("MULTIPLE_ASSIGNMENTS_TO_COLUMN"),
420: ErrorCodeMeta("CANNOT_UPDATE_COLUMN"),
421: ErrorCodeMeta("CANNOT_ADD_DIFFERENT_AGGREGATE_STATES"),
422: ErrorCodeMeta("UNSUPPORTED_URI_SCHEME"),
423: ErrorCodeMeta("CANNOT_GETTIMEOFDAY"),
424: ErrorCodeMeta("CANNOT_LINK"),
425: ErrorCodeMeta("SYSTEM_ERROR"),
427: ErrorCodeMeta("CANNOT_COMPILE_REGEXP"),
428: ErrorCodeMeta("UNKNOWN_LOG_LEVEL"),
429: ErrorCodeMeta("FAILED_TO_GETPWUID"),
430: ErrorCodeMeta("MISMATCHING_USERS_FOR_PROCESS_AND_DATA"),
431: ErrorCodeMeta("ILLEGAL_SYNTAX_FOR_CODEC_TYPE"),
432: ErrorCodeMeta("UNKNOWN_CODEC"),
433: ErrorCodeMeta("ILLEGAL_CODEC_PARAMETER"),
434: ErrorCodeMeta("CANNOT_PARSE_PROTOBUF_SCHEMA"),
435: ErrorCodeMeta("NO_COLUMN_SERIALIZED_TO_REQUIRED_PROTOBUF_FIELD"),
436: ErrorCodeMeta("PROTOBUF_BAD_CAST"),
437: ErrorCodeMeta("PROTOBUF_FIELD_NOT_REPEATED"),
438: ErrorCodeMeta("DATA_TYPE_CANNOT_BE_PROMOTED"),
439: ErrorCodeMeta("CANNOT_SCHEDULE_TASK"),
440: ErrorCodeMeta("INVALID_LIMIT_EXPRESSION"),
441: ErrorCodeMeta("CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING"),
442: ErrorCodeMeta("BAD_DATABASE_FOR_TEMPORARY_TABLE"),
443: ErrorCodeMeta("NO_COLUMNS_SERIALIZED_TO_PROTOBUF_FIELDS"),
444: ErrorCodeMeta("UNKNOWN_PROTOBUF_FORMAT"),
445: ErrorCodeMeta("CANNOT_MPROTECT"),
446: ErrorCodeMeta("FUNCTION_NOT_ALLOWED"),
447: ErrorCodeMeta("HYPERSCAN_CANNOT_SCAN_TEXT"),
448: ErrorCodeMeta("BROTLI_READ_FAILED"),
449: ErrorCodeMeta("BROTLI_WRITE_FAILED"),
450: ErrorCodeMeta("BAD_TTL_EXPRESSION"),
451: ErrorCodeMeta("BAD_TTL_FILE"),
452: ErrorCodeMeta("SETTING_CONSTRAINT_VIOLATION"),
453: ErrorCodeMeta("MYSQL_CLIENT_INSUFFICIENT_CAPABILITIES"),
454: ErrorCodeMeta("OPENSSL_ERROR"),
455: ErrorCodeMeta("SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY"),
456: ErrorCodeMeta("UNKNOWN_QUERY_PARAMETER"),
457: ErrorCodeMeta("BAD_QUERY_PARAMETER"),
458: ErrorCodeMeta("CANNOT_UNLINK"),
459: ErrorCodeMeta("CANNOT_SET_THREAD_PRIORITY"),
460: ErrorCodeMeta("CANNOT_CREATE_TIMER"),
461: ErrorCodeMeta("CANNOT_SET_TIMER_PERIOD"),
462: ErrorCodeMeta("CANNOT_DELETE_TIMER"),
463: ErrorCodeMeta("CANNOT_FCNTL"),
464: ErrorCodeMeta("CANNOT_PARSE_ELF"),
465: ErrorCodeMeta("CANNOT_PARSE_DWARF"),
466: ErrorCodeMeta("INSECURE_PATH"),
467: ErrorCodeMeta("CANNOT_PARSE_BOOL"),
468: ErrorCodeMeta("CANNOT_PTHREAD_ATTR"),
469: ErrorCodeMeta("VIOLATED_CONSTRAINT"),
470: ErrorCodeMeta("QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW"),
471: ErrorCodeMeta("INVALID_SETTING_VALUE"),
472: ErrorCodeMeta("READONLY_SETTING"),
473: ErrorCodeMeta("DEADLOCK_AVOIDED"),
474: ErrorCodeMeta("INVALID_TEMPLATE_FORMAT"),
475: ErrorCodeMeta("INVALID_WITH_FILL_EXPRESSION"),
476: ErrorCodeMeta("WITH_TIES_WITHOUT_ORDER_BY", user_safe=True),
477: ErrorCodeMeta("INVALID_USAGE_OF_INPUT"),
478: ErrorCodeMeta("UNKNOWN_POLICY"),
479: ErrorCodeMeta("UNKNOWN_DISK"),
480: ErrorCodeMeta("UNKNOWN_PROTOCOL"),
481: ErrorCodeMeta("PATH_ACCESS_DENIED"),
482: ErrorCodeMeta("DICTIONARY_ACCESS_DENIED"),
483: ErrorCodeMeta("TOO_MANY_REDIRECTS"),
484: ErrorCodeMeta("INTERNAL_REDIS_ERROR"),
485: ErrorCodeMeta("SCALAR_ALREADY_EXISTS"),
487: ErrorCodeMeta("CANNOT_GET_CREATE_DICTIONARY_QUERY"),
488: ErrorCodeMeta("UNKNOWN_DICTIONARY"),
489: ErrorCodeMeta("INCORRECT_DICTIONARY_DEFINITION"),
490: ErrorCodeMeta("CANNOT_FORMAT_DATETIME"),
491: ErrorCodeMeta("UNACCEPTABLE_URL"),
492: ErrorCodeMeta("ACCESS_ENTITY_NOT_FOUND"),
493: ErrorCodeMeta("ACCESS_ENTITY_ALREADY_EXISTS"),
494: ErrorCodeMeta("ACCESS_ENTITY_FOUND_DUPLICATES"),
495: ErrorCodeMeta("ACCESS_STORAGE_READONLY"),
496: ErrorCodeMeta("QUOTA_REQUIRES_CLIENT_KEY"),
497: ErrorCodeMeta("ACCESS_DENIED"),
498: ErrorCodeMeta("LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED"),
499: ErrorCodeMeta("S3_ERROR"),
500: ErrorCodeMeta("AZURE_BLOB_STORAGE_ERROR"),
501: ErrorCodeMeta("CANNOT_CREATE_DATABASE"),
502: ErrorCodeMeta("CANNOT_SIGQUEUE"),
503: ErrorCodeMeta("AGGREGATE_FUNCTION_THROW"),
504: ErrorCodeMeta("FILE_ALREADY_EXISTS"),
505: ErrorCodeMeta("CANNOT_DELETE_DIRECTORY"),
506: ErrorCodeMeta("UNEXPECTED_ERROR_CODE"),
507: ErrorCodeMeta("UNABLE_TO_SKIP_UNUSED_SHARDS"),
508: ErrorCodeMeta("UNKNOWN_ACCESS_TYPE"),
509: ErrorCodeMeta("INVALID_GRANT"),
510: ErrorCodeMeta("CACHE_DICTIONARY_UPDATE_FAIL"),
511: ErrorCodeMeta("UNKNOWN_ROLE"),
512: ErrorCodeMeta("SET_NON_GRANTED_ROLE"),
513: ErrorCodeMeta("UNKNOWN_PART_TYPE"),
514: ErrorCodeMeta("ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND"),
515: ErrorCodeMeta("INCORRECT_ACCESS_ENTITY_DEFINITION"),
516: ErrorCodeMeta("AUTHENTICATION_FAILED"),
517: ErrorCodeMeta("CANNOT_ASSIGN_ALTER"),
518: ErrorCodeMeta("CANNOT_COMMIT_OFFSET"),
519: ErrorCodeMeta("NO_REMOTE_SHARD_AVAILABLE"),
520: ErrorCodeMeta("CANNOT_DETACH_DICTIONARY_AS_TABLE"),
521: ErrorCodeMeta("ATOMIC_RENAME_FAIL"),
523: ErrorCodeMeta("UNKNOWN_ROW_POLICY"),
524: ErrorCodeMeta("ALTER_OF_COLUMN_IS_FORBIDDEN"),
525: ErrorCodeMeta("INCORRECT_DISK_INDEX"),
527: ErrorCodeMeta("NO_SUITABLE_FUNCTION_IMPLEMENTATION"),
528: ErrorCodeMeta("CASSANDRA_INTERNAL_ERROR"),
529: ErrorCodeMeta("NOT_A_LEADER"),
530: ErrorCodeMeta("CANNOT_CONNECT_RABBITMQ"),
531: ErrorCodeMeta("CANNOT_FSTAT"),
532: ErrorCodeMeta("LDAP_ERROR"),
533: ErrorCodeMeta("INCONSISTENT_RESERVATIONS"),
534: ErrorCodeMeta("NO_RESERVATIONS_PROVIDED"),
535: ErrorCodeMeta("UNKNOWN_RAID_TYPE"),
536: ErrorCodeMeta("CANNOT_RESTORE_FROM_FIELD_DUMP"),
537: ErrorCodeMeta("ILLEGAL_MYSQL_VARIABLE"),
538: ErrorCodeMeta("MYSQL_SYNTAX_ERROR"),
539: ErrorCodeMeta("CANNOT_BIND_RABBITMQ_EXCHANGE"),
540: ErrorCodeMeta("CANNOT_DECLARE_RABBITMQ_EXCHANGE"),
541: ErrorCodeMeta("CANNOT_CREATE_RABBITMQ_QUEUE_BINDING"),
542: ErrorCodeMeta("CANNOT_REMOVE_RABBITMQ_EXCHANGE"),
543: ErrorCodeMeta("UNKNOWN_MYSQL_DATATYPES_SUPPORT_LEVEL"),
544: ErrorCodeMeta("ROW_AND_ROWS_TOGETHER"),
545: ErrorCodeMeta("FIRST_AND_NEXT_TOGETHER"),
546: ErrorCodeMeta("NO_ROW_DELIMITER"),
547: ErrorCodeMeta("INVALID_RAID_TYPE"),
548: ErrorCodeMeta("UNKNOWN_VOLUME"),
549: ErrorCodeMeta("DATA_TYPE_CANNOT_BE_USED_IN_KEY"),
550: ErrorCodeMeta("CONDITIONAL_TREE_PARENT_NOT_FOUND"),
551: ErrorCodeMeta("ILLEGAL_PROJECTION_MANIPULATOR"),
552: ErrorCodeMeta("UNRECOGNIZED_ARGUMENTS"),
553: ErrorCodeMeta("LZMA_STREAM_ENCODER_FAILED"),
554: ErrorCodeMeta("LZMA_STREAM_DECODER_FAILED"),
555: ErrorCodeMeta("ROCKSDB_ERROR"),
556: ErrorCodeMeta("SYNC_MYSQL_USER_ACCESS_ERROR"),
557: ErrorCodeMeta("UNKNOWN_UNION"),
558: ErrorCodeMeta("EXPECTED_ALL_OR_DISTINCT"),
559: ErrorCodeMeta("INVALID_GRPC_QUERY_INFO"),
560: ErrorCodeMeta("ZSTD_ENCODER_FAILED"),
561: ErrorCodeMeta("ZSTD_DECODER_FAILED"),
562: ErrorCodeMeta("TLD_LIST_NOT_FOUND"),
563: ErrorCodeMeta("CANNOT_READ_MAP_FROM_TEXT"),
564: ErrorCodeMeta("INTERSERVER_SCHEME_DOESNT_MATCH"),
565: ErrorCodeMeta("TOO_MANY_PARTITIONS"),
566: ErrorCodeMeta("CANNOT_RMDIR"),
567: ErrorCodeMeta("DUPLICATED_PART_UUIDS"),
568: ErrorCodeMeta("RAFT_ERROR"),
569: ErrorCodeMeta("MULTIPLE_COLUMNS_SERIALIZED_TO_SAME_PROTOBUF_FIELD"),
570: ErrorCodeMeta("DATA_TYPE_INCOMPATIBLE_WITH_PROTOBUF_FIELD"),
571: ErrorCodeMeta("DATABASE_REPLICATION_FAILED"),
572: ErrorCodeMeta("TOO_MANY_QUERY_PLAN_OPTIMIZATIONS"),
573: ErrorCodeMeta("EPOLL_ERROR"),
574: ErrorCodeMeta("DISTRIBUTED_TOO_MANY_PENDING_BYTES"),
575: ErrorCodeMeta("UNKNOWN_SNAPSHOT"),
576: ErrorCodeMeta("KERBEROS_ERROR"),
577: ErrorCodeMeta("INVALID_SHARD_ID"),
578: ErrorCodeMeta("INVALID_FORMAT_INSERT_QUERY_WITH_DATA"),
579: ErrorCodeMeta("INCORRECT_PART_TYPE"),
580: ErrorCodeMeta("CANNOT_SET_ROUNDING_MODE"),
581: ErrorCodeMeta("TOO_LARGE_DISTRIBUTED_DEPTH"),
582: ErrorCodeMeta("NO_SUCH_PROJECTION_IN_TABLE"),
583: ErrorCodeMeta("ILLEGAL_PROJECTION"),
584: ErrorCodeMeta("PROJECTION_NOT_USED"),
585: ErrorCodeMeta("CANNOT_PARSE_YAML"),
586: ErrorCodeMeta("CANNOT_CREATE_FILE"),
587: ErrorCodeMeta("CONCURRENT_ACCESS_NOT_SUPPORTED"),
588: ErrorCodeMeta("DISTRIBUTED_BROKEN_BATCH_INFO"),
589: ErrorCodeMeta("DISTRIBUTED_BROKEN_BATCH_FILES"),
590: ErrorCodeMeta("CANNOT_SYSCONF"),
591: ErrorCodeMeta("SQLITE_ENGINE_ERROR"),
592: ErrorCodeMeta("DATA_ENCRYPTION_ERROR"),
593: ErrorCodeMeta("ZERO_COPY_REPLICATION_ERROR"),
594: ErrorCodeMeta("BZIP2_STREAM_DECODER_FAILED"),
595: ErrorCodeMeta("BZIP2_STREAM_ENCODER_FAILED"),
596: ErrorCodeMeta("INTERSECT_OR_EXCEPT_RESULT_STRUCTURES_MISMATCH"),
597: ErrorCodeMeta("NO_SUCH_ERROR_CODE"),
598: ErrorCodeMeta("BACKUP_ALREADY_EXISTS"),
599: ErrorCodeMeta("BACKUP_NOT_FOUND"),
600: ErrorCodeMeta("BACKUP_VERSION_NOT_SUPPORTED"),
601: ErrorCodeMeta("BACKUP_DAMAGED"),
602: ErrorCodeMeta("NO_BASE_BACKUP"),
603: ErrorCodeMeta("WRONG_BASE_BACKUP"),
604: ErrorCodeMeta("BACKUP_ENTRY_ALREADY_EXISTS"),
605: ErrorCodeMeta("BACKUP_ENTRY_NOT_FOUND"),
606: ErrorCodeMeta("BACKUP_IS_EMPTY"),
607: ErrorCodeMeta("BACKUP_ELEMENT_DUPLICATE"),
608: ErrorCodeMeta("CANNOT_RESTORE_TABLE"),
609: ErrorCodeMeta("FUNCTION_ALREADY_EXISTS"),
610: ErrorCodeMeta("CANNOT_DROP_FUNCTION"),
611: ErrorCodeMeta("CANNOT_CREATE_RECURSIVE_FUNCTION"),
612: ErrorCodeMeta("OBJECT_ALREADY_STORED_ON_DISK"),
613: ErrorCodeMeta("OBJECT_WAS_NOT_STORED_ON_DISK"),
614: ErrorCodeMeta("POSTGRESQL_CONNECTION_FAILURE"),
615: ErrorCodeMeta("CANNOT_ADVISE"),
616: ErrorCodeMeta("UNKNOWN_READ_METHOD"),
617: ErrorCodeMeta("LZ4_ENCODER_FAILED"),
618: ErrorCodeMeta("LZ4_DECODER_FAILED"),
619: ErrorCodeMeta("POSTGRESQL_REPLICATION_INTERNAL_ERROR"),
620: ErrorCodeMeta("QUERY_NOT_ALLOWED"),
621: ErrorCodeMeta("CANNOT_NORMALIZE_STRING"),
622: ErrorCodeMeta("CANNOT_PARSE_CAPN_PROTO_SCHEMA"),
623: ErrorCodeMeta("CAPN_PROTO_BAD_CAST"),
624: ErrorCodeMeta("BAD_FILE_TYPE"),
625: ErrorCodeMeta("IO_SETUP_ERROR"),
626: ErrorCodeMeta("CANNOT_SKIP_UNKNOWN_FIELD"),
627: ErrorCodeMeta("BACKUP_ENGINE_NOT_FOUND"),
628: ErrorCodeMeta("OFFSET_FETCH_WITHOUT_ORDER_BY"),
629: ErrorCodeMeta("HTTP_RANGE_NOT_SATISFIABLE"),
630: ErrorCodeMeta("HAVE_DEPENDENT_OBJECTS"),
631: ErrorCodeMeta("UNKNOWN_FILE_SIZE"),
632: ErrorCodeMeta("UNEXPECTED_DATA_AFTER_PARSED_VALUE"),
633: ErrorCodeMeta("QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW"),
634: ErrorCodeMeta("MONGODB_ERROR"),
635: ErrorCodeMeta("CANNOT_POLL"),
636: ErrorCodeMeta("CANNOT_EXTRACT_TABLE_STRUCTURE"),
637: ErrorCodeMeta("INVALID_TABLE_OVERRIDE"),
638: ErrorCodeMeta("SNAPPY_UNCOMPRESS_FAILED"),
639: ErrorCodeMeta("SNAPPY_COMPRESS_FAILED"),
640: ErrorCodeMeta("NO_HIVEMETASTORE"),
641: ErrorCodeMeta("CANNOT_APPEND_TO_FILE"),
642: ErrorCodeMeta("CANNOT_PACK_ARCHIVE"),
643: ErrorCodeMeta("CANNOT_UNPACK_ARCHIVE"),
644: ErrorCodeMeta("REMOTE_FS_OBJECT_CACHE_ERROR"),
645: ErrorCodeMeta("NUMBER_OF_DIMENSIONS_MISMATHED"),
999: ErrorCodeMeta("KEEPER_EXCEPTION"),
1000: ErrorCodeMeta("POCO_EXCEPTION"),
1001: ErrorCodeMeta("STD_EXCEPTION"),
1002: CLICKHOUSE_UNKNOWN_EXCEPTION,
}
|
orm-bindings | channel_metadata | import os
from binascii import unhexlify
from datetime import datetime
from lz4.frame import LZ4FrameCompressor
from pony import orm
from pony.orm import db_session, raw_sql, select
from tribler.core.components.libtorrent.utils.libtorrent_helper import libtorrent as lt
from tribler.core.components.metadata_store.db.orm_bindings.channel_node import (
CHANNEL_DESCRIPTION_FLAG,
CHANNEL_THUMBNAIL_FLAG,
COMMITTED,
LEGACY_ENTRY,
NEW,
PUBLIC_KEY_LEN,
TODELETE,
UPDATED,
)
from tribler.core.components.metadata_store.db.orm_bindings.discrete_clock import clock
from tribler.core.components.metadata_store.db.serialization import (
CHANNEL_TORRENT,
ChannelMetadataPayload,
HealthItemsPayload,
)
from tribler.core.utilities.path_util import Path
from tribler.core.utilities.simpledefs import CHANNEL_STATE
from tribler.core.utilities.unicode import hexlify
from tribler.core.utilities.utilities import random_infohash
# Lengths (in hex characters) of the two components of a channel directory name.
CHANNEL_DIR_NAME_PK_LENGTH = 32 # It's not 40 so it could be distinguished from infohash
CHANNEL_DIR_NAME_ID_LENGTH = 16 # Zero-padded long int in hex form
CHANNEL_DIR_NAME_LENGTH = CHANNEL_DIR_NAME_PK_LENGTH + CHANNEL_DIR_NAME_ID_LENGTH
# File extension for serialized metadata blob files (optionally followed by ".lz4").
BLOB_EXTENSION = ".mdblob"
LZ4_END_MARK_SIZE = 4 # in bytes, from original specification. We don't use CRC
HEALTH_ITEM_HEADER_SIZE = 4 # in bytes, len of varlenI header
# Precomputed empty LZ4 archive (frame with no payload).
LZ4_EMPTY_ARCHIVE = unhexlify("04224d184040c000000000")
def chunks(l, n):
    """Yield successive n-sized chunks from l (the last one may be shorter)."""
    # Walk the sequence by slice offsets; slicing never overruns, so the
    # final chunk simply contains whatever is left.
    for offset in range(0, len(l), n):
        yield l[offset:offset + n]
def create_torrent_from_dir(directory, torrent_filename):
    """Create a (public) torrent from a directory, write it to torrent_filename.

    :param directory: path of the directory to add to the torrent.
    :param torrent_filename: path where the bencoded .torrent file is written.
    :return: (metainfo, infohash) tuple with the generated torrent dict and
        its infohash bytes.
    """
    file_storage = lt.file_storage()
    lt.add_files(file_storage, str(directory))
    creator = lt.create_torrent(file_storage)
    creator.set_priv(False)  # explicitly a public (non-private) torrent
    # Piece hashes are computed relative to the parent, so the directory name
    # itself is part of the torrent's file paths.
    lt.set_piece_hashes(creator, str(directory.parent))
    metainfo = creator.generate()
    with open(torrent_filename, "wb") as torrent_file:
        torrent_file.write(lt.bencode(metainfo))
    infohash = lt.torrent_info(metainfo).info_hash().to_bytes()
    return metainfo, infohash
def get_mdblob_sequence_number(filename):
    """Extract the integer sequence number from an mdblob file name.

    Recognizes both "<number>.mdblob" and "<number>.mdblob.lz4" names.

    :param filename: path of the blob file (str or Path).
    :return: the sequence number as int, or None if the name does not look
        like a metadata blob file.
    """
    filepath = Path(filename)
    if filepath.suffixes == [BLOB_EXTENSION]:
        # Bug fix: use the Path built above, not the raw `filename` argument —
        # the original called filename.stem, which raises AttributeError when
        # a plain string is passed.
        return int(filepath.stem)
    if filepath.suffixes == [BLOB_EXTENSION, ".lz4"]:
        # Strip ".lz4" first, then ".mdblob", leaving the numeric stem.
        return int(Path(filepath.stem).stem)
    return None
def entries_to_chunk(metadata_list, chunk_size, start_index=0, include_health=False):
    """
    Pack consecutive serialized metadata entries into a single compressed chunk.

    :param metadata_list: the list of metadata to process.
    :param chunk_size: the desired chunk size limit, in bytes.
    :param start_index: the index of the element of metadata_list from which the processing should start.
    :param include_health: if True, put metadata health information into the chunk.
    :return: (chunk, last_entry_index) tuple, where chunk is the resulting chunk in string form and
        last_entry_index is the index of the element of the input list that was put into the chunk the last.
    :raises Exception: if start_index points past the end of metadata_list.
    """
    if start_index >= len(metadata_list):
        raise Exception(
            "Could not serialize chunk: incorrect start_index",
            metadata_list,
            chunk_size,
            start_index,
        )
    # Feed entries to the compressor until one no longer fits into the chunk.
    compressor = MetadataCompressor(chunk_size, include_health)
    last_index = start_index
    for entry in metadata_list[start_index:]:
        if not compressor.put(entry):
            break
        last_index += 1
    return compressor.close(), last_index
class MetadataCompressor:
    """
    This class provides methods to put serialized data of one or more metadata entries into a single binary chunk.

    The data is added incrementally until it stops fitting into the designated chunk size. The first entry is added
    regardless of violating the chunk size limit.

    The chunk format is:

        <LZ4-compressed sequence of serialized metadata entries>
        [<optional HealthItemsPayload>]

    The optional health information is serialized separately, as it was not originally included in the serialized
    metadata format. If present, it contains the same number of items as the serialized list of metadata
    entries. The N-th health info item in the health block corresponds to the N-th metadata entry.

    For the details of the health info format see the documentation: doc/metadata_store/serialization_format.rst

    While it is possible to put the health info items into the second LZ4-compressed frame, it is more efficient to
    serialize them without any compression. The reason for this is that a typical health info item has a 1-byte
    length (about 17 bytes if a torrent has actual health information), and the number of items is few for a single
    chunk (usually less then 10 items). If we use LZ4 compressor, we want to use it incrementally in order to detect
    when items stop fitting into a chunk. LZ4 algorithm cannot compress such small items efficiently in an incremental
    fashion, and the resulting "compressed" size can be significantly bigger than the original data size.
    """

    def __init__(self, chunk_size: int, include_health: bool = False):
        """
        :param chunk_size: the desired chunk size limit, in bytes.
        :param include_health: if True, put metadata health information into the chunk.
        """
        self.chunk_size = chunk_size
        self.include_health = include_health

        self.compressor = LZ4FrameCompressor(auto_flush=True)
        # The next line is not necessary, added just to be safe
        # in case of possible future changes of LZ4FrameCompressor
        assert self.compressor.__enter__() is self.compressor

        metadata_header: bytes = self.compressor.begin()

        # Number of entries accepted so far.
        self.count = 0
        # Projected size of the final chunk: the frame header is already written,
        # and the LZ4 end mark will be appended by flush() on close().
        self.size = len(metadata_header) + LZ4_END_MARK_SIZE
        self.metadata_buffer = [metadata_header]

        if include_health:
            self.health_buffer = []
            # Reserve room for the health payload's varlenI length header.
            self.size += HEALTH_ITEM_HEADER_SIZE
        else:
            self.health_buffer = None

        self.closed = False

    def put(self, metadata) -> bool:
        """
        Tries to add a metadata entry to chunk. The first entry is always added successfully. Then next entries are
        added only if it possible to fit data into the chunk.

        :param metadata: a metadata entry to process.
        :return: False if it was not possible to fit data into the chunk
        :raises TypeError: if the compressor was already closed.
        """
        if self.closed:
            raise TypeError("Compressor is already closed")

        # TODELETE entries are serialized as (much smaller) delete commands.
        metadata_bytes = (
            metadata.serialized_delete()
            if metadata.status == TODELETE
            else metadata.serialized()
        )
        compressed_metadata_bytes = self.compressor.compress(metadata_bytes)
        new_size = self.size + len(compressed_metadata_bytes)

        health_bytes = b""  # To satisfy linter
        if self.include_health:
            health_bytes = metadata.serialized_health()
            new_size += len(health_bytes)

        if new_size > self.chunk_size and self.count > 0:
            # The first entry is always added even if the resulted size exceeds the chunk size.
            # This lets higher levels to decide what to do in this case, e.g. send it through EVA protocol.
            return False

        self.count += 1
        self.size = new_size
        self.metadata_buffer.append(compressed_metadata_bytes)
        if self.include_health:
            self.health_buffer.append(health_bytes)

        return True

    def close(self) -> bytes:
        """
        Closes compressor object and returns packed data.

        :return: serialized binary data
        :raises TypeError: if the compressor was already closed.
        """
        if self.closed:
            raise TypeError("Compressor is already closed")
        self.closed = True

        # Finalize the LZ4 frame with its end mark.
        end_mark = self.compressor.flush()
        self.metadata_buffer.append(end_mark)
        result = b"".join(self.metadata_buffer)

        # The next lines aren't necessary, added just to be safe
        # in case of possible future changes of LZ4FrameCompressor
        self.compressor.__exit__(None, None, None)

        if self.include_health:
            # Health items are appended uncompressed, after the LZ4 frame.
            result += HealthItemsPayload(b"".join(self.health_buffer)).serialize()

        return result
def define_binding(db):  # pylint: disable=R0915
    """Define the ChannelMetadata ORM binding on the Pony `db` object and return the class."""

    class ChannelMetadata(db.TorrentMetadata, db.CollectionNode):
        """
        This ORM binding represents Channel entries in the GigaChannel system. Each channel is a Collection that
        additionally has Torrent properties, such as infohash, etc. The torrent properties are used to associate
        a torrent that holds the contents of the channel dumped on the disk in the serialized form.
        Methods for committing channels into the torrent form are implemented in this class.
        """

        _discriminator_ = CHANNEL_TORRENT

        # Serializable
        start_timestamp = orm.Optional(int, size=64, default=0)

        # Local (not part of the serialized payload)
        subscribed = orm.Optional(bool, default=False)
        share = orm.Optional(bool, default=False)
        votes = orm.Optional(float, default=0.0)
        individual_votes = orm.Set("ChannelVote", reverse="channel")
        # Timestamp of the last committed/processed blob of this channel we hold locally;
        # equals `timestamp` when the channel is fully up to date (see `state`).
        local_version = orm.Optional(int, size=64, default=0)

        votes_scaling = 1.0

        # Special class-level properties
        _payload_class = ChannelMetadataPayload
        _channels_dir = None
        _category_filter = None
        _CHUNK_SIZE_LIMIT = (
            1 * 1024 * 1024
        )  # We use 1MB chunks as a workaround for Python's lack of string pointers

        # Names of the payload constructor's positional parameters (minus 'self').
        payload_arguments = _payload_class.__init__.__code__.co_varnames[
            : _payload_class.__init__.__code__.co_argcount
        ][1:]

        # As channel metadata depends on the public key, we can't include the infohash in nonpersonal_attributes
        nonpersonal_attributes = set(db.CollectionNode.nonpersonal_attributes)

        # Cache of infohash -> channel title lookups (see get_channel_name_cached).
        infohash_to_channel_name_cache = {}
        @classmethod
        @db_session
        def get_my_channels(cls):
            """Return a query selecting our own top-level (origin_id == 0) channels."""
            return ChannelMetadata.select(
                lambda g: g.origin_id == 0
                and g.public_key == cls._my_key.pub().key_to_bin()[10:]
            )
@classmethod
@db_session
def create_channel(cls, title, description="", origin_id=0):
"""
Create a channel and sign it with a given key.
:param title: The title of the channel
:param description: The description of the channel
:param origin_id: id_ of the parent channel
:return: The channel metadata
"""
my_channel = cls(
origin_id=origin_id,
public_key=cls._my_key.pub().key_to_bin()[10:],
title=title,
tags=description,
subscribed=True,
share=True,
status=NEW,
infohash=random_infohash(),
)
# random infohash is necessary to avoid triggering DB uniqueness constraints
my_channel.sign()
return my_channel
        @db_session
        def consolidate_channel_torrent(self):
            """
            Delete the channel dir contents and create it anew.
            Use it to consolidate fragmented channel torrent directories.

            :return: the result of commit_channel_torrent (the new channel torrent), or None.
            """
            # Remark: there should be a way to optimize this stuff with SQL and better tree traversal algorithms
            # Cleanup entries marked for deletion

            db.CollectionNode.collapse_deleted_subtrees()
            # Note: It should be possible to stop calling get_contents_to_commit here
            commit_queue = self.get_contents_to_commit()
            for entry in commit_queue:
                if entry.status == TODELETE:
                    entry.delete()

            folder = Path(self._channels_dir) / self.dirname
            # We check if we need to re-create the channel dir in case it was deleted for some reason
            if not folder.is_dir():
                os.makedirs(folder)
            for filename in os.listdir(folder):
                file_path = folder / filename
                # We only remove mdblobs and leave the rest as it is
                if filename.endswith(BLOB_EXTENSION) or filename.endswith(
                    BLOB_EXTENSION + ".lz4"
                ):
                    os.unlink(Path.fix_win_long_file(file_path))

            # Channel should get a new starting timestamp and its contents should get higher timestamps
            start_timestamp = clock.tick()

            def update_timestamps_recursive(node):
                # Depth-first: re-sign children before the parent collection.
                if issubclass(type(node), db.CollectionNode):
                    for child in node.contents:
                        update_timestamps_recursive(child)
                if node.status in [COMMITTED, UPDATED, NEW]:
                    node.status = UPDATED
                    node.timestamp = clock.tick()
                    node.sign()

            update_timestamps_recursive(self)

            return self.commit_channel_torrent(new_start_timestamp=start_timestamp)
        def update_channel_torrent(self, metadata_list):
            """
            Channel torrents are append-only to support seeding the old versions
            from the same dir and avoid updating already downloaded blobs.

            :param metadata_list: The list of metadata entries to add to the torrent dir.
                ACHTUNG: TODELETE entries _MUST_ be sorted to the end of the list to prevent channel corruption!
            :return: (update_dict, torrent) tuple, where update_dict carries the new infohash, final
                timestamp, torrent date and reserved flags to assign to the channel entry.
            """
            # As a workaround for delete entries not having a timestamp in the DB, delete entries should
            # be placed after create/modify entries:
            # | create/modify entries | delete entries | <- final timestamp

            # Create dir for the metadata files
            channel_dir = Path(self._channels_dir / self.dirname).absolute()
            if not channel_dir.is_dir():
                os.makedirs(Path.fix_win_long_file(channel_dir))

            # Blob file names are zero-padded timestamps, so a plain sort gives the newest last.
            existing_contents = sorted(channel_dir.iterdir())
            last_existing_blob_number = (
                get_mdblob_sequence_number(existing_contents[-1])
                if existing_contents
                else None
            )

            index = 0
            while index < len(metadata_list):
                # Squash several serialized and signed metadata entries into a single file
                data, index = entries_to_chunk(
                    metadata_list, self._CHUNK_SIZE_LIMIT, start_index=index
                )
                # Blobs ending with TODELETE entries increase the final timestamp as a workaround for delete commands
                # possessing no timestamp.
                if metadata_list[index - 1].status == TODELETE:
                    blob_timestamp = clock.tick()
                else:
                    blob_timestamp = metadata_list[index - 1].timestamp

                # The final file in the sequence should get a timestamp that is higher than the timestamp of
                # the last channel contents entry. This final timestamp then should be returned to the calling function
                # to be assigned to the corresponding channel entry.
                # Otherwise, the local channel version will never become equal to its timestamp.
                if index >= len(metadata_list):
                    blob_timestamp = clock.tick()
                # Check that the mdblob we're going to create has a greater timestamp than the existing ones
                assert last_existing_blob_number is None or (
                    blob_timestamp > last_existing_blob_number
                )

                blob_filename = Path(
                    channel_dir, str(blob_timestamp).zfill(12) + BLOB_EXTENSION + ".lz4"
                )
                assert (
                    not blob_filename.exists()
                )  # Never ever write over existing files.
                blob_filename.write_bytes(data)
                last_existing_blob_number = blob_timestamp

            with db_session:
                # Advertise the presence of a channel thumbnail/description through reserved flags.
                thumb_exists = db.ChannelThumbnail.exists(
                    lambda g: g.public_key == self.public_key
                    and g.origin_id == self.id_
                    and g.status != TODELETE
                )
                descr_exists = db.ChannelDescription.exists(
                    lambda g: g.public_key == self.public_key
                    and g.origin_id == self.id_
                    and g.status != TODELETE
                )

                flags = CHANNEL_THUMBNAIL_FLAG * (
                    int(thumb_exists)
                ) + CHANNEL_DESCRIPTION_FLAG * (int(descr_exists))

            # Note: the timestamp can end up messed in case of an error

            # Make torrent out of dir with metadata files
            torrent, infohash = create_torrent_from_dir(
                channel_dir, self._channels_dir / (self.dirname + ".torrent")
            )
            torrent_date = datetime.utcfromtimestamp(torrent[b"creation date"])

            return {
                "infohash": infohash,
                "timestamp": last_existing_blob_number,
                "torrent_date": torrent_date,
                "reserved_flags": flags,
            }, torrent
        def commit_channel_torrent(self, new_start_timestamp=None, commit_list=None):
            """
            Collect new/uncommitted and marked for deletion metadata entries, commit them to a channel torrent and
            remove the obsolete entries if the commit succeeds.

            :param new_start_timestamp: change the start_timestamp of the committed channel entry to this value
            :param commit_list: the list of ORM objects to commit into this channel torrent
            :return: The new infohash, should be used to update the downloads; None if there was
                nothing to commit or the disk update failed.
            """
            md_list = commit_list or self.get_contents_to_commit()

            if not md_list:
                return None

            try:
                update_dict, torrent = self.update_channel_torrent(md_list)
            except OSError:
                # Disk error: leave the DB untouched so the commit can be retried later.
                self._logger.error(
                    "Error during channel torrent commit, not going to garbage collect the channel. Channel %s",
                    hexlify(self.public_key),
                )
                return None

            if new_start_timestamp:
                update_dict["start_timestamp"] = new_start_timestamp
            # Update channel infohash, etc
            for attr, val in update_dict.items():
                setattr(self, attr, val)
            self.local_version = self.timestamp
            self.sign()

            # Change the statuses of committed entries and clean up obsolete TODELETE entries
            for g in md_list:
                if g.status in [NEW, UPDATED]:
                    g.status = COMMITTED
                elif g.status == TODELETE:
                    g.delete()

            # Write the channel mdblob to disk
            self.status = COMMITTED  # pylint: disable=W0201
            self.to_file(self._channels_dir / (self.dirname + BLOB_EXTENSION))

            self._logger.info(
                "Channel %s committed with %i new entries. New version is %i",
                hexlify(self.public_key),
                len(md_list),
                update_dict["timestamp"],
            )
            return torrent
@property
def dirname(self):
# Have to limit this to support Windows file path length limit
return (
hexlify(self.public_key)[:CHANNEL_DIR_NAME_PK_LENGTH]
+ f"{self.id_:0>16x}"
)
        @classmethod
        @db_session
        def get_channels_by_title(cls, title):
            """Return a query selecting all channels with the exact given title."""
            return cls.select(lambda g: g.title == title)
        @classmethod
        @db_session
        def get_channel_with_infohash(cls, infohash):
            """Return the channel entry with the given infohash, or None."""
            return cls.get(infohash=infohash)
@classmethod
@db_session
def get_channel_with_dirname(cls, dirname):
# Parse the public key part of the dirname
pk_part = dirname[:-CHANNEL_DIR_NAME_ID_LENGTH]
def extend_to_bitmask(txt):
return txt + "0" * (PUBLIC_KEY_LEN * 2 - CHANNEL_DIR_NAME_LENGTH)
pk_binmask_start = "x'" + extend_to_bitmask(pk_part) + "'"
pk_plus_one = f"{int(pk_part, 16) + 1:X}".zfill(len(pk_part))
pk_binmask_end = "x'" + extend_to_bitmask(pk_plus_one) + "'"
# It is impossible to use LIKE queries on BLOBs, so we have to use comparisons
sql = (
"g.public_key >= "
+ pk_binmask_start
+ " AND g.public_key < "
+ pk_binmask_end
)
# Parse the id part of the dirname
id_part = dirname[-CHANNEL_DIR_NAME_ID_LENGTH:]
id_ = int(id_part, 16)
return orm.select(g for g in cls if g.id_ == id_ and raw_sql(sql)).first()
        @classmethod
        @db_session
        def get_updated_channels(cls):
            """Return a query of subscribed, non-personal channels that have newer
            content available than what we hold locally (local_version < timestamp)."""
            return select(
                g
                for g in cls
                if g.subscribed == 1
                and g.status != LEGACY_ENTRY
                and (g.local_version < g.timestamp)
                and g.public_key != cls._my_key.pub().key_to_bin()[10:]
            )  # don't simplify `g.subscribed == 1` to bool form, it is used by partial index!
        @property
        @db_session
        def state(self):
            """
            This property describes the current state of the channel.
            :return: Text-based status
            """
            # Order matters: the first matching condition wins.
            if self.is_personal:
                return CHANNEL_STATE.PERSONAL.value
            if self.status == LEGACY_ENTRY:
                return CHANNEL_STATE.LEGACY.value
            # Fully caught up with the channel contents.
            if self.local_version == self.timestamp:
                return CHANNEL_STATE.COMPLETE.value
            # Some (but not all) blobs were processed locally.
            if self.local_version > 0:
                return CHANNEL_STATE.UPDATING.value
            # Subscribed but nothing downloaded yet.
            if self.subscribed:
                return CHANNEL_STATE.METAINFO_LOOKUP.value
            return CHANNEL_STATE.PREVIEW.value
def to_simple_dict(self, **kwargs):
"""
Return a basic dictionary with information about the channel.
"""
result = super().to_simple_dict(**kwargs)
result.update(
{
"state": self.state,
"subscribed": self.subscribed,
"votes": self.votes / db.ChannelMetadata.votes_scaling,
"dirty": self.dirty if self.is_personal else False,
}
)
return result
@classmethod
def get_channel_name_cached(cls, dl_name, infohash):
# Querying the database each time is costly so we cache the name request in a dict.
chan_name = cls.infohash_to_channel_name_cache.get(infohash)
if chan_name is None:
chan_name = cls.get_channel_name(dl_name, infohash)
cls.infohash_to_channel_name_cache[infohash] = chan_name
return chan_name
@classmethod
@db_session
def get_channel_name(cls, dl_name, infohash):
"""
Try to translate a Tribler download name into matching channel name. By searching for a channel with the
given dirname and/or infohash. Try do determine if infohash belongs to an older version of
some channel we already have.
:param dl_name - name of the download. Should match the directory name of the channel.
:param infohash - infohash of the download.
:return: Channel title as a string, prefixed with 'OLD:' for older versions
"""
channel = cls.get_channel_with_infohash(infohash)
if not channel:
try:
channel = cls.get_channel_with_dirname(dl_name)
except UnicodeEncodeError:
channel = None
if not channel:
return dl_name
if channel.infohash == infohash:
return channel.title
return "OLD:" + channel.title
        @db_session
        def update_properties(self, update_dict):
            """Update entry attributes; if the channel is moved under a parent
            (origin_id != 0), it is re-created as a plain CollectionNode entity.

            :param update_dict: attribute name -> new value mapping.
            :return: the updated entity (possibly a new CollectionNode instance).
            """
            updated_self = super().update_properties(update_dict)
            if updated_self.origin_id != 0:
                # Coerce to CollectionNode
                # ACHTUNG! This is a little bit awkward way to re-create the entry as an instance of
                # another class. Be very careful with it!
                self_dict = updated_self.to_dict()
                updated_self.delete(recursive=False)
                # Drop DB-assigned/type-discriminator fields before re-creation.
                self_dict.pop("rowid")
                self_dict.pop("metadata_type")
                self_dict["sign_with"] = self._my_key
                updated_self = db.CollectionNode.from_dict(self_dict)
            return updated_self
def make_copy(self, tgt_parent_id, **kwargs):
return db.CollectionNode.make_copy(
self,
tgt_parent_id,
attributes_override={"infohash": random_infohash()},
**kwargs,
)
return ChannelMetadata
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"""Configuration management."""
import configparser
import json
import re
from collections import ChainMap, OrderedDict, namedtuple
from plover import log
from plover.exception import InvalidConfigurationError
from plover.machine.keymap import Keymap
from plover.misc import boolean, expand_path, shorten_path
from plover.registry import registry
from plover.resource import resource_update
# General configuration sections, options and defaults.
MACHINE_CONFIG_SECTION = "Machine Configuration"
LEGACY_DICTIONARY_CONFIG_SECTION = "Dictionary Configuration"
LOGGING_CONFIG_SECTION = "Logging Configuration"
OUTPUT_CONFIG_SECTION = "Output Configuration"

DEFAULT_UNDO_LEVELS = 100
MINIMUM_UNDO_LEVELS = 1
DEFAULT_TIME_BETWEEN_KEY_PRESSES = 0
MINIMUM_TIME_BETWEEN_KEY_PRESSES = 0

DEFAULT_SYSTEM_NAME = "English Stenotype"
# %-templates: filled with the system name / machine type respectively.
SYSTEM_CONFIG_SECTION = "System: %s"
SYSTEM_KEYMAP_OPTION = "keymap[%s]"
class DictionaryConfig(namedtuple("DictionaryConfig", "path enabled")):
    """A configured dictionary: a file path plus an enabled flag."""

    def __new__(cls, path, enabled=True):
        # Paths are always stored in expanded form.
        return super().__new__(cls, expand_path(path), enabled)

    @property
    def short_path(self):
        """The path in shortened, user-friendly form."""
        return shorten_path(self.path)

    def to_dict(self):
        """Return a JSON-serializable representation (with the shortened path)."""
        # Note: do not use _asdict because of
        # https://bugs.python.org/issue24931
        return {"path": self.short_path, "enabled": self.enabled}

    def replace(self, **kwargs):
        """Return a copy with the given fields replaced."""
        return self._replace(**kwargs)

    @staticmethod
    def from_dict(d):
        """Build a DictionaryConfig from a dict produced by to_dict."""
        return DictionaryConfig(**d)

    def __repr__(self):
        return f"DictionaryConfig({self.short_path!r}, {self.enabled!r})"
ConfigOption = namedtuple(
"ConfigOption",
"""
name default
getter setter
validate full_key
""",
)
class InvalidConfigOption(ValueError):
    """Raised when a configuration option holds an invalid value.

    Carries the offending raw value and a fixed (fallback) value the caller
    can substitute, plus an optional human-readable message.
    """

    def __init__(self, raw_value, fixed_value, message=None):
        super().__init__(raw_value)
        self.raw_value = raw_value
        self.fixed_value = fixed_value
        self.message = message

    def __str__(self):
        if self.message:
            return self.message
        return repr(self.raw_value)
def raw_option(name, default, section, option, validate):
    """Build a ConfigOption whose value is stored as a plain string."""
    option = option or name

    def read_value(config, key):
        return config._config[section][option]

    def write_value(config, key, value):
        config._set(section, option, value)

    return ConfigOption(name, lambda c, k: default, read_value, write_value, validate, None)
def json_option(name, default, section, option, validate):
    """Build a ConfigOption whose value is stored JSON-encoded.

    :param default: callable (config, key) -> default value (same contract as
        ConfigOption.default).
    """
    option = option or name

    def getter(config, key):
        value = config._config[section][option]
        try:
            return json.loads(value)
        except json.JSONDecodeError as e:
            # Bug fix: `default` is a callable here; pass the resulting value as
            # the fallback, not the callable itself, so __getitem__ does not end
            # up caching a lambda when the stored JSON is corrupt.
            raise InvalidConfigOption(value, default(config, key)) from e

    def setter(config, key, value):
        if isinstance(value, set):
            # JSON does not support sets; store a sorted list for stable output.
            # (sorted() already returns a list.)
            value = sorted(value)
        config._set(
            section, option, json.dumps(value, sort_keys=True, ensure_ascii=False)
        )

    return ConfigOption(name, default, getter, setter, validate, None)
def int_option(name, default, minimum, maximum, section, option=None):
    """Build a ConfigOption holding an integer constrained to [minimum, maximum].

    Either bound may be None, meaning "unbounded" on that side.
    """
    option = option or name

    def getter(config, key):
        return config._config[section][option]

    def setter(config, key, value):
        config._set(section, option, str(value))

    def validate(config, key, value):
        try:
            value = int(value)
        except ValueError as e:
            raise InvalidConfigOption(value, default) from e
        if (minimum is not None and value < minimum) or (
            maximum is not None and value > maximum
        ):
            # Bug fix: use explicit None checks when rendering the bounds;
            # `minimum or "-∞"` wrongly displayed a legitimate bound of 0
            # (e.g. MINIMUM_TIME_BETWEEN_KEY_PRESSES) as unbounded.
            low = "-∞" if minimum is None else minimum
            high = "∞" if maximum is None else maximum
            message = "%s not in [%s, %s]" % (value, low, high)
            raise InvalidConfigOption(value, default, message)
        return value

    return ConfigOption(name, lambda c, k: default, getter, setter, validate, None)
def boolean_option(name, default, section, option=None):
    """Build a ConfigOption holding a boolean, stored as its string form."""
    option = option or name

    def validate(config, key, value):
        try:
            return boolean(value)
        except ValueError as e:
            raise InvalidConfigOption(value, default) from e

    def getter(config, key):
        return config._config[section][option]

    def setter(config, key, value):
        config._set(section, option, str(value))

    return ConfigOption(name, lambda c, k: default, getter, setter, validate, None)
def choice_option(name, choices, section, option=None):
    """Build a ConfigOption restricted to a fixed set of choices.

    The first choice is used as the default.
    """
    default = choices[0]

    def validate(config, key, value):
        if value in choices:
            return value
        raise InvalidConfigOption(value, default)

    return raw_option(name, default, section, option, validate)
def plugin_option(name, plugin_type, default, section, option=None):
    """Build a ConfigOption naming a registered plugin of the given type."""

    def validate(config, key, value):
        try:
            plugin = registry.get_plugin(plugin_type, value)
        except KeyError as e:
            raise InvalidConfigOption(value, default) from e
        return plugin.name

    return raw_option(name, default, section, option, validate)
def opacity_option(name, section, option=None):
    """Build an integer ConfigOption for an opacity percentage (0-100, default 100)."""
    return int_option(name, default=100, minimum=0, maximum=100,
                      section=section, option=option)
def path_option(name, default, section, option=None):
    """Build a ConfigOption holding a file path.

    The path is stored in shortened form and returned expanded.
    """
    option = option or name

    def getter(config, key):
        return expand_path(config._config[section][option])

    def setter(config, key, value):
        config._set(section, option, shorten_path(value))

    def validate(config, key, value):
        if isinstance(value, str):
            return value
        raise InvalidConfigOption(value, default)

    return ConfigOption(name, lambda c, k: default, getter, setter, validate, None)
def enabled_extensions_option():
    """Build the JSON-backed option listing enabled extension plugins (a set of names)."""

    def validate(config, key, value):
        if not isinstance(value, (list, set, tuple)):
            raise InvalidConfigOption(value, ())
        return set(value)

    return json_option(
        "enabled_extensions", lambda c, k: set(),
        "Plugins", "enabled_extensions", validate,
    )
def machine_specific_options():
    """Build the composite option holding per-machine-type settings.

    The full key is the tuple (option name, machine type), so each machine
    type keeps its own set of options, stored in a section named after it.
    """

    def full_key(config, key):
        # Expand a bare name into (name, current machine type).
        if isinstance(key, tuple):
            assert len(key) == 2
            return key
        return (key, config["machine_type"])

    def default(config, key):
        # Defaults come from the machine plugin's declared option info.
        machine_class = registry.get_plugin("machine", key[1]).obj
        return {
            name: params[0] for name, params in machine_class.get_option_info().items()
        }

    def getter(config, key):
        # The whole section named after the machine type is the raw value.
        return config._config[key[1]]

    def setter(config, key, value):
        config._config[key[1]] = value

    def validate(config, key, raw_options):
        if not isinstance(raw_options, (dict, configparser.SectionProxy)):
            raise InvalidConfigOption(raw_options, default(config, key))
        machine_options = OrderedDict()
        invalid_options = OrderedDict()
        machine_class = registry.get_plugin("machine", key[1]).obj
        for name, params in sorted(machine_class.get_option_info().items()):
            fallback, convert = params
            try:
                raw_value = raw_options[name]
            except KeyError:
                # Missing option: fall back to the plugin's declared default.
                value = fallback
            else:
                try:
                    value = convert(raw_value)
                except ValueError:
                    # Remember the invalid value, but keep validating with the fallback.
                    invalid_options[name] = raw_value
                    value = fallback
            machine_options[name] = value
        if invalid_options:
            raise InvalidConfigOption(invalid_options, machine_options)
        return machine_options

    return ConfigOption(
        "machine_specific_options", default, getter, setter, validate, full_key
    )
def system_keymap_option():
    """Build the option holding the keymap for a (system, machine type) pair.

    The full key is the tuple (option name, system name, machine type); the
    value is stored under the system's config section.
    """

    def full_key(config, key):
        # Expand a bare name into (name, current system, current machine type).
        if isinstance(key, tuple):
            assert len(key) == 3
            return key
        return (key, config["system_name"], config["machine_type"])

    def location(config, key):
        # (section, option) where the keymap string is stored.
        return SYSTEM_CONFIG_SECTION % key[1], SYSTEM_KEYMAP_OPTION % key[2]

    def build_keymap(config, key, mappings=None):
        system = registry.get_plugin("system", key[1]).obj
        machine_class = registry.get_plugin("machine", key[2]).obj
        keymap = Keymap(
            machine_class.get_keys(), system.KEYS + machine_class.get_actions()
        )
        if mappings is None:
            mappings = system.KEYMAPS.get(key[2])
            if mappings is None:
                if machine_class.KEYMAP_MACHINE_TYPE is not None:
                    # Try fallback.
                    return build_keymap(
                        config, (key[0], key[1], machine_class.KEYMAP_MACHINE_TYPE)
                    )
                # No fallback...
                mappings = {}
        keymap.set_mappings(mappings)
        return keymap

    def default(config, key):
        return build_keymap(config, key)

    def getter(config, key):
        section, option = location(config, key)
        return config._config[section][option]

    def setter(config, key, keymap):
        section, option = location(config, key)
        config._set(section, option, str(keymap))

    def validate(config, key, value):
        try:
            return build_keymap(config, key, value)
        except (TypeError, ValueError) as e:
            raise InvalidConfigOption(value, default(config, key)) from e

    return ConfigOption("system_keymap", default, getter, setter, validate, full_key)
def dictionaries_option():
    """Build the option holding the per-system list of DictionaryConfig entries.

    The full key is the tuple (option name, system name); supports reading
    the legacy pre-JSON configuration section as a fallback.
    """

    def full_key(config, key):
        # Expand a bare name into (name, current system name).
        if isinstance(key, tuple):
            assert len(key) == 2
            return key
        return (key, config["system_name"])

    def location(config, key):
        return (
            SYSTEM_CONFIG_SECTION % key[1],
            "dictionaries",
        )

    def default(config, key):
        # Default dictionaries come from the system plugin.
        system = registry.get_plugin("system", key[1]).obj
        return [DictionaryConfig(path) for path in system.DEFAULT_DICTIONARIES]

    def legacy_getter(config):
        # Old config format: numbered 'dictionary_fileN' options, in reverse order.
        options = config._config[LEGACY_DICTIONARY_CONFIG_SECTION].items()
        return [
            {"path": value}
            for name, value in reversed(sorted(options))
            if re.match(r"dictionary_file\d*$", name) is not None
        ]

    def getter(config, key):
        section, option = location(config, key)
        value = config._config.get(section, option, fallback=None)
        if value is None:
            return legacy_getter(config)
        return json.loads(value)

    def setter(config, key, dictionaries):
        section, option = location(config, key)
        config._set(
            section,
            option,
            json.dumps([d.to_dict() for d in dictionaries], sort_keys=True),
        )
        # Once written in the new format, the legacy section is obsolete.
        config._config.remove_section(LEGACY_DICTIONARY_CONFIG_SECTION)

    def validate(config, key, value):
        # Accept DictionaryConfig instances, plain path strings, or dicts.
        dictionaries = []
        for d in value:
            if isinstance(d, DictionaryConfig):
                pass
            elif isinstance(d, str):
                d = DictionaryConfig(d)
            else:
                d = DictionaryConfig.from_dict(d)
            dictionaries.append(d)
        return dictionaries

    return ConfigOption("dictionaries", default, getter, setter, validate, full_key)
class Config:
    """Plover's configuration: a validating, caching layer over a ConfigParser.

    Options are declared in _OPTIONS and accessed with mapping syntax
    (e.g. config["machine_type"]); validated values are cached per full key.
    """

    def __init__(self, path=None):
        self._config = None
        # Cache of validated values, keyed by each option's (possibly composite) full key.
        self._cache = {}
        # A convenient place for other code to store a file name.
        self.path = path
        self.clear()

    def load(self):
        """(Re)load the configuration from self.path.

        :raises InvalidConfigurationError: if the file cannot be parsed.
        """
        self.clear()
        with open(self.path, encoding="utf-8") as fp:
            try:
                self._config.read_file(fp)
            except configparser.Error as e:
                # Chain the original parse error for easier debugging.
                raise InvalidConfigurationError(str(e)) from e

    def clear(self):
        """Reset to an empty configuration."""
        self._config = configparser.RawConfigParser()
        self._cache.clear()

    def save(self):
        """Write the configuration back to self.path (atomically, via a temp file)."""
        with resource_update(self.path) as temp_path:
            with open(temp_path, mode="w", encoding="utf-8") as fp:
                self._config.write(fp)

    def _set(self, section, option, value):
        # Create the section on demand so option setters don't have to.
        if not self._config.has_section(section):
            self._config.add_section(section)
        self._config.set(section, option, value)

    # Note: order matters, e.g. machine_type comes before
    # machine_specific_options and system_keymap because
    # the latter depend on the former.
    _OPTIONS = OrderedDict(
        (opt.name, opt)
        for opt in [
            # Output.
            choice_option(
                "space_placement",
                ("Before Output", "After Output"),
                OUTPUT_CONFIG_SECTION,
            ),
            boolean_option("start_attached", False, OUTPUT_CONFIG_SECTION),
            boolean_option("start_capitalized", False, OUTPUT_CONFIG_SECTION),
            int_option(
                "undo_levels",
                DEFAULT_UNDO_LEVELS,
                MINIMUM_UNDO_LEVELS,
                None,
                OUTPUT_CONFIG_SECTION,
            ),
            int_option(
                "time_between_key_presses",
                DEFAULT_TIME_BETWEEN_KEY_PRESSES,
                MINIMUM_TIME_BETWEEN_KEY_PRESSES,
                None,
                OUTPUT_CONFIG_SECTION,
            ),
            # Logging.
            path_option(
                "log_file_name",
                expand_path("strokes.log"),
                LOGGING_CONFIG_SECTION,
                "log_file",
            ),
            boolean_option("enable_stroke_logging", False, LOGGING_CONFIG_SECTION),
            boolean_option("enable_translation_logging", False, LOGGING_CONFIG_SECTION),
            # GUI.
            boolean_option("start_minimized", False, "Startup", "Start Minimized"),
            boolean_option("show_stroke_display", False, "Stroke Display", "show"),
            boolean_option(
                "show_suggestions_display", False, "Suggestions Display", "show"
            ),
            opacity_option("translation_frame_opacity", "Translation Frame", "opacity"),
            boolean_option("classic_dictionaries_display_order", False, "GUI"),
            # Plugins.
            enabled_extensions_option(),
            # Machine.
            boolean_option("auto_start", False, MACHINE_CONFIG_SECTION),
            plugin_option(
                "machine_type", "machine", "Keyboard", MACHINE_CONFIG_SECTION
            ),
            machine_specific_options(),
            # System.
            plugin_option(
                "system_name", "system", DEFAULT_SYSTEM_NAME, "System", "name"
            ),
            system_keymap_option(),
            dictionaries_option(),
        ]
    )

    def _lookup(self, key):
        """Resolve a short key to its (full key, ConfigOption) pair."""
        name = key[0] if isinstance(key, tuple) else key
        opt = self._OPTIONS[name]
        if opt.full_key is not None:
            key = opt.full_key(self, key)
        return key, opt

    def __getitem__(self, key):
        """Return the validated value for an option, caching the result.

        Falls back to the option's default on missing or invalid raw values.
        """
        key, opt = self._lookup(key)
        if key in self._cache:
            return self._cache[key]
        try:
            value = opt.validate(self, key, opt.getter(self, key))
        except (configparser.NoOptionError, KeyError):
            value = opt.default(self, key)
        except InvalidConfigOption as e:
            log.error("invalid value for %r option", opt.name, exc_info=True)
            value = e.fixed_value
        self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        """Validate and store a new value for an option."""
        key, opt = self._lookup(key)
        # Bug fix: validators expect the Config instance (as in __getitem__ and
        # update()), not the underlying RawConfigParser.
        value = opt.validate(self, key, value)
        opt.setter(self, key, value)
        self._cache[key] = value

    def as_dict(self):
        """Return all options as a {name: value} dict."""
        return {opt.name: self[opt.name] for opt in self._OPTIONS.values()}

    def update(self, **kwargs):
        """Validate and apply several options at once.

        All new values are validated first against a scratch overlay
        (so interdependent options see each other's new values), and the
        configuration is only modified if every value is valid.
        """
        new_settings = []
        new_config = ChainMap({}, self)
        for opt in self._OPTIONS.values():
            if opt.name in kwargs:
                key = opt.name
                if opt.full_key is not None:
                    key = opt.full_key(new_config, key)
                value = opt.validate(new_config, key, kwargs[opt.name])
                new_settings.append((opt, key, value))
                new_config[opt.name] = value
        for opt, key, value in new_settings:
            opt.setter(self, key, value)
            self._cache[key] = value
"""Resolve pending jobs.
This is an interactive command used to resolve processing jobs
awaiting user decisions. It interacts with the workflow engine RPC
interface directly meaning that it can operate independently to the
Archivematica Dashboard or its API.
It impersonates the admin user, or the first match when multiple
exist.
Not recommended for general use, i.e. if Archivematica provides an
interactive command-line interface in the future it will rely on
public APIs. This is an alternative to the old mcp-rpc-cli command.
"""
from contrib.mcp.client import MCPClient
from django.contrib.auth import get_user_model
from django.core.management.base import CommandError
from lxml import etree
from main.management.commands import DashboardCommand
class Command(DashboardCommand):
    # Expose the module docstring as the command's --help text.
    help = __doc__
    def handle(self, *args, **options):
        """Entry point: run the interactive loop until the user quits or hits Ctrl-C."""
        try:
            self.loop(*args, **options)
        except KeyboardInterrupt:
            # Finish the current prompt line before saying goodbye.
            self.stdout.write("")
            self.warning("Bye!")
def loop(self, *args, **options):
admin_user = self.admin_user()
if not admin_user:
raise CommandError("Cannot find a superuser.")
client = MCPClient(admin_user)
while True:
self.success("Fetching packages awaiting decisions...")
packages = etree.fromstring(client.list())
if not len(packages):
self.error("No packages!")
self.print_pending_packages(packages)
choice = self.prompt_package_choice()
if choice == "q":
self.warning("Bye!")
break
elif choice == "u":
continue
try:
choice = int(choice)
if choice < 1:
raise ValueError()
except ValueError:
self.warning("Not a valid choice. Try again!")
continue
try:
package = packages[choice - 1]
except IndexError:
self.warning("Number not found. Try again!")
continue
package_id = package.find("./unit/unitXML/UUID").text
package_type = package.find("./unit/type").text
decisions = package.find("./choices")
job_id = package.find("./UUID").text
job = {
job["id"]: job["description"]
for job in client.get_unit_status(package_id)["jobs"]
}.get(job_id)
while True:
self.print_pending_job_decisions(
package_type, package_id, job, decisions
)
choice = self.prompt_decision_choice(decisions)
if choice == "q":
break
try:
choice = int(choice)
if choice < 1:
raise ValueError()
except ValueError:
self.warning("Not a valid choice. Try again!")
continue
try:
selected = decisions[choice - 1]
except IndexError:
self.warning("Number not found. Try again!")
continue
chain_id = selected.find("./chainAvailable").text
try:
client.execute_unit(package_id, chain_id)
break
except:
self.error("There was a problem executing the selected choice")
def admin_user(self):
UserModel = get_user_model()
return UserModel.objects.filter(is_superuser=True).first()
def prompt_package_choice(self):
"""Prompts the user to choose a package."""
self.stdout.write("╔═════════════════════════════════════╗")
self.stdout.write("║ [q] to quit ║")
self.stdout.write("║ [u] to update ║")
self.stdout.write("║ or number to choose a package. ║")
self.stdout.write("╚═════════════════════════════════════╝")
return input("Please enter your choice: ")
def prompt_decision_choice(self, decision):
"""Prompts the user to resolve a pending package."""
self.stdout.write("╔═════════════════════════════════════╗")
self.stdout.write("║ [q] to quit ║")
self.stdout.write("║ or number to choose a decision. ║")
self.stdout.write("╚═════════════════════════════════════╝")
return input("Please enter your choice: ")
def print_pending_packages(self, packages):
for idx, choice in enumerate(packages.getchildren(), 1):
package_type = choice.find("./unit/type").text
package_id = choice.find("./unit/unitXML/UUID").text
self.stdout.write(f" [{idx}] {package_type} {package_id}")
def print_pending_job_decisions(self, package_type, package_id, job, decisions):
self.stdout.write(f"{package_type}: {package_id}")
self.stdout.write(f"Job: {job}")
for idx, choice in enumerate(decisions.getchildren(), 1):
description = choice.find("./description").text
self.stdout.write(f" [{idx}] {description}")
|
projects | export | #!/usr/bin/env python3
"""List assets and attributes of account and commodities.
This script:
- produces a table of postings for the assets and liabilities,
- produces a table of per-account attributes,
- produces a table of per-commodity attributes,
- joins these tables,
- outputs them to a CSV file.
The upload-to-sheets program can then be used to replace the contents
of an existing sheet inside a Google Sheets doc from which various
reports to track one's portfolio can be produced, and updated with
live market data using the =GOOGLEFINANCE() function.
In theory, this script could eventually be replaced with an Beancount
Query Language query. However, BQL is not there yet.
"""
__copyright__ = "Copyright (C) 2018 Martin Blais"
__license__ = "GNU GPLv2"
import csv
import datetime
import logging
import re
from decimal import Decimal
from typing import Any, Dict, List, NamedTuple, Set, Tuple
import click
from beancount import loader
from beancount.core import account, account_types, data, getters, prices
from beancount.core.number import ONE, D
from beancount.ops import summarize
from beancount.parser import options
Header = List[str]
Rows = List[List[Any]]
Table = NamedTuple("Table", [("header", Header), ("rows", Rows)])
def get_metamap_table(
    metamap: Dict[str, data.Directive], attributes: List[str], getter
) -> Table:
    """Produce a Table of per-directive attributes.

    Args:
      metamap: Mapping from key (e.g. currency or account name) to directive.
      attributes: Column names; attributes[0] labels the key column and the
        remaining names are looked up on each directive via `getter`.
      getter: Function (directive, attribute-name) -> value.
    Returns:
      A Table with one row per entry of `metamap`, sorted by key.
    """
    # (The previous version assigned an unused local `header`; removed.)
    attrlist = attributes[1:]
    rows = [
        [key] + [getter(value, attr) for attr in attrlist]
        for key, value in metamap.items()
    ]
    return Table(attributes, sorted(rows))
def get_commodities_table(entries: data.Entries, attributes: List[str]) -> Table:
    """Produce a Table of per-commodity attributes."""
    commodities = getters.get_commodity_directives(entries)

    # Attribute values come straight from each commodity's metadata.
    def lookup(entry, key):
        return entry.meta.get(key, None)

    return get_metamap_table(commodities, ["currency"] + attributes, lookup)
def get_accounts_table(
    entries: data.Entries, attributes: List[str]
) -> Tuple[Table, Dict[str, data.Open]]:
    """Produce a Table of per-account attributes.

    Returns:
      A pair of (table, accounts_map) where accounts_map maps each account
      name to its Open directive.  (The previous annotation claimed only a
      Table was returned, which was wrong.)
    """
    oc_map = getters.get_account_open_close(entries)
    # Renamed the comprehension variable: it previously shadowed the
    # `account` module used below in getter().
    accounts_map = {acc: dopen for acc, (dopen, _) in oc_map.items()}
    header = ["account"] + attributes

    # Default attribute values used when no ancestor defines them.
    defaults = {"tax": "taxable", "liquid": False}

    def getter(entry, key):
        """Look up the value, walking up the accounts tree if absent."""
        value = entry.meta.get(key, None)
        if value is not None:
            return value
        account_name = account.parent(entry.account)
        if not account_name:
            return defaults.get(key, None)
        parent_entry = accounts_map.get(account_name, None)
        if not parent_entry:
            return defaults.get(key, None)
        return getter(parent_entry, key)

    return get_metamap_table(accounts_map, header, getter), accounts_map
def abbreviate_account(acc: str, accounts_map: Dict[str, data.Open]):
    """Compute an abbreviated version of the account name."""
    # Walk up the accounts tree looking for an ancestor whose Open
    # directive carries a "root: TRUE" attribute; if found, abbreviate
    # from that ancestor instead of the full name.
    ancestor = acc
    while ancestor:
        ancestor = account.parent(ancestor)
        dopen = accounts_map.get(ancestor, None)
        if dopen and dopen.meta.get("root", False):
            acc = ancestor
            break

    # Strip the account type component.
    acc = account.sans_root(acc)

    # Strip a leading two-letter country code, if present.
    if re.match(r"[A-Z][A-Z]", acc):
        acc = account.sans_root(acc)

    return acc
def get_postings_table(
    entries: data.Entries,
    options_map: Dict,
    accounts_map: Dict[str, data.Open],
    threshold: Decimal = D("0.01"),
) -> Table:
    """Enumerate all the postings.

    Produces one row per position held in a balance-sheet account
    (assets and liabilities only).

    Note: `threshold` is currently unused; it is kept for interface
    compatibility with existing callers.
    """
    header = [
        "account",
        "account_abbrev",
        "number",
        "currency",
        "cost_number",
        "cost_currency",
        "cost_date",
    ]
    balances, _ = summarize.balance_by_account(entries, compress_unbooked=True)
    acctypes = options.get_account_types(options_map)
    rows = []
    for acc, balance in sorted(balances.items()):
        # Keep only the balance sheet accounts.
        acctype = account_types.get_account_type(acc)
        if acctype not in (acctypes.assets, acctypes.liabilities):
            continue
        # Create a row for each of the positions.
        for pos in balance:
            acc_abbrev = abbreviate_account(acc, accounts_map)
            rows.append(
                [
                    acc,
                    acc_abbrev,
                    pos.units.number,
                    pos.units.currency,
                    # Positions without a cost basis fall back to a unit
                    # cost of ONE in their own currency.
                    pos.cost.number if pos.cost else ONE,
                    pos.cost.currency if pos.cost else pos.units.currency,
                    pos.cost.date if pos.cost else None,
                ]
            )
    return Table(header, rows)
PRICE_Q = D("0.0000001")
def get_prices_table(entries: data.Entries, main_currency: str) -> Table:
    """Enumerate all the prices seen.

    Note: `main_currency` is accepted for signature parity with the
    rates table builder but is not used here.
    """
    price_map = prices.build_price_map(entries)
    header = ["currency", "cost_currency", "price_file"]
    rows = []
    for pair in price_map.keys():
        _, latest = prices.get_latest_price(price_map, pair)
        if latest is None:
            # No price point available for this pair; skip it.
            continue
        base_currency, quote_currency = pair
        rows.append([base_currency, quote_currency, latest.quantize(PRICE_Q)])
    return Table(header, rows)
def get_rates_table(
    entries: data.Entries, currencies: Set[str], main_currency: str
) -> Table:
    """Enumerate all the exchange rates into the main currency."""
    price_map = prices.build_price_map(entries)
    header = ["cost_currency", "rate_file"]
    rows = []
    for cur in currencies:
        _, latest = prices.get_latest_price(price_map, (cur, main_currency))
        if latest is None:
            # No conversion rate known for this currency; skip it.
            continue
        rows.append([cur, latest.quantize(PRICE_Q)])
    return Table(header, rows)
def join(main_table: Table, *col_tables: Tuple[Tuple[Tuple[str], Table]]) -> Table:
    """Join a table with a number of other tables.

    col_tables is a tuple of (columns, table) pairs, where `columns` names
    the join-key columns shared with `main_table`.  Rows of `main_table`
    with no match in a joined table are padded with None for that table's
    non-key columns.
    """
    # Build the joined header: the main header followed by each joined
    # table's non-key columns.
    new_header = list(main_table.header)
    for cols, col_table in col_tables:
        header = list(col_table.header)
        for col in cols:
            assert col in main_table.header
            header.remove(col)
        new_header.extend(header)

    # For each joined table, build a map from key tuple to row, and record
    # how many non-key columns the table contributes.
    col_maps = []
    for cols, col_table in col_tables:
        indexes_main = [main_table.header.index(col) for col in cols]
        indexes_col = [col_table.header.index(col) for col in cols]
        col_map = {}
        for row in col_table.rows:
            key = tuple(row[index] for index in indexes_col)
            col_map[key] = row
        assert len(col_map) == len(col_table.rows), cols
        # Width of the padding for unmatched keys.  BUGFIX: previously a
        # single pad row was computed from loop variables leaked out of
        # the loop above (i.e. from the *last* joined table) and reused
        # for every table, misaligning columns whenever the joined tables
        # had different widths.
        num_extra = len(col_table.header) - len(indexes_col)
        col_maps.append((indexes_main, indexes_col, col_map, num_extra))

    rows = []
    for row in main_table.rows:
        row = list(row)
        for indexes_main, indexes_col, col_map, num_extra in col_maps:
            key = tuple(row[index] for index in indexes_main)
            other_row = col_map.get(key, None)
            if other_row is not None:
                # Drop the key columns; keep only the contributed values.
                other_row = list(other_row)
                for index in reversed(indexes_col):
                    del other_row[index]
            else:
                other_row = [None] * num_extra
            row.extend(other_row)
        rows.append(row)
    return Table(new_header, rows)
def reorder_columns(table: Table, new_headers: List[str]) -> Table:
    """Reorder the columns of a table to match the desired new headers."""
    assert len(table.header) == len(new_headers)
    # Map each requested header to its position in the existing table.
    positions = [table.header.index(name) for name in new_headers]
    reordered = [[row[pos] for pos in positions] for row in table.rows]
    return Table(new_headers, reordered)
def write_table(table: Table, outfile: str):
    """Write a table to a CSV file.

    Note: despite the annotation, `outfile` is an open, writable file
    object (as produced by click.File), not a path string.
    """
    writer = csv.writer(outfile)
    writer.writerow(table.header)
    for row in table.rows:
        writer.writerow(row)
@click.command(help=__doc__)
@click.argument("filename")
@click.option(
    "--currency", "-C", help="Output currency (default is first operating currency)."
)
@click.option(
    "--ignore-options",
    is_flag=True,
    help=(
        "Ignore options symbols before export. "
        "This assumes a separate options trading strategy."
    ),
)
@click.option("--dry-run", "-n", is_flag=True)
@click.option(
    "--insert-date", is_flag=True, help="Insert the date in the header of the output."
)
@click.option(
    "--output",
    "-o",
    type=click.File("w"),
    help="CSV filename to write out the final joined table to.",
)
@click.option(
    "--output_commodities",
    "-c",
    type=click.File("w"),
    help="CSV filename to write out the commodities table to.",
)
@click.option(
    "--output_accounts",
    "-a",
    type=click.File("w"),
    help="CSV filename to write out the accounts table to.",
)
@click.option(
    "--output_prices",
    "-p",
    type=click.File("w"),
    help="CSV filename to write out the prices table to.",
)
@click.option(
    "--output_rates",
    "-r",
    type=click.File("w"),
    help="CSV filename to write out the rates table to.",
)
@click.option(
    "--output_postings",
    "-m",
    type=click.File("w"),
    help="CSV filename to write out the postings table to.",
)
def main(
    filename,
    currency,
    ignore_options,
    dry_run,
    insert_date,
    output,
    output_commodities,
    output_accounts,
    output_prices,
    output_rates,
    output_postings,
):
    """Build, join and export the portfolio tables to CSV files.

    NOTE(review): `dry_run` is accepted but never consulted in this body —
    confirm whether it should suppress the writes below.
    """
    # Load the file contents.
    # NOTE(review): `errors` from the loader is ignored here; parsing
    # problems are silently dropped.
    entries, errors, options_map = loader.load_file(filename)

    # Initialize main output currency.
    main_currency = currency or options_map["operating_currency"][0]
    logging.info("Operating currency: %s", main_currency)

    # Get the map of commodities to their meta tags.
    commodities_table = get_commodities_table(
        entries, ["export", "assetcls", "strategy", "issuer"]
    )
    if output_commodities is not None:
        write_table(commodities_table, output_commodities)

    # Get a table of the commodity names.
    #
    # Note: We're fetching the table separately in order to avoid changes to the
    # spreadsheet upstream, and want to tack on the values as new columns on the
    # right.
    names_table = get_commodities_table(entries, ["name"])

    # Get the map of accounts to their meta tags.
    accounts_table, accounts_map = get_accounts_table(entries, ["tax", "liquid"])
    if output_accounts is not None:
        write_table(accounts_table, output_accounts)

    # Enumerate the list of assets.
    postings_table = get_postings_table(entries, options_map, accounts_map)
    if output_postings is not None:
        write_table(postings_table, output_postings)

    # Get the list of prices.
    prices_table = get_prices_table(entries, main_currency)
    if output_prices is not None:
        write_table(prices_table, output_prices)

    # Get the list of exchange rates (one per cost currency seen in the
    # postings table).
    index = postings_table.header.index("cost_currency")
    currencies = set(row[index] for row in postings_table.rows)
    rates_table = get_rates_table(entries, currencies, main_currency)
    if output_rates is not None:
        write_table(rates_table, output_rates)

    # Join all the tables onto the postings table, keyed by the listed
    # column(s) of each auxiliary table.
    joined_table = join(
        postings_table,
        (("currency",), commodities_table),
        (("account",), accounts_table),
        (("currency", "cost_currency"), prices_table),
        (("cost_currency",), rates_table),
        (("currency",), names_table),
    )

    # Reorder columns.
    # We do this in order to avoid having to change the spreadsheet when we add new columns.
    headers = list(joined_table.header)
    headers.remove("issuer")
    headers.append("issuer")
    final_table = reorder_columns(joined_table, headers)

    # Filter table removing rows to ignore (rows whose "export" attribute
    # is the string "ignore", case-insensitively).
    index = final_table.header.index("export")
    rows = [
        row
        for row in final_table.rows
        if row[index] is None or row[index].lower() != "ignore"
    ]

    # Filter out options if requested (symbols shaped like OCC option
    # tickers, e.g. "ABC_180119C25").
    if ignore_options:
        index = final_table.header.index("currency")
        is_option = re.compile(r"[A-Z]+_\d{6,}[CP]\d+", re.I).match
        rows = [row for row in rows if row[index] is None or not is_option(row[index])]

    table = Table(final_table.header, rows)
    if output is not None:
        if insert_date:
            # Append a timestamp to the first header cell so the sheet
            # shows when it was last refreshed.
            table[0][0] += " ({:%Y-%m-%d %H:%M})".format(datetime.datetime.now())
        write_table(table, output)
if __name__ == "__main__":
main()
|
SQL | DBF | """
Navegacion por una tabla de datos mediante instrucciones tipo xBase.
"""
import collections
import sqlite3
import time
class Almacen:
    # Plain attribute container ("warehouse"): a lightweight record whose
    # attributes are set dynamically to hold field values.
    pass
class DBF:
    """
    Provides access to an SQL query in a dBase style: an initial full read
    of the rowids is done, then the cursor moves through them, reading the
    current record and assigning the field values to attributes with the
    same names as the fields::

        import sqlite3 as sqlite
        import sys
        reload(sys)
        sys.setdefaultencoding( "latin-1" )
        con = sqlite.connect("v003.db")
        con.text_factory = lambda x: unicode(x, "latin-1", "ignore")
        dbf = DBF( con, "CUENTAS", "CUENTA,NOMBRE", condicion='CUENTA like "43%"', orden="cuenta asc" )
        if dbf.leer():
            while not dbf.eof:
                print dbf.CUENTA, dbf.NOMBRE
                dbf.skip()
        dbf.condicion = "cuenta like '4%'"
        dbf.select = "NOMBRE, cuenta"
        if dbf.leer():
            while not dbf.eof:
                print dbf.NOMBRE, dbf.cuenta
                if not dbf.skip():
                    break
        dbf.cerrar()
        con.close()

    NOTE(review): every SQL statement in this class is built by string
    interpolation.  Table name, field list, condition and order must come
    from trusted code only, never from user input (SQL injection risk).
    """

    def __init__(self, conexion, ctabla, select, condicion="", orden=""):
        """
        Open a cursor.

        @param conexion: database connection; the working cursor is created from it
        @param ctabla: str, main table of the query
        @param select: str, comma-separated list of fields, as in an SQL select
        @param condicion: str, WHERE condition
        @param orden: str, ORDER BY expression
        """
        self.conexion = conexion
        self.cursor = conexion.cursor()
        self.ponSelect(select)
        self.condicion = condicion
        self.orden = orden
        self.ctabla = ctabla
        self.eof = True
        self.bof = True
        self.liIDs = []

    def reccount(self):
        """
        Return the total number of records.
        """
        return len(self.liIDs)

    def ponSelect(self, select):
        # Store the field list upper-cased and split it into field names.
        self.select = select.upper()
        self.liCampos = [campo.strip() for campo in self.select.split(",")]

    def ponOrden(self, orden):
        """
        Change the read order; takes effect on the next full read.
        """
        self.orden = orden

    def ponCondicion(self, condicion):
        """
        Change the condition; takes effect on the next full read.
        """
        self.condicion = condicion

    def leer(self):
        """
        Run the query and read all the record IDs.

        @return: True/False depending on whether records were found
        """
        self.bof = True
        self.recno = -1
        resto = ""
        if self.condicion:
            resto += "WHERE %s" % self.condicion
        if self.orden:
            if resto:
                resto += " "
            resto += "ORDER BY %s" % self.orden
        cSQL = "SELECT rowid FROM %s %s" % (self.ctabla, resto)
        self.cursor.execute(cSQL)
        self.liIDs = self.cursor.fetchall()
        return self.gotop()

    def leerDispatch(self, dispatch, chunk=200):
        """
        Read the rowids in chunks, calling ``dispatch(rows_so_far)`` after
        each chunk; the callback may return True to stop the read early.

        @return: True if rows are still pending in the buffer cursor
        """
        self.cursorBuffer = self.conexion.cursor()
        self.bof = True
        self.recno = -1
        self.siBufferPendiente = True
        resto = ""
        if self.condicion:
            resto += "WHERE %s" % self.condicion
        if self.orden:
            if resto:
                resto += " "
            resto += "ORDER BY %s" % self.orden
        cSQL = "SELECT rowid FROM %s %s" % (self.ctabla, resto)
        self.cursorBuffer.execute(cSQL)
        self.liIDs = []
        while True:
            li = self.cursorBuffer.fetchmany(chunk)
            if li:
                self.liIDs.extend(li)
            if len(li) < chunk:
                # A short chunk means the query is exhausted.
                self.siBufferPendiente = False
                self.cursorBuffer.close()
                break
            siparar = dispatch(len(self.liIDs))
            if siparar:
                break
        return self.siBufferPendiente

    def leerBuffer(self, segundos=1.0, chunk=200):
        """
        Read rowids in chunks for at most ``segundos`` seconds; any
        remaining rows can be fetched later with leerMasBuffer().

        @return: True if rows are still pending in the buffer cursor
        """
        self.cursorBuffer = self.conexion.cursor()
        self.bof = True
        self.recno = -1
        self.siBufferPendiente = True
        resto = ""
        if self.condicion:
            resto += "WHERE %s" % self.condicion
        if self.orden:
            if resto:
                resto += " "
            resto += "ORDER BY %s" % self.orden
        cSQL = "SELECT rowid FROM %s %s" % (self.ctabla, resto)
        self.cursorBuffer.execute(cSQL)
        self.liIDs = []
        xInicio = time.time()
        while True:
            li = self.cursorBuffer.fetchmany(chunk)
            if li:
                self.liIDs.extend(li)
            if len(li) < chunk:
                # A short chunk means the query is exhausted.
                self.siBufferPendiente = False
                self.cursorBuffer.close()
                break
            xt = time.time() - xInicio
            if xt > segundos:
                break
        return self.siBufferPendiente

    def leerMasBuffer(self, segundos=1.0, chunk=200):
        """
        Continue a buffered read started with leerBuffer(), again limited
        to roughly ``segundos`` seconds.

        @return: True if rows are still pending in the buffer cursor
        """
        if not self.siBufferPendiente:
            return True
        xInicio = time.time()
        while True:
            li = self.cursorBuffer.fetchmany(chunk)
            if li:
                self.liIDs.extend(li)
            if len(li) < chunk:
                self.siBufferPendiente = False
                self.cursorBuffer.close()
                break
            xt = time.time() - xInicio
            if xt > segundos:
                break
        return self.siBufferPendiente

    def _leerUno(self, numRecno):
        """
        Read one record and assign its field values to attributes named
        after the fields.
        """
        self.ID = self.liIDs[numRecno][0]
        self.cursor.execute(
            "SELECT %s FROM %s WHERE rowid =%d" % (self.select, self.ctabla, self.ID)
        )
        liValores = self.cursor.fetchone()
        for numCampo, campo in enumerate(self.liCampos):
            setattr(self, campo, liValores[numCampo])

    def leeOtroCampo(self, recno, campo):
        """
        Read a single extra field (not in the select list) of record
        ``recno`` and return its value.
        """
        xid = self.rowid(recno)
        self.cursor.execute(
            "SELECT %s FROM %s WHERE rowid =%d" % (campo, self.ctabla, xid)
        )
        liValores = self.cursor.fetchone()
        return liValores[0]

    def goto(self, numRecno):
        """
        Position on a specific record, reading its fields.

        Out-of-range positions set both eof and bof and return False.
        """
        if numRecno < 0 or numRecno >= self.reccount():
            self.eof = True
            self.bof = True
            self.recno = -1
            return False
        else:
            self._leerUno(numRecno)
            self.eof = False
            self.bof = False
            self.recno = numRecno
            return True

    def gotoCache(self, numRecno):
        # Only re-read if we are not already on that record.
        if numRecno != self.recno:
            self.goto(numRecno)

    def rowid(self, numRecno):
        """
        Return the id of record number numRecno.

        @param numRecno: record number.
        """
        return self.liIDs[numRecno][0]

    def buscarID(self, xid):
        """
        Find the recno of an ID (linear search), or -1 if not found.

        @param xid: id number.
        """
        for r in range(self.reccount()):
            if self.rowid(r) == xid:
                return r
        return -1

    def skip(self, num=1):
        """
        Move forward ``num`` records (may be negative).
        """
        return self.goto(num + self.recno)

    def gotop(self):
        """
        Jump to record number 0.
        """
        return self.goto(0)

    def gobottom(self):
        """
        Jump to the last record.
        """
        return self.goto(self.reccount() - 1)

    def cerrar(self):
        """
        Close the cursor.
        """
        self.cursor.close()

    def borrarLista(self, listaRecnos, dispatch=None):
        """
        Delete the records whose recnos are listed, optionally reporting
        progress through ``dispatch(n)``.  Commits once at the end.
        """
        for n, recno in enumerate(listaRecnos):
            if dispatch:
                dispatch(n)
            cSQL = "DELETE FROM %s WHERE rowid = %d" % (self.ctabla, self.rowid(recno))
            self.cursor.execute(cSQL)
        self.conexion.commit()

    def pack(self):
        # Reclaim free space in the database file.
        self.cursor.execute("VACUUM")
        self.conexion.commit()

    def borrarConFiltro(self, filtro):
        """
        Delete every record matching the given WHERE filter.
        """
        cSQL = "DELETE FROM %s WHERE %s" % (self.ctabla, filtro)
        self.cursor.execute(cSQL)
        self.conexion.commit()

    def borrarROWID(self, rowid):
        """
        Delete the record with the given rowid.
        """
        cSQL = "DELETE FROM %s WHERE rowid = %d" % (self.ctabla, rowid)
        self.cursor.execute(cSQL)
        self.conexion.commit()

    def borrarBase(self, recno):
        """
        Internal routine to delete one record (does not touch liIDs).
        """
        if self.goto(recno):
            self.borrarROWID(self.rowid(recno))
            return True
        else:
            return False

    def borrar(self, recno):
        """
        Delete a record and remove it from the list of IDs.
        """
        if self.borrarBase(recno):
            del self.liIDs[recno]
            return True
        else:
            return False

    def insertar(self, regNuevo, liCampos=None):
        """
        Insert a record.

        @param regNuevo: record object whose attributes hold the values to insert.
        @param liCampos: pass a list if the fields to insert differ from the select list.
        @return: the recno of the newly inserted record (after a full re-read).
        """
        if liCampos is None:
            liCampos = self.liCampos
        campos = ""
        values = ""
        liValues = []
        for campo in liCampos:
            if hasattr(regNuevo, campo):
                campos += campo + ","
                values += "?,"
                liValues.append(getattr(regNuevo, campo))
        # Drop the trailing commas from both lists.
        campos = campos[:-1]
        values = values[:-1]
        cSQL = "insert into %s(%s) values(%s)" % (self.ctabla, campos, values)
        self.cursor.execute(cSQL, liValues)
        idNuevo = self.cursor.lastrowid
        self.conexion.commit()
        self.leer()
        return self.buscarID(idNuevo)

    def insertarReg(self, regNuevo, siReleer):
        """
        Insert a record, taking every upper-cased attribute of ``regNuevo``
        as a field.

        @param regNuevo: record object whose attributes hold the values to insert.
        @return: the recno of the new record if siReleer, else -1 (stale list).
        """
        campos = ""
        values = ""
        liValues = []
        for campo in dir(regNuevo):
            if campo.isupper():
                campos += campo + ","
                values += "?,"
                liValues.append(getattr(regNuevo, campo))
        campos = campos[:-1]
        values = values[:-1]
        cSQL = "insert into %s(%s) values(%s)" % (self.ctabla, campos, values)
        self.cursor.execute(cSQL, liValues)
        idNuevo = self.cursor.lastrowid
        self.conexion.commit()

        # Just in case -> blob problems: recreate the cursor after inserting.
        self.cursor.close()
        self.cursor = self.conexion.cursor()

        if siReleer:
            self.leer()
        return self.buscarID(idNuevo)

    def insertarSoloReg(self, regNuevo, okCommit=True, okCursorClose=True):
        """
        Insert a record without re-reading the ID list.

        @param regNuevo: record object whose attributes hold the values to insert.
        @return: the newly inserted id
        """
        campos = ""
        values = ""
        liValues = []
        for campo in dir(regNuevo):
            if campo.isupper():
                campos += campo + ","
                values += "?,"
                liValues.append(getattr(regNuevo, campo))
        campos = campos[:-1]
        values = values[:-1]
        cSQL = "insert into %s(%s) values(%s)" % (self.ctabla, campos, values)
        self.cursor.execute(cSQL, liValues)
        idNuevo = self.cursor.lastrowid
        if okCommit:
            self.conexion.commit()
        if okCursorClose:
            # Just in case -> blob problems: recreate the cursor.
            self.cursor.close()
            self.cursor = self.conexion.cursor()

        return idNuevo

    def insertarLista(self, lista, dispatch):
        """
        Bulk-insert a list of record objects, committing every 1000 rows.
        Records that violate integrity constraints are collected and
        returned instead of raising.

        @return: the list of records that failed with IntegrityError
        """
        if len(lista) == 0:
            return
        campos = ""
        values = ""
        liCampos = []
        # Field names are taken from the upper-cased attributes of the
        # first record; all records are assumed to share them.
        for campo in dir(lista[0]):
            if campo.isupper():
                campos += campo + ","
                values += "?,"
                liCampos.append(campo)
        campos = campos[:-1]
        values = values[:-1]

        liError = []

        cSQL = "insert into %s(%s) values(%s)" % (self.ctabla, campos, values)
        for n, reg in enumerate(lista):
            liValues = []
            for campo in liCampos:
                liValues.append(getattr(reg, campo))
            try:
                self.cursor.execute(cSQL, liValues)
            except sqlite3.IntegrityError:
                liError.append(reg)
            if dispatch:
                dispatch(n)
            if n % 1000 == 0:
                self.conexion.commit()
        self.conexion.commit()
        return liError

    def soloGrabar(self, dicNuevo, siCommit=False):
        """
        Insert one record from a dict of field -> value, optionally
        committing immediately.
        """
        campos = ""
        values = ""
        liValues = []
        for campo in dicNuevo:
            campos += campo + ","
            values += "?,"
            liValues.append(dicNuevo[campo])
        campos = campos[:-1]
        values = values[:-1]
        cSQL = "insert into %s(%s) values(%s)" % (self.ctabla, campos, values)
        self.cursor.execute(cSQL, liValues)
        if siCommit:
            self.conexion.commit()

    def baseRegistro(self):
        # Return an empty record object ready to be filled in.
        return Almacen()

    def modificar(self, recno, regNuevo, liCampos=None):
        """
        Modify a record.

        @param recno: record to modify.
        @param regNuevo: data holder with the attributes and values to modify.
        @param liCampos: pass a list if the fields to modify differ from the select list.
        @return: the (possibly different) recno of the modified record.

        NOTE(review): ``siReleer`` is initialized to True and never set to
        False, so a full re-read happens whenever any field changed; the
        ``if campo in self.orden`` branch below is redundant as written.
        """
        if liCampos is None:
            liCampos = self.liCampos
        self.goto(recno)
        campos = ""
        liValues = []
        siReleer = True
        for campo in liCampos:
            if hasattr(regNuevo, campo):
                valorNue = getattr(regNuevo, campo)
                valorAnt = getattr(self, campo)
                # Only include fields whose value actually changed.
                if valorAnt != valorNue:
                    campos += campo + "= ?,"
                    liValues.append(valorNue)
                    if campo in self.orden:
                        siReleer = True
        if campos:
            campos = campos[:-1]
            rid = self.rowid(recno)
            cSQL = "UPDATE %s SET %s WHERE ROWID = %d" % (self.ctabla, campos, rid)
            self.cursor.execute(cSQL, liValues)
            self.conexion.commit()
            if siReleer:
                self.leer()
                return self.buscarID(rid)
        return recno

    def modificarReg(self, recno, regNuevo):
        """
        Modify a record.

        @param recno: record to modify.
        @param regNuevo: data holder with the attributes and values to modify.
        """
        rid = self.rowid(recno)
        self.modificarROWID(rid, regNuevo)

    def modificarROWID(self, rowid, regNuevo):
        """
        Modify a record identified by rowid, writing every upper-cased
        attribute of ``regNuevo``.

        @param rowid: rowid of the record to modify.
        @param regNuevo: data holder with the attributes and values to modify.
        """
        campos = ""
        liValues = []
        for campo in dir(regNuevo):
            if campo.isupper():
                campos += campo + "= ?,"
                liValues.append(getattr(regNuevo, campo))
        campos = campos[:-1]
        cSQL = "UPDATE %s SET %s WHERE ROWID = %d" % (self.ctabla, campos, rowid)
        self.cursor.execute(cSQL, liValues)
        self.conexion.commit()

    def copiaDBF(self):
        """
        Create a full copy of this object, to do a different read with the
        same basic data.
        """
        return DBF(self.conexion, self.ctabla, self.select, self.condicion, self.orden)

    def registroActual(self):
        """
        Return a record object with the current field values.
        """
        reg = Almacen()
        for campo in self.liCampos:
            setattr(reg, campo, getattr(self, campo))
        return reg

    def dicValores(self):
        # Return the current record as an ordered dict of field -> value.
        dic = collections.OrderedDict()
        for campo in self.liCampos:
            dic[campo] = getattr(self, campo)
        return dic

    def commit(self):
        # Expose the connection commit on the DBF object.
        self.conexion.commit()

    def borrarListaRaw(self, listaRowid, dispatch=None):
        """
        Delete the records whose rowids are listed (falsy rowids are
        skipped), optionally reporting progress through ``dispatch(n)``.
        Commits once at the end.
        """
        for n, rowid in enumerate(listaRowid):
            if dispatch:
                dispatch(n)
            if rowid:
                cSQL = "DELETE FROM %s WHERE rowid = %d" % (self.ctabla, rowid)
                self.cursor.execute(cSQL)
        self.conexion.commit()

    def nuevaColumna(self, nombre, tipo):
        """
        Add a new column of the given type to the table.
        """
        cSQL = 'ALTER TABLE %s ADD COLUMN "%s" "%s"' % (self.ctabla, nombre, tipo)
        self.cursor.execute(cSQL)
        self.conexion.commit()

    def maxCampo(self, campo):
        """
        Return the maximum value of a field, or 0 if the query returns
        no rows.
        """
        cSQL = "SELECT Max(%s) FROM %s" % (campo, self.ctabla)
        self.cursor.execute(cSQL)
        liData = self.cursor.fetchall()
        self.conexion.commit()
        if not liData:
            return 0
        else:
            return liData[0][0]
class DBFT(DBF):
    """
    Provides access to an SQL query in a dBase style: an initial full read
    of all the data is done, then the cursor moves through the rows,
    reading the current record and assigning the field values to
    attributes with the same names as the fields.
    """

    def __init__(self, conexion, ctabla, select, condicion="", orden=""):
        DBF.__init__(self, conexion, ctabla, select, condicion, orden)
        # Full row cache: unlike DBF, all rows are read up front by leer().
        self.liRows = []

    def reccount(self):
        """
        Return the total number of records.
        """
        return len(self.liRows)

    def leer(self):
        """
        Run the query and read all the records into memory.

        @return: True/False depending on whether records were found
        """
        self.bof = True
        self.recno = -1
        # ROWID is prepended to the field list so rowid() finds it at index 0.
        self.liCampos = ["ROWID"]
        self.liCampos.extend([campo.strip() for campo in self.select.split(",")])
        resto = ""
        if self.condicion:
            resto += "WHERE %s" % self.condicion
        if self.orden:
            if resto:
                resto += " "
            resto += "ORDER BY %s" % self.orden
        cSQL = "SELECT ROWID,%s FROM %s %s" % (self.select, self.ctabla, resto)
        self.cursor.execute(cSQL)
        self.liRows = self.cursor.fetchall()
        return self.gotop()

    def _leerUno(self, numRecno):
        """
        Read one record from the in-memory cache and assign its field
        values to attributes named after the fields.
        """
        liValores = self.liRows[numRecno]
        for numCampo, campo in enumerate(self.liCampos):
            setattr(self, campo, liValores[numCampo])

    def rowid(self, numRecno):
        """
        Return the id of record number numRecno.

        @param numRecno: record number.
        """
        return self.liRows[numRecno][0]

    def copiaDBF(self):
        """
        Create a full copy of this object, to do a different read with the
        same basic data.
        """
        return DBFT(self.conexion, self.ctabla, self.select, self.condicion, self.orden)
|
drafttaskpanels | task_orthoarray | # ***************************************************************************
# * (c) 2020 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides the task panel code for the Draft OrthoArray tool."""
## @package task_orthoarray
# \ingroup drafttaskpanels
# \brief Provides the task panel code for the Draft OrthoArray tool.
import Draft_rc # include resources, icons, ui files
import draftutils.utils as utils
import DraftVecUtils
import FreeCAD as App
import FreeCADGui as Gui
## \addtogroup drafttaskpanels
# @{
import PySide.QtGui as QtGui
from draftutils.messages import _err, _log, _msg
from draftutils.translate import translate
from FreeCAD import Units as U
from PySide.QtCore import QT_TRANSLATE_NOOP
# The module is used to prevent complaints from code checkers (flake8)
bool(Draft_rc.__name__)
class TaskPanelOrthoArray:
"""TaskPanel code for the OrthoArray command.
The names of the widgets are defined in the `.ui` file.
This `.ui` file `must` be loaded into an attribute
called `self.form` so that it is loaded into the task panel correctly.
In this class all widgets are automatically created
as `self.form.<widget_name>`.
The `.ui` file may use special FreeCAD widgets such as
`Gui::InputField` (based on `QLineEdit`) and
`Gui::QuantitySpinBox` (based on `QAbstractSpinBox`).
See the Doxygen documentation of the corresponding files in `src/Gui/`,
for example, `InputField.h` and `QuantitySpinBox.h`.
Attributes
----------
source_command: gui_base.GuiCommandBase
This attribute holds a reference to the calling class
of this task panel.
This parent class, which is derived from `gui_base.GuiCommandBase`,
is responsible for calling this task panel, for installing
certain callbacks, and for removing them.
It also delays the execution of the internal creation commands
by using the `draftutils.todo.ToDo` class.
See Also
--------
* https://forum.freecad.org/viewtopic.php?f=10&t=40007
* https://forum.freecad.org/viewtopic.php?t=5374#p43038
"""
    def __init__(self):
        """Load the .ui file into `self.form` and set all default values."""
        self.name = "Orthogonal array"
        _log(
            translate("draft", "Task panel:")
            + " {}".format(translate("draft", "Orthogonal array"))
        )
        # The .ui file must be loaded into an attribute
        # called `self.form` so that it is displayed in the task panel.
        ui_file = ":/ui/TaskPanel_OrthoArray.ui"
        self.form = Gui.PySideUic.loadUi(ui_file)

        icon_name = "Draft_Array"
        svg = ":/icons/" + icon_name
        pix = QtGui.QPixmap(svg)
        # Prefer the theme icon; fall back to the bundled SVG resource.
        icon = QtGui.QIcon.fromTheme(icon_name, QtGui.QIcon(svg))
        self.form.setWindowIcon(icon)
        self.form.setWindowTitle(translate("draft", "Orthogonal array"))
        self.form.label_icon.setPixmap(pix.scaled(32, 32))

        # -------------------------------------------------------------------
        # Default values for the internal function,
        # and for the task panel interface
        start_x = U.Quantity(100.0, App.Units.Length)
        start_y = start_x
        start_z = start_x
        # getUserPreferred()[2] is the unit string used by the input widgets.
        length_unit = start_x.getUserPreferred()[2]

        # Default interval vectors: one displacement per axis.
        self.v_x = App.Vector(start_x.Value, 0, 0)
        self.v_y = App.Vector(0, start_y.Value, 0)
        self.v_z = App.Vector(0, 0, start_z.Value)

        self.form.input_X_x.setProperty("rawValue", self.v_x.x)
        self.form.input_X_x.setProperty("unit", length_unit)
        self.form.input_X_y.setProperty("rawValue", self.v_x.y)
        self.form.input_X_y.setProperty("unit", length_unit)
        self.form.input_X_z.setProperty("rawValue", self.v_x.z)
        self.form.input_X_z.setProperty("unit", length_unit)

        self.form.input_Y_x.setProperty("rawValue", self.v_y.x)
        self.form.input_Y_x.setProperty("unit", length_unit)
        self.form.input_Y_y.setProperty("rawValue", self.v_y.y)
        self.form.input_Y_y.setProperty("unit", length_unit)
        self.form.input_Y_z.setProperty("rawValue", self.v_y.z)
        self.form.input_Y_z.setProperty("unit", length_unit)

        self.form.input_Z_x.setProperty("rawValue", self.v_z.x)
        self.form.input_Z_x.setProperty("unit", length_unit)
        self.form.input_Z_y.setProperty("rawValue", self.v_z.y)
        self.form.input_Z_y.setProperty("unit", length_unit)
        self.form.input_Z_z.setProperty("rawValue", self.v_z.z)
        self.form.input_Z_z.setProperty("unit", length_unit)

        # Default number of copies per axis (a flat 2 x 2 x 1 array).
        self.n_x = 2
        self.n_y = 2
        self.n_z = 1
        self.form.spinbox_n_X.setValue(self.n_x)
        self.form.spinbox_n_Y.setValue(self.n_y)
        self.form.spinbox_n_Z.setValue(self.n_z)

        # Persisted user preferences: fuse overlapping copies, and use
        # App::Link objects instead of plain copies.
        self.fuse = utils.get_param("Draft_array_fuse", False)
        self.use_link = utils.get_param("Draft_array_Link", True)
        self.form.checkbox_fuse.setChecked(self.fuse)
        self.form.checkbox_link.setChecked(self.use_link)

        # -------------------------------------------------------------------
        # Some objects need to be selected before we can execute the function.
        self.selection = None

        # This is used to test the input of the internal function.
        # It should be changed to True before we can execute the function.
        self.valid_input = False

        self.set_widget_callbacks()

        self.tr_true = QT_TRANSLATE_NOOP("Draft", "True")
        self.tr_false = QT_TRANSLATE_NOOP("Draft", "False")
def set_widget_callbacks(self):
    """Set up the callbacks (slots) for the widget signals."""
    # New style for Qt5
    # Lambdas forward the axis name so one reset_v handler
    # serves all three reset buttons.
    self.form.button_reset_X.clicked.connect(lambda: self.reset_v("X"))
    self.form.button_reset_Y.clicked.connect(lambda: self.reset_v("Y"))
    self.form.button_reset_Z.clicked.connect(lambda: self.reset_v("Z"))
    # When the checkbox changes, change the internal value
    self.form.checkbox_fuse.stateChanged.connect(self.set_fuse)
    self.form.checkbox_link.stateChanged.connect(self.set_link)
def accept(self):
    """Execute when clicking the OK button or Enter key.

    Reads the current widget state, validates it, and only then
    creates the array object and closes the task panel.
    """
    self.selection = Gui.Selection.getSelection()
    (self.v_x, self.v_y, self.v_z) = self.get_intervals()
    (self.n_x, self.n_y, self.n_z) = self.get_numbers()
    self.valid_input = self.validate_input(
        self.selection, self.v_x, self.v_y, self.v_z, self.n_x, self.n_y, self.n_z
    )
    if self.valid_input:
        self.create_object()
        # The internal function already displays messages
        # self.print_messages()
        self.finish()
def validate_input(self, selection, v_x, v_y, v_z, n_x, n_y, n_z):
    """Check that the input is valid.

    Some values may not need to be checked because
    the interface may not allow to input wrong data.

    Parameters
    ----------
    selection: list
        Current document selection; must be non-empty.
    v_x, v_y, v_z: Base.Vector
        Interval vectors; not further validated here because the
        quantity input widgets already constrain them to numbers.
    n_x, n_y, n_z: int
        Number of elements per direction; each must be at least 1.

    Returns
    -------
    bool
        True when the input can be used to create the array.
    """
    if not selection:
        _err(translate("draft", "At least one element must be selected."))
        return False
    if n_x < 1 or n_y < 1 or n_z < 1:
        _err(translate("draft", "Number of elements must be at least 1."))
        return False
    # TODO: this should handle multiple objects.
    # Each of the elements of the selection should be tested.
    obj = selection[0]
    if obj.isDerivedFrom("App::FeaturePython"):
        _err(translate("draft", "Selection is not suitable for array."))
        _err(
            translate("draft", "Object:")
            + " {0} ({1})".format(obj.Label, obj.TypeId)
        )
        return False
    # Removed a dead `if v_x and v_y and v_z: pass` no-op that validated
    # nothing. Record the latest checkbox state so create_object() uses
    # the values visible in the dialog at the moment of acceptance.
    self.fuse = self.form.checkbox_fuse.isChecked()
    self.use_link = self.form.checkbox_link.isChecked()
    return True
def create_object(self):
    """Create the new object.

    At this stage we already tested that the input is correct
    so the necessary attributes are already set.
    Then we proceed with the internal function to create the new object.
    """
    if len(self.selection) == 1:
        sel_obj = self.selection[0]
    else:
        # TODO: this should handle multiple objects.
        # For example, it could take the shapes of all objects,
        # make a compound and then use it as input for the array function.
        sel_obj = self.selection[0]
    # This creates the object immediately
    # obj = Draft.make_ortho_array(sel_obj,
    #                              self.v_x, self.v_y, self.v_z,
    #                              self.n_x, self.n_y, self.n_z,
    #                              self.use_link)
    # Instead, we build the commands to execute through the caller
    # of this class, the GuiCommand.
    # This is needed to schedule geometry manipulation
    # that would crash Coin3D if done in the event callback.
    # The call is serialized as a Python source string and committed
    # below; keyword arguments keep the generated code readable.
    _cmd = "Draft.make_ortho_array"
    _cmd += "("
    _cmd += "App.ActiveDocument." + sel_obj.Name + ", "
    _cmd += "v_x=" + DraftVecUtils.toString(self.v_x) + ", "
    _cmd += "v_y=" + DraftVecUtils.toString(self.v_y) + ", "
    _cmd += "v_z=" + DraftVecUtils.toString(self.v_z) + ", "
    _cmd += "n_x=" + str(self.n_x) + ", "
    _cmd += "n_y=" + str(self.n_y) + ", "
    _cmd += "n_z=" + str(self.n_z) + ", "
    _cmd += "use_link=" + str(self.use_link)
    _cmd += ")"
    # Make sure 'Draft' is importable in the console context.
    Gui.addModule("Draft")
    _cmd_list = [
        "_obj_ = " + _cmd,
        "_obj_.Fuse = " + str(self.fuse),
        "Draft.autogroup(_obj_)",
        "App.ActiveDocument.recompute()",
    ]
    # We commit the command list through the parent command
    self.source_command.commit(translate("draft", "Orthogonal array"), _cmd_list)
def get_numbers(self):
    """Read the per-axis element counts from the three spin boxes.

    Returns a (n_x, n_y, n_z) tuple of integers.
    """
    spinboxes = (
        self.form.spinbox_n_X,
        self.form.spinbox_n_Y,
        self.form.spinbox_n_Z,
    )
    return tuple(box.value() for box in spinboxes)
def get_intervals(self):
    """Read the three interval vectors from the quantity input widgets.

    Each widget text is parsed through the units subsystem, so values
    with unit suffixes are accepted. Returns (v_x, v_y, v_z).
    """
    def vector_from(widgets):
        # Build one App.Vector from three quantity-input widgets.
        return App.Vector(*(U.Quantity(w.text()).Value for w in widgets))

    v_x = vector_from(
        (self.form.input_X_x, self.form.input_X_y, self.form.input_X_z)
    )
    v_y = vector_from(
        (self.form.input_Y_x, self.form.input_Y_y, self.form.input_Y_z)
    )
    v_z = vector_from(
        (self.form.input_Z_x, self.form.input_Z_y, self.form.input_Z_z)
    )
    return v_x, v_y, v_z
def reset_v(self, interval):
    """Reset one interval vector to its default value.

    The default has 100 on the axis itself and 0 elsewhere.

    Parameters
    ----------
    interval: str
        Either "X", "Y", "Z", to reset the interval vector
        for that direction. Any other value is ignored.
    """
    presets = {
        "X": (
            (self.form.input_X_x, self.form.input_X_y, self.form.input_X_z),
            (100, 0, 0),
        ),
        "Y": (
            (self.form.input_Y_x, self.form.input_Y_y, self.form.input_Y_z),
            (0, 100, 0),
        ),
        "Z": (
            (self.form.input_Z_x, self.form.input_Z_y, self.form.input_Z_z),
            (0, 0, 100),
        ),
    }
    if interval not in presets:
        return
    widgets, values = presets[interval]
    for widget, value in zip(widgets, values):
        widget.setProperty("rawValue", value)
    # Re-read every vector so the cached state matches the widgets.
    self.v_x, self.v_y, self.v_z = self.get_intervals()
    vector = {"X": self.v_x, "Y": self.v_y, "Z": self.v_z}[interval]
    label = {
        "X": "Interval X reset:",
        "Y": "Interval Y reset:",
        "Z": "Interval Z reset:",
    }[interval]
    _msg(
        translate("draft", label)
        + " ({0}, {1}, {2})".format(vector.x, vector.y, vector.z)
    )
def print_fuse_state(self, fuse):
    """Report the current fuse flag using the translated True/False words."""
    state = self.tr_true if fuse else self.tr_false
    _msg(translate("draft", "Fuse:") + " {}".format(state))
def set_fuse(self):
    """Execute as a callback when the fuse checkbox changes.

    Updates the internal flag, reports the new state, and persists
    it as a Draft parameter for future sessions.
    """
    self.fuse = self.form.checkbox_fuse.isChecked()
    self.print_fuse_state(self.fuse)
    utils.set_param("Draft_array_fuse", self.fuse)
def print_link_state(self, use_link):
    """Report the current link flag using the translated True/False words."""
    state = self.tr_true if use_link else self.tr_false
    _msg(translate("draft", "Create Link array:") + " {}".format(state))
def set_link(self):
    """Execute as a callback when the link checkbox changes.

    Updates the internal flag, reports the new state, and persists
    it as a Draft parameter for future sessions.
    """
    self.use_link = self.form.checkbox_link.isChecked()
    self.print_link_state(self.use_link)
    utils.set_param("Draft_array_Link", self.use_link)
def print_messages(self):
    """Print messages about the operation.

    Summarizes the selected object, the element counts, the interval
    vectors, and the fuse/link flags to the report view.
    """
    if len(self.selection) == 1:
        sel_obj = self.selection[0]
    else:
        # TODO: this should handle multiple objects.
        # For example, it could take the shapes of all objects,
        # make a compound and then use it as input for the array function.
        sel_obj = self.selection[0]
    _msg(translate("draft", "Object:") + " {}".format(sel_obj.Label))
    _msg(translate("draft", "Number of X elements:") + " {}".format(self.n_x))
    _msg(
        translate("draft", "Interval X:")
        + " ({0}, {1}, {2})".format(self.v_x.x, self.v_x.y, self.v_x.z)
    )
    _msg(translate("draft", "Number of Y elements:") + " {}".format(self.n_y))
    _msg(
        translate("draft", "Interval Y:")
        + " ({0}, {1}, {2})".format(self.v_y.x, self.v_y.y, self.v_y.z)
    )
    _msg(translate("draft", "Number of Z elements:") + " {}".format(self.n_z))
    _msg(
        translate("draft", "Interval Z:")
        + " ({0}, {1}, {2})".format(self.v_z.x, self.v_z.y, self.v_z.z)
    )
    self.print_fuse_state(self.fuse)
    self.print_link_state(self.use_link)
def reject(self):
    """Execute when clicking the Cancel button or pressing Escape."""
    _msg(
        translate("draft", "Aborted:")
        + " {}".format(translate("draft", "Orthogonal array"))
    )
    self.finish()
def finish(self):
    """Finish the command, after accept or reject.

    It finally calls the parent class to execute
    the delayed functions, and perform cleanup.
    """
    # App.ActiveDocument.commitTransaction()
    # Leave task-panel edit mode before handing control back.
    Gui.ActiveDocument.resetEdit()
    # Runs the parent command to complete the call
    self.source_command.completed()
## @}
|
loaders | strict_https_loader | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.loaders import http_loader
def _normalize_url(url):
    """Return *url* percent-encoded and guaranteed to use the https scheme."""
    quoted = http_loader.quote_url(url)
    # A leading "http:" is rewritten in place; anything without a scheme
    # gets "https://" prepended.
    if quoted.startswith("http:"):
        quoted = "https:" + quoted[len("http:"):]
    return quoted if quoted.startswith("https://") else f"https://{quoted}"
def validate(context, url):
    """Reject explicit plain-http URLs; defer the rest to the http loader.

    Scheme-less URLs are allowed because ``_normalize_url`` rewrites
    them to https before the generic validation runs.
    """
    is_insecure = url.startswith("http://")
    return (not is_insecure) and http_loader.validate(
        context, url, normalize_url_func=_normalize_url
    )
def return_contents(response, url, context):
    """Delegate response handling to the generic HTTP loader."""
    return http_loader.return_contents(response, url, context)
async def load(context, url):
    """Load *url* via the generic HTTP loader, forcing the https scheme."""
    return await http_loader.load(
        context,
        url,
        normalize_url_func=_normalize_url,
        return_contents_fn=return_contents,
        encode_fn=encode,
    )
def encode(string):
    """Delegate string encoding to the generic HTTP loader."""
    return http_loader.encode(string)
|
downloaders | TwoSharedCom | # -*- coding: utf-8 -*-
from ..base.simple_downloader import SimpleDownloader
class TwoSharedCom(SimpleDownloader):
    """2shared.com downloader (declarative SimpleDownloader plugin).

    All scraping is driven by the *_PATTERN class attributes; this class
    only tweaks per-hoster connection settings in setup().
    """

    __name__ = "TwoSharedCom"
    __type__ = "downloader"
    __version__ = "0.19"
    __status__ = "testing"
    # Matches free and account links for every supported media kind.
    __pattern__ = r"http://(?:www\.)?2shared\.com/(account/)?(download|get|file|document|photo|video|audio)/.+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """2Shared.com downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]

    # Regexes consumed by SimpleDownloader's generic page parsing.
    NAME_PATTERN = r"<h1>(?P<N>.*)</h1>"
    SIZE_PATTERN = (
        r'<span class="dtitle">File size:</span>\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
    )
    OFFLINE_PATTERN = (
        r"The file link that you requested is not valid\.|This file was deleted\."
    )
    # NOTE(review): the dot in "window.location" is unescaped, so it matches
    # any character; presumably a literal dot was intended — confirm before
    # tightening, as the loose form still matches the real page.
    LINK_FREE_PATTERN = r"window.location =\'(.+?)\';"

    def setup(self):
        # Allow resumed and parallel downloads for this hoster.
        self.resume_download = True
        self.multi_dl = True
|
extractor | viu | # coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_kwargs, compat_str
from ..utils import ExtractorError, int_or_none
from .common import InfoExtractor
class ViuBaseIE(InfoExtractor):
    """Shared plumbing for Viu extractors: guest auth and API calls."""

    def _real_initialize(self):
        # Authenticate as a guest once per extractor run; the returned
        # X-VIU-AUTH header token is required by every later API call.
        viu_auth_res = self._request_webpage(
            "https://www.viu.com/api/apps/v2/authenticate",
            None,
            "Requesting Viu auth",
            query={
                "acct": "test",
                "appid": "viu_desktop",
                "fmt": "json",
                "iid": "guest",
                "languageid": "default",
                "platform": "desktop",
                "userid": "guest",
                "useridtype": "guest",
                "ver": "1.0",
            },
            headers=self.geo_verification_headers(),
        )
        self._auth_token = viu_auth_res.info()["X-VIU-AUTH"]

    def _call_api(self, path, *args, **kwargs):
        """Call a www.viu.com API endpoint and return its 'response' dict.

        Injects the auth token on top of the geo-verification headers;
        caller-supplied headers override both. Raises ExtractorError
        (expected=True) when the API reports a non-success status.
        """
        headers = self.geo_verification_headers()
        headers.update({"X-VIU-AUTH": self._auth_token})
        headers.update(kwargs.get("headers", {}))
        kwargs["headers"] = headers
        response = self._download_json(
            "https://www.viu.com/api/" + path, *args, **compat_kwargs(kwargs)
        )["response"]
        if response.get("status") != "success":
            raise ExtractorError(
                "%s said: %s" % (self.IE_NAME, response["message"]), expected=True
            )
        return response
class ViuIE(ViuBaseIE):
    """Extractor for single www.viu.com media pages (and viu:<id> URLs)."""

    _VALID_URL = r"(?:viu:|https?://[^/]+\.viu\.com/[a-z]{2}/media/)(?P<id>\d+)"
    _TESTS = [
        {
            "url": "https://www.viu.com/en/media/1116705532?containerId=playlist-22168059",
            "info_dict": {
                "id": "1116705532",
                "ext": "mp4",
                "title": "Citizen Khan - Ep 1",
                "description": "md5:d7ea1604f49e5ba79c212c551ce2110e",
            },
            "params": {
                "skip_download": "m3u8 download",
            },
            "skip": "Geo-restricted to India",
        },
        {
            "url": "https://www.viu.com/en/media/1130599965",
            "info_dict": {
                "id": "1130599965",
                "ext": "mp4",
                "title": "Jealousy Incarnate - Episode 1",
                "description": "md5:d3d82375cab969415d2720b6894361e9",
            },
            "params": {
                "skip_download": "m3u8 download",
            },
            "skip": "Geo-restricted to Indonesia",
        },
        {
            "url": "https://india.viu.com/en/media/1126286865",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_data = self._call_api(
            "clip/load",
            video_id,
            "Downloading video data",
            query={"appid": "viu_desktop", "fmt": "json", "id": video_id},
        )["item"][0]
        title = video_data["title"]
        m3u8_url = None
        url_path = video_data.get("urlpathd") or video_data.get("urlpath")
        tdirforwhole = video_data.get("tdirforwhole")
        # #EXT-X-BYTERANGE is not supported by native hls downloader
        # and ffmpeg (#10955)
        # hls_file = video_data.get('hlsfile')
        hls_file = video_data.get("jwhlsfile")
        if url_path and tdirforwhole and hls_file:
            m3u8_url = "%s/%s/%s" % (url_path, tdirforwhole, hls_file)
        else:
            # m3u8_url = re.sub(
            #     r'(/hlsc_)[a-z]+(\d+\.m3u8)',
            #     r'\1whe\2', video_data['href'])
            m3u8_url = video_data["href"]
        formats = self._extract_m3u8_formats(m3u8_url, video_id, "mp4")
        self._sort_formats(formats)
        # Subtitle URLs are stored as flat keys like "subtitle_en_vtt".
        subtitles = {}
        for key, value in video_data.items():
            mobj = re.match(r"^subtitle_(?P<lang>[^_]+)_(?P<ext>(vtt|srt))", key)
            if not mobj:
                continue
            subtitles.setdefault(mobj.group("lang"), []).append(
                {"url": value, "ext": mobj.group("ext")}
            )
        return {
            "id": video_id,
            "title": title,
            "description": video_data.get("description"),
            "series": video_data.get("moviealbumshowname"),
            "episode": title,
            "episode_number": int_or_none(video_data.get("episodeno")),
            "duration": int_or_none(video_data.get("duration")),
            "formats": formats,
            "subtitles": subtitles,
        }
class ViuPlaylistIE(ViuBaseIE):
    """Extractor for www.viu.com playlist listing pages."""

    IE_NAME = "viu:playlist"
    _VALID_URL = r"https?://www\.viu\.com/[^/]+/listing/playlist-(?P<id>\d+)"
    _TEST = {
        "url": "https://www.viu.com/en/listing/playlist-22461380",
        "info_dict": {
            "id": "22461380",
            "title": "The Good Wife",
        },
        "playlist_count": 16,
        "skip": "Geo-restricted to Indonesia",
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        playlist_data = self._call_api(
            "container/load",
            playlist_id,
            "Downloading playlist info",
            query={
                "appid": "viu_desktop",
                "fmt": "json",
                "id": "playlist-" + playlist_id,
            },
        )["container"]
        # Each playlist item is delegated to ViuIE via a viu:<id> URL.
        entries = []
        for item in playlist_data.get("item", []):
            item_id = item.get("id")
            if not item_id:
                continue
            item_id = compat_str(item_id)
            entries.append(self.url_result("viu:" + item_id, "Viu", item_id))
        return self.playlist_result(entries, playlist_id, playlist_data.get("title"))
class ViuOTTIE(InfoExtractor):
    """Extractor for the regional viu.com OTT service (viu.com/ott/...)."""

    IE_NAME = "viu:ott"
    _VALID_URL = r"https?://(?:www\.)?viu\.com/ott/(?P<country_code>[a-z]{2})/[a-z]{2}-[a-z]{2}/vod/(?P<id>\d+)"
    _TESTS = [
        {
            "url": "http://www.viu.com/ott/sg/en-us/vod/3421/The%20Prime%20Minister%20and%20I",
            "info_dict": {
                "id": "3421",
                "ext": "mp4",
                "title": "A New Beginning",
                "description": "md5:1e7486a619b6399b25ba6a41c0fe5b2c",
            },
            "params": {
                "skip_download": "m3u8 download",
            },
            "skip": "Geo-restricted to Singapore",
        },
        {
            "url": "http://www.viu.com/ott/hk/zh-hk/vod/7123/%E5%A4%A7%E4%BA%BA%E5%A5%B3%E5%AD%90",
            "info_dict": {
                "id": "7123",
                "ext": "mp4",
                "title": "這就是我的生活之道",
                "description": "md5:4eb0d8b08cf04fcdc6bbbeb16043434f",
            },
            "params": {
                "skip_download": "m3u8 download",
            },
            "skip": "Geo-restricted to Hong Kong",
        },
    ]

    # Numeric area ids the OTT API expects for some country codes.
    _AREA_ID = {
        "HK": 1,
        "SG": 2,
        "TH": 4,
        "PH": 5,
    }

    def _real_extract(self, url):
        country_code, video_id = re.match(self._VALID_URL, url).groups()
        query = {
            "r": "vod/ajax-detail",
            "platform_flag_label": "web",
            "product_id": video_id,
        }
        area_id = self._AREA_ID.get(country_code.upper())
        if area_id:
            query["area_id"] = area_id
        product_data = self._download_json(
            "http://www.viu.com/ott/%s/index.php" % country_code,
            video_id,
            "Downloading video info",
            query=query,
        )["data"]
        video_data = product_data.get("current_product")
        if not video_data:
            raise ExtractorError(
                "This video is not available in your region.", expected=True
            )
        # Stream URLs come from a separate CDN endpoint keyed by
        # ccs_product_id; the Referer/Origin headers mirror the page URL.
        stream_data = self._download_json(
            "https://d1k2us671qcoau.cloudfront.net/distribute_web_%s.php"
            % country_code,
            video_id,
            "Downloading stream info",
            query={
                "ccs_product_id": video_data["ccs_product_id"],
            },
            headers={
                "Referer": url,
                "Origin": re.search(r"https?://[^/]+", url).group(0),
            },
        )["data"]["stream"]
        stream_sizes = stream_data.get("size", {})
        formats = []
        for vid_format, stream_url in stream_data.get("url", {}).items():
            # Format ids look like "s1080p"; extract the height if present.
            height = int_or_none(
                self._search_regex(r"s(\d+)p", vid_format, "height", default=None)
            )
            formats.append(
                {
                    "format_id": vid_format,
                    "url": stream_url,
                    "height": height,
                    "ext": "mp4",
                    "filesize": int_or_none(stream_sizes.get(vid_format)),
                }
            )
        self._sort_formats(formats)
        subtitles = {}
        for sub in video_data.get("subtitle", []):
            sub_url = sub.get("url")
            if not sub_url:
                continue
            subtitles.setdefault(sub.get("name"), []).append(
                {
                    "url": sub_url,
                    "ext": "srt",
                }
            )
        # The OTT API stores the episode title in "synopsis".
        title = video_data["synopsis"].strip()
        return {
            "id": video_id,
            "title": title,
            "description": video_data.get("description"),
            "series": product_data.get("series", {}).get("name"),
            "episode": title,
            "episode_number": int_or_none(video_data.get("number")),
            "duration": int_or_none(stream_data.get("duration")),
            "thumbnail": video_data.get("cover_image_url"),
            "formats": formats,
            "subtitles": subtitles,
        }
|
builtinContextMenus | moduleFill | import gui.fitCommands as cmd
import gui.mainFrame
import wx
from gui.contextMenu import ContextMenuSingle
from service.fit import Fit
_t = wx.GetTranslation
class FillWithModule(ContextMenuSingle):
    """Context-menu entry that fills free slots with copies of a module."""

    visibilitySetting = "moduleFill"

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext, mainItem):
        # Only offer the entry for a real (non-empty) fitted module.
        if mainItem is None or getattr(mainItem, "isEmpty", False):
            return False
        return srcContext == "fittingModule"

    def getText(self, callingWindow, itmContext, mainItem):
        return _t("Fill With {0}").format(
            itmContext if itmContext is not None else _t("Module")
        )

    def activate(self, callingWindow, fullContext, mainItem, i):
        srcContext = fullContext[0]
        fitID = self.mainFrame.getActiveFit()
        if srcContext == "fittingModule":
            fit = Fit.getInstance().getFit(fitID)
            if mainItem in fit.modules:
                position = fit.modules.index(mainItem)
                # Submit through the undo-aware command processor.
                self.mainFrame.command.Submit(
                    cmd.GuiFillWithClonedLocalModulesCommand(
                        fitID=fitID, position=position
                    )
                )


# Register this entry with the context-menu framework at import time.
FillWithModule.register()
|
formats | midi | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2018, 2020-2022 Philipp Wolfer
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from mutagen.smf import SMF
from picard import log
from picard.file import File
from picard.metadata import Metadata
from picard.util import encode_filename
class MIDIFile(File):
    """Standard MIDI file support.

    MIDI has no tag storage, so this format is effectively read-only:
    saving is a no-op and no tag is reported as supported.
    """

    EXTENSIONS = [".mid", ".kar"]
    NAME = "Standard MIDI File"
    _File = SMF

    def _load(self, filename):
        """Read technical info from *filename* into a fresh Metadata."""
        log.debug("Loading file %r", filename)
        metadata = Metadata()
        file = self._File(encode_filename(filename))
        self._info(metadata, file)
        return metadata

    def _save(self, filename, metadata):
        # MIDI files cannot store tags; only log the attempt.
        log.debug("Saving file %r", filename)

    def _info(self, metadata, file):
        super()._info(metadata, file)
        # mutagen.File.filename can be either a bytes or str object
        filename = file.filename
        if isinstance(filename, bytes):
            filename = filename.decode()
        # Distinguish karaoke files by their extension.
        if filename.lower().endswith(".kar"):
            metadata["~format"] = "Standard MIDI File (Karaoke File)"

    @classmethod
    def supports_tag(cls, name):
        # No tag whatsoever can be written to a MIDI file.
        return False

    def can_analyze(self):
        # Acoustic fingerprinting is not applicable to MIDI data.
        return False
|
qltk | tracknumbers | # Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
from quodlibet import _, qltk, util
from quodlibet.formats import AudioFileError
from quodlibet.qltk import Icons
from quodlibet.qltk._editutils import OverwriteWarning, WriteFailedError
from quodlibet.qltk.models import ObjectStore
from quodlibet.qltk.views import HintedTreeView, TreeViewColumn
from quodlibet.qltk.wlw import WritingWindow
from quodlibet.qltk.x import Align, Button
from quodlibet.util import connect_obj
from senf import fsn2text
class Entry:
    """Pairs a song with its (possibly unsaved) track-number string."""

    def __init__(self, song):
        self.song = song
        # Editable value shown in the tree view; written back on save.
        self.tracknumber = song("tracknumber")

    @property
    def name(self):
        """Display name: the song's basename as text."""
        return fsn2text(self.song("~basename"))
class TrackNumbers(Gtk.VBox):
    """Tag-editor page that renumbers tracks of the selected songs.

    The user picks a start number and optional total, previews the
    resulting "n/total" values in a reorderable list, then saves them
    back to the files.
    """

    def __init__(self, prop, library):
        super().__init__(spacing=6)
        self.title = _("Track Numbers")
        self.set_border_width(12)

        # --- "Start from" / "Total tracks" controls -------------------
        label_start = Gtk.Label(label=_("Start fro_m:"), halign=Gtk.Align.END)
        label_start.set_use_underline(True)
        spin_start = Gtk.SpinButton()
        spin_start.set_range(0, 999)
        spin_start.set_increments(1, 10)
        spin_start.set_value(1)
        label_start.set_mnemonic_widget(spin_start)
        label_total = Gtk.Label(label=_("_Total tracks:"), halign=Gtk.Align.END)
        label_total.set_use_underline(True)
        spin_total = Gtk.SpinButton()
        spin_total.set_range(0, 999)
        spin_total.set_increments(1, 10)
        label_total.set_mnemonic_widget(spin_total)
        preview = qltk.Button(_("_Preview"), Icons.VIEW_REFRESH)
        grid = Gtk.Grid(row_spacing=4, column_spacing=4)
        grid.add(label_start)
        grid.attach_next_to(spin_start, label_start, Gtk.PositionType.RIGHT, 1, 1)
        grid.attach_next_to(label_total, label_start, Gtk.PositionType.BOTTOM, 1, 1)
        grid.attach_next_to(spin_total, label_total, Gtk.PositionType.RIGHT, 1, 1)
        grid.attach_next_to(
            Align(preview, halign=Gtk.Align.END),
            spin_start,
            Gtk.PositionType.RIGHT,
            1,
            1,
        )
        preview.props.hexpand = True

        # --- song list (one Entry per row) ----------------------------
        model = ObjectStore()
        view = HintedTreeView(model=model)
        self.pack_start(grid, False, True, 0)
        render = Gtk.CellRendererText()
        column = TreeViewColumn(title=_("File"))
        column.pack_start(render, True)
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)

        def cell_data_file(column, cell, model, iter_, data):
            # Show the song's basename in the "File" column.
            entry = model.get_value(iter_)
            cell.set_property("text", entry.name)

        column.set_cell_data_func(render, cell_data_file)
        view.append_column(column)
        render = Gtk.CellRendererText()
        render.set_property("editable", True)
        column = TreeViewColumn(title=_("Track"))
        column.pack_start(render, True)
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)

        def cell_data_track(column, cell, model, iter_, data):
            # Show the pending (unsaved) track number.
            entry = model.get_value(iter_)
            cell.set_property("text", entry.tracknumber)

        column.set_cell_data_func(render, cell_data_track)
        view.append_column(column)
        # Rows can be dragged to change numbering order.
        view.set_reorderable(True)
        w = Gtk.ScrolledWindow()
        w.set_shadow_type(Gtk.ShadowType.IN)
        w.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        w.add(view)
        self.pack_start(w, True, True, 0)

        # --- Save / Revert buttons ------------------------------------
        bbox = Gtk.HButtonBox()
        bbox.set_spacing(6)
        bbox.set_layout(Gtk.ButtonBoxStyle.END)
        save = Button(_("_Save"), Icons.DOCUMENT_SAVE)
        self.save = save
        connect_obj(save, "clicked", self.__save_files, prop, model, library)
        revert = Button(_("_Revert"), Icons.DOCUMENT_REVERT)
        self.revert = revert
        bbox.pack_start(revert, True, True, 0)
        bbox.pack_start(save, True, True, 0)
        self.pack_start(bbox, False, True, 0)

        # --- signal wiring --------------------------------------------
        # preview_args is shared by every handler that recomputes the
        # preview; __update takes the same args minus spin_start.
        preview_args = [spin_start, spin_total, model, save, revert]
        preview.connect("clicked", self.__preview_tracks, *preview_args)
        connect_obj(revert, "clicked", self.__update, None, *preview_args[1:])
        spin_total.connect("value-changed", self.__preview_tracks, *preview_args)
        spin_start.connect("value-changed", self.__preview_tracks, *preview_args)
        connect_obj(
            view, "drag-end", self.__class__.__preview_tracks, self, *preview_args
        )
        render.connect("edited", self.__row_edited, model, preview, save)
        connect_obj(
            prop,
            "changed",
            self.__class__.__update,
            self,
            spin_total,
            model,
            save,
            revert,
        )
        for child in self.get_children():
            child.show_all()

    def __row_edited(self, render, path, new, model, preview, save):
        """Handle an in-place edit of a track-number cell."""
        path = Gtk.TreePath.new_from_string(path)
        row = model[path]
        entry = row[0]
        if entry.tracknumber != new:
            entry.tracknumber = new
            preview.set_sensitive(True)
            save.set_sensitive(True)
            model.path_changed(path)

    def __save_files(self, parent, model, library):
        """Write the pending track numbers back to the files.

        Prompts on externally-modified files and stops on write errors;
        Save/Revert stay enabled unless every file was processed.
        """
        win = WritingWindow(parent, len(model))
        was_changed = set()
        all_done = False
        for entry in model.values():
            song, track = entry.song, entry.tracknumber
            # Skip songs whose tag already matches.
            if song.get("tracknumber") == track:
                win.step()
                continue
            # File changed on disk since load: ask before overwriting.
            if not song.valid():
                win.hide()
                dialog = OverwriteWarning(self, song)
                resp = dialog.run()
                win.show()
                if resp != OverwriteWarning.RESPONSE_SAVE:
                    break
            song["tracknumber"] = track
            try:
                song.write()
            except AudioFileError:
                util.print_exc()
                WriteFailedError(self, song).run()
                library.reload(song, changed=was_changed)
                break
            was_changed.add(song)
            if win.step():
                break
        else:
            # Loop completed without a break: everything was written.
            all_done = True
        library.changed(was_changed)
        win.destroy()
        self.save.set_sensitive(not all_done)
        self.revert.set_sensitive(not all_done)

    def __preview_tracks(self, ctx, start, total, model, save, revert):
        """Recompute the previewed "n/total" (or "n") strings for all rows."""
        start = start.get_value_as_int()
        total = total.get_value_as_int()
        for row in model:
            if total:
                s = "%d/%d" % (row.path.get_indices()[0] + start, total)
            else:
                s = str(row.path.get_indices()[0] + start)
            entry = row[0]
            entry.tracknumber = s
            model.row_changed(row.path, row.iter)
        save.set_sensitive(True)
        revert.set_sensitive(True)

    def __update(self, songs, total, model, save, revert):
        """Repopulate the list; with songs=None, reload from the model."""
        if songs is None:
            songs = [e.song for e in model.values()]
        else:
            songs = list(songs)

        def sort_key(song):
            # Stable order: track number, then basename, then the song.
            return song("~#track", 0), song("~basename"), song

        songs.sort(key=sort_key)
        model.clear()
        total.set_value(len(songs))
        # Disable the whole page if any song cannot change tracknumber.
        for song in songs:
            if not song.can_change("tracknumber"):
                self.set_sensitive(False)
                break
        else:
            self.set_sensitive(True)
        for song in songs:
            model.append([Entry(song)])
        save.set_sensitive(False)
        revert.set_sensitive(False)
|
deposit | forms | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from itertools import groupby
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from deposit.declaration import REGISTERED_DECLARATION_FUNCTIONS
from deposit.models import Repository, UserPreferences
from deposit.registry import protocol_registry
from django import forms
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from papers.models import UPLOAD_TYPE_CHOICES
from upload.models import UploadedPDF
from website.widgets import OIDatePicker
class ModelGroupedMultipleChoiceField(forms.models.ModelMultipleChoiceField):
    """
    Enabled groups for ModelMultipleChoiceField
    """

    def __init__(self, group_by_field, group_label=None, *args, **kwargs):
        """
        :param group_by_field: name of a field on the model to use for grouping
        :param group_label: function to return a label for each choice group
        """
        super().__init__(*args, **kwargs)
        self.group_by_field = group_by_field
        # Default: use the group value itself as its label.
        if group_label is None:
            self.group_label = lambda group: group
        else:
            self.group_label = group_label

    def _get_choices(self):
        """
        Exactly as per ModelChoiceField except returns new iterator class
        """
        if hasattr(self, "_choices"):
            return self._choices
        return ModelGroupedChoiceIterator(self)

    # Keep the parent's setter so assigning choices still works.
    choices = property(_get_choices, forms.models.ModelMultipleChoiceField._set_choices)
class ModelGroupedChoiceIterator(forms.models.ModelChoiceIterator):
    """
    Iterator for ModelGroupedChoiceField
    """

    def __iter__(self):
        # NOTE(review): the parent class yields ("", empty_label) pairs;
        # yielding the bare label here looks intentional but differs —
        # confirm against the template that renders these choices.
        if self.field.empty_label is not None:
            yield self.field.empty_label
        # groupby requires the queryset to be ordered by the grouping
        # field for groups to come out contiguous.
        for group, choices in groupby(
            self.queryset.all(), key=lambda row: getattr(row, self.field.group_by_field)
        ):
            if group is not None:
                yield (
                    self.field.group_label(group),
                    [self.choice(ch) for ch in choices],
                )
class PaperDepositForm(forms.Form):
    """
    Main form for the deposit.
    It references both the file (as an ID) and the upload type.
    """

    file_id = forms.IntegerField()
    radioUploadType = forms.ChoiceField(
        choices=UPLOAD_TYPE_CHOICES,
        label=_("Upload type"),
        widget=forms.RadioSelect,
    )
    radioRepository = forms.ModelChoiceField(
        label=_("Repository"), queryset=Repository.objects.filter(enabled=True)
    )

    def clean_file_id(self):
        """Resolve the submitted id to an UploadedPDF instance.

        Returns the model instance (not the raw id) so downstream code
        can use the upload directly. Raises ValidationError when no
        UploadedPDF with that primary key exists.
        """
        file_id = self.cleaned_data["file_id"]
        try:
            uploadedPDF = UploadedPDF.objects.get(pk=file_id)
        # Bug fix: Django model lookups raise <Model>.DoesNotExist, not
        # <Model>.NotFound. The old clause could never match, so a bad id
        # escaped as an AttributeError instead of a form validation error.
        except UploadedPDF.DoesNotExist:
            raise forms.ValidationError(
                _("Invalid full text identifier."), code="invalid_file_id"
            )
        return uploadedPDF
def wrap_with_prefetch_status(baseWidget, callback, fieldname):
    """
    Add a status text above the widget to display the prefetching status
    of the data in the field.

    :param baseWidget: the :class:`Widget` to be prefetched: the prefetching
        status will be displayed above that widget and its value will
        be set by the JS code
    :param callback: function returning the AJAX URL where to get
        the prefetching status from. This is a callback and not a plain
        string for technical reasons (the URL cannot be computed before
        Django is fully loaded).
    :param fieldname: The name of the field to be prefetched, passed
        to the AJAX callback.
    """
    # Monkey-patch render() on the widget class: when the field has no
    # value yet, prepend a placeholder span the frontend JS fills in.
    orig_render = baseWidget.render

    def new_render(self, name, value, attrs=None, renderer=None):
        base_html = orig_render(self, name, value, attrs, renderer)
        # A pre-filled field needs no prefetch indicator.
        if value:
            return base_html
        return (
            '<span class="prefetchingFieldStatus" data-callback="%s" data-fieldid="%s" data-fieldname="%s" data-objfieldname="%s"></span>'
            % (callback, attrs["id"], name, fieldname)
        ) + base_html

    baseWidget.render = new_render
    return baseWidget
class BaseMetadataForm(forms.Form):
    """
    Base form for repository-specific options and metadata. Protocols can
    subclass this form and add or remove fields.
    """

    field_order = ["abstract", "ddc", "license"]

    # Dummy field to store the paper id (required for dynamic fetching of the abstract)
    paper_id = forms.IntegerField(required=False, widget=forms.HiddenInput)
    # Abstract field; its widget shows a prefetch indicator driven by the
    # paper_id field until the consolidated abstract is available.
    abstract = forms.CharField(
        label=_("Abstract"),
        widget=wrap_with_prefetch_status(
            forms.Textarea, reverse_lazy("ajax-waitForConsolidatedField"), "paper_id"
        )(attrs={"class": "form-control"}),
    )
    # DDC field to choose DDC classes; queryset is filled in __init__.
    ddc = ModelGroupedMultipleChoiceField(
        label=_("Dewey Decimal Class"),
        queryset=None,
        group_by_field="parent",
        widget=forms.SelectMultiple,
    )
    # Embargo date; kept, made required, or deleted in __init__.
    embargo = forms.DateField(
        label=_("Do not publish before"),
        widget=OIDatePicker(),
    )
    # License field to choose license; queryset is filled in __init__.
    license = forms.ModelChoiceField(
        label=_("License"),
        queryset=None,
        empty_label=None,
        initial=None,
        widget=forms.RadioSelect,
    )

    def __init__(self, **kwargs):
        """
        Subclasses can reimplement this and do things based on the models
        passed or generally add or remove fields.
        The paper_id field is not filled here, because that should only
        happen when filling the form with initial data.

        Recognized keyword arguments (all popped before the parent
        constructor runs):
        abstract_required (bool), ddcs (queryset or None),
        embargo ("required", "optional" or None), licenses (queryset or None).
        """
        abstract_required = kwargs.pop("abstract_required", True)
        ddcs = kwargs.pop("ddcs", None)
        embargo = kwargs.pop("embargo", None)
        licenses = kwargs.pop("licenses", None)
        super(BaseMetadataForm, self).__init__(**kwargs)
        # Mark abstract as required or not
        self.fields["abstract"].required = abstract_required
        # If no DDC for repository choosen, then delete field from form
        if ddcs is None:
            del self.fields["ddc"]
        else:
            self.fields["ddc"].queryset = ddcs
        # Handle embargo field
        if embargo == "required":
            self.fields["embargo"].required = True
        elif embargo == "optional":
            self.fields["embargo"].required = False
        else:
            del self.fields["embargo"]
        # If no licenses for repository choosen, then delete field from form
        if licenses is None:
            del self.fields["license"]
        else:
            self.fields["license"].queryset = licenses
### Form for global preferences ###
class PreferredRepositoryField(forms.ModelChoiceField):
    """Choice field offering all enabled repositories, plus an explicit
    "no preference" empty choice.

    NOTE(review): the class-level ``queryset`` attribute appears redundant --
    ``__init__`` always overrides it via kwargs; confirm before removing.
    """

    queryset = Repository.objects.filter(enabled=True)

    def __init__(self, *args, **kwargs):
        # Force the empty label and restrict choices to enabled repositories,
        # regardless of what the caller passed.
        kwargs["empty_label"] = _("No preferred repository")
        kwargs["queryset"] = Repository.objects.filter(enabled=True)
        super().__init__(*args, **kwargs)
class UserPreferencesForm(forms.ModelForm):
    """Form for a user's global preferences: contact e-mail and the
    preferred repository (rendered as radio buttons)."""

    class Meta:
        model = UserPreferences
        fields = ["email", "preferred_repository"]
        widgets = {
            "preferred_repository": forms.RadioSelect(attrs={"class": "radio-margin"}),
        }
        field_classes = {
            "preferred_repository": PreferredRepositoryField,
        }
        labels = {
            "email": _("E-mail"),
            "preferred_repository": _("Preferred repository"),
        }

    def __init__(self, *args, **kwargs):
        super(UserPreferencesForm, self).__init__(*args, **kwargs)
        # crispy-forms helper adding a single "Save" submit button.
        self.helper = FormHelper()
        # self.helper.form_class = 'form-horizontal'
        # self.helper.label_class = 'col-lg-2'
        # self.helper.field_class = 'col-lg-8'
        self.helper.add_input(
            Submit("submit", _("Save")),
        )
class LetterOfDeclarationAdminForm(forms.ModelForm):
    """
    Admin form replacing the free-text widget for the letter-of-declaration
    generator with a dropdown of registered function keys (their human
    readable names).
    """

    def __init__(self, *args, **kwargs):
        """Swap the ``function_key`` widget for a sorted Select.

        The choices are the user friendly names of the registered generating
        functions, preceded by an explicit "none" entry.
        """
        super().__init__(*args, **kwargs)
        # (value, label) pairs; value == label for these keys, so the default
        # tuple sort orders them by label.
        function_choices = sorted((key, key) for key in REGISTERED_DECLARATION_FUNCTIONS)
        self.fields["function_key"].widget = forms.Select(
            choices=[("", "--- None ---")] + function_choices
        )
class RepositoryAdminForm(forms.ModelForm):
    """
    We change here the widgets for choosing the Protocol and the Declaration.
    Instead of free text we provide a dropdown.
    """

    def __init__(self, *args, **kwargs):
        """
        We change the widget of the fields.
        We get the original form, load all registered protocols, and create the list of protocol choices.
        If a repo exists, we check that its protocol will be in the list. Otherwise the protocol of a repo with a currently not registered protocol would be overwritten.
        """
        super().__init__(*args, **kwargs)
        # Protocol
        # Get the list with names of registered protocols
        protocol_registry.load()
        choices = [(key, str(value)) for key, value in protocol_registry.dct.items()]
        # If the repo uses a protocol not in the list, we add this value, otherwise it is overridden on saving
        if self.instance.protocol:
            if self.instance.protocol not in protocol_registry.dct.keys():
                choices += [(self.instance.protocol, self.instance.protocol)]
        # Sort case-insensitively by label and populate the form
        choices = sorted(
            choices,
            key=lambda protocol: protocol[1].lower(),
        )
        self.fields["protocol"].widget = forms.Select(choices=choices)
|
misc | sql | # SPDX-FileCopyrightText: Ryan Roden-Corrent (rcorre) <ryan@rcorre.net>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Provides access to sqlite databases."""
import collections
import contextlib
import dataclasses
import enum
import types
from typing import (
Any,
Dict,
Iterator,
List,
Mapping,
MutableSequence,
Optional,
Type,
Union,
)
from qutebrowser.qt import machinery, sip
from qutebrowser.qt.core import QObject, pyqtSignal
from qutebrowser.qt.sql import QSqlDatabase, QSqlError, QSqlQuery
from qutebrowser.utils import debug, log
@dataclasses.dataclass
class UserVersion:

    """The version of data stored in the history database.

    Originally user_version only signalled "regenerate the completion
    database". Since backwards-incompatible changes also happen, the 32-bit
    sqlite user_version is now (ab)used to hold a major and a minor part:
    a minor bump means the data can still be handled (e.g. new URLs to clean
    up), while a major bump means the database layout changed in a way older
    qutebrowser versions cannot deal with.
    """

    major: int
    minor: int

    @classmethod
    def from_int(cls, num: int) -> "UserVersion":
        """Parse a number from sqlite into a major/minor user version."""
        # user_version is a signed 32-bit integer, but should never be negative.
        assert 0 <= num <= 0x7FFF_FFFF, num
        return cls(major=(num & 0x7FFF_0000) >> 16, minor=num & 0x0000_FFFF)

    def to_int(self) -> int:
        """Get a sqlite integer from a major/minor user version."""
        assert 0 <= self.major <= 0x7FFF  # signed integer
        assert 0 <= self.minor <= 0xFFFF
        return (self.major << 16) | self.minor

    def __str__(self) -> str:
        return "{}.{}".format(self.major, self.minor)
class SqliteErrorCode(enum.Enum):

    """Primary error codes as used by sqlite.

    Extended result codes carry the primary code in their low byte, which is
    how raise_sqlite_error maps them back onto these members.

    See https://sqlite.org/rescode.html
    """

    # pylint: disable=invalid-name
    OK = 0  # Successful result
    ERROR = 1  # Generic error
    INTERNAL = 2  # Internal logic error in SQLite
    PERM = 3  # Access permission denied
    ABORT = 4  # Callback routine requested an abort
    BUSY = 5  # The database file is locked
    LOCKED = 6  # A table in the database is locked
    NOMEM = 7  # A malloc() failed
    READONLY = 8  # Attempt to write a readonly database
    INTERRUPT = 9  # Operation terminated by sqlite3_interrupt()
    IOERR = 10  # Some kind of disk I/O error occurred
    CORRUPT = 11  # The database disk image is malformed
    NOTFOUND = 12  # Unknown opcode in sqlite3_file_control()
    FULL = 13  # Insertion failed because database is full
    CANTOPEN = 14  # Unable to open the database file
    PROTOCOL = 15  # Database lock protocol error
    EMPTY = 16  # Internal use only
    SCHEMA = 17  # The database schema changed
    TOOBIG = 18  # String or BLOB exceeds size limit
    CONSTRAINT = 19  # Abort due to constraint violation
    MISMATCH = 20  # Data type mismatch
    MISUSE = 21  # Library used incorrectly
    NOLFS = 22  # Uses OS features not supported on host
    AUTH = 23  # Authorization denied
    FORMAT = 24  # Not used
    RANGE = 25  # 2nd parameter to sqlite3_bind out of range
    NOTADB = 26  # File opened that is not a database file
    NOTICE = 27  # Notifications from sqlite3_log()
    WARNING = 28  # Warnings from sqlite3_log()
    ROW = 100  # sqlite3_step() has another row ready
    DONE = 101  # sqlite3_step() has finished executing
class Error(Exception):

    """Base class for all SQL related errors."""

    def __init__(self, msg: str, error: Optional[QSqlError] = None) -> None:
        super().__init__(msg)
        # The underlying QSqlError, if this came from Qt's SQL layer.
        self.error = error

    def text(self) -> str:
        """Get a short text description of the error.

        This is a string suitable to show to the user as error message.
        """
        if self.error is not None:
            return self.error.databaseText()
        return str(self)
class KnownError(Error):

    """Raised on an error interacting with the SQL database.

    This is raised in conditions resulting from the environment (like a full
    disk or I/O errors), where qutebrowser isn't to blame.

    See raise_sqlite_error for which sqlite error codes get classified as
    environmental.
    """
class BugError(Error):

    """Raised on an error interacting with the SQL database.

    This is raised for errors resulting from a qutebrowser bug; it is the
    fallback in raise_sqlite_error when the error isn't environmental.
    """
def raise_sqlite_error(msg: str, error: QSqlError) -> None:
    """Raise either a BugError or KnownError.

    Failures caused by the environment (locked/corrupt/full databases, I/O
    trouble, over-long generated queries) become KnownError; everything else
    is treated as a qutebrowser bug.
    """
    error_code = error.nativeErrorCode()
    primary_error_code: Union[SqliteErrorCode, str]
    try:
        # Extended result codes embed the primary code in their low byte:
        # https://sqlite.org/rescode.html#pve
        primary_error_code = SqliteErrorCode(int(error_code) & 0xFF)
    except ValueError:
        # not an int, or unknown error code -> fall back to string
        primary_error_code = error_code

    database_text = error.databaseText()
    driver_text = error.driverText()

    log.sql.debug("SQL error:")
    log.sql.debug(f"type: {debug.qenum_key(QSqlError, error.type())}")
    log.sql.debug(f"database text: {database_text}")
    log.sql.debug(f"driver text: {driver_text}")
    log.sql.debug(f"error code: {error_code} -> {primary_error_code}")

    # Error codes we attribute to the environment rather than to qutebrowser.
    environmental_codes = frozenset({
        SqliteErrorCode.BUSY,
        SqliteErrorCode.READONLY,
        SqliteErrorCode.IOERR,
        SqliteErrorCode.CORRUPT,
        SqliteErrorCode.FULL,
        SqliteErrorCode.CANTOPEN,
        SqliteErrorCode.PROTOCOL,
        SqliteErrorCode.NOTADB,
    })

    # https://github.com/qutebrowser/qutebrowser/issues/4681
    # If the query we built was too long
    query_too_long = primary_error_code == SqliteErrorCode.ERROR and (
        database_text.startswith("Expression tree is too large")
        or database_text
        in ("too many SQL variables", "LIKE or GLOB pattern too complex")
    )

    if primary_error_code in environmental_codes or query_too_long:
        raise KnownError(msg, error)
    raise BugError(msg, error)
class Database:

    """A wrapper over a QSqlDatabase connection.

    Opens (creating if needed) the sqlite database at the given path and
    validates its stored user_version against the newest supported version.
    """

    _USER_VERSION = UserVersion(0, 4)  # The current / newest user version

    def __init__(self, path: str) -> None:
        """Open the database at *path*.

        Raises:
            BugError: If a connection for this path already exists.
            KnownError: If the sqlite driver is unavailable, the file can't be
                opened, or the on-disk major user_version is newer than ours.
        """
        if QSqlDatabase.database(path).isValid():
            raise BugError(f'A connection to the database at "{path}" already exists')
        self._path = path
        database = QSqlDatabase.addDatabase("QSQLITE", path)
        if not database.isValid():
            raise KnownError(
                "Failed to add database. Are sqlite and Qt sqlite " "support installed?"
            )
        database.setDatabaseName(path)
        if not database.open():
            error = database.lastError()
            msg = f"Failed to open sqlite database at {path}: {error.text()}"
            raise_sqlite_error(msg, error)
        # Read the version stamp stored in the database file itself.
        version_int = self.query("pragma user_version").run().value()
        self._user_version = UserVersion.from_int(version_int)
        if self._user_version.major > self._USER_VERSION.major:
            raise KnownError(
                "Database is too new for this qutebrowser version (database version "
                f"{self._user_version}, but {self._USER_VERSION.major}.x is supported)"
            )
        if self.user_version_changed():
            # Enable write-ahead-logging and reduce disk write frequency
            # see https://sqlite.org/pragma.html and issues #2930 and #3507
            #
            # We might already have done this (without a migration) in earlier versions,
            # but as those are idempotent, let's make sure we run them once again.
            self.query("PRAGMA journal_mode=WAL").run()
            self.query("PRAGMA synchronous=NORMAL").run()

    def qt_database(self) -> QSqlDatabase:
        """Return the wrapped QSqlDatabase instance."""
        database = QSqlDatabase.database(self._path, open=True)
        if not database.isValid():
            raise BugError(
                "Failed to get connection. Did you close() this Database " "instance?"
            )
        return database

    def query(self, querystr: str, forward_only: bool = True) -> "Query":
        """Return a Query instance linked to this Database."""
        return Query(self, querystr, forward_only)

    def table(
        self,
        name: str,
        fields: List[str],
        constraints: Optional[Dict[str, str]] = None,
        parent: Optional[QObject] = None,
    ) -> "SqlTable":
        """Return a SqlTable instance linked to this Database."""
        return SqlTable(self, name, fields, constraints, parent)

    def user_version_changed(self) -> bool:
        """Whether the version stored in the database differs from the current one."""
        return self._user_version != self._USER_VERSION

    def upgrade_user_version(self) -> None:
        """Upgrade the user version to the latest version.

        This method should be called once all required operations to migrate from one
        version to another have been run.
        """
        log.sql.debug(
            f"Migrating from version {self._user_version} " f"to {self._USER_VERSION}"
        )
        self.query(f"PRAGMA user_version = {self._USER_VERSION.to_int()}").run()
        self._user_version = self._USER_VERSION

    def close(self) -> None:
        """Close the SQL connection."""
        database = self.qt_database()
        database.close()
        # NOTE(review): sip.delete drops the C++ wrapper before the named
        # connection is removed -- presumably to avoid Qt's "connection still
        # in use" warning; confirm before reordering.
        sip.delete(database)
        QSqlDatabase.removeDatabase(self._path)

    def transaction(self) -> "Transaction":
        """Return a Transaction object linked to this Database."""
        return Transaction(self)
class Transaction(contextlib.AbstractContextManager):  # type: ignore[type-arg]

    """A Database transaction that can be used as a context manager.

    Commits on a clean exit, rolls back if the with-block raised.
    """

    def __init__(self, database: Database) -> None:
        self._database = database

    def __enter__(self) -> None:
        log.sql.debug("Starting a transaction")
        database = self._database.qt_database()
        if not database.transaction():
            error = database.lastError()
            raise_sqlite_error(
                f'Failed to start a transaction: "{error.text()}"', error
            )

    def __exit__(
        self,
        _exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        _exc_tb: Optional[types.TracebackType],
    ) -> None:
        database = self._database.qt_database()
        if exc_val:
            # An exception escaped the with-block: undo all changes.
            log.sql.debug("Rolling back a transaction")
            database.rollback()
            return
        log.sql.debug("Committing a transaction")
        if not database.commit():
            error = database.lastError()
            raise_sqlite_error(
                f'Failed to commit a transaction: "{error.text()}"', error
            )
class Query:

    """A prepared SQL query."""

    def __init__(
        self, database: Database, querystr: str, forward_only: bool = True
    ) -> None:
        """Prepare a new SQL query.

        Args:
            database: The Database object on which to operate.
            querystr: String to prepare query from.
            forward_only: Optimization for queries that will only step forward.
                          Must be false for completion queries.
        """
        self._database = database
        self.query = QSqlQuery(database.qt_database())
        log.sql.vdebug(f"Preparing: {querystr}")  # type: ignore[attr-defined]
        ok = self.query.prepare(querystr)
        self._check_ok("prepare", ok)
        self.query.setForwardOnly(forward_only)
        # Placeholder names bound by the most recent _bind_values() call.
        self._placeholders: List[str] = []

    def __iter__(self) -> Iterator[Any]:
        """Yield each result row as a "ResultRow" namedtuple of record fields."""
        if not self.query.isActive():
            raise BugError("Cannot iterate inactive query")
        rec = self.query.record()
        fields = [rec.fieldName(i) for i in range(rec.count())]
        rowtype = collections.namedtuple(  # type: ignore[misc]
            "ResultRow", fields
        )
        while self.query.next():
            rec = self.query.record()
            yield rowtype(*[rec.value(i) for i in range(rec.count())])

    def _check_ok(self, step: str, ok: bool) -> None:
        """Raise a sqlite error for *step* if the Qt call reported failure."""
        if not ok:
            query = self.query.lastQuery()
            error = self.query.lastError()
            msg = f'Failed to {step} query "{query}": "{error.text()}"'
            raise_sqlite_error(msg, error)

    def _validate_bound_values(self) -> None:
        """Make sure all placeholders are bound."""
        qt_bound_values = self.query.boundValues()
        if machinery.IS_QT5:
            # Qt 5: Returns a dict
            values = list(qt_bound_values.values())
        else:
            # Qt 6: Returns a list
            values = qt_bound_values
        if None in values:
            raise BugError("Missing bound values!")

    def _bind_values(self, values: Mapping[str, Any]) -> Dict[str, Any]:
        """Bind *values* to their ":name" placeholders; return what was bound."""
        self._placeholders = list(values)
        for key, val in values.items():
            self.query.bindValue(f":{key}", val)
        self._validate_bound_values()
        return self.bound_values()

    def run(self, **values: Any) -> "Query":
        """Execute the prepared query."""
        log.sql.debug(self.query.lastQuery())
        bound_values = self._bind_values(values)
        if bound_values:
            log.sql.debug(f"    {bound_values}")
        ok = self.query.exec()
        self._check_ok("exec", ok)
        return self

    def run_batch(self, values: Mapping[str, MutableSequence[Any]]) -> None:
        """Execute the query in batch mode."""
        log.sql.debug(f'Running SQL query (batch): "{self.query.lastQuery()}"')
        self._bind_values(values)
        db = self._database.qt_database()
        # Wrap the whole batch in a transaction so it applies atomically.
        ok = db.transaction()
        self._check_ok("transaction", ok)
        ok = self.query.execBatch()
        try:
            self._check_ok("execBatch", ok)
        except Error:
            # Not checking the return value here, as we're failing anyways...
            db.rollback()
            raise
        ok = db.commit()
        self._check_ok("commit", ok)

    def value(self) -> Any:
        """Return the result of a single-value query (e.g. an EXISTS)."""
        if not self.query.next():
            raise BugError("No result for single-result query")
        return self.query.record().value(0)

    def rows_affected(self) -> int:
        """Return how many rows were affected by a non-SELECT query."""
        assert not self.query.isSelect(), self
        assert self.query.isActive(), self
        rows = self.query.numRowsAffected()
        assert rows != -1
        return rows

    def bound_values(self) -> Dict[str, Any]:
        """Return the currently bound values, keyed by ":placeholder" name."""
        return {
            f":{key}": self.query.boundValue(f":{key}") for key in self._placeholders
        }
class SqlTable(QObject):

    """Interface to a SQL table.

    Attributes:
        _name: Name of the SQL table this wraps.
        database: The Database to which this table belongs.

    Signals:
        changed: Emitted when the table is modified.
    """

    changed = pyqtSignal()
    database: Database

    def __init__(
        self,
        database: Database,
        name: str,
        fields: List[str],
        constraints: Optional[Dict[str, str]] = None,
        parent: Optional[QObject] = None,
    ) -> None:
        """Wrapper over a table in the SQL database.

        Args:
            database: The Database to which this table belongs.
            name: Name of the table.
            fields: A list of field names.
            constraints: A dict mapping field names to constraint strings.
            parent: Optional Qt parent object.
        """
        super().__init__(parent)
        self._name = name
        self.database = database
        self._create_table(fields, constraints)

    def _create_table(
        self,
        fields: List[str],
        constraints: Optional[Dict[str, str]],
        *,
        force: bool = False,
    ) -> None:
        """Create the table if the database is uninitialized.

        If the table already exists, this does nothing (except with force=True), so it
        can e.g. be called on every user_version change.
        """
        if not self.database.user_version_changed() and not force:
            return
        constraints = constraints or {}
        # One "<field> <constraint>" definition per column.
        column_defs = [f'{field} {constraints.get(field, "")}' for field in fields]
        q = self.database.query(
            f"CREATE TABLE IF NOT EXISTS {self._name} ({', '.join(column_defs)})"
        )
        q.run()

    def create_index(self, name: str, field: str) -> None:
        """Create an index over this table if the database is uninitialized.

        Args:
            name: Name of the index, should be unique.
            field: Name of the field to index.
        """
        if not self.database.user_version_changed():
            return
        q = self.database.query(
            f"CREATE INDEX IF NOT EXISTS {name} ON {self._name} ({field})"
        )
        q.run()

    def __iter__(self) -> Iterator[Any]:
        """Iterate rows in the table."""
        q = self.database.query(f"SELECT * FROM {self._name}")
        q.run()
        return iter(q)

    def contains_query(self, field: str) -> Query:
        """Return a prepared query that checks for the existence of an item.

        Args:
            field: Field to match (bound via the :val placeholder).
        """
        return self.database.query(
            f"SELECT EXISTS(SELECT * FROM {self._name} WHERE {field} = :val)"
        )

    def __len__(self) -> int:
        """Return the count of rows in the table."""
        q = self.database.query(f"SELECT count(*) FROM {self._name}")
        q.run()
        return q.value()

    def __bool__(self) -> bool:
        """Check whether there's any data in the table."""
        q = self.database.query(f"SELECT 1 FROM {self._name} LIMIT 1")
        q.run()
        return q.query.next()

    def delete(self, field: str, value: Any) -> None:
        """Remove all rows for which `field` equals `value`.

        Args:
            field: Field to use as the key.
            value: Key value to delete.

        Raises:
            KeyError: If no rows matched.
        """
        q = self.database.query(f"DELETE FROM {self._name} where {field} = :val")
        q.run(val=value)
        if not q.rows_affected():
            raise KeyError(f"No row with {field} = {value!r}")
        self.changed.emit()

    def _insert_query(self, values: Mapping[str, Any], replace: bool) -> Query:
        # Build "INSERT/REPLACE INTO <name> (cols) values(:placeholders)" with
        # one named placeholder per field.
        params = ", ".join(f":{key}" for key in values)
        columns = ", ".join(values)
        verb = "REPLACE" if replace else "INSERT"
        return self.database.query(
            f"{verb} INTO {self._name} ({columns}) values({params})"
        )

    def insert(self, values: Mapping[str, Any], replace: bool = False) -> None:
        """Append a row to the table.

        Args:
            values: A dict with a value to insert for each field name.
            replace: If set, replace existing values.
        """
        q = self._insert_query(values, replace)
        q.run(**values)
        self.changed.emit()

    def insert_batch(
        self, values: Mapping[str, MutableSequence[Any]], replace: bool = False
    ) -> None:
        """Performantly append multiple rows to the table.

        Args:
            values: A dict with a list of values to insert for each field name.
            replace: If true, overwrite rows with a primary key match.
        """
        q = self._insert_query(values, replace)
        q.run_batch(values)
        self.changed.emit()

    def delete_all(self) -> None:
        """Remove all rows from the table."""
        self.database.query(f"DELETE FROM {self._name}").run()
        self.changed.emit()

    def select(self, sort_by: str, sort_order: str, limit: int = -1) -> Query:
        """Prepare, run, and return a select statement on this table.

        Args:
            sort_by: name of column to sort by.
            sort_order: 'asc' or 'desc'.
            limit: max number of rows in result, defaults to -1 (unlimited).

        Return: A prepared and executed select query.
        """
        q = self.database.query(
            f"SELECT * FROM {self._name} ORDER BY {sort_by} {sort_order} LIMIT :limit"
        )
        q.run(limit=limit)
        return q
def version() -> str:
    """Return the sqlite version string.

    Uses a throwaway in-memory database; if that can't be opened, a
    placeholder string describing the failure is returned instead.
    """
    try:
        in_memory_db = Database(":memory:")
        try:
            return in_memory_db.query("select sqlite_version()").run().value()
        finally:
            in_memory_db.close()
    except KnownError as e:
        return f"UNAVAILABLE ({e})"
|
queries | event_query | from typing import Dict, List, Optional, Tuple, Union
from ee.clickhouse.materialized_columns.columns import ColumnName
from ee.clickhouse.queries.column_optimizer import EnterpriseColumnOptimizer
from ee.clickhouse.queries.groups_join_query import GroupsJoinQuery
from posthog.models.filters.filter import Filter
from posthog.models.filters.path_filter import PathFilter
from posthog.models.filters.properties_timeline_filter import PropertiesTimelineFilter
from posthog.models.filters.retention_filter import RetentionFilter
from posthog.models.filters.session_recordings_filter import SessionRecordingsFilter
from posthog.models.filters.stickiness_filter import StickinessFilter
from posthog.models.property import PropertyName
from posthog.models.team import Team
from posthog.queries.event_query.event_query import EventQuery
from posthog.utils import PersonOnEventsMode
class EnterpriseEventQuery(EventQuery):
    """EventQuery variant that plugs in the EnterpriseColumnOptimizer and can
    build a groups JOIN via GroupsJoinQuery."""

    _column_optimizer: EnterpriseColumnOptimizer

    def __init__(
        self,
        filter: Union[
            Filter,
            PathFilter,
            RetentionFilter,
            StickinessFilter,
            SessionRecordingsFilter,
            PropertiesTimelineFilter,
        ],
        team: Team,
        round_interval=False,
        should_join_distinct_ids=False,
        should_join_persons=False,
        # Extra events/person table columns to fetch since parent query needs them.
        # Defaults are None instead of [] to avoid sharing one mutable list
        # across all calls (mutable-default-argument pitfall).
        extra_fields: Optional[List[ColumnName]] = None,
        extra_event_properties: Optional[List[PropertyName]] = None,
        extra_person_fields: Optional[List[ColumnName]] = None,
        override_aggregate_users_by_distinct_id: Optional[bool] = None,
        person_on_events_mode: PersonOnEventsMode = PersonOnEventsMode.DISABLED,
        **kwargs,
    ) -> None:
        super().__init__(
            filter=filter,
            team=team,
            round_interval=round_interval,
            should_join_distinct_ids=should_join_distinct_ids,
            should_join_persons=should_join_persons,
            # Pass fresh lists so no state can leak between instances.
            extra_fields=extra_fields if extra_fields is not None else [],
            extra_event_properties=(
                extra_event_properties if extra_event_properties is not None else []
            ),
            extra_person_fields=(
                extra_person_fields if extra_person_fields is not None else []
            ),
            override_aggregate_users_by_distinct_id=override_aggregate_users_by_distinct_id,
            person_on_events_mode=person_on_events_mode,
            **kwargs,
        )
        self._column_optimizer = EnterpriseColumnOptimizer(self._filter, self._team_id)

    def _get_groups_query(self) -> Tuple[str, Dict]:
        """Return the groups JOIN clause and its parameters for this filter.

        Raises:
            Exception: If called with a PropertiesTimelineFilter, which never
                needs a groups query.
        """
        if isinstance(self._filter, PropertiesTimelineFilter):
            raise Exception("Properties Timeline never needs groups query")
        return GroupsJoinQuery(
            self._filter,
            self._team_id,
            self._column_optimizer,
            person_on_events_mode=self._person_on_events_mode,
        ).get_join_query()
|
protos | pipeline_pb2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/pipeline.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pb2
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from app.object_detection.protos import (
eval_pb2 as object__detection_dot_protos_dot_eval__pb2,
)
from app.object_detection.protos import (
input_reader_pb2 as object__detection_dot_protos_dot_input__reader__pb2,
)
from app.object_detection.protos import (
model_pb2 as object__detection_dot_protos_dot_model__pb2,
)
from app.object_detection.protos import (
train_pb2 as object__detection_dot_protos_dot_train__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="object_detection/protos/pipeline.proto",
package="object_detection.protos",
syntax="proto2",
serialized_pb=_b(
'\n&object_detection/protos/pipeline.proto\x12\x17object_detection.protos\x1a"object_detection/protos/eval.proto\x1a*object_detection/protos/input_reader.proto\x1a#object_detection/protos/model.proto\x1a#object_detection/protos/train.proto"\xca\x02\n\x17TrainEvalPipelineConfig\x12\x36\n\x05model\x18\x01 \x01(\x0b\x32\'.object_detection.protos.DetectionModel\x12:\n\x0ctrain_config\x18\x02 \x01(\x0b\x32$.object_detection.protos.TrainConfig\x12@\n\x12train_input_reader\x18\x03 \x01(\x0b\x32$.object_detection.protos.InputReader\x12\x38\n\x0b\x65val_config\x18\x04 \x01(\x0b\x32#.object_detection.protos.EvalConfig\x12?\n\x11\x65val_input_reader\x18\x05 \x01(\x0b\x32$.object_detection.protos.InputReader'
),
dependencies=[
object__detection_dot_protos_dot_eval__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_input__reader__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_model__pb2.DESCRIPTOR,
object__detection_dot_protos_dot_train__pb2.DESCRIPTOR,
],
)
# NOTE: protoc-generated module ("DO NOT EDIT" above) -- any manual change is
# lost when object_detection/protos/pipeline.proto is recompiled.
# Message descriptor for TrainEvalPipelineConfig, with one FieldDescriptor per
# field declared in the .proto file (all type=11/cpp_type=10: messages).
_TRAINEVALPIPELINECONFIG = _descriptor.Descriptor(
    name="TrainEvalPipelineConfig",
    full_name="object_detection.protos.TrainEvalPipelineConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="model",
            full_name="object_detection.protos.TrainEvalPipelineConfig.model",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="train_config",
            full_name="object_detection.protos.TrainEvalPipelineConfig.train_config",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="train_input_reader",
            full_name="object_detection.protos.TrainEvalPipelineConfig.train_input_reader",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="eval_config",
            full_name="object_detection.protos.TrainEvalPipelineConfig.eval_config",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="eval_input_reader",
            full_name="object_detection.protos.TrainEvalPipelineConfig.eval_input_reader",
            index=4,
            number=5,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    options=None,
    is_extendable=False,
    syntax="proto2",
    extension_ranges=[],
    oneofs=[],
    serialized_start=222,
    serialized_end=552,
)
# Link each message-typed field to the descriptor from its dependency module.
_TRAINEVALPIPELINECONFIG.fields_by_name[
    "model"
].message_type = object__detection_dot_protos_dot_model__pb2._DETECTIONMODEL
_TRAINEVALPIPELINECONFIG.fields_by_name[
    "train_config"
].message_type = object__detection_dot_protos_dot_train__pb2._TRAINCONFIG
_TRAINEVALPIPELINECONFIG.fields_by_name[
    "train_input_reader"
].message_type = object__detection_dot_protos_dot_input__reader__pb2._INPUTREADER
_TRAINEVALPIPELINECONFIG.fields_by_name[
    "eval_config"
].message_type = object__detection_dot_protos_dot_eval__pb2._EVALCONFIG
_TRAINEVALPIPELINECONFIG.fields_by_name[
    "eval_input_reader"
].message_type = object__detection_dot_protos_dot_input__reader__pb2._INPUTREADER
DESCRIPTOR.message_types_by_name["TrainEvalPipelineConfig"] = _TRAINEVALPIPELINECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message class from the descriptor via reflection and
# register it with the default symbol database.
TrainEvalPipelineConfig = _reflection.GeneratedProtocolMessageType(
    "TrainEvalPipelineConfig",
    (_message.Message,),
    dict(
        DESCRIPTOR=_TRAINEVALPIPELINECONFIG,
        __module__="object_detection.protos.pipeline_pb2",
        # @@protoc_insertion_point(class_scope:object_detection.protos.TrainEvalPipelineConfig)
    ),
)
_sym_db.RegisterMessage(TrainEvalPipelineConfig)
# @@protoc_insertion_point(module_scope)
|
utils | spoolSupport | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import math
from collections import namedtuple
from eos.const import SpoolType
from eos.utils.float import floatUnerr
# External spoolup options: spoolType/spoolAmount select the spoolup level;
# force=True makes these options override any on-module spoolup settings
# (see resolveSpoolOptions).
SpoolOptions = namedtuple("SpoolOptions", ("spoolType", "spoolAmount", "force"))
def calculateSpoolup(modMaxValue, modStepValue, modCycleTime, spoolType, spoolAmount):
    """
    Calculate damage multiplier increment based on passed parameters. Module cycle time
    is specified in seconds.
    Returns spoolup value, amount of cycles to reach it and time to reach it.

    Each spool type only decides how many cycles have elapsed; the resulting
    (value, cycles, time) tuple is then derived the same way for all of them.
    """
    # No spoolup possible without a maximum or a per-cycle step.
    if not modMaxValue or not modStepValue:
        return 0, 0, 0
    if spoolType == SpoolType.SPOOL_SCALE:
        # Find out at which point of spoolup scale we're on and how many cycles
        # are enough to reach it
        cycles = math.ceil(floatUnerr(modMaxValue * spoolAmount / modStepValue))
    elif spoolType == SpoolType.CYCLE_SCALE:
        # For cycle scale, find out max amount of cycles and scale against it
        cycles = round(spoolAmount * math.ceil(floatUnerr(modMaxValue / modStepValue)))
    elif spoolType == SpoolType.TIME:
        cycles = min(
            # How many full cycles mod had by passed time
            math.floor(floatUnerr(spoolAmount / modCycleTime)),
            # Max amount of cycles
            math.ceil(floatUnerr(modMaxValue / modStepValue)),
        )
    elif spoolType == SpoolType.CYCLES:
        cycles = min(
            # Consider full cycles only
            math.floor(spoolAmount),
            # Max amount of cycles
            math.ceil(floatUnerr(modMaxValue / modStepValue)),
        )
    else:
        # Unknown spool type
        return 0, 0, 0
    # Shared tail (previously duplicated in every branch): cap the spooled
    # value at the maximum and derive the elapsed time from the cycle count.
    spoolValue = min(modMaxValue, cycles * modStepValue)
    return spoolValue, cycles, cycles * modCycleTime
def resolveSpoolOptions(spoolOptions, module):
    """Decide which (spoolType, spoolAmount) pair to use.

    Priority: forced external options > on-module settings > non-forced
    external options > nothing, in which case (None, None) is returned.
    """
    forced = spoolOptions is not None and spoolOptions.force
    # On-module values win unless the passed options are forcing us
    if not forced and module is not None and module.spoolType is not None:
        return module.spoolType, module.spoolAmount
    if spoolOptions is not None:
        return spoolOptions.spoolType, spoolOptions.spoolAmount
    return None, None
|
canto-curses | taglist | # -*- coding: utf-8 -*-
# Canto-curses - ncurses RSS reader
# Copyright (C) 2016 Jack Miller <jack@codezen.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import curses
import logging
import os
import re
import shlex
from canto_next.hooks import on_hook, remove_hook, unhook_all
from canto_next.plugins import Plugin
from .command import (
_int_check,
_int_range,
_string,
register_arg_types,
register_commands,
unregister_all,
)
from .guibase import GuiBase
from .locks import config_lock
from .reader import Reader
from .tag import Tag, alltags
from .tagcore import alltagcores, tag_updater
log = logging.getLogger("TAGLIST")
# TagList is the class renders a classical Canto list of tags into the given
# panel. It defers to the Tag class for the actual individual tag rendering.
# This is the level at which commands are taken and the backend is communicated
# with.
class TagListPlugin(Plugin):
    """Plugin attachment point for the TagList widget (see Plugin base class)."""
    pass
class TagList(GuiBase):
    def init(self, pad, callbacks):
        """One-time setup: record drawing state, register hooks, then
        register the taglist's argument types and command tables."""
        GuiBase.init(self)
        # Drawing information
        self.pad = pad
        self.height, self.width = self.pad.getmaxyx()
        # Callback information
        self.callbacks = callbacks
        # Holster for a list of items for batch operations.
        self.got_items = []
        # Rendering bookkeeping, maintained by refresh()/redraw().
        self.first_sel = None
        self.first_story = None
        self.last_story = None
        self.tags = []
        self.spacing = callbacks["get_opt"]("taglist.spacing")
        # Hold config log so we don't miss any new TagCores or get updates
        # before we're ready.
        on_hook("curses_eval_tags_changed", self.on_eval_tags_changed, self)
        on_hook("curses_items_added", self.on_items_added, self)
        on_hook("curses_items_removed", self.on_items_removed, self)
        on_hook("curses_tag_updated", self.on_tag_updated, self)
        on_hook("curses_stories_added", self.on_stories_added, self)
        on_hook("curses_stories_removed", self.on_stories_removed, self)
        on_hook("curses_opt_change", self.on_opt_change, self)
        on_hook("curses_new_tagcore", self.on_new_tagcore, self)
        on_hook("curses_del_tagcore", self.on_del_tagcore, self)
        # Argument types: name -> (help text, type factory[, prompt hook]).
        args = {
            "cursor-offset": ("[cursor-offset]", self.type_cursor_offset),
            "item-list": (
                "[item-list]: List of item indices (tab complete to show)\n Simple: 1,3,6,5\n Ranges: 1-100\n All: *\n Selected item: .\n Domains tag,1,2,3 for 1,2,3 of current tag",
                self.type_item_list,
                self.hook_item_list,
            ),
            "item-state": (
                "[item-state]: Any word, can be inverted with minus ex: '-read' or 'marked'",
                self.type_item_state,
            ),
            "tag-list": (
                "[tag-list]: List of tag indices (tab complete to show)\n Simple: 1,3,6,5\n Ranges: 1-100\n Selected tag: .\n All: *",
                self.type_tag_list,
                self.hook_tag_list,
            ),
            # string because tag-item will manually bash in user: prefix
            "user-tag": (
                "[user-tag]: Any string, like 'favorite', or 'cool'",
                self.type_user_tag,
            ),
            "category": (
                "[category]: Any string, like 'news' or 'comics'",
                self.type_category,
            ),
        }
        # Command tables: name -> (implementation, [arg type names], help).
        base_cmds = {
            "remote delfeed": (
                self.cmd_delfeed,
                ["tag-list"],
                "Unsubscribe from feeds.",
            ),
        }
        nav_cmds = {
            "page-down": (self.cmd_page_down, [], "Move down a page of items"),
            "page-up": (self.cmd_page_up, [], "Move up a page of items"),
            "next-tag": (self.cmd_next_tag, [], "Scroll to next tag"),
            "prev-tag": (self.cmd_prev_tag, [], "Scroll to previous tag"),
            "next-marked": (self.cmd_next_marked, [], "Scroll to next marked item"),
            "prev-marked": (self.cmd_prev_marked, [], "Scroll to previous marked item"),
            "rel-set-cursor 1": (lambda: self.cmd_rel_set_cursor(1), [], "Next item"),
            "rel-set-cursor -1": (
                lambda: self.cmd_rel_set_cursor(-1),
                [],
                "Previous item",
            ),
        }
        hidden_cmds = {
            "rel-set-cursor": (
                self.cmd_rel_set_cursor,
                ["cursor-offset"],
                "Move the cursor by cursor-offset items",
            ),
        }
        grouping_cmds = {
            "foritems": (
                self.cmd_foritems,
                ["item-list"],
                "Collect items for future commands\n\nAfter a foritems call, subsequent commands that take [item-lists] will use them.\n\nCan be cleared with clearitems.",
            ),
            "foritem": (
                self.cmd_foritem,
                ["item-list"],
                "Collect first item for future commands\n\nAfter a foritem call, subsequent commands that take [item-lists] will use the first item given.\n\nCan be cleared with clearitems.",
            ),
            "clearitems": (
                self.cmd_clearitems,
                [],
                "Clear collected items (see foritem / foritems)",
            ),
        }
        item_cmds = {
            "goto": (self.cmd_goto, ["item-list"], "Open story links in browser"),
            "reader": (self.cmd_reader, ["item-list"], "Open the built-in reader"),
            "tag-item": (
                self.cmd_tag_item,
                ["user-tag", "item-list"],
                "Add a tag to individual items",
            ),
            "tags": (self.cmd_tags, ["item-list"], "Show tag of selected items"),
            "item-state": (
                self.cmd_item_state,
                ["item-state", "item-list"],
                "Set item state (i.e. 'item-state read .')",
            ),
            "tag-state": (
                self.cmd_tag_state,
                ["item-state", "tag-list"],
                "Set item state for all items in tag (i.e. 'tag-state read .')",
            ),
        }
        collapse_cmds = {
            "collapse": (
                self.cmd_collapse,
                ["tag-list"],
                "Collapse tags - reduce the tag's output to a simple status line.",
            ),
            "uncollapse": (
                self.cmd_uncollapse,
                ["tag-list"],
                "Uncollapse tags - show the full content of a tag",
            ),
            "toggle-collapse": (
                self.cmd_toggle_collapse,
                ["tag-list"],
                "Toggle collapsed state of tags.",
            ),
        }
        search_cmds = {
            "search": (self.cmd_search, ["string"], "Search items for string"),
            "search-regex": (
                self.cmd_search_regex,
                ["string"],
                "Search items for regex",
            ),
        }
        tag_cmds = {
            "promote": (
                self.cmd_promote,
                ["tag-list"],
                "Move tags up in the display order (opposite of demote)",
            ),
            "demote": (
                self.cmd_demote,
                ["tag-list"],
                "Move tags down in the display order (opposite of promote)",
            ),
        }
        tag_group_cmds = {
            "categorize": (
                self.cmd_categorize,
                ["category", "tag-list"],
                "Categorize a tag",
            ),
            "remove-category": (
                self.cmd_remove_category,
                ["category", "tag-list"],
                "Remove a tag from a category",
            ),
            "categories": (
                self.cmd_categories,
                ["tag-list"],
                "Query what categories a tag is in.",
            ),
            "show-category": (
                self.cmd_show_category,
                ["category"],
                "Show only tags in category.",
            ),
        }
        register_commands(self, base_cmds, "Base")
        register_commands(self, nav_cmds, "Navigation")
        register_commands(self, hidden_cmds, "hidden")
        register_commands(self, grouping_cmds, "Grouping")
        register_commands(self, item_cmds, "Item")
        register_commands(self, collapse_cmds, "Collapse")
        register_commands(self, search_cmds, "Search")
        register_commands(self, tag_cmds, "Tag")
        register_commands(self, tag_group_cmds, "Tag Grouping")
        register_arg_types(self, args)
        # Allow plugins to extend/override TagList behavior.
        self.plugin_class = TagListPlugin
        self.update_plugin_lookups()
    def die(self):
        """Tear down everything init() registered (hooks and commands)."""
        log.debug("Cleaning up hooks...")
        unhook_all(self)
        unregister_all(self)
    def tag_by_item(self, item):
        # Stories carry a direct reference to their Tag.
        return item.parent_tag
    def tag_by_obj(self, obj):
        # Resolve either a Tag or a story object to its Tag.
        if obj.is_tag:
            return obj
        return obj.parent_tag
    # Types return (completion generator, validator)
    # None completions indicates that the help text should be enough (which
    # happens if it's a generic type without bounds)
    def type_cursor_offset(self):
        # Any integer is acceptable; _int_check validates.
        return (None, _int_check)
    def unhook_item_list(self, vars):
        # Perhaps this should be a separate hook for command completion?
        if "input_prompt" in vars:
            # Prompt closed: turn story enumeration back off.
            self.callbacks["set_opt"]("story.enumerated", False)
            self.callbacks["release_gui"]()
            remove_hook("curses_var_change", self.unhook_item_list)
    def hook_item_list(self):
        # Enumerate stories on screen while an [item-list] argument is
        # being entered; unhook_item_list undoes this when the prompt ends.
        if not self.callbacks["get_opt"]("story.enumerated"):
            self.callbacks["set_opt"]("story.enumerated", True)
            self.callbacks["release_gui"]()
        on_hook("curses_var_change", self.unhook_item_list, self)
    def type_item_list(self):
        """Build the [item-list] argument type: domains ("all"/"tag"),
        symbols ('.', '*'), and a fallback list for _int_range."""
        # "all" domain: every story of every uncollapsed tag, display order.
        all_items = []
        for tag in self.tags:
            if tag.collapsed:
                continue
            for s in tag:
                all_items.append(s)
        domains = {"all": all_items}
        syms = {"all": {}}
        sel = self.callbacks["get_var"]("selected")
        if sel:
            # If we have a selection, we have a sensible tag domain
            tag = self.tag_by_obj(sel)
            domains["tag"] = [x for x in tag]
            syms["tag"] = {}
            if not sel.is_tag:
                # '.' = the selected story, '*' = the whole tag.
                syms["tag"]["."] = [domains["tag"].index(sel)]
                syms["tag"]["*"] = range(0, len(domains["tag"]))
                syms["all"]["."] = [all_items.index(sel)]
            elif len(sel) > 0:
                # A selected non-empty tag: '.' means its first story.
                # NOTE(review): syms["all"]["."] is not set on this path —
                # presumably _int_range tolerates the missing symbol; verify.
                syms["tag"]["."] = [0]
                syms["tag"]["*"] = range(0, len(sel))
            else:
                syms["tag"]["."] = []
                syms["tag"]["*"] = []
        else:
            syms["all"]["."] = []
        syms["all"]["*"] = range(0, len(all_items))
        # if we have items, pass them in, otherwise pass in selected which is the implied context
        fallback = self.got_items[:]
        if fallback == [] and sel and not sel.is_tag:
            fallback = [sel]
        return (None, lambda x: _int_range("item", domains, syms, fallback, x))
    def unhook_tag_list(self, vars):
        # Perhaps this should be a separate hook for command completion?
        if "input_prompt" in vars:
            # Prompt closed: turn tag enumeration back off.
            self.callbacks["set_opt"]("taglist.tags_enumerated", False)
            self.callbacks["release_gui"]()
            remove_hook("curses_var_change", self.unhook_tag_list)
    def hook_tag_list(self):
        # Enumerate tags on screen while a [tag-list] argument is being
        # entered; unhook_tag_list undoes this when the prompt ends.
        if not self.callbacks["get_opt"]("taglist.tags_enumerated"):
            self.callbacks["set_opt"]("taglist.tags_enumerated", True)
            self.callbacks["release_gui"]()
        on_hook("curses_var_change", self.unhook_tag_list, self)
    def type_tag_list(self):
        """Build the [tag-list] argument type over the visible tags."""
        vtags = self.callbacks["get_var"]("taglist_visible_tags")
        domains = {"all": vtags}
        syms = {"all": {}}
        sel = self.callbacks["get_var"]("selected")
        deftags = []
        if sel and sel.is_tag:
            deftags = [sel]
            syms["all"]["."] = [vtags.index(sel)]
        elif sel:
            # A selected story implies its parent tag.
            deftags = [self.tag_by_item(sel)]
            syms["all"]["."] = [vtags.index(deftags[0])]
        else:
            syms["all"]["."] = []
        syms["all"]["*"] = range(0, len(vtags))
        # Also accept bare feed names (the part after "maintag:") as symbols.
        for i, tag in enumerate(vtags):
            if tag.tag.startswith("maintag:"):
                syms["all"][tag.tag[8:]] = [i]
        return (None, lambda x: _int_range("tag", domains, syms, deftags, x))
    # This will accept any state, but should offer some completions for sensible ones
    def type_item_state(self):
        # Validator accepts any string unchanged.
        return (["read", "marked", "-read", "-marked"], lambda x: (True, x))
    def on_new_tagcore(self, tagcore):
        # A new TagCore needs a curses-side Tag() wrapper.
        log.debug("Instantiating Tag() for %s", tagcore.tag)
        Tag(tagcore, self.callbacks)
        self.callbacks["set_var"]("needs_refresh", True)
    def on_del_tagcore(self, tagcore):
        # Kill every Tag wrapping the removed TagCore.
        log.debug("taglist on_del_tag")
        for tagobj in alltags:
            if tagobj.tag == tagcore.tag:
                tagobj.die()
        self.callbacks["set_var"]("needs_refresh", True)
    # We really shouldn't care about item being added (it's a TagCore event)
    # but we do need to release the gui thread so that it can handle sync
    # caused by an empty Tag's TagCore getting items.
    def on_items_added(self, tagcore, items):
        self.callbacks["release_gui"]()
    def on_items_removed(self, tagcore, items):
        self.callbacks["release_gui"]()
    def on_tag_updated(self, tagcore):
        self.callbacks["release_gui"]()
    def on_eval_tags_changed(self):
        # The evaluated tag set changed: force a full resync.
        self.callbacks["force_sync"]()
        self.callbacks["release_gui"]()
    # Called with sync_lock, so we are unrestricted.
    def on_stories_added(self, tag, items):
        # Items being added implies we need to remap them
        self.callbacks["set_var"]("needs_refresh", True)
    # Called with sync_lock, so we are unrestricted.
    def on_stories_removed(self, tag, items):
        # Items being removed implies we need to remap them.
        self.callbacks["set_var"]("needs_refresh", True)
def on_opt_change(self, conf):
if "taglist" not in conf:
return
if "search_attributes" in conf["taglist"]:
log.info("Fetching any needed search attributes")
need_attrs = {}
sa = self.callbacks["get_opt"]("taglist.search_attributes")
# Make sure that we have all attributes needed for a search.
for tag in alltagcores:
for item in tag:
tag_updater.need_attributes(item, sa)
if "spacing" in conf["taglist"]:
self.spacing = conf["taglist"]["spacing"]
self.callbacks["set_var"]("needs_refresh", True)
    def cmd_goto(self, items):
        # Open each item's link in the browser (_goto comes from GuiBase).
        log.debug("GOTO: %s", items)
        self._goto([item.content["link"] for item in items])
    def cmd_tag_state(self, state, tags):
        # Apply a state change to every item of the given tags, then push
        # all changed attributes to the daemon in one batch.
        attributes = {}
        for tag in tags:
            for item in tag:
                if item.handle_state(state):
                    attributes[item.id] = {"canto-state": item.content["canto-state"]}
        if attributes:
            tag_updater.set_attributes(attributes)
    # item-state: Add/remove state for multiple items.
    def cmd_item_state(self, state, items):
        attributes = {}
        for item in items:
            if item.handle_state(state):
                attributes[item.id] = {"canto-state": item.content["canto-state"]}
        if attributes:
            tag_updater.set_attributes(attributes)
    # tag-item : Same as above, with tags.
    def cmd_tag_item(self, tag, items):
        # Proper prefix
        if tag[0] in "-%":
            tag = tag[0] + "user:" + tag[1:]
        else:
            tag = "user:" + tag
        attributes = {}
        for item in items:
            if item.handle_tag(tag):
                attributes[item.id] = {"canto-tags": item.content["canto-tags"]}
        if attributes:
            tag_updater.set_attributes(attributes)
    def cmd_tags(self, items):
        # Log each item's parent tag plus any user tags it carries.
        for item in items:
            if "title" in item.content:
                log.info("'%s' in tags:\n" % item.content["title"])
            log.info(item.parent_tag.tag)
            if "canto-tags" in item.content:
                for tag in item.content["canto-tags"]:
                    if tag.startswith("user:"):
                        # Strip the "user:" prefix for display.
                        log.info(tag[5:])
                    else:
                        log.info(tag)
    def _iterate_forward(self, start):
        """Step to the next selectable object.

        Returns (next_sel, lines) where lines is the total height of every
        object from start up to, but excluding, next_sel.
        """
        ns = start.next_sel
        o = start
        lines = 0
        # No next item, bail.
        if not ns:
            return (None, lines)
        # Force changes to all objects between
        # start and next sel.
        while o and o != ns:
            lines += o.lines(self.width)
            o = o.next_obj
        return (ns, lines)
    def _iterate_backward(self, start):
        """Mirror of _iterate_forward.

        lines covers every object from the one before start down to, and
        including, prev_sel (note: accumulation happens after stepping).
        """
        ps = start.prev_sel
        o = start
        lines = 0
        # No prev item, bail.
        if not ps:
            return (None, lines)
        # Force changes to all objects between
        # start and prev sel.
        while o and o != ps:
            o = o.prev_obj
            lines += o.lines(self.width)
        return (ps, lines)
    def cmd_rel_set_cursor(self, relidx):
        """Move the selection relidx selectable objects (negative = up),
        tracking the on-screen cursor position along the way."""
        sel = self.callbacks["get_var"]("selected")
        if sel:
            target_idx = sel.sel_offset + relidx
            curpos = sel.curpos
            # Clamp at the top of the list.
            if target_idx < 0:
                target_idx = 0
            while sel.sel_offset != target_idx:
                if target_idx < sel.sel_offset and sel.prev_sel:
                    sel, lines = self._iterate_backward(sel)
                    curpos -= lines
                elif target_idx > sel.sel_offset and sel.next_sel:
                    sel, lines = self._iterate_forward(sel)
                    curpos += lines
                else:
                    # Hit either end of the list before reaching target_idx.
                    break
            self._set_cursor(sel, curpos)
        else:
            # No selection yet: start at the first selectable object.
            self._set_cursor(self.first_sel, 0)
    def _set_cursor(self, item, window_location):
        """Select item and position it at window_location on screen,
        honoring the taglist.cursor type/edge/scroll configuration."""
        # May end up as None
        sel = self.callbacks["get_var"]("selected")
        if sel:
            sel.unselect()
        self.callbacks["set_var"]("selected", item)
        if item:
            conf = self.callbacks["get_conf"]()
            curstyle = conf["taglist"]["cursor"]
            # Convert window position for absolute positioning, edge
            # positioning uses given window_location.
            if curstyle["type"] == "bottom":
                window_location = 0
            elif curstyle["type"] == "middle":
                window_location = int((self.height - 1) / 2)
            elif curstyle["type"] == "top":
                window_location = self.height - 1
            # If the tag header is larger than the edge, the scroll will never
            # be triggered (redraw resets screen position to keep items visible
            # despite the tag header).
            tag = self.tag_by_obj(item)
            wl_top = max(curstyle["edge"], tag.lines(self.width))
            # Similarly, if the current item is larger than the (edge + 1), the
            # scroll won't be triggered, so we take the max edge there too.
            wl_bottom = (self.height - 1) - max(
                curstyle["edge"], item.lines(self.width)
            )
            if window_location > wl_bottom:
                if curstyle["scroll"] == "scroll":
                    window_location = wl_bottom
                elif curstyle["scroll"] == "page":
                    window_location = wl_top
            elif window_location < wl_top:
                if curstyle["scroll"] == "scroll":
                    window_location = wl_top
                elif curstyle["scroll"] == "page":
                    window_location = wl_bottom
            self.callbacks["set_var"]("target_obj", item)
            self.callbacks["set_var"]("target_offset", window_location)
            item.select()
        else:
            # Nothing to select: fall back to the first selectable object.
            self.callbacks["set_var"]("target_obj", self.first_sel)
            if self.first_sel:
                self.callbacks["set_var"]("target_offset", self.first_sel.curpos)
# foritems gets a valid list of items by index.
def cmd_foritems(self, items):
self.got_items = items
def cmd_foritem(self, items):
if len(items) > 0:
self.got_items = [items[0]]
else:
self.got_items = []
# clearitems clears all the items set by foritems.
def cmd_clearitems(self):
log.debug("Clearing ITEMS!")
self.got_items = []
    def cmd_page_up(self):
        """Move the selection (or just the view) up by a screenful."""
        target_offset = self.callbacks["get_var"]("target_offset")
        target_obj = self.callbacks["get_var"]("target_obj")
        sel = self.callbacks["get_var"]("selected")
        # No items, forget about it
        if not target_obj:
            return
        scroll = self.height - 1
        if sel:
            # Walk selection upward until a screenful of lines is consumed.
            while scroll > 0 and sel.prev_sel:
                pstory = sel.prev_sel
                while sel != pstory:
                    scroll -= sel.lines(self.width)
                    sel = sel.prev_obj
            self._set_cursor(sel, target_offset)
        else:
            # No selection: shift the render target object instead.
            while scroll > 0 and target_obj.prev_obj:
                target_obj = target_obj.prev_obj
                scroll -= target_obj.lines(self.width)
            self.callbacks["set_var"]("target_obj", target_obj)
            self.callbacks["set_var"]("target_offset", target_offset)
        self.callbacks["set_var"]("needs_redraw", True)
    def cmd_page_down(self):
        """Move the selection (or just the view) down by a screenful."""
        target_offset = self.callbacks["get_var"]("target_offset")
        target_obj = self.callbacks["get_var"]("target_obj")
        sel = self.callbacks["get_var"]("selected")
        # No items, forget about it.
        if not target_obj:
            return
        scroll = self.height - 1
        if sel:
            # Walk selection downward until a screenful is consumed, but
            # never past an item too tall for the remaining space.
            while scroll > 0 and sel.next_sel:
                if scroll < sel.lines(self.width):
                    break
                nstory = sel.next_sel
                while sel != nstory:
                    scroll -= sel.lines(self.width)
                    sel = sel.next_obj
            self._set_cursor(sel, target_offset)
        else:
            # No selection: shift the render target object instead.
            while scroll > 0 and target_obj.next_obj:
                scroll -= target_obj.lines(self.width)
                if scroll < 0:
                    break
                target_obj = target_obj.next_obj
            self.callbacks["set_var"]("target_obj", target_obj)
            self.callbacks["set_var"]("target_offset", 0)
        self.callbacks["set_var"]("needs_redraw", True)
    def cmd_next_tag(self):
        """Scroll to the first selectable object of the next tag."""
        sel = self.callbacks["get_var"]("selected")
        if not sel:
            return self._set_cursor(self.first_sel, 0)
        target_offset = self.callbacks["get_var"]("target_offset")
        tag = self.tag_by_obj(sel)
        # Advance until we leave the current tag (or hit the end).
        while sel and self.tag_by_obj(sel) == tag:
            if sel.next_sel == None:
                break
            sel = sel.next_sel
        self._set_cursor(sel, target_offset)
    def cmd_prev_tag(self):
        """Scroll to the start of the previous tag (the tag object itself
        when collapsed, otherwise its first story)."""
        sel = self.callbacks["get_var"]("selected")
        if not sel:
            return self._set_cursor(self.first_sel, 0)
        target_offset = self.callbacks["get_var"]("target_offset")
        tag = self.tag_by_obj(sel)
        # Step backward until we leave the current tag (or hit the start).
        while sel and self.tag_by_obj(sel) == tag:
            if sel.prev_sel == None:
                break
            sel = sel.prev_sel
        if sel:
            newtag = self.tag_by_obj(sel)
            if newtag.collapsed:
                sel = newtag
            else:
                sel = newtag[0]
        self._set_cursor(sel, target_offset)
    def cmd_reader(self, items):
        # Open the built-in reader window on the first given item.
        self.callbacks["set_var"]("reader_item", items[0])
        self.callbacks["set_var"]("reader_offset", 0)
        self.callbacks["add_window"](Reader)
    def cmd_promote(self, tags):
        """Move each given tag one slot up in the display order."""
        for tag in tags:
            log.debug("Promoting %s\n", tag.tag)
            # Refetch because a promote call will cause our eval_tag hook to
            # recreate visible_tags.
            visible_tags = self.callbacks["get_var"]("taglist_visible_tags")
            curidx = visible_tags.index(tag)
            # Obviously makes no sense on top tag.
            if curidx == 0:
                return
            # Re-order tags and update internal list order.
            self.callbacks["switch_tags"](tag.tag, visible_tags[curidx - 1].tag)
        self.callbacks["set_var"]("needs_refresh", True)
    def cmd_demote(self, tags):
        """Move each given tag one slot down in the display order."""
        for tag in tags:
            log.debug("Demoting %s\n", tag.tag)
            visible_tags = self.callbacks["get_var"]("taglist_visible_tags")
            # Obviously makes no sense on bottom or only tag.
            if tag == visible_tags[-1] or len(visible_tags) == 1:
                return
            curidx = visible_tags.index(tag)
            self.callbacks["switch_tags"](tag.tag, visible_tags[curidx + 1].tag)
        self.callbacks["set_var"]("needs_refresh", True)
    def _collapse_tag(self, tag):
        """Collapse one tag, moving the selection onto the tag itself if
        the selection was inside it."""
        log.debug("Collapsing %s\n", tag.tag)
        # If we're collapsing the selection, select
        # the tag instead.
        s = self.callbacks["get_var"]("selected")
        if s and s in tag:
            toffset = self.callbacks["get_var"]("target_offset")
            self._set_cursor(tag, toffset)
        self.callbacks["set_tag_opt"](tag.tag, "collapsed", True)
    def cmd_collapse(self, tags):
        for tag in tags:
            self._collapse_tag(tag)
    def _uncollapse_tag(self, tag):
        """Uncollapse one tag, moving the selection onto its first story
        if the tag itself was selected."""
        log.debug("Uncollapsing %s\n", tag.tag)
        # If we're uncollapsing the selected tag,
        # go ahead and select the first item.
        s = self.callbacks["get_var"]("selected")
        if s and tag == s and len(tag) != 0:
            # Shift the cursor below the tag header.
            toffset = self.callbacks["get_var"]("target_offset") + tag.lines(self.width)
            self._set_cursor(tag[0], toffset)
        self.callbacks["set_tag_opt"](tag.tag, "collapsed", False)
    def cmd_uncollapse(self, tags):
        for tag in tags:
            self._uncollapse_tag(tag)
    def cmd_toggle_collapse(self, tags):
        for tag in tags:
            if self.callbacks["get_tag_opt"](tag.tag, "collapsed"):
                self._uncollapse_tag(tag)
            else:
                self._collapse_tag(tag)
    def search(self, regex):
        """Mark every story whose searchable attributes match regex,
        unmark the rest, and request a redraw."""
        try:
            rgx = re.compile(regex)
        except Exception as e:
            # Bad pattern: surface the error to the user rather than raise.
            self.callbacks["set_var"]("error_msg", e)
            return
        story = self.first_story
        terms = self.callbacks["get_opt"]("taglist.search_attributes")
        while story:
            for t in terms:
                # Shouldn't happen unless a search happens before
                # the daemon can respond to the ATTRIBUTES request.
                if t not in story.content:
                    continue
                if rgx.match(story.content[t]):
                    story.mark()
                    break
            else:
                # No attribute matched (for-else): clear any old mark.
                story.unmark()
            story = story.next_story
        self.callbacks["set_var"]("needs_redraw", True)
def cmd_search(self, term):
if not term:
term = self.callbacks["input"]("search:", False)
if not term:
return
rgx = ".*" + re.escape(term) + ".*"
return self.search(rgx)
def cmd_search_regex(self, term):
if not term:
term = self.callbacks["input"]("search-regex:", False)
if not term:
return
return self.search(term)
    def cmd_next_marked(self):
        """Scroll to the next marked story, wrapping to the top."""
        start = self.callbacks["get_var"]("selected")
        # This works for tags and stories alike.
        if start:
            cur = start.next_story
        else:
            start = self.first_story
            cur = start
        # There's nothing to search
        # NOTE(review): if the selection is the *last* story, cur is None
        # here and we return without wrapping to earlier marked items —
        # confirm this is intended.
        if not cur:
            return
        curpos = cur.curpos
        while not cur or not cur.marked:
            # Wrap to top
            if cur == None:
                cur = self.first_story
                curpos = self.first_story.curpos
            else:
                cur, lines = self._iterate_forward(cur)
                curpos += lines
            # Make sure we don't infinite loop.
            if cur == start:
                if not cur.marked:
                    self.callbacks["set_var"]("info_msg", "No marked items.")
                break
        self._set_cursor(cur, curpos)
    def cmd_prev_marked(self):
        """Scroll to the previous marked story, wrapping to the bottom."""
        start = self.callbacks["get_var"]("selected")
        # This works for tags and stories alike.
        if start:
            cur = start.prev_story
        else:
            start = self.last_story
            cur = start
        # There's nothing to search
        if not cur:
            return
        curpos = cur.curpos
        while not cur or not cur.marked:
            # Wrap to bottom
            if cur == None:
                cur = self.last_story
                curpos = self.last_story.curpos
            else:
                cur, lines = self._iterate_backward(cur)
                curpos -= lines
            # Make sure we don't infinite loop.
            if cur == start:
                self.callbacks["set_var"]("info_msg", "No marked items.")
                break
        self._set_cursor(cur, curpos)
def type_user_tag(self):
utags = []
for tag in alltagcores:
if tag.tag.startswith("user:"):
utags.append(tag.tag[5:])
return (utags, lambda x: (True, x))
def type_category(self):
def category_validator(x):
if x.lower() == "none":
return (True, None)
else:
return (True, x)
categories = []
for tag in alltagcores:
if tag.tag.startswith("category:"):
categories.append(tag.tag[9:])
return (categories, category_validator)
    def cmd_categorize(self, category, tags):
        """Add each tag to a category via its extra_tags config."""
        if not category:
            return
        for tag in tags:
            tc = self.callbacks["get_tag_conf"](tag.tag)
            fullcat = "category:" + category
            if fullcat not in tc["extra_tags"]:
                tc["extra_tags"].append(fullcat)
                self.callbacks["set_tag_conf"](tag.tag, tc)
                log.info("%s is now in category %s" % (tag, category))
    def cmd_remove_category(self, category, tags):
        """Remove each tag from a category via its extra_tags config."""
        if not category:
            return
        for tag in tags:
            tc = self.callbacks["get_tag_conf"](tag.tag)
            fullcat = "category:" + category
            if fullcat in tc["extra_tags"]:
                tc["extra_tags"].remove(fullcat)
                self.callbacks["set_tag_conf"](tag.tag, tc)
                log.info("%s is no longer in category %s" % (tag, category))
def cmd_categories(self, tags):
for tag in tags:
tc = self.callbacks["get_tag_conf"](tag.tag)
categories = [x[9:] for x in tc["extra_tags"] if x.startswith("category:")]
if categories == []:
log.info("%s - No categories" % tag)
else:
log.info("%s - %s" % (tag, " ".join(categories)))
popped_cats = []
for tag in alltagcores:
if tag.tag.startswith("category:"):
popped_cats.append(tag.tag[9:])
if popped_cats:
log.info("\nAvailable categories:")
for cat in popped_cats:
log.info(cat)
else:
log.info("\nNo categories available.")
    def cmd_show_category(self, category):
        """Restrict display to one category, or clear the restriction when
        category is falsy (the validator maps "none" to None)."""
        if category:
            # shlex.quote keeps the tag name safe inside the transform string.
            tag_updater.transform(
                "categories", "InTags('" + shlex.quote("category:" + category) + "')"
            )
        else:
            tag_updater.transform("categories", "None")
        tag_updater.update()
def cmd_delfeed(self, tags):
for tag in tags:
if tag.tag.startswith("maintag:"):
self._remote_argv(["canto-remote", "delfeed", tag.tag[8:]])
else:
log.info("tag %s is not a feed tag")
    def update_tag_lists(self):
        """Rebuild self.tags from curtags, drop stale selection/target,
        and recompute offsets for the visible (non-hidden) tags."""
        curtags = self.callbacks["get_var"]("curtags")
        self.tags = []
        # Make sure to honor the order of tags in curtags.
        for tag in curtags:
            for tagobj in alltags:
                if tagobj.tag == tag:
                    self.tags.append(tagobj)
        # If selected is stale (i.e. its tag was deleted, the item should stick
        # around in all other cases) then unset it.
        sel = self.callbacks["get_var"]("selected")
        tobj = self.callbacks["get_var"]("target_obj")
        if sel and (
            (sel.is_tag and sel not in self.tags) or (not sel.is_tag and sel.is_dead)
        ):
            log.debug("Stale selection")
            self.callbacks["set_var"]("selected", None)
        if tobj and (
            (tobj.is_tag and tobj not in self.tags)
            or (not tobj.is_tag and tobj.is_dead)
        ):
            log.debug("Stale target obj")
            self.callbacks["set_var"]("target_obj", None)
            self.callbacks["set_var"]("target_offset", 0)
        hide_empty = self.callbacks["get_opt"]("taglist.hide_empty_tags")
        cur_item_offset = 0
        cur_sel_offset = 0
        t = []
        for i, tag in enumerate(self.tags):
            if hide_empty and len(tag) == 0:
                continue
            # Update index info
            tag.set_item_offset(cur_item_offset)
            tag.set_sel_offset(cur_sel_offset)
            tag.set_tag_offset(i)
            tag.set_visible_tag_offset(len(t))
            # A collapsed tag contributes one selectable object (itself);
            # an open tag contributes one per story.
            if self.callbacks["get_tag_opt"](tag.tag, "collapsed"):
                cur_sel_offset += 1
            else:
                cur_sel_offset += len(tag)
            cur_item_offset += len(tag)
            t.append(tag)
        self.callbacks["set_var"]("taglist_visible_tags", t)
def update_target_obj(self):
# Set initial target_obj if none already set, or if it's stale.
target_obj = self.callbacks["get_var"]("target_obj")
if target_obj:
return
vistags = self.callbacks["get_var"]("taglist_visible_tags")
if vistags:
self.callbacks["set_var"]("target_obj", vistags[0])
self.callbacks["set_var"]("target_offset", 0)
else:
self.callbacks["set_var"]("target_obj", None)
self.callbacks["set_var"]("target_offset", 0)
    # Refresh updates information used to render the objects.
    # Effectively, we build a doubly linked list out of all
    # of the objects by setting obj.prev_obj and obj.next_obj.
    def refresh(self):
        """Rebuild the obj/story/sel linked lists over visible tags and
        their stories, then request a redraw."""
        log.debug("Taglist REFRESH!\n")
        self.update_tag_lists()
        self.update_target_obj()
        self.first_story = None
        prev_obj = None
        prev_story = None
        prev_sel = None
        for tag in self.callbacks["get_var"]("taglist_visible_tags"):
            # Start offscreen; redraw() assigns real positions.
            tag.curpos = self.height
            tag.prev_obj = prev_obj
            tag.next_obj = None
            tag.prev_story = prev_story
            tag.next_story = None
            tag.prev_sel = prev_sel
            tag.next_sel = None
            if prev_obj != None:
                prev_obj.next_obj = tag
            prev_obj = tag
            # Collapsed tags (with items) skip stories.
            if self.callbacks["get_tag_opt"](tag.tag, "collapsed"):
                # The tag itself becomes the selectable object.
                if prev_sel:
                    prev_sel.next_sel = tag
                prev_sel = tag
                continue
            for story in tag:
                story.curpos = self.height
                if not self.first_story:
                    self.first_story = story
                story.prev_obj = prev_obj
                story.next_obj = None
                prev_obj.next_obj = story
                prev_obj = story
                if prev_story != None:
                    prev_story.next_story = story
                story.prev_story = prev_story
                story.next_story = None
                prev_story = story
                # We want next_story to be accessible from all objects, so head
                # back and set it for any without one, even if it wasn't the
                # last story object (i.e. if it's a tag)
                cur = story.prev_obj
                while cur and cur.next_story == None:
                    cur.next_story = story
                    cur = cur.prev_obj
                if prev_sel != None:
                    prev_sel.next_sel = story
                story.prev_sel = prev_sel
                story.next_sel = None
                prev_sel = story
                # Keep track of last story.
                self.last_story = story
        self.callbacks["set_var"]("needs_redraw", True)
    # curpos - position in visible windown, can be negative
    # main_offset - starting line from top of pad
    def _partial_render(self, obj, main_offset, curpos, footer=False):
        """Copy the visible slice of obj's pad (or footpad) into ours.

        Returns the updated (main_offset, curpos) pair.
        """
        lines = obj.pads(self.width)
        pad = obj.pad
        if footer:
            lines = obj.footlines
            pad = obj.footpad
        draw_lines = lines
        # Anything ending above line 0 is entirely offscreen.
        if curpos + lines > 0:
            start = 0
            # If we're crossing the boundary to onscreen
            # trim render window.
            if curpos < 0:
                start = -1 * curpos
                draw_lines += curpos
            # If we're crossing the boundary to offscreen
            # trim render window.
            if main_offset + draw_lines > self.height:
                draw_lines = self.height - main_offset
            if draw_lines:
                pad.overwrite(
                    self.pad,
                    start,
                    0,
                    main_offset,
                    0,
                    main_offset + (draw_lines - 1),
                    self.width - 1,
                )
                return (main_offset + draw_lines, curpos + lines)
        return (main_offset, curpos + lines)
    def redraw(self):
        """Render the taglist: bound the target offset, locate the first
        onscreen object, then render objects (with floating headers and
        footers) until the screen is full."""
        log.debug("Taglist REDRAW (%s)!\n", self.width)
        self.pad.erase()
        target_obj = self.callbacks["get_var"]("target_obj")
        target_offset = self.callbacks["get_var"]("target_offset")
        # Bail if we have no item.
        if target_obj == None:
            self.pad.addstr("All tags empty.")
            self.callbacks["refresh"]()
            return
        # Step 0. Bounding. Make sure we're trying to render the
        # item to a place it's visible.
        # If we're trying to render the target_obj to a screen
        # position less then the length of it's tag header, then
        # we'd overwrite on writing the floating header, so adjust
        # the target_offset.
        if not target_obj.is_tag:
            tag = target_obj.parent_tag
            tl = tag.lines(self.width)
            if target_offset < tl:
                target_offset = tl
        elif target_offset < 0:
            target_offset = 0
        # If we're trying to render too close to the bottom, we also
        # need an adjustment.
        tol = target_obj.lines(self.width)
        if target_offset > ((self.height - 1) - tol):
            target_offset = (self.height - 1) - tol
        # Step 1. Find first object based on target_obj and target_offset,
        # This will cause any changes to be resolved for objects on screen
        # before and including the target object.
        obj = target_obj
        curpos = target_offset
        top_adjusted = False
        while curpos > 0:
            if obj.prev_obj:
                curpos -= obj.prev_obj.lines(self.width)
                obj = obj.prev_obj
            # If there aren't enough items to render before this item and
            # get to the top, adjust offset
            else:
                top_adjusted = True
                target_offset -= curpos
                curpos = 0
        # Step 2. Adjust offset, if necessary, to keep blank space from
        # the bottom of the list. This also causes any changes to be resolved
        # for objects on screen after the target object.
        last_obj = target_obj
        last_off = target_offset
        while last_off < (self.height - 1):
            if last_obj:
                last_off += last_obj.lines(self.width)
                last_obj = last_obj.next_obj
            # Not enough items to render after our item,
            # adjust offset. Unfortunately, this means that
            # we need to refigure out everything above, so
            # we recurse, but as long as we haven't top_adjusted
            # we should only ever have a single level of
            # recursion and none of the refresh work we've done
            # at this level has been wasted.
            elif not top_adjusted:
                rem = (self.height - 1) - last_off
                self.callbacks["set_var"]("target_offset", target_offset + rem)
                self.redraw()
                return
            else:
                break
        # Any adjustments should be reflected.
        self.callbacks["set_var"]("target_offset", target_offset)
        # Step 3. Update self.first_sel. This is useful for making
        # initial selection based on the current screen position.
        # If there are only tags on screen, first_sel could be None
        self.first_sel = obj
        while self.first_sel.is_tag:
            # NOTE(review): this checks obj.tag (the first onscreen object),
            # not self.first_sel.tag — suspicious once first_sel has
            # advanced past obj; confirm intended.
            if self.callbacks["get_tag_opt"](obj.tag, "collapsed"):
                break
            # We use obj instead of sel here because next_sel will only be set
            # if the current object is selectable, which it isn't if it's not
            # collapsed.
            if self.first_sel.next_obj:
                self.first_sel = self.first_sel.next_obj
            else:
                break
        # Step 4. Render.
        rendered_header = False
        w_offset = 0
        while obj != None:
            # Refresh if necessary, update curpos for scrolling.
            obj.lines(self.width)
            obj.curpos = curpos
            # Copy item into window
            w_offset, curpos = self._partial_render(obj, w_offset, curpos)
            # Render floating header, if we've covered enough ground.
            if not rendered_header and curpos > 0:
                tag = self.tag_by_obj(obj)
                if curpos >= tag.lines(self.width):
                    self._partial_render(tag, 0, 0)
                    rendered_header = True
            # If we're at the end of a list, or the next item is a tag we need
            # to render the tag footer for the current tag.
            obj.extra_lines = 0
            if (not obj.next_obj) or obj.next_obj.is_tag:
                if obj.is_tag:
                    tag = obj
                else:
                    tag = self.tag_by_item(obj)
                tag.lines(self.width)
                obj.extra_lines = tag.footlines
                w_offset, curpos = self._partial_render(tag, w_offset, curpos, True)
                # Set this because if we don't have room above the footer for
                # the header (implied by this block executing with
                # rendered_header == False), then actually rendering one looks
                # broken.
                rendered_header = True
            elif (not obj.is_tag) and self.spacing:
                # Inter-story spacing between items of the same tag.
                curpos += self.spacing
                w_offset += self.spacing
                obj.extra_lines += self.spacing
            if w_offset >= self.height:
                break
            obj = obj.next_obj
        self.callbacks["refresh"]()
    def is_input(self):
        # The taglist is never a text-input window.
        return False
    def get_opt_name(self):
        # Config section name for this window's options.
        return "taglist"
    def get_height(self, mheight):
        # Use all of the height/width offered by the screen.
        return mheight
    def get_width(self, mwidth):
        return mwidth
|
config | configfiles | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Configuration files residing on disk."""
import configparser
import contextlib
import enum
import os.path
import pathlib
import re
import sys
import textwrap
import traceback
import types
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
cast,
)
import qutebrowser
import yaml
from qutebrowser.config import config, configdata, configexc, configtypes, configutils
from qutebrowser.keyinput import keyutils
from qutebrowser.qt.core import QObject, QSettings, pyqtSignal, pyqtSlot, qVersion
from qutebrowser.utils import log, qtutils, standarddir, urlmatch, utils, version
if TYPE_CHECKING:
from qutebrowser.misc import savemanager
# The StateConfig instance
state = cast("StateConfig", None)
_SettingsType = Dict[str, Dict[str, Any]]
class VersionChange(enum.Enum):

    """The type of version change when comparing two versions."""

    unknown = enum.auto()
    equal = enum.auto()
    downgrade = enum.auto()
    patch = enum.auto()
    minor = enum.auto()
    major = enum.auto()

    def matches_filter(self, filterstr: str) -> bool:
        """Whether the change matches a given filter.

        This is intended to use filters like "major" (show major only), "minor" (show
        major/minor) or "patch" (show all changes).

        Raises KeyError for an unknown filter string.
        """
        # Filters are nested: each level includes everything the stricter
        # levels include; "never" matches nothing at all.
        ladder = [VersionChange.major, VersionChange.minor, VersionChange.patch]
        accepted: Dict[str, List[VersionChange]] = {
            "never": ladder[:0],
            "major": ladder[:1],
            "minor": ladder[:2],
            "patch": ladder[:3],
        }
        return self in accepted[filterstr]
class StateConfig(configparser.ConfigParser):

    """The "state" file saving various application state."""

    def __init__(self) -> None:
        super().__init__()
        self._filename = os.path.join(standarddir.data(), "state")
        self.read(self._filename, encoding="utf-8")

        # Version-change flags; computed by _set_changed_attributes() below,
        # *before* the current versions are written into the file.
        self.qt_version_changed = False
        self.qtwe_version_changed = False
        self.qutebrowser_version_changed = VersionChange.unknown
        self.chromium_version_changed = VersionChange.unknown
        self._set_changed_attributes()

        # Make sure the sections we rely on exist, even for a fresh file.
        for sect in ["general", "geometry", "inspector"]:
            try:
                self.add_section(sect)
            except configparser.DuplicateSectionError:
                pass

        # Drop keys written by older qutebrowser versions.
        deleted_keys = [
            ("general", "fooled"),
            ("general", "backend-warning-shown"),
            ("general", "old-qt-warning-shown"),
            ("general", "serviceworker_workaround"),
            ("geometry", "inspector"),
        ]
        for sect, key in deleted_keys:
            self[sect].pop(key, None)

        # Record the current versions so the next start can diff against them.
        qt_version = qVersion()
        assert qt_version is not None
        self["general"]["qt_version"] = qt_version
        self["general"]["qtwe_version"] = self._qtwe_version_str()
        self["general"]["chromium_version"] = self._chromium_version_str()
        self["general"]["version"] = qutebrowser.__version__

    def _has_webengine(self) -> bool:
        """Check if QtWebEngine is available.

        Note that it's too early to use objects.backend here...
        """
        try:
            # pylint: disable=unused-import,redefined-outer-name
            import qutebrowser.qt.webenginewidgets
        except ImportError:
            return False
        return True

    def _qtwe_versions(self) -> Optional[version.WebEngineVersions]:
        """Get the QtWebEngine versions, or None without QtWebEngine."""
        if not self._has_webengine():
            return None
        return version.qtwebengine_versions(avoid_init=True)

    def _qtwe_version_str(self) -> str:
        """Get the QtWebEngine version string."""
        versions = self._qtwe_versions()
        if versions is None:
            # The literal "no" is stored in the state file to mean
            # "no QtWebEngine available".
            return "no"
        return str(versions.webengine)

    def _chromium_version_str(self) -> str:
        """Get the Chromium major version string."""
        versions = self._qtwe_versions()
        if versions is None:
            return "no"
        return str(versions.chromium_major)

    def _set_changed_attributes(self) -> None:
        """Set qt_version_changed/qutebrowser_version_changed attributes.

        We handle this here, so we can avoid setting qt_version_changed if
        the config is brand new, but can still set it when qt_version wasn't
        there before...
        """
        if "general" not in self:
            # Brand-new state file: nothing to compare against.
            return
        old_qt_version = self["general"].get("qt_version", None)
        self.qt_version_changed = old_qt_version != qVersion()
        old_qtwe_version = self["general"].get("qtwe_version", None)
        self.qtwe_version_changed = old_qtwe_version != self._qtwe_version_str()
        self._set_qutebrowser_changed_attribute()
        self._set_chromium_changed_attribute()

    def _set_qutebrowser_changed_attribute(self) -> None:
        """Detect a qutebrowser version change."""
        old_qutebrowser_version = self["general"].get("version", None)
        if old_qutebrowser_version is None:
            return
        try:
            old_version = utils.VersionNumber.parse(old_qutebrowser_version)
        except ValueError:
            log.init.warning(f"Unable to parse old version {old_qutebrowser_version}")
            return
        new_version = utils.VersionNumber.parse(qutebrowser.__version__)
        if old_version == new_version:
            self.qutebrowser_version_changed = VersionChange.equal
        elif new_version < old_version:
            self.qutebrowser_version_changed = VersionChange.downgrade
        elif old_version.segments[:2] == new_version.segments[:2]:
            # Same major.minor -> only the patch level changed.
            self.qutebrowser_version_changed = VersionChange.patch
        elif old_version.major == new_version.major:
            self.qutebrowser_version_changed = VersionChange.minor
        else:
            self.qutebrowser_version_changed = VersionChange.major

    def _set_chromium_changed_attribute(self) -> None:
        """Detect a Chromium major version change (QtWebEngine only)."""
        if not self._has_webengine():
            return
        old_chromium_version_str = self["general"].get("chromium_version", None)
        if old_chromium_version_str in ["no", None]:
            # No Chromium version stored yet: try to derive it from the
            # stored QtWebEngine version instead.
            old_qtwe_version = self["general"].get("qtwe_version", None)
            if old_qtwe_version in ["no", None]:
                return
            try:
                old_chromium_version = version.WebEngineVersions.from_webengine(
                    old_qtwe_version, source="config"
                ).chromium_major
            except ValueError:
                log.init.warning(
                    f"Unable to parse old QtWebEngine version {old_qtwe_version}"
                )
                return
        else:
            try:
                old_chromium_version = int(old_chromium_version_str)
            except ValueError:
                log.init.warning(
                    f"Unable to parse old Chromium version {old_chromium_version_str}"
                )
                return
        new_versions = version.qtwebengine_versions(avoid_init=True)
        new_chromium_version = new_versions.chromium_major
        if old_chromium_version is None or new_chromium_version is None:
            return
        if old_chromium_version <= 87 and new_chromium_version >= 90:  # Qt 5 -> Qt 6
            self.chromium_version_changed = VersionChange.major
        elif old_chromium_version > new_chromium_version:
            self.chromium_version_changed = VersionChange.downgrade
        elif old_chromium_version == new_chromium_version:
            self.chromium_version_changed = VersionChange.equal
        else:
            self.chromium_version_changed = VersionChange.minor

    def init_save_manager(self, save_manager: "savemanager.SaveManager") -> None:
        """Make sure the config gets saved properly.

        We do this outside of __init__ because the config gets created before
        the save_manager exists.
        """
        save_manager.add_saveable("state-config", self._save)

    def _save(self) -> None:
        """Save the state file to the configured location."""
        with open(self._filename, "w", encoding="utf-8") as f:
            self.write(f)
class YamlConfig(QObject):

    """A config stored on disk as YAML file.

    Class attributes:
        VERSION: The current version number of the config file.
    """

    VERSION = 2
    changed = pyqtSignal()

    def __init__(self, parent: Optional[QObject] = None) -> None:
        super().__init__(parent)
        self._filename = os.path.join(standarddir.config(auto=True), "autoconfig.yml")
        # Only write the file back when something actually changed.
        self._dirty = False
        # Maps option names to their (possibly pattern-scoped) values.
        self._values: Dict[str, configutils.Values] = {}
        for name, opt in configdata.DATA.items():
            self._values[name] = configutils.Values(opt)

    def init_save_manager(self, save_manager: "savemanager.SaveManager") -> None:
        """Make sure the config gets saved properly.

        We do this outside of __init__ because the config gets created before
        the save_manager exists.
        """
        save_manager.add_saveable("yaml-config", self._save, self.changed)

    def __iter__(self) -> Iterator[configutils.Values]:
        """Iterate over configutils.Values items."""
        yield from self._values.values()

    @pyqtSlot()
    def _mark_changed(self) -> None:
        """Mark the YAML config as changed."""
        self._dirty = True
        self.changed.emit()

    def _save(self) -> None:
        """Save the settings to the YAML file if they've changed."""
        if not self._dirty:
            return
        settings: _SettingsType = {}
        for name, values in sorted(self._values.items()):
            if not values:
                continue
            settings[name] = {}
            for scoped in values:
                # A value without a URL pattern applies globally.
                key = "global" if scoped.pattern is None else str(scoped.pattern)
                settings[name][key] = scoped.value
        data = {"config_version": self.VERSION, "settings": settings}
        with qtutils.savefile_open(self._filename) as f:
            f.write(
                textwrap.dedent(
                    """
            # If a config.py file exists, this file is ignored unless it's explicitly loaded
            # via config.load_autoconfig(). For more information, see:
            # https://github.com/qutebrowser/qutebrowser/blob/main/doc/help/configuring.asciidoc#loading-autoconfigyml
            # DO NOT edit this file by hand, qutebrowser will overwrite it.
            # Instead, create a config.py - see :help for details.
            """.lstrip("\n")
                )
            )
            utils.yaml_dump(data, f)

    def _pop_object(self, yaml_data: Any, key: str, typ: type) -> Any:
        """Get a global object from the given data."""
        if not isinstance(yaml_data, dict):
            desc = configexc.ConfigErrorDesc(
                "While loading data", "Toplevel object is not a dict"
            )
            raise configexc.ConfigFileErrors("autoconfig.yml", [desc])
        if key not in yaml_data:
            desc = configexc.ConfigErrorDesc(
                "While loading data",
                "Toplevel object does not contain '{}' key".format(key),
            )
            raise configexc.ConfigFileErrors("autoconfig.yml", [desc])
        data = yaml_data.pop(key)
        if not isinstance(data, typ):
            desc = configexc.ConfigErrorDesc(
                "While loading data",
                "'{}' object is not a {}".format(key, typ.__name__),
            )
            raise configexc.ConfigFileErrors("autoconfig.yml", [desc])
        return data

    def load(self) -> None:
        """Load configuration from the configured YAML file."""
        try:
            with open(self._filename, "r", encoding="utf-8") as f:
                yaml_data = utils.yaml_load(f)
        except FileNotFoundError:
            # A missing file is fine - nothing was configured yet.
            return
        except OSError as e:
            desc = configexc.ConfigErrorDesc("While reading", e)
            raise configexc.ConfigFileErrors("autoconfig.yml", [desc])
        except yaml.YAMLError as e:
            desc = configexc.ConfigErrorDesc("While parsing", e)
            raise configexc.ConfigFileErrors("autoconfig.yml", [desc])
        config_version = self._pop_object(yaml_data, "config_version", int)
        if config_version == 1:
            # v1 layout: convert and mark dirty so the new layout is written.
            settings = self._load_legacy_settings_object(yaml_data)
            self._mark_changed()
        elif config_version > self.VERSION:
            desc = configexc.ConfigErrorDesc(
                "While reading", "Can't read config from incompatible newer version"
            )
            raise configexc.ConfigFileErrors("autoconfig.yml", [desc])
        else:
            settings = self._load_settings_object(yaml_data)
            self._dirty = False
        migrations = YamlMigrations(settings, parent=self)
        migrations.changed.connect(self._mark_changed)
        migrations.migrate()
        self._validate_names(settings)
        self._build_values(settings)

    def _load_settings_object(self, yaml_data: Any) -> _SettingsType:
        """Load the settings from the settings: key."""
        return self._pop_object(yaml_data, "settings", dict)

    def _load_legacy_settings_object(self, yaml_data: Any) -> _SettingsType:
        """Convert a v1 'global' mapping into the v2 per-pattern layout."""
        data = self._pop_object(yaml_data, "global", dict)
        settings = {}
        for name, value in data.items():
            settings[name] = {"global": value}
        return settings

    def _build_values(self, settings: Mapping[str, Any]) -> None:
        """Build up self._values from the values in the given dict."""
        errors = []
        for name, yaml_values in settings.items():
            if not isinstance(yaml_values, dict):
                errors.append(
                    configexc.ConfigErrorDesc(
                        "While parsing {!r}".format(name), "value is not a dict"
                    )
                )
                continue
            values = configutils.Values(configdata.DATA[name])
            if "global" in yaml_values:
                values.add(yaml_values.pop("global"))
            for pattern, value in yaml_values.items():
                if not isinstance(pattern, str):
                    errors.append(
                        configexc.ConfigErrorDesc(
                            "While parsing {!r}".format(name),
                            "pattern is not of type string",
                        )
                    )
                    continue
                try:
                    urlpattern = urlmatch.UrlPattern(pattern)
                except urlmatch.ParseError as e:
                    errors.append(
                        configexc.ConfigErrorDesc(
                            "While parsing pattern {!r} for {!r}".format(pattern, name),
                            e,
                        )
                    )
                    continue
                values.add(value, urlpattern)
            self._values[name] = values
        # All errors are collected first so one bad entry doesn't hide others.
        if errors:
            raise configexc.ConfigFileErrors("autoconfig.yml", errors)

    def _validate_names(self, settings: _SettingsType) -> None:
        """Make sure all settings exist."""
        unknown = []
        for name in settings:
            if name not in configdata.DATA:
                unknown.append(name)
        if unknown:
            errors = [
                configexc.ConfigErrorDesc(
                    "While loading options", "Unknown option {}".format(e)
                )
                for e in sorted(unknown)
            ]
            raise configexc.ConfigFileErrors("autoconfig.yml", errors)

    def set_obj(
        self, name: str, value: Any, *, pattern: Optional[urlmatch.UrlPattern] = None
    ) -> None:
        """Set the given setting to the given value."""
        self._values[name].add(value, pattern)
        self._mark_changed()

    def unset(self, name: str, *, pattern: Optional[urlmatch.UrlPattern] = None) -> None:
        """Remove the given option name if it's configured."""
        changed = self._values[name].remove(pattern)
        if changed:
            self._mark_changed()

    def clear(self) -> None:
        """Clear all values from the YAML file."""
        for values in self._values.values():
            values.clear()
        self._mark_changed()
class YamlMigrations(QObject):

    """Automated migrations for autoconfig.yml."""

    # Emitted whenever a migration actually modified the settings dict.
    changed = pyqtSignal()

    # Note: settings is Any because it's not validated yet.
    def __init__(self, settings: Any, parent: Optional[QObject] = None) -> None:
        super().__init__(parent)
        self._settings = settings

    def migrate(self) -> None:
        """Migrate older configs to the newest format."""
        self._migrate_configdata()
        self._migrate_bindings_default()
        self._migrate_font_default_family()
        self._migrate_font_replacements()
        self._migrate_bool("tabs.favicons.show", "always", "never")
        self._migrate_bool("scrolling.bar", "always", "overlay")
        self._migrate_bool("qt.force_software_rendering", "software-opengl", "none")
        self._migrate_renamed_bool(
            old_name="content.webrtc_public_interfaces_only",
            new_name="content.webrtc_ip_handling_policy",
            true_value="default-public-interface-only",
            false_value="all-interfaces",
        )
        self._migrate_renamed_bool(
            old_name="tabs.persist_mode_on_change",
            new_name="tabs.mode_on_change",
            true_value="persist",
            false_value="normal",
        )
        self._migrate_renamed_bool(
            old_name="statusbar.hide",
            new_name="statusbar.show",
            true_value="never",
            false_value="always",
        )
        self._migrate_renamed_bool(
            old_name="content.ssl_strict",
            new_name="content.tls.certificate_errors",
            true_value="block",
            false_value="load-insecurely",
            ask_value="ask",
        )
        self._migrate_renamed_bool(
            old_name="content.javascript.can_access_clipboard",
            new_name="content.javascript.clipboard",
            true_value="access",
            false_value="none",
        )
        for setting in [
            "colors.webpage.force_dark_color_scheme",
            "colors.webpage.prefers_color_scheme_dark",
        ]:
            self._migrate_renamed_bool(
                old_name=setting,
                new_name="colors.webpage.preferred_color_scheme",
                true_value="dark",
                false_value="auto",
            )
        for setting in [
            "tabs.title.format",
            "tabs.title.format_pinned",
            "window.title_format",
        ]:
            self._migrate_string_value(
                setting, r"(?<!{)\{title\}(?!})", r"{current_title}"
            )
        self._migrate_to_multiple(
            "fonts.tabs", ("fonts.tabs.selected", "fonts.tabs.unselected")
        )
        self._migrate_to_multiple(
            "content.media_capture",
            (
                "content.media.audio_capture",
                "content.media.audio_video_capture",
                "content.media.video_capture",
            ),
        )
        # content.headers.user_agent can't be empty to get the default anymore.
        setting = "content.headers.user_agent"
        self._migrate_none(setting, configdata.DATA[setting].default)
        self._remove_empty_patterns()

    def _migrate_configdata(self) -> None:
        """Migrate simple renamed/deleted options."""
        # Iterate over a copy since the dict is mutated while migrating.
        for name in list(self._settings):
            if name in configdata.MIGRATIONS.renamed:
                new_name = configdata.MIGRATIONS.renamed[name]
                log.config.debug("Renaming {} to {}".format(name, new_name))
                self._settings[new_name] = self._settings[name]
                del self._settings[name]
                self.changed.emit()
            elif name in configdata.MIGRATIONS.deleted:
                log.config.debug("Removing {}".format(name))
                del self._settings[name]
                self.changed.emit()

    def _migrate_bindings_default(self) -> None:
        """bindings.default can't be set in autoconfig.yml anymore.

        => Ignore old values.
        """
        if "bindings.default" not in self._settings:
            return
        del self._settings["bindings.default"]
        self.changed.emit()

    def _migrate_font_default_family(self) -> None:
        """Migrate fonts.monospace to the newer fonts.default_family."""
        old_name = "fonts.monospace"
        new_name = "fonts.default_family"
        if old_name not in self._settings:
            return
        # The old implicit default font list gets stripped from the value;
        # whatever remains is the user's own font choice.
        old_default_fonts = (
            'Monospace, "DejaVu Sans Mono", Monaco, '
            '"Bitstream Vera Sans Mono", "Andale Mono", "Courier New", '
            'Courier, "Liberation Mono", monospace, Fixed, Consolas, Terminal'
        )
        self._settings[new_name] = {}
        for scope, val in self._settings[old_name].items():
            old_fonts = val.replace(old_default_fonts, "").rstrip(" ,")
            new_fonts = configutils.FontFamilies.from_str(old_fonts)
            self._settings[new_name][scope] = list(new_fonts)
        del self._settings[old_name]
        self.changed.emit()

    def _migrate_font_replacements(self) -> None:
        """Replace 'monospace' replacements by 'default_family'."""
        for name, values in self._settings.items():
            if not isinstance(values, dict):
                continue
            try:
                opt = configdata.DATA[name]
            except KeyError:
                continue
            if not isinstance(opt.typ, configtypes.FontBase):
                continue
            for scope, val in values.items():
                if isinstance(val, str) and val.endswith(" monospace"):
                    new_val = val.replace("monospace", "default_family")
                    self._settings[name][scope] = new_val
                    self.changed.emit()

    def _migrate_bool(self, name: str, true_value: str, false_value: str) -> None:
        """Migrate a formerly-boolean setting to string values, in place."""
        if name not in self._settings:
            return
        values = self._settings[name]
        if not isinstance(values, dict):
            # Malformed entry - leave it for validation to complain about.
            return
        for scope, val in values.items():
            if isinstance(val, bool):
                new_value = true_value if val else false_value
                self._settings[name][scope] = new_value
                self.changed.emit()

    def _migrate_renamed_bool(
        self,
        old_name: str,
        new_name: str,
        true_value: str,
        false_value: str,
        ask_value: Optional[str] = None,
    ) -> None:
        """Migrate a renamed boolean setting to string values."""
        if old_name not in self._settings:
            return
        self._settings[new_name] = {}
        for scope, val in self._settings[old_name].items():
            if val == "ask":
                # Only valid for settings which pass an ask_value.
                assert ask_value is not None
                new_value = ask_value
            elif val:
                new_value = true_value
            else:
                new_value = false_value
            self._settings[new_name][scope] = new_value
        del self._settings[old_name]
        self.changed.emit()

    def _migrate_none(self, name: str, value: str) -> None:
        """Replace None values for the given setting by the given value."""
        if name not in self._settings:
            return
        values = self._settings[name]
        if not isinstance(values, dict):
            return
        for scope, val in values.items():
            if val is None:
                self._settings[name][scope] = value
                self.changed.emit()

    def _migrate_to_multiple(self, old_name: str, new_names: Iterable[str]) -> None:
        """Copy one old setting's values to several new settings."""
        if old_name not in self._settings:
            return
        for new_name in new_names:
            self._settings[new_name] = {}
            for scope, val in self._settings[old_name].items():
                self._settings[new_name][scope] = val
        del self._settings[old_name]
        self.changed.emit()

    def _migrate_string_value(self, name: str, source: str, target: str) -> None:
        """Regex-replace `source` by `target` in all string values of `name`."""
        if name not in self._settings:
            return
        values = self._settings[name]
        if not isinstance(values, dict):
            return
        for scope, val in values.items():
            if isinstance(val, str):
                new_val = re.sub(source, target, val)
                if new_val != val:
                    self._settings[name][scope] = new_val
                    self.changed.emit()

    def _remove_empty_patterns(self) -> None:
        """Remove *. host patterns from the config.

        Those used to be valid (and could be accidentally produced by using tSH
        on about:blank), but aren't anymore.
        """
        scope = "*://*./*"
        for name, values in self._settings.items():
            if not isinstance(values, dict):
                continue
            if scope in values:
                del self._settings[name][scope]
                self.changed.emit()
class ConfigAPI:

    """Object which gets passed to config.py as "config" object.

    This is a small wrapper over the Config object, but with more
    straightforward method names (get/set call get_obj/set_obj) and a more
    shallow API.

    Attributes:
        _config: The main Config object to use.
        _keyconfig: The KeyConfig object.
        _warn_autoconfig: Whether to warn if autoconfig.yml wasn't loaded.
        errors: Errors which occurred while setting options.
        configdir: The qutebrowser config directory, as pathlib.Path.
        datadir: The qutebrowser data directory, as pathlib.Path.
    """

    def __init__(
        self,
        conf: config.Config,
        keyconfig: config.KeyConfig,
        warn_autoconfig: bool,
    ):
        self._config = conf
        self._keyconfig = keyconfig
        self.errors: List[configexc.ConfigErrorDesc] = []
        self.configdir = pathlib.Path(standarddir.config())
        self.datadir = pathlib.Path(standarddir.data())
        self._warn_autoconfig = warn_autoconfig

    @contextlib.contextmanager
    def _handle_error(self, action: str) -> Iterator[None]:
        """Catch config-related exceptions and save them in self.errors."""
        try:
            yield
        except configexc.ConfigFileErrors as e:
            # Flatten errors from a nested config file into self.errors.
            for err in e.errors:
                new_err = err.with_text(e.basename)
                self.errors.append(new_err)
        except configexc.Error as e:
            text = f"While {action}"
            self.errors.append(configexc.ConfigErrorDesc(text, e))
        except urlmatch.ParseError as e:
            text = f"While {action} and parsing pattern"
            self.errors.append(configexc.ConfigErrorDesc(text, e))
        except keyutils.KeyParseError as e:
            text = f"While {action} and parsing key"
            self.errors.append(configexc.ConfigErrorDesc(text, e))

    def finalize(self) -> None:
        """Do work which needs to be done after reading config.py."""
        if self._warn_autoconfig:
            desc = configexc.ConfigErrorDesc(
                "autoconfig loading not specified",
                (
                    "Your config.py should call either `config.load_autoconfig()`"
                    " (to load settings configured via the GUI) or "
                    "`config.load_autoconfig(False)` (to not do so)"
                ),
            )
            self.errors.append(desc)
        with self._handle_error("updating mutated values"):
            self._config.update_mutables()

    def load_autoconfig(self, load_config: bool = True) -> None:
        """Load the autoconfig.yml file which is used for :set/:bind/etc."""
        # Calling this at all (with either argument) silences the warning.
        self._warn_autoconfig = False
        if load_config:
            with self._handle_error("reading 'autoconfig.yml'"):
                read_autoconfig()

    def get(self, name: str, pattern: Optional[str] = None) -> Any:
        """Get a setting value from the config, optionally with a pattern."""
        with self._handle_error(f"getting '{name}'"):
            urlpattern = urlmatch.UrlPattern(pattern) if pattern else None
            return self._config.get_mutable_obj(name, pattern=urlpattern)

    def set(self, name: str, value: Any, pattern: Optional[str] = None) -> None:
        """Set a setting value in the config, optionally with a pattern."""
        with self._handle_error(f"setting '{name}'"):
            urlpattern = urlmatch.UrlPattern(pattern) if pattern else None
            self._config.set_obj(name, value, pattern=urlpattern)

    def bind(self, key: str, command: Optional[str], mode: str = "normal") -> None:
        """Bind a key to a command, with an optional key mode."""
        with self._handle_error(f"binding '{key}'"):
            seq = keyutils.KeySequence.parse(key)
            if command is None:
                raise configexc.Error(
                    "Can't bind {key} to None (maybe you "
                    "want to use config.unbind('{key}') "
                    "instead?)".format(key=key)
                )
            self._keyconfig.bind(seq, command, mode=mode)

    def unbind(self, key: str, mode: str = "normal") -> None:
        """Unbind a key from a command, with an optional key mode."""
        with self._handle_error(f"unbinding '{key}'"):
            seq = keyutils.KeySequence.parse(key)
            self._keyconfig.unbind(seq, mode=mode)

    def source(self, filename: str) -> None:
        """Read the given config file from disk."""
        if not os.path.isabs(filename):
            # We don't use self.configdir here so we get the proper file when starting
            # with a --config-py argument given.
            filename = os.path.join(os.path.dirname(standarddir.config_py()), filename)
        try:
            read_config_py(filename)
        except configexc.ConfigFileErrors as e:
            self.errors += e.errors

    @contextlib.contextmanager
    def pattern(self, pattern: str) -> Iterator[config.ConfigContainer]:
        """Get a ConfigContainer for the given pattern."""
        # We need to propagate the exception so we don't need to return
        # something.
        urlpattern = urlmatch.UrlPattern(pattern)
        container = config.ConfigContainer(
            config=self._config, configapi=self, pattern=urlpattern
        )
        yield container
class ConfigPyWriter:

    """Writer for config.py files from given settings.

    With commented=True, every generated line is emitted as a comment so
    the user can opt in selectively.
    """

    def __init__(
        self,
        options: List[Tuple[Optional[urlmatch.UrlPattern], configdata.Option, Any]],
        bindings: MutableMapping[str, Mapping[str, Optional[str]]],
        *,
        commented: bool,
    ) -> None:
        self._options = options
        self._bindings = bindings
        self._commented = commented

    def write(self, filename: str) -> None:
        """Write the config to the given file."""
        text = "\n".join(self._gen_lines())
        with open(filename, "w", encoding="utf-8") as f:
            f.write(text)

    def _line(self, line: str) -> str:
        """Get an (optionally commented) line."""
        if not self._commented:
            return line
        # Lines which are already comments only get an extra '#', without a
        # space, so they round-trip cleanly.
        prefix = "#" if line.startswith("#") else "# "
        return prefix + line

    def _gen_lines(self) -> Iterator[str]:
        """Generate a config.py with the given settings/bindings.

        Yields individual lines.
        """
        yield from self._gen_header()
        yield from self._gen_options()
        yield from self._gen_bindings()

    def _gen_header(self) -> Iterator[str]:
        """Generate the initial header of the config."""
        yield self._line("# Autogenerated config.py")
        yield self._line("#")
        note = (
            "NOTE: config.py is intended for advanced users who are "
            "comfortable with manually migrating the config file on "
            "qutebrowser upgrades. If you prefer, you can also configure "
            "qutebrowser using the :set/:bind/:config-* commands without "
            "having to write a config.py file."
        )
        for wrapped in textwrap.wrap(note):
            yield self._line(f"# {wrapped}")
        yield self._line("#")
        yield self._line("# Documentation:")
        yield self._line("#   qute://help/configuring.html")
        yield self._line("#   qute://help/settings.html")
        yield ""
        if self._commented:
            # When generated from an autoconfig.yml with commented=False,
            # we don't want to load that autoconfig.yml anymore.
            yield self._line(
                "# This is here so configs done via the GUI are still loaded."
            )
            yield self._line("# Remove it to not load settings done via the GUI.")
            yield self._line("config.load_autoconfig(True)")
        else:
            yield self._line(
                "# Change the argument to True to still load settings "
                "configured via autoconfig.yml"
            )
            yield self._line("config.load_autoconfig(False)")
        yield ""

    def _gen_options(self) -> Iterator[str]:
        """Generate the options part of the config."""
        for pattern, opt, value in self._options:
            # Bindings are handled separately by _gen_bindings().
            if opt.name in ("bindings.commands", "bindings.default"):
                continue
            for descline in textwrap.wrap(opt.description):
                yield self._line(f"# {descline}")
            yield self._line(f"# Type: {opt.typ.get_name()}")
            valid_values = opt.typ.get_valid_values()
            if valid_values is not None and valid_values.generate_docs:
                yield self._line("# Valid values:")
                for val in valid_values:
                    if val in valid_values.descriptions:
                        desc = valid_values.descriptions[val]
                        yield self._line(f"#   - {val}: {desc}")
                    else:
                        yield self._line(f"#   - {val}")
            if pattern is None:
                yield self._line(f"c.{opt.name} = {value!r}")
            else:
                yield self._line(
                    f"config.set({opt.name!r}, {value!r}, {str(pattern)!r})"
                )
            yield ""

    def _gen_bindings(self) -> Iterator[str]:
        """Generate the bindings part of the config."""
        # Normal-mode bindings come first, without an explicit mode argument.
        normal = self._bindings.pop("normal", {})
        if normal:
            yield self._line("# Bindings for normal mode")
            for key, command in sorted(normal.items()):
                if command is None:
                    yield self._line(f"config.unbind({key!r})")
                else:
                    yield self._line(f"config.bind({key!r}, {command!r})")
            yield ""
        for mode, mode_bindings in sorted(self._bindings.items()):
            yield self._line(f"# Bindings for {mode} mode")
            for key, command in sorted(mode_bindings.items()):
                if command is None:
                    yield self._line(f"config.unbind({key!r}, mode={mode!r})")
                else:
                    yield self._line(
                        f"config.bind({key!r}, {command!r}, mode={mode!r})"
                    )
            yield ""
def read_config_py(
    filename: str,
    raising: bool = False,
    warn_autoconfig: bool = False,
) -> None:
    """Read a config.py file.

    Args:
        filename: The name of the file to read.
        raising: Raise exceptions happening in config.py.
                 This is needed during tests to use pytest's inspection.
        warn_autoconfig: Whether to warn if config.load_autoconfig() wasn't specified.

    Raises:
        configexc.ConfigFileErrors: On read/compile failures, or (after
            finalizing) when any errors were collected while executing.
    """
    assert config.instance is not None
    assert config.key_instance is not None
    api = ConfigAPI(
        config.instance,
        config.key_instance,
        warn_autoconfig=warn_autoconfig,
    )
    container = config.ConfigContainer(config.instance, configapi=api)
    basename = os.path.basename(filename)
    # Fake "config" module: config.py sees `config` (the API) and `c` (the
    # container) as globals while it executes.
    module = types.ModuleType("config")
    module.config = api  # type: ignore[attr-defined]
    module.c = container  # type: ignore[attr-defined]
    module.__file__ = filename
    try:
        with open(filename, mode="rb") as f:
            source = f.read()
    except OSError as e:
        text = "Error while reading {}".format(basename)
        desc = configexc.ConfigErrorDesc(text, e)
        # Chain explicitly so the original cause survives (B904).
        raise configexc.ConfigFileErrors(basename, [desc]) from e
    try:
        code = compile(source, filename, "exec")
    except ValueError as e:
        # source contains NUL bytes
        desc = configexc.ConfigErrorDesc("Error while compiling", e)
        raise configexc.ConfigFileErrors(basename, [desc]) from e
    except SyntaxError as e:
        desc = configexc.ConfigErrorDesc(
            "Unhandled exception", e, traceback=traceback.format_exc()
        )
        raise configexc.ConfigFileErrors(basename, [desc]) from e
    try:
        # Save and restore sys variables
        with saved_sys_properties():
            # Add config directory to python path, so config.py can import
            # other files in logical places
            config_dir = os.path.dirname(filename)
            if config_dir not in sys.path:
                sys.path.insert(0, config_dir)
            exec(code, module.__dict__)
    except Exception as e:
        if raising:
            raise
        # Collected instead of raised immediately so finalize() still runs.
        api.errors.append(
            configexc.ConfigErrorDesc(
                "Unhandled exception", exception=e, traceback=traceback.format_exc()
            )
        )
    api.finalize()
    config.instance.config_py_loaded = True
    if api.errors:
        raise configexc.ConfigFileErrors("config.py", api.errors)
def read_autoconfig() -> None:
    """Read the autoconfig.yml file.

    Raises:
        configexc.ConfigFileErrors: On any config error while reading.
    """
    try:
        config.instance.read_yaml()
    except configexc.ConfigFileErrors:
        raise  # caught in outer block
    except configexc.Error as e:
        desc = configexc.ConfigErrorDesc("Error", e)
        # Chain explicitly so the original cause survives (B904).
        raise configexc.ConfigFileErrors("autoconfig.yml", [desc]) from e
@contextlib.contextmanager
def saved_sys_properties() -> Iterator[None]:
    """Save various sys properties such as sys.path and sys.modules.

    On exit, sys.path is restored to its previous value and any modules
    imported inside the block are removed from sys.modules again.
    """
    saved_path = sys.path.copy()
    saved_module_names = set(sys.modules)
    try:
        yield
    finally:
        sys.path = saved_path
        # Snapshot the new names first: deleting while iterating is an error.
        added = [name for name in sys.modules if name not in saved_module_names]
        for name in added:
            del sys.modules[name]
def init() -> None:
    """Initialize config storage not related to the main config.

    Raises:
        configexc.ConfigFileErrors: (fatal) if the state file can't be parsed.
    """
    global state
    try:
        state = StateConfig()
    except (configparser.Error, UnicodeDecodeError) as e:
        msg = "While loading state file from {}".format(standarddir.data())
        desc = configexc.ConfigErrorDesc(msg, e)
        # Chain explicitly so the original cause survives (B904).
        raise configexc.ConfigFileErrors("state", [desc], fatal=True) from e
    # Set the QSettings path to something like
    # ~/.config/qutebrowser/qsettings/qutebrowser/qutebrowser.conf so it
    # doesn't overwrite our config.
    #
    # This fixes one of the corruption issues here:
    # https://github.com/qutebrowser/qutebrowser/issues/515
    path = os.path.join(standarddir.config(auto=True), "qsettings")
    for fmt in [QSettings.Format.NativeFormat, QSettings.Format.IniFormat]:
        QSettings.setPath(fmt, QSettings.Scope.UserScope, path)
|
qltk | information | # Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
# 2016-2022 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import time
from collections import defaultdict
from gi.repository import Gtk, Pango
from quodlibet import _, app, ngettext, qltk, util
from quodlibet.formats import PEOPLE
from quodlibet.qltk.bookmarks import EditBookmarksPane
from quodlibet.qltk.cover import CoverImage
from quodlibet.qltk.lyrics import LyricsPane
from quodlibet.qltk.window import PersistentWindowMixin, Window
from quodlibet.qltk.x import Align
from quodlibet.util import connect_destroy, tag
from quodlibet.util.i18n import numeric_phrase
from quodlibet.util.path import filesize, unexpand
from quodlibet.util.tags import readable
from senf import fsn2text
def Label(label=None, markup=None, ellipsize=False):
    """Build a selectable, top-left aligned Gtk.Label.

    Either plain *label* text or Pango *markup* may be passed; a truthy
    *markup* takes precedence. With *ellipsize*, overlong text is cut off
    with a trailing ellipsis instead of widening the widget.
    """
    if markup:
        widget = Gtk.Label()
        widget.set_markup(markup)
    else:
        widget = Gtk.Label(label=label)
    widget.set_selectable(True)
    widget.set_alignment(0, 0)
    if ellipsize:
        widget.set_ellipsize(Pango.EllipsizeMode.END)
    return widget
class TitleLabel(Gtk.Label):
    """A large heading label: xx-large text, selectable, end-ellipsized."""

    def __init__(self, text, is_markup=False):
        super().__init__()
        self.set_ellipsize(Pango.EllipsizeMode.END)
        # Plain text gets italicised; markup input is used as-is.
        inner = text if is_markup else util.italic(text)
        self.set_markup("<span size='xx-large'>%s</span>" % inner)
        self.set_selectable(True)
class ReactiveCoverImage(CoverImage):
    """Cover image widget that stays hidden until a cover actually loads."""

    def __init__(self, resize=False, size=125, song=None, tooltip=None):
        super().__init__(resize, size, song)
        # Hidden by default; only shown once a cover becomes visible.
        self.set_property("no-show-all", True)

        def show_cover(cover, success):
            # One-shot handler: show the widget on success, then disconnect.
            # signal_id is resolved lazily from the enclosing scope, so it is
            # bound by the connect() call below before this can run.
            if success:
                cover.show()
            cover.disconnect(signal_id)

        signal_id = self.connect("cover-visible", show_cover)
        self.set_song(song)
        if tooltip:
            self.get_child().set_tooltip_text(tooltip)
def Frame(name, widget):
    """Wrap *widget* in a slightly translucent Gtk.Frame labelled *name*.

    *name* is escaped before being used as Pango markup.
    """
    # The previously nested helper ``hx`` was never used anywhere in this
    # function and has been removed as dead code.
    f = Gtk.Frame()
    qltk.add_css(f, "* {opacity: 0.9}")
    l = Gtk.Label()
    l.set_markup(util.escape(name))
    qltk.add_css(l, " * {opacity: 0.6; padding: 0px 2px;}")
    f.set_label_widget(l)
    a = Align(top=6, left=12, bottom=6, right=6)
    f.add(a)
    a.add(widget)
    return f
def Table(rows):
    """Return a two-column Gtk.Table with the standard 6px spacing.

    Gtk.Table refuses zero rows, so at least one row is always allocated.
    """
    table = Gtk.Table(n_rows=max(rows, 1), n_columns=2)
    table.set_col_spacings(6)
    table.set_row_spacings(6)
    table.set_homogeneous(False)
    return table
def SW():
    """Return a scrolled window that scrolls vertically only."""
    window = Gtk.ScrolledWindow()
    window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
    return window
class NoSongs(Gtk.Label):
    """Placeholder page shown when the selection contains no songs."""

    def __init__(self):
        super().__init__(label=_("No songs are selected."))
        # Used by Information.__update() for the window title.
        self.title = _("No Songs")
class OneSong(qltk.Notebook):
    """Notebook with details about a single song.

    Contains an "Information" overview page and, optionally, "Lyrics"
    and "Bookmarks" pages.  The overview is rebuilt in place whenever
    the library signals that the song changed.
    """

    def __init__(self, library, song, lyrics=True, bookmarks=True):
        super().__init__()
        vbox = Gtk.VBox(spacing=12)
        vbox.set_border_width(12)
        # Each section helper appends one framed widget to vbox.
        self._title(song, vbox)
        self._album(song, vbox)
        self._people(song, vbox)
        self._library(song, vbox)
        self._file(song, vbox)
        self._additional(song, vbox)
        sw = SW()
        sw.title = _("Information")
        sw.add_with_viewport(vbox)
        self.append_page(sw)
        if lyrics:
            lyrics = LyricsPane(song)
            lyrics.title = _("Lyrics")
            self.append_page(lyrics)
        if bookmarks:
            bookmarks = EditBookmarksPane(None, song)
            bookmarks.title = _("Bookmarks")
            bookmarks.set_border_width(12)
            self.append_page(bookmarks)
        # Rebuild the overview when the library reports this song changed;
        # connect_destroy detaches the handler when this widget is destroyed.
        connect_destroy(library, "changed", self.__check_changed, vbox, song)

    def _switch_to_lyrics(self):
        # The lyrics page is appended directly after the overview (index 1).
        self.set_current_page(1)

    def __check_changed(self, library, songs, vbox, song):
        """Library "changed" callback: rebuild all sections if our song
        is among the changed ones."""
        if song in songs:
            # Destroy and regenerate every section widget.
            for c in vbox.get_children():
                vbox.remove(c)
                c.destroy()
            self._title(song, vbox)
            self._album(song, vbox)
            self._people(song, vbox)
            self._library(song, vbox)
            self._file(song, vbox)
            self._additional(song, vbox)
            parent = qltk.get_top_parent(self)
            if parent:
                # _title() above may have updated self.title.
                parent.set_title(self.title + " - Quod Libet")
            vbox.show_all()

    def _title(self, song, box):
        """Add the big song-title heading, with an optional version line."""
        text = song.comma("title")
        if "version" in song:
            # NOTE(review): the version is escaped here while the title is
            # not — presumably TitleLabel/util.italic escapes it; confirm.
            text += "\n" + util.escape(song.comma("version"))
        box.pack_start(TitleLabel(text), False, False, 0)
        self.title = song.comma("title")

    def _album(self, song, box):
        """Add the album frame: album name, disc/track info, date, label,
        producer and the cover image.  Skipped when there is no album."""
        if "album" not in song:
            return
        text = [f"<span size='x-large'>{util.italic(song.comma('album'))}</span>"]
        secondary = []
        if "discnumber" in song:
            secondary.append(_("Disc %s") % song["discnumber"])
        if "discsubtitle" in song:
            secondary.append(util.italic(song.comma("discsubtitle")))
        if "tracknumber" in song:
            secondary.append(_("Track %s") % song["tracknumber"])
        if secondary:
            text.append(" - ".join(secondary))
        if "date" in song:
            text.append(util.escape(song.comma("date")))
        if "organization" in song or "labelid" in song:
            t = util.escape(song.comma("~organization~labelid"))
            text.append(t)
        if "producer" in song:
            text.append(_("Produced by %s") % (util.escape(song.comma("producer"))))
        w = Label(markup="\n".join(text), ellipsize=True)
        hb = Gtk.HBox(spacing=12)
        hb.pack_start(w, True, True, 0)
        box.pack_start(Frame(tag("album"), hb), False, False, 0)
        # The cover widget only becomes visible once artwork was found.
        cover = ReactiveCoverImage(song=song)
        hb.pack_start(cover, False, True, 0)

    def _people(self, song, box):
        """Add the people frame: artists, other contributor tags and
        role-qualified performers ("performer:<role>" tags)."""
        data = []
        if "artist" in song:
            title = _("artist") if len(song.list("artist")) == 1 else _("artists")
            title = util.capitalize(title)
            data.append((title, song["artist"]))
        for tag_ in [
            "performer",
            "lyricist",
            "arranger",
            "composer",
            "conductor",
            "author",
        ]:
            if tag_ in song:
                # Singular display name for one value, plural otherwise.
                name = (
                    tag(tag_)
                    if len(song.list(tag_)) == 1
                    else readable(tag_, plural=True)
                )
                data.append((name, song[tag_]))
        # Group performers by their role, e.g. "performer:vocals".
        performers = defaultdict(list)
        for tag_ in song:
            if "performer:" in tag_:
                for person in song.list(tag_):
                    role = util.title(tag_.split(":", 1)[1])
                    performers[role].append(person)
        if performers:
            text = "\n".join(
                "%s (%s)" % (", ".join(names), part)
                for part, names in performers.items()
            )
            name = tag("performer") if len(performers) == 1 else _("performers")
            data.append((name, text))
        table = Table(len(data))
        for i, (key, text) in enumerate(data):
            key = util.capitalize(util.escape(key) + ":")
            table.attach(
                Label(markup=key), 0, 1, i, i + 1, xoptions=Gtk.AttachOptions.FILL
            )
            label = Label(text, ellipsize=True)
            table.attach(label, 1, 2, i, i + 1)
        box.pack_start(Frame(tag("~people"), table), False, False, 0)

    def _library(self, song, box):
        """Add the library-statistics frame (added, plays, skips, rating)."""

        def counter(i):
            # Zero reads as "Never" rather than "0 times".
            return (
                _("Never")
                if i == 0
                else numeric_phrase("%(n)d time", "%(n)d times", i, "n")
            )

        def ftime(t):
            # Timestamp 0 means the value was never recorded.
            if t == 0:
                return _("Unknown")
            else:
                return str(time.strftime("%c", time.localtime(t)))

        playcount = counter(song.get("~#playcount", 0))
        skipcount = counter(song.get("~#skipcount", 0))
        lastplayed = ftime(song.get("~#lastplayed", 0))
        if lastplayed == _("Unknown"):
            lastplayed = _("Never")
        added = ftime(song.get("~#added", 0))
        rating = song("~rating")
        has_rating = "~#rating" in song
        t = Table(5)
        table = [
            (_("added"), added, True),
            (_("last played"), lastplayed, True),
            (_("plays"), playcount, True),
            (_("skips"), skipcount, True),
            # The rating row is greyed out when the song was never rated.
            (_("rating"), rating, has_rating),
        ]
        for i, (l, r, s) in enumerate(table):
            l = util.capitalize(l + ":")
            lab = Label(l)
            t.attach(lab, 0, 1, i + 1, i + 2, xoptions=Gtk.AttachOptions.FILL)
            label = Label(r)
            label.set_sensitive(s)
            t.attach(label, 1, 2, i + 1, i + 2)
        box.pack_start(Frame(_("Library"), t), False, False, 0)

    def _file(self, song, box):
        """Add the file frame: path, length, format, codec and size info."""

        def ftime(t):
            if t == 0:
                return _("Unknown")
            else:
                return str(time.strftime("%c", time.localtime(t)))

        fn = fsn2text(unexpand(song["~filename"]))
        length = util.format_time_preferred(song.get("~#length", 0))
        # Prefer the cached file size; fall back to stat()ing the file.
        size = util.format_size(song.get("~#filesize") or filesize(song["~filename"]))
        mtime = ftime(util.path.mtime(song["~filename"]))
        format_ = song("~format")
        codec = song("~codec")
        encoding = song.comma("~encoding")
        bitrate = song("~bitrate")
        table = [
            (_("path"), fn),
            (_("length"), length),
            (_("format"), format_),
            (_("codec"), codec),
            (_("encoding"), encoding),
            (_("bitrate"), bitrate),
            (_("file size"), size),
            (_("modified"), mtime),
        ]
        t = Table(len(table))
        for i, (tag_, text) in enumerate(table):
            tag_ = util.capitalize(util.escape(tag_) + ":")
            lab = Label(text)
            # Paths are most recognisable by their tail, so cut the middle.
            lab.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
            t.attach(Label(tag_), 0, 1, i, i + 1, xoptions=Gtk.AttachOptions.FILL)
            t.attach(lab, 1, 2, i, i + 1)
        box.pack_start(Frame(_("File"), t), False, False, 0)

    def _additional(self, song, box):
        """Add the frame with free-form comments and website links.

        Skipped entirely when the song has neither tag."""
        if "website" not in song and "comment" not in song:
            return
        markup_data = []
        if "comment" in song:
            comments = song.list("comment")
            markups = [util.italic(c) for c in comments]
            markup_data.append(("comment", markups))
        if "website" in song:
            # Each website becomes a clickable link labelled with its URL.
            markups = [
                '<a href="%(url)s">%(text)s</a>'
                % {"text": util.escape(website), "url": util.escape(website)}
                for website in song.list("website")
            ]
            markup_data.append(("website", markups))
        table = Table(1)
        for i, (key, markups) in enumerate(markup_data):
            title = readable(key, plural=len(markups) > 1)
            lab = Label(markup=util.capitalize(util.escape(title) + ":"))
            table.attach(lab, 0, 1, i, i + 1, xoptions=Gtk.AttachOptions.FILL)
            lab = Label(markup="\n".join(markups), ellipsize=True)
            table.attach(lab, 1, 2, i, i + 1)
        box.pack_start(Frame(_("Additional"), table), False, False, 0)
class OneAlbum(qltk.Notebook):
    """Notebook page summarising the selected songs of a single album."""

    def __init__(self, songs):
        super().__init__()
        swin = SW()
        swin.title = _("Information")
        vbox = Gtk.VBox(spacing=12)
        vbox.set_border_width(12)
        swin.add_with_viewport(vbox)
        # Needed to get proper track/disc/part ordering
        songs = sorted(songs)
        self._title(songs, vbox)
        self._album(songs, vbox)
        self._people(songs, vbox)
        self._description(songs, vbox)
        self.append_page(swin)

    def _title(self, songs, box):
        """Heading with the album name and, when known, its year."""
        song = songs[0]
        self.title = text = song["album"]
        markup = util.italic(text)
        if "date" in song:
            markup += " <small>(%s)</small>" % util.escape(song("~year"))
        box.pack_start(TitleLabel(markup, is_markup=True), False, False, 0)

    def _album(self, songs, box):
        """Summary line: disc/track counts, total length, label, producer,
        plus the album cover."""
        text = []
        # Map disc number -> best-known track count for that disc.
        discs = {}
        for song in songs:
            try:
                # "tracknumber" like "3/12" carries the disc's track total.
                discs[song("~#disc")] = int(song["tracknumber"].split("/")[1])
            except (AttributeError, ValueError, IndexError, KeyError):
                # NOTE(review): max() over a one-element list — effectively
                # just song("~#track", previous value); kept as-is.
                discs[song("~#disc")] = max(
                    [song("~#track", discs.get(song("~#disc"), 0))]
                )
        tracks = sum(discs.values())
        discs = len(discs)
        length = sum([song.get("~#length", 0) for song in songs])
        # Never report fewer tracks than songs actually selected.
        if tracks == 0 or tracks < len(songs):
            tracks = len(songs)
        parts = []
        if discs > 1:
            parts.append(ngettext("%d disc", "%d discs", discs) % discs)
        parts.append(ngettext("%d track", "%d tracks", tracks) % tracks)
        if tracks != len(songs):
            parts.append(
                ngettext("%d selected", "%d selected", len(songs)) % len(songs)
            )
        text.append(", ".join(parts))
        text.append("(%s)" % util.format_time_preferred(length))
        # ``song`` is the last (sorted) song here; album-level tags are
        # read from it.
        if "location" in song:
            text.append(util.escape(song["location"]))
        if "organization" in song or "labelid" in song:
            t = util.escape(song.comma("~organization~labelid"))
            text.append(t)
        if "producer" in song:
            text.append(_("Produced by %s") % (util.escape(song.comma("producer"))))
        w = Label(markup="\n".join(text), ellipsize=True)
        hb = Gtk.HBox(spacing=12)
        hb.pack_start(w, True, True, 0)
        hb.pack_start(ReactiveCoverImage(song=song), False, True, 0)
        box.pack_start(hb, False, False, 0)

    def _people(self, songs, box):
        """Frame listing everyone credited on the selection, in the
        canonical PEOPLE tag order."""
        tags_ = PEOPLE
        people = defaultdict(set)
        for song in songs:
            for t in tags_:
                if t in song:
                    people[t] |= set(song.list(t))
        data = []
        # Preserve order of people
        for tag_ in tags_:
            values = people.get(tag_)
            if values:
                name = readable(tag_, plural=len(values) > 1)
                data.append((name, "\n".join(values)))
        table = Table(len(data))
        for i, (key, text) in enumerate(data):
            key = util.capitalize(util.escape(key) + ":")
            table.attach(
                Label(markup=key), 0, 1, i, i + 1, xoptions=Gtk.AttachOptions.FILL
            )
            label = Label(text, ellipsize=True)
            table.attach(label, 1, 2, i, i + 1)
        box.pack_start(Frame(tag("~people"), table), False, False, 0)

    def _description(self, songs, box):
        """Frame with an indented track listing, grouped by disc and part.

        Gaps in the track numbering show as "Track unavailable" rows."""
        text = []
        cur_disc = songs[0]("~#disc", 1) - 1
        cur_part = None
        cur_track = songs[0]("~#track", 1) - 1
        for song in songs:
            track = song("~#track", 0)
            disc = song("~#disc", 0)
            part = song.get("part")
            if disc != cur_disc:
                # New disc: reset track/part state and emit a disc header.
                if cur_disc:
                    text.append("")
                cur_track = song("~#track", 1) - 1
                cur_part = None
                cur_disc = disc
                if disc:
                    text.append("%s" % (_("Disc %s") % disc))
            if part != cur_part:
                ts = "    " * bool(disc)
                cur_part = part
                if part:
                    text.append("%s%s" % (ts, util.escape(part)))
            cur_track += 1
            # Indent one level per enclosing disc/part header.
            ts = "    " * (bool(disc) + bool(part))
            # Placeholders for tracks missing from the selection.
            while cur_track < track:
                text.append(
                    "{ts}{cur: >2}. {text}".format(
                        ts=ts, cur=cur_track, text=_("Track unavailable")
                    )
                )
                cur_track += 1
            title = util.italic(song.comma("~title~version"))
            text.append(f"{ts}{track: >2}. {title}")
        l = Label(markup="\n".join(text), ellipsize=True)
        box.pack_start(Frame(_("Track List"), l), False, False, 0)
class OneArtist(qltk.Notebook):
    """Notebook page summarising the selected songs of one artist."""

    def __init__(self, songs):
        super().__init__()
        swin = SW()
        swin.title = _("Information")
        vbox = Gtk.VBox(spacing=12)
        vbox.set_border_width(12)
        swin.add_with_viewport(vbox)
        self._title(songs, vbox)
        self._album(songs, vbox)
        self.append_page(swin)

    def _title(self, songs, box):
        """Heading with the artist name (taken from the first song)."""
        self.title = songs[0]("artist")
        l = TitleLabel(self.title)
        box.pack_start(l, False, False, 0)

    def _album(self, songs, box):
        """List the artist's albums in the selection and a cover grid."""
        albums, noalbum = _sort_albums(songs)

        def format(args):
            # args is one (date, song, album) tuple from _sort_albums().
            # (shadows the builtin ``format``; kept for byte-compatibility)
            date, song, album = args
            markup = f"<big>{util.italic(album)}</big>"
            return "%s (%s)" % (markup, date[:4]) if date else markup

        get_cover = app.cover_manager.get_cover
        covers = [(a, get_cover(s), s) for d, s, a in albums]
        albums = [format(a) for a in albums]
        if noalbum:
            albums.append(
                ngettext("%d song with no album", "%d songs with no album", noalbum)
                % noalbum
            )
        l = Label(markup="\n".join(albums), ellipsize=True)
        box.pack_start(Frame(_("Selected Discography"), l), False, False, 0)
        # Keep only albums whose cover lookup succeeded, then lay the
        # covers out four per row, skipping duplicate cover files.
        covers = [ac for ac in covers if bool(ac[1])]
        t = Gtk.Table(n_rows=4, n_columns=(len(covers) // 4) + 1)
        t.set_col_spacings(12)
        t.set_row_spacings(12)
        added = set()
        for i, (album, cover, song) in enumerate(covers):
            if cover.name in added:
                continue
            cov = ReactiveCoverImage(song=song, tooltip=album)
            c = i % 4
            r = i // 4
            t.attach(
                cov, c, c + 1, r, r + 1, xoptions=Gtk.AttachOptions.EXPAND, yoptions=0
            )
            added.add(cover.name)
        box.pack_start(t, True, True, 0)
def _sort_albums(songs):
""":return: a tuple of (albums, count) where
count is the number of album-less songs and
albums is a list of (date, song, album), sorted"""
no_album_count = 0
albums = {}
for song in songs:
if "album" in song:
albums[song.list("album")[0]] = song
else:
no_album_count += 1
albums = [(song.get("date", ""), song, album) for album, song in albums.items()]
albums.sort()
return albums, no_album_count
class ManySongs(qltk.Notebook):
    """Notebook page with aggregate information about many songs."""

    def __init__(self, songs):
        super().__init__()
        swin = SW()
        swin.title = _("Information")
        vbox = Gtk.VBox(spacing=12)
        vbox.set_border_width(12)
        swin.add_with_viewport(vbox)
        self._title(songs, vbox)
        self._people(songs, vbox)
        self._album(songs, vbox)
        self._file(songs, vbox)
        self.append_page(swin)

    def _title(self, songs, box):
        """Heading: "<n> songs"."""
        self.title = ngettext("%d song", "%d songs", len(songs)) % len(songs)
        markup = util.escape(self.title)
        box.pack_start(TitleLabel(markup, is_markup=True), False, False, 0)

    def _people(self, songs, box):
        """Frame listing every distinct artist, plus a no-artist count."""
        artists = set()
        none = 0
        for song in songs:
            if "artist" in song:
                artists.update(song.list("artist"))
            else:
                none += 1
        artists = sorted(artists)
        num_artists = len(artists)
        if none:
            artists.append(
                ngettext("%d song with no artist", "%d songs with no artist", none)
                % none
            )
        label = Label(markup=util.escape("\n".join(artists)), ellipsize=True)
        frame = Frame("%s (%d)" % (util.capitalize(_("artists")), num_artists), label)
        box.pack_start(frame, False, False, 0)

    def _album(self, songs, box):
        """Frame listing every distinct album, plus a no-album count."""
        albums = set()
        none = 0
        for song in songs:
            if "album" in song:
                albums.update(song.list("album"))
            else:
                none += 1
        albums = sorted(albums)
        num_albums = len(albums)
        markup = "\n".join(util.italic(a) for a in albums)
        if none:
            text = (
                ngettext("%d song with no album", "%d songs with no album", none) % none
            )
            markup += f"\n{util.escape(text)}"
        label = Label()
        label.set_markup(markup)
        albums = util.capitalize(_("albums"))
        box.pack_start(Frame(f"{albums} ({num_albums})", label), False, False, 0)

    def _file(self, songs, box):
        """Frame with the combined play time and on-disk size."""
        length = 0
        size = 0
        for song in songs:
            length += song.get("~#length", 0)
            try:
                size += filesize(song["~filename"])
            except EnvironmentError:
                # Missing/unreadable files simply don't count towards size.
                pass
        table = Table(2)
        table.attach(
            Label(_("Total length:")), 0, 1, 0, 1, xoptions=Gtk.AttachOptions.FILL
        )
        table.attach(Label(util.format_time_preferred(length)), 1, 2, 0, 1)
        table.attach(
            Label(_("Total size:")), 0, 1, 1, 2, xoptions=Gtk.AttachOptions.FILL
        )
        table.attach(Label(util.format_size(size)), 1, 2, 1, 2)
        box.pack_start(Frame(_("Files"), table), False, False, 0)
class Information(Window, PersistentWindowMixin):
    """Top-level window displaying information about the given songs.

    Depending on the selection it embeds NoSongs, OneSong, OneAlbum,
    OneArtist or ManySongs, and refreshes itself on library changes.
    """

    def __init__(self, library, songs, parent=None):
        super().__init__(dialog=False)
        self.set_default_size(400, 400)
        self.set_transient_for(qltk.get_top_parent(parent))
        self.enable_window_tracking("quodlibet_information")
        # With several songs the album/artist grouping can change when
        # tags change, so also watch "changed"; removals always matter.
        if len(songs) > 1:
            connect_destroy(library, "changed", self.__check_changed)
        if len(songs) > 0:
            connect_destroy(library, "removed", self.__check_removed)
        self.__songs = songs
        self.__update(library)
        self.get_child().show_all()

    def __check_changed(self, library, songs):
        # Rebuild if any displayed song was modified.
        changed = set(songs)
        for song in self.__songs:
            if song in changed:
                self.__update(library)
                break

    def __check_removed(self, library, songs):
        # Drop removed songs and rebuild if anything disappeared.
        gone = set(songs)
        old = len(self.__songs)
        self.__songs = [s for s in self.__songs if s not in gone]
        if len(self.__songs) != old:
            self.__update(library)

    def __update(self, library):
        """Replace the window content with the page matching the selection."""
        songs = self.__songs
        if self.get_child():
            self.get_child().destroy()
        self.__songs = songs
        if not songs:
            self.add(NoSongs())
        elif len(songs) == 1:
            self.add(OneSong(library, songs[0]))
        else:
            tags = [(s.get("artist", ""), s.get("album", "")) for s in songs]
            artists, albums = zip(*tags)
            # min == max means every value is identical; also require the
            # common value to be non-empty.
            if min(albums) == max(albums) and albums[0]:
                self.add(OneAlbum(songs))
            elif min(artists) == max(artists) and artists[0]:
                self.add(OneArtist(songs))
            else:
                self.add(ManySongs(songs))
        self.set_title(self.get_child().title + " - Quod Libet")
        self.get_child().show_all()
|
core | Messages | # Copyright 2007, 2015 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import sys
import traceback
# A list of functions that can receive a message.
MESSENGERS_LIST = list()
# Current indentation prefix prepended to every message (see set_indent()).
_indent = ""
# Global FlowGraph Error and the file that caused it
flowgraph_error = None
flowgraph_error_file = None
def register_messenger(messenger):
    """
    Append the given messenger to the list of messengers.

    Args:
        messenger: a method that takes a string
    """
    MESSENGERS_LIST.append(messenger)
def set_indent(level=0):
    """Set the number of leading spaces prepended to future messages."""
    global _indent
    _indent = "    " * level
def send(message):
    """
    Give the message to each of the messengers.

    Args:
        message: a message string
    """
    # Apply the indent once; every registered messenger gets the same line.
    line = _indent + message
    for messenger in MESSENGERS_LIST:
        messenger(line)
# register stdout by default, so messages are visible on the console even
# before any GUI messenger is attached
register_messenger(sys.stdout.write)
###########################################################################
# Special functions for specific program functionalities
###########################################################################
def send_init(platform):
    """Send the startup banner with name, version and block paths."""
    msg = (
        "<<< Welcome to {config.name} {config.version} >>>\n\n"
        "Block paths:\n\t{paths}\n"
    )
    paths = "\n\t".join(platform.config.block_paths)
    send(msg.format(config=platform.config, paths=paths))
def send_xml_errors_if_any(xml_failures):
    """Report how many XML files failed to parse, when there are any."""
    if not xml_failures:
        return
    count = len(xml_failures)
    send(
        "\nXML parser: Found {0} erroneous XML file{1} while loading the "
        'block tree (see "Help/Parser errors" for details)\n'.format(
            count, "s" if count > 1 else ""
        )
    )
def send_start_load(file_path):
    """Announce that a flow graph file is being loaded."""
    send('\nLoading: "%s"\n' % file_path)
def send_error_msg_load(error):
    """Report a (non-fatal) error that occurred while loading."""
    send(">>> Error: %s\n" % error)
def send_error_load(error):
    """Report a load error and dump the traceback to stderr."""
    send_error_msg_load(error)
    traceback.print_exc()
def send_end_load():
    """Announce that loading finished successfully."""
    send(">>> Done\n")
def send_fail_load(error):
    """Report a fatal load failure and dump the traceback to stderr."""
    send("Error: %s\n>>> Failure\n" % error)
    traceback.print_exc()
def send_start_gen(file_path):
    """Announce that code generation for file_path started."""
    send("\nGenerating: %r\n" % file_path)
def send_auto_gen(file_path):
    """Announce an automatically triggered generation of file_path."""
    send(">>> Generating: %r\n" % file_path)
def send_fail_gen(error):
    """Report a code-generation failure and dump the traceback."""
    send("Generate Error: %s\n>>> Failure\n" % error)
    traceback.print_exc()
def send_start_exec(file_path):
    """Announce that the generated flow graph is being executed."""
    send("\nExecuting: %s\n" % file_path)
def send_verbose_exec(verbose):
    """Forward raw output produced by the running flow graph."""
    send(verbose)
def send_end_exec(code=0):
    """Announce completion; include the return code when it is non-zero."""
    suffix = " (return code %s)" % code if code else ""
    send("\n>>> Done%s\n" % suffix)
def send_fail_save(file_path):
    """Report that saving file_path failed."""
    send(">>> Error: Cannot save: %s\n" % file_path)
def send_fail_connection(msg=""):
    """Report a failed connection attempt, with optional detail text."""
    detail = "\t{}\n".format(msg) if msg else ""
    send(">>> Error: Cannot create connection.\n" + detail)
def send_fail_load_preferences(prefs_file_path):
    """Report that the preferences file could not be loaded."""
    send('>>> Error: Cannot load preferences file: "%s"\n' % prefs_file_path)
def send_fail_save_preferences(prefs_file_path):
    """Report that the preferences file could not be saved."""
    send('>>> Error: Cannot save preferences file: "%s"\n' % prefs_file_path)
def send_warning(warning):
    """Send a non-fatal warning message."""
    send(">>> Warning: %s\n" % warning)
def send_flowgraph_error_report(flowgraph):
    """verbose error report for flowgraphs"""
    errors = flowgraph.get_error_messages()
    # Nothing to report: stay silent instead of printing an empty banner.
    if not errors:
        return
    banner = "*" * 50
    send(banner + "\n")
    send("{} errors from flowgraph:\n".format(len(errors)))
    for message in errors:
        send(message)
    send("\n" + banner + "\n")
|
neubot | system_posix | # neubot/system_posix.py
#
# Copyright (c) 2010-2011
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN)
# and Simone Basso <bassosimone@gmail.com>
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
"""
Code for UNIX
"""
# NB: This code is currently being refactored.
#
# When we MUST exit, it is better to use os._exit() rather than
# sys.exit(), because the former cannot be caught while
# the latter can.
#
UNPRIV_USER = "_neubot"
import os
import syslog
from neubot import utils_hier, utils_posix, utils_rc
def __logger(severity, message):
    """Log @message at the given @severity using syslog"""
    #
    # Implemented with the syslog module because SysLogHandler is
    # difficult to use: you need to know the path to the
    # system specific ``/dev/log``.
    #
    priorities = {
        "ERROR": syslog.LOG_ERR,
        "WARNING": syslog.LOG_WARNING,
        "DEBUG": syslog.LOG_DEBUG,
    }
    # Unknown severities fall back to LOG_INFO, as before.
    syslog.syslog(priorities.get(severity, syslog.LOG_INFO), message)
def get_background_logger():
    """Return the background logger"""
    # Tag entries with "neubot", include the PID, log to the daemon facility.
    syslog.openlog("neubot", syslog.LOG_PID, syslog.LOG_DAEMON)
    return __logger
def _get_profile_dir():
    """The profile directory is always LOCALSTATEDIR"""
    return utils_hier.LOCALSTATEDIR
def _want_rwx_dir(datadir):
    """
    This function ensures that the unprivileged user is the
    owner of the directory that contains Neubot database.
    Otherwise sqlite3 fails to lock the database for writing
    (it creates a lockfile for that).

    Read more at http://www.neubot.org/node/14
    """
    # Create the directory unless it already exists.
    # (0o755 replaces the obscure decimal literal 493 — same value.)
    if not os.path.isdir(datadir):
        os.mkdir(datadir, 0o755)
    # When running as root, hand ownership to the unprivileged user
    if os.getuid() == 0:
        passwd = getpwnam()
        os.chown(datadir, passwd.pw_uid, passwd.pw_gid)
def go_background():
    """Detach from the shell and run in background"""
    utils_posix.daemonize(pidfile="/var/run/neubot.pid")
def getpwnam():
    """Wrapper for getpwnam"""
    # The unprivileged user may be overridden in /etc/neubot/users;
    # fall back to the compiled-in default otherwise.
    settings = utils_rc.parse_safe("/etc/neubot/users")
    username = settings.get("unpriv_user", UNPRIV_USER)
    return utils_posix.getpwnam(username)
def drop_privileges():
    """
    Drop root privileges and run on behalf of the specified
    unprivileged users.
    """
    passwd = getpwnam()
    utils_posix.chuser(passwd)
def _want_rw_file(path):
    """
    Ensure that the given file is readable and writable
    by its owner. If running as root force ownership
    to be of the unprivileged user.
    """
    # Create the file if it does not exist ("ab+" never truncates).
    # A context manager closes the handle even if open() partially fails,
    # instead of the previous explicit open()/close() pair.
    with open(path, "ab+"):
        pass
    # Enforce file ownership when running as root
    if os.getuid() == 0:
        passwd = getpwnam()
        os.chown(path, passwd.pw_uid, passwd.pw_gid)
    # Set permissions to rw-r--r--
    # (0o644 replaces the obscure decimal literal 420 — same value.)
    os.chmod(path, 0o644)
def has_enough_privs():
    """Returns true if this process has enough privileges"""
    # Root (uid 0) is required to chown data files and drop privileges.
    return os.getuid() == 0
|
isso | migrate | # -*- encoding: utf-8 -*-
import functools
import io
import json
import logging
import os
import re
import sys
import textwrap
from collections import defaultdict
from time import mktime, strptime, time
from urllib.parse import urlparse
from xml.etree import ElementTree
from isso.utils import anonymize
logger = logging.getLogger("isso")
def strip(val):
    """Return *val* with surrounding whitespace removed if it is a string.

    Non-string values (including None) are passed through unchanged.
    """
    # isinstance(val, (str,)) used a pointless one-element tuple.
    if isinstance(val, str):
        return val.strip()
    return val
class Progress(object):
    """Minimal console progress display used during migrations.

    Output is throttled to ~5 updates/second and only produced when
    stdout is a TTY.
    """

    def __init__(self, end):
        # Guard against division by zero in update(); end may be 0.
        self.end = end or 1
        self.istty = sys.stdout.isatty()
        self.last = 0

    def update(self, i, message):
        """Redraw the status line as "[percent] message", rate-limited."""
        if not self.istty or message is None:
            return
        # Terminal width via stty — assumes a POSIX environment with a
        # working `stty size`.
        cols = int((os.popen("stty size", "r").read()).split()[1])
        message = message[: cols - 7]
        if time() - self.last > 0.2:
            # Blank the line first, then write the fresh status.
            sys.stdout.write("\r{0}".format(" " * cols))
            sys.stdout.write("\r[{0:.0%}] {1}".format(i / self.end, message))
            sys.stdout.flush()
            self.last = time()

    def finish(self, message):
        """Force a final 100% line (resets the throttle first)."""
        self.last = 0
        self.update(self.end, message + "\n")
class Disqus(object):
    """Importer for Disqus XML exports."""

    # Format documented at https://help.disqus.com/en/articles/1717164-comments-export
    ns = "{http://disqus.com}"
    internals = "{http://disqus.com/disqus-internals}"

    def __init__(self, db, xmlfile, empty_id=False):
        # Ids of threads/comments actually imported; used for the orphan
        # report at the end of migrate().
        self.threads = set([])
        self.comments = set([])
        self.db = db
        self.xmlfile = xmlfile
        # Whether to import threads whose <id> element is empty.
        self.empty_id = empty_id

    def insert(self, thread, posts):
        """Insert all posts of one thread, remapping Disqus ids to DB ids."""
        path = urlparse(thread.find("%slink" % Disqus.ns).text).path
        remap = dict()
        if path not in self.db.threads:
            thread_title = thread.find(Disqus.ns + "title").text or ""
            self.db.threads.new(path, thread_title.strip())
        # Sorting by creation time means parents are normally inserted
        # before their replies, so the parent remap below can succeed.
        for item in sorted(posts, key=lambda k: k["created"]):
            dsq_id = item.pop("dsq:id")
            item["parent"] = remap.get(item.pop("dsq:parent", None))
            rv = self.db.comments.add(path, item)
            remap[dsq_id] = rv["id"]
        self.comments.update(set(remap.keys()))

    def migrate(self):
        """Parse the XML dump and import all threads and their comments."""
        tree = ElementTree.parse(self.xmlfile)
        # Group posts by the Disqus-internal id of their thread.
        res = defaultdict(list)
        for post in tree.findall(Disqus.ns + "post"):
            email = post.find("{0}author/{0}email".format(Disqus.ns))
            ip = post.find(Disqus.ns + "ipAddress")
            item = {
                "dsq:id": post.attrib.get(Disqus.internals + "id"),
                "text": post.find(Disqus.ns + "message").text,
                "author": post.find("{0}author/{0}name".format(Disqus.ns)).text,
                "email": email.text if email is not None else "",
                "created": mktime(
                    strptime(
                        post.find(Disqus.ns + "createdAt").text, "%Y-%m-%dT%H:%M:%SZ"
                    )
                ),
                "remote_addr": anonymize(ip.text if ip is not None else "0.0.0.0"),
                # presumably isso mode flags: 1 = visible, 4 = deleted —
                # confirm against isso's comment model
                "mode": 1 if post.find(Disqus.ns + "isDeleted").text == "false" else 4,
            }
            if post.find(Disqus.ns + "parent") is not None:
                item["dsq:parent"] = post.find(Disqus.ns + "parent").attrib.get(
                    Disqus.internals + "id"
                )
            res[
                post.find("%sthread" % Disqus.ns).attrib.get(Disqus.internals + "id")
            ].append(item)
        progress = Progress(len(tree.findall(Disqus.ns + "thread")))
        for i, thread in enumerate(tree.findall(Disqus.ns + "thread")):
            # Workaround for not crashing with empty thread ids:
            # an ElementTree Element with no children is falsy, so this
            # also catches present-but-empty <id> elements.
            thread_id = thread.find(Disqus.ns + "id")
            if not thread_id:
                thread_id = dict(text="<empty thread id>", empty=True)
            # NOTE(review): for real Elements, .get("text") looks up an XML
            # *attribute* named "text" and so yields None (progress line is
            # then skipped); looks unintended — confirm.
            progress.update(i, thread_id.get("text"))
            # skip (possibly?) duplicate, but empty thread elements
            if thread_id.get("empty") and not self.empty_id:
                continue
            id = thread.attrib.get(Disqus.internals + "id")
            if id in res:
                self.threads.add(id)
                self.insert(thread, res[id])
        # in case a comment has been deleted (and has no further children)
        self.db.comments._remove_stale()
        progress.finish(
            "{0} threads, {1} comments".format(len(self.threads), len(self.comments))
        )
        # Posts whose thread was never imported are reported as orphans.
        orphans = (
            set(
                map(
                    lambda e: e.attrib.get(Disqus.internals + "id"),
                    tree.findall(Disqus.ns + "post"),
                )
            )
            - self.comments
        )
        if orphans and not self.threads:
            print("Isso couldn't import any thread, try again with --empty-id")
        elif orphans:
            print("Found %i orphans:" % len(orphans))
            for post in tree.findall(Disqus.ns + "post"):
                if post.attrib.get(Disqus.internals + "id") not in orphans:
                    continue
                email = post.find("{0}author/{0}email".format(Disqus.ns))
                print(
                    " * {0} by {1} <{2}>".format(
                        post.attrib.get(Disqus.internals + "id"),
                        post.find("{0}author/{0}name".format(Disqus.ns)).text,
                        email.text if email is not None else "",
                    )
                )
                print(
                    textwrap.fill(
                        post.find(Disqus.ns + "message").text,
                        initial_indent=" ",
                        subsequent_indent=" ",
                    )
                )
                print("")
class WordPress(object):
    """Importer for WordPress eXtended RSS (WXR) dumps."""

    # Default namespace; the actual version is sniffed in __init__.
    ns = "{http://wordpress.org/export/1.0/}"

    def __init__(self, db, xmlfile):
        self.db = db
        self.xmlfile = xmlfile
        self.count = 0
        # Sniff the WXR version from the raw file so the right namespace
        # is used below.  A context manager closes the handle
        # deterministically — the previous code leaked it until GC.
        with io.open(xmlfile, encoding="utf-8") as fp:
            for line in fp:
                m = WordPress.detect(line)
                if m:
                    self.ns = WordPress.ns.replace("1.0", m.group(1))
                    break
            else:
                logger.warning("No WXR namespace found, assuming 1.0")

    def insert(self, thread):
        """Create one thread and import its comments, rewriting parent
        references from WordPress comment ids to database ids."""
        url = urlparse(thread.find("link").text)
        path = url.path
        if url.query:
            path += "?" + url.query
        self.db.threads.new(path, thread.find("title").text.strip())
        comments = list(map(self.Comment, thread.findall(self.ns + "comment")))
        comments.sort(key=lambda k: k["id"])
        remap = {}
        ids = set(c["id"] for c in comments)
        self.count += len(ids)
        # Repeatedly insert a comment whose parent is already in the DB
        # (or absent), so children always follow their parents.
        while comments:
            for i, item in enumerate(comments):
                if item["parent"] in ids:
                    # Parent not inserted yet; try the next comment.
                    continue
                item["parent"] = remap.get(item["parent"], None)
                rv = self.db.comments.add(path, item)
                remap[item["id"]] = rv["id"]
                ids.remove(item["id"])
                comments.pop(i)
                break
            else:
                # should never happen, but... it's WordPress.
                return

    def migrate(self):
        """Parse the XML dump and import every item that has comments."""
        tree = ElementTree.parse(self.xmlfile)
        skip = 0
        items = tree.findall("channel/item")
        progress = Progress(len(items))
        for i, thread in enumerate(items):
            # Skip items without a title or without any comment.
            if (
                thread.find("title").text is None
                or thread.find(self.ns + "comment") is None
            ):
                skip += 1
                continue
            progress.update(i, thread.find("title").text)
            self.insert(thread)
        progress.finish(
            "{0} threads, {1} comments".format(len(items) - skip, self.count)
        )

    def _process_comment_content(self, text):
        # WordPress comment text renders a single newline between two blocks of
        # text as a <br> tag, so add an explicit Markdown line break on import
        # (Otherwise multiple blocks of text separated by single newlines are
        # all shown as one long line.)
        text = re.sub(r"(?!^\n)\n(?!^\n)", " \n", text, 0)
        return strip(text)

    def Comment(self, el):
        """Map one <wp:comment> element onto an isso comment dict."""
        return {
            "text": self._process_comment_content(
                el.find(self.ns + "comment_content").text
            ),
            "author": strip(el.find(self.ns + "comment_author").text),
            "email": strip(el.find(self.ns + "comment_author_email").text),
            "website": strip(el.find(self.ns + "comment_author_url").text),
            "remote_addr": anonymize(
                strip(el.find(self.ns + "comment_author_IP").text)
            ),
            "created": mktime(
                strptime(
                    strip(el.find(self.ns + "comment_date_gmt").text),
                    "%Y-%m-%d %H:%M:%S",
                )
            ),
            # presumably isso mode flags: 1 = approved, 2 = pending — confirm
            "mode": 1 if el.find(self.ns + "comment_approved").text == "1" else 2,
            "id": int(el.find(self.ns + "comment_id").text),
            # WordPress uses parent id 0 for top-level comments.
            "parent": int(el.find(self.ns + "comment_parent").text) or None,
        }

    @classmethod
    def detect(cls, peek):
        """Return a regex match for the WXR version found in *peek*, or None."""
        return re.compile("http://wordpress.org/export/(1\\.\\d)/").search(peek)
class Generic(object):
    """A generic importer.
    The source format is a json with the following format:
    A list of threads, each item being a dict with the following data:
    - id: a text representing the unique thread id
    - title: the title of the thread
    - comments: the list of comments
    Each item in that list of comments is a dict with the following data:
    - id: an integer with the unique id of the comment inside the thread (it can be repeated
      among different threads); this will be used to order the comment inside the thread
    - author: the author's name
    - email: the author's email
    - website: the author's website
    - remote_addr: the author's IP
    - created: a timestamp, in the format "%Y-%m-%d %H:%M:%S"
    """

    def __init__(self, db, json_file):
        self.db = db
        self.json_file = json_file
        self.count = 0

    def insert(self, thread):
        """Process a thread and insert its comments in the DB."""
        thread_id = thread["id"]
        self.db.threads.new(thread_id, thread["title"])
        # Insert in ascending comment-id order.
        ordered = sorted(
            (self._build_comment(raw) for raw in thread["comments"]),
            key=lambda comment: comment["id"],
        )
        self.count += len(ordered)
        for comment in ordered:
            self.db.comments.add(thread_id, comment)

    def migrate(self):
        """Process the input file and fill the DB."""
        with io.open(self.json_file, "rt", encoding="utf8") as fh:
            threads = json.load(fh)
        progress = Progress(len(threads))
        for index, thread in enumerate(threads):
            progress.update(index, str(index))
            self.insert(thread)
        progress.finish("{0} threads, {1} comments".format(len(threads), self.count))

    def _build_comment(self, raw_comment):
        # All imported comments are marked visible (mode 1) and top-level.
        created = mktime(strptime(raw_comment["created"], "%Y-%m-%d %H:%M:%S"))
        return {
            "text": raw_comment["text"],
            "author": raw_comment["author"],
            "email": raw_comment["email"],
            "website": raw_comment["website"],
            "created": created,
            "mode": 1,
            "id": int(raw_comment["id"]),
            "parent": None,
            "remote_addr": raw_comment["remote_addr"],
        }

    @classmethod
    def detect(cls, peek):
        """Return if peek looks like the beginning of a JSON file.
        Note that we can not check the JSON properly as we only receive here
        the original file truncated.
        """
        return peek.startswith("[{")
def autodetect(peek):
    """Guess the importer class matching the file prefix *peek*, or None."""
    if 'xmlns="http://disqus.com' in peek:
        return Disqus
    if WordPress.detect(peek):
        return WordPress
    if Generic.detect(peek):
        return Generic
    return None
def dispatch(type, db, dump, empty_id=False):
    """Pick an importer (by explicit type or content sniffing) and run it."""
    # Refuse to mix an import into a populated DB without confirmation.
    if db.execute("SELECT * FROM comments").fetchone():
        if input("Isso DB is not empty! Continue? [y/N]: ") not in ("y", "Y"):
            raise SystemExit("Abort.")
    known = {"disqus": Disqus, "wordpress": WordPress, "generic": Generic}
    cls = known.get(type)
    if cls is None:
        # Unknown type: sniff the beginning of the dump file instead.
        with io.open(dump, encoding="utf-8") as fp:
            cls = autodetect(fp.read(io.DEFAULT_BUFFER_SIZE))
    if cls is None:
        raise SystemExit("Unknown format, abort.")
    if cls is Disqus:
        cls = functools.partial(cls, empty_id=empty_id)
    cls(db, dump).migrate()
|
views | person | from typing import Dict, List, Optional, Tuple
from ee.clickhouse.queries.funnels.funnel_correlation_persons import (
FunnelCorrelationActors,
)
from posthog.api.person import PersonViewSet
from posthog.constants import (
FUNNEL_CORRELATION_PERSON_LIMIT,
FUNNEL_CORRELATION_PERSON_OFFSET,
INSIGHT_FUNNELS,
)
from posthog.decorators import cached_by_filters
from posthog.models import Filter
from posthog.utils import format_query_params_absolute_url
from rest_framework import request, response
from rest_framework.decorators import action
class EnterprisePersonViewSet(PersonViewSet):
    """Person viewset extended with a funnel-correlation actors endpoint."""

    @action(methods=["GET", "POST"], url_path="funnel/correlation", detail=False)
    def funnel_correlation(
        self, request: request.Request, **kwargs
    ) -> response.Response:
        """Return actors correlated with funnel steps (cached by filters)."""
        if request.user.is_anonymous or not self.team:
            return response.Response(data=[])
        results = self.calculate_funnel_correlation_persons(request)
        return self._respond_with_cached_results(results)

    @cached_by_filters
    def calculate_funnel_correlation_persons(
        self, request: request.Request
    ) -> Dict[str, Tuple[List, Optional[str], Optional[str], int]]:
        """Compute correlated actors, pagination URLs and the remaining count."""
        team_filter = Filter(
            request=request, data={"insight": INSIGHT_FUNNELS}, team=self.team
        )
        if not team_filter.correlation_person_limit:
            # Fall back to a sane page size when none was requested.
            team_filter = team_filter.shallow_clone(
                {FUNNEL_CORRELATION_PERSON_LIMIT: 100}
            )
        base_uri = request.build_absolute_uri("/")
        actors, serialized_actors, raw_count = FunnelCorrelationActors(
            filter=team_filter, team=self.team, base_uri=base_uri
        ).get_actors()

        if raw_count >= team_filter.correlation_person_limit:
            next_url = format_query_params_absolute_url(
                request,
                team_filter.correlation_person_offset
                + team_filter.correlation_person_limit,
                offset_alias=FUNNEL_CORRELATION_PERSON_OFFSET,
                limit_alias=FUNNEL_CORRELATION_PERSON_LIMIT,
            )
        else:
            next_url = None
        initial_url = format_query_params_absolute_url(request, 0)

        # cached_function expects a dict with the key result
        remaining = raw_count - len(serialized_actors)
        return {"result": (serialized_actors, next_url, initial_url, remaining)}
class LegacyEnterprisePersonViewSet(EnterprisePersonViewSet):
    """Variant of the enterprise person viewset for legacy (non project-scoped) routes."""

    # NOTE(review): flag is consumed by base viewset machinery; presumably it
    # switches team resolution to the pre-project-URL behavior — confirm upstream.
    legacy_team_compatibility = True
|
puddlestuff | audio_filter | # -*- coding: utf-8 -*-
import logging
import re
from pyparsing import *
from . import audioinfo, findfunc
from .puddleobjects import gettaglist
from .util import to_string
def str_cmp(a, b):
    """Case-insensitive equality; list values are backslash-joined first."""

    def normalize(value):
        if isinstance(value, str):
            return value.lower()
        return "\\".join(value).lower()

    return normalize(a) == normalize(b)
# All known tag field names (lowercased) plus file-info pseudo-fields; used to
# decide whether a bare token in a filter expression names a field.
FIELDS = set(z.lower() for z in gettaglist()).union(audioinfo.FILETAGS)
def parse_arg(audio, text):
    """Resolve a filter token against *audio*.

    ``%field%`` tokens and bare field names resolve to the tag's value;
    anything else (optionally double-quoted) is evaluated as a tagging
    function expression.  Non-string values pass through unchanged.
    """
    if not isinstance(text, str):
        return text

    def wrapped_in(char):
        return text[0] == char and text[-1] == char

    if wrapped_in("%"):
        return to_string(audio.get(text[1:-1], ""))
    if text in FIELDS:
        return to_string(audio.get(text, ""))
    if wrapped_in('"'):
        text = text[1:-1]
    return findfunc.parsefunc(text, audio)
def wrap_bool(original):
    """Decorator for ``__bool__``: resolve the operand's raw argument(s)
    against ``self.audio`` before delegating to *original*."""

    def __bool__(self):
        if hasattr(self, "args"):
            self.args = [parse_arg(self.audio, token) for token in self.args]
            return original(self)
        self.arg = parse_arg(self.audio, self.arg)
        return original(self)

    return __bool__
class BoolOperand(object):
    """Base class for parsed boolean operators; pyparsing hands us a single
    group where operands occupy the even slots and keywords the odd ones."""

    def __init__(self, t):
        group = t[0]
        self.args = group[0::2]
class BoolAnd(BoolOperand):
    """Logical AND over all operands."""

    @wrap_bool
    def __bool__(self):
        logging.debug("and: " + str(self.args))
        return all(bool(operand) for operand in self.args)
class BoolOr(BoolOperand):
    """Logical OR over all operands."""

    @wrap_bool
    def __bool__(self):
        logging.debug("or: " + str(self.args))
        return any(bool(operand) for operand in self.args)
class BoolNot(BoolOperand):
    """Logical NOT.

    A bare string operand is treated as a substring search over every tag
    value, so ``not foo`` is true only when no value contains "foo";
    otherwise the operand is simply negated.
    """

    def __init__(self, t):
        self.arg = t[0][1]

    @wrap_bool
    def __bool__(self):
        logging.debug("not: " + str(self.arg))
        if not isinstance(self.arg, str):
            return not bool(self.arg)
        needle = self.arg.lower()
        for value in self.audio.values():
            if isinstance(value, str):
                value = [value]
            haystack = "\\\\".join(value).lower()
            if needle in haystack:
                return False
        return True
class Greater(BoolOperand):
    """arg0 > arg1, compared numerically when both convert to float."""

    @wrap_bool
    def __bool__(self):
        logging.debug("greater: " + str(self.args))
        try:
            # Only replace the args when every value converts cleanly.
            self.args = [float(value) for value in self.args]
        except ValueError:
            pass
        return self.args[0] > self.args[1]
class Less(BoolOperand):
    """arg0 < arg1, compared numerically when both convert to float."""

    @wrap_bool
    def __bool__(self):
        logging.debug("less: " + str(self.args))
        try:
            # Only replace the args when every value converts cleanly.
            self.args = [float(value) for value in self.args]
        except ValueError:
            pass
        return self.args[0] < self.args[1]
class Equal(BoolOperand):
    """Case-insensitive equality of the two operands."""

    @wrap_bool
    def __bool__(self):
        logging.debug("equal: " + str(self.args))
        left, right = self.args[0], self.args[1]
        return str_cmp(left, right)
class Missing(BoolOperand):
    """True when the named field is absent from the audio's tags."""

    def __init__(self, t):
        self.arg = t[0][1]

    def __bool__(self):
        logging.debug("missing: " + str(self.arg))
        audio = getattr(self, "audio", None)
        if not audio:
            return False
        return self.arg not in audio
class Present(BoolOperand):
    """True when the named field exists in the audio's tags."""

    def __init__(self, t):
        self.arg = t[0][1]

    def __bool__(self):
        logging.debug("present: " + str(self.arg))
        audio = getattr(self, "audio", None)
        return bool(audio) and self.arg in audio
class BoolIs(BoolOperand):
    """The ``is`` keyword: case-insensitive equality of the two operands."""

    @wrap_bool
    def __bool__(self):
        logging.debug("is: " + str(self.args))
        return str_cmp(*self.args[:2])
class Has(BoolOperand):
    """Case-insensitive substring test: arg1 contained in arg0."""

    @wrap_bool
    def __bool__(self):
        logging.debug("has: " + str(self.args))
        haystack = self.args[0].lower()
        return self.args[1].lower() in haystack
class Matches(BoolOperand):
    """Regex search of arg1 (lowercased) within arg0 (lowercased)."""

    @wrap_bool
    def __bool__(self):
        logging.debug("matches: " + str(self.args))
        return re.search(self.args[1].lower(), self.args[0].lower()) is not None
# Operator table for pyparsing's infixNotation: (keyword, arity,
# associativity, handler class).  Order defines precedence, highest first.
bool_exprs = [
    (CaselessLiteral("missing"), 1, opAssoc.RIGHT, Missing),
    (CaselessLiteral("present"), 1, opAssoc.RIGHT, Present),
    (CaselessLiteral("greater"), 2, opAssoc.LEFT, Greater),
    (CaselessLiteral("less"), 2, opAssoc.LEFT, Less),
    (CaselessLiteral("equal"), 2, opAssoc.LEFT, Equal),
    (CaselessLiteral("has"), 2, opAssoc.LEFT, Has),
    (CaselessLiteral("matches"), 2, opAssoc.LEFT, Matches),
    (CaselessLiteral("is"), 2, opAssoc.LEFT, BoolIs),
    (CaselessLiteral("and"), 2, opAssoc.LEFT, BoolAnd),
    (CaselessLiteral("or"), 2, opAssoc.LEFT, BoolOr),
    (CaselessLiteral("not"), 1, opAssoc.RIGHT, BoolNot),
]
# A %field% reference such as %artist%.
field_expr = Combine("%" + Word(alphanums + "_") + "%")
# One operand: quoted string (quotes preserved for parse_arg), field
# reference, or a bare word.
tokens = QuotedString('"', unquoteResults=False) | field_expr | Word(alphanums + "_")
bool_expr = infixNotation(tokens, bool_exprs)
# Packrat memoization speeds up repeated parses of filter expressions.
bool_expr.enablePackrat()
def parse(audio, expr):
    """Evaluate the filter expression *expr* against *audio*.

    Returns True when the audio file matches.  If *expr* does not parse as
    a boolean expression, it is treated as a plain case-insensitive
    substring search over all tag values.
    """
    # The handler classes read the current file from a class attribute,
    # so bind it on every handler before evaluating.
    for i in bool_exprs:
        i[3].audio = audio
    try:
        res = bool_expr.parseString(expr)[0]
    except ParseException as e:
        # Not a boolean expression: fall back to the raw text as search term.
        res = expr
    if isinstance(res, str):
        res = res.lower()
        for field, value in audio.items():
            if isinstance(value, str):
                value = [value]
            elif isinstance(value, (int, float)):
                value = [str(value)]
            try:
                logging.debug("simple filter: %s in %s", res, value)
                # Multi-value tags are joined so the term can match any value.
                if res in "\\\\".join(value).lower():
                    return True
            except TypeError as e:
                # Non-joinable values (e.g. binary tag data) cannot be searched.
                continue
    else:
        # Parsed boolean expression: its truth value is the answer.
        return bool(res)
    return False
# Manual smoke test: load a local file and run a simple filter against it.
# The commented lines below are kept as usage examples.
if __name__ == "__main__":
    audio = audioinfo.Tag("clen.mp3")
    # parse(audio, "not p")
    # parse(audio, 'not missing artist')
    # parse(audio, '7 greater 6')
    # parse(audio, '%track% greater 14')
    # parse(audio, '%track% greater "$add($len(%artist%), 50)"')
    # t = time.time()
    # parse(audio, '(not missing artist) and (20 greater 19)')
    # parse(audio, 'not (20 greater 19)')
    # print time.time() - t
    # parse(audio, 'not missing artist and 18 greater 19')
    # parse(audio, 'artist is "Carl Douglas"')
    # parse(audio, "artist has aarl")
    # parse(audio, "artist has Carl")
    import time

    t = time.time()
    print(audio.filepath)
    print(parse(audio, "__filename has clen"))
|
draftfunctions | downgrade | # ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * Copyright (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * Copyright (c) 2020 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides functions to downgrade objects by different methods.
See also the `upgrade` function.
"""
## @package downgrade
# \ingroup draftfunctions
# \brief Provides functions to downgrade objects by different methods.
import draftfunctions.cut as cut
import draftutils.gui_utils as gui_utils
import draftutils.utils as utils
## \addtogroup draftfunctions
# @{
import FreeCAD as App
from draftutils.messages import _msg
from draftutils.translate import translate
def downgrade(objects, delete=False, force=None):
    """Downgrade the given objects.

    This is a counterpart to `upgrade`.

    Parameters
    ----------
    objects: Part::Feature or list
        A single object to downgrade or a list
        containing various such objects.

    delete: bool, optional
        It defaults to `False`.
        If it is `True`, the old objects are deleted, and only the resulting
        object is kept.

    force: str, optional
        It defaults to `None`.
        Its value can be used to force a certain method of downgrading.
        It can be any of: `'explode'`, `'shapify'`, `'subtr'`, `'splitFaces'`,
        `'cut2'`, `'getWire'`, `'splitWires'`, or `'splitCompounds'`.

    Returns
    -------
    tuple
        A tuple containing two lists, a list of new objects
        and a list of objects to be deleted.

    None
        If there is a problem it will return `None`.

    See Also
    --------
    upgrade
    """
    _name = "downgrade"
    utils.print_header(_name, "Downgrade objects")

    if not isinstance(objects, list):
        objects = [objects]

    delete_list = []
    add_list = []
    doc = App.ActiveDocument

    # actions definitions
    def explode(obj):
        """Explode a Draft block into its components, keeping placements."""
        pl = obj.Placement
        for o in obj.Components:
            o.Placement = pl.multiply(o.Placement)
            if App.GuiUp:
                o.ViewObject.Visibility = True
        delete_list.append(obj)
        return True

    def cut2(objects):
        """Cut first object from the last one."""
        newobj = cut.cut(objects[0], objects[1])
        if newobj:
            add_list.append(newobj)
            return newobj
        return None

    def splitCompounds(objects):
        """Split solids contained in compound objects into new objects."""
        result = False
        for o in objects:
            if o.Shape.Solids:
                for s in o.Shape.Solids:
                    newobj = doc.addObject("Part::Feature", "Solid")
                    newobj.Shape = s
                    add_list.append(newobj)
                result = True
                delete_list.append(o)
        return result

    def splitFaces(objects):
        """Split faces contained in objects into new objects."""
        result = False
        params = App.ParamGet("User parameter:BaseApp/Preferences/Mod/Draft")
        preserveFaceColor = params.GetBool("preserveFaceColor")  # True
        preserveFaceNames = params.GetBool("preserveFaceNames")  # True
        for o in objects:
            if App.GuiUp and preserveFaceColor and o.ViewObject:
                voDColors = o.ViewObject.DiffuseColor
            else:
                voDColors = None
            oLabel = o.Label if hasattr(o, "Label") else ""
            if o.Shape.Faces:
                for ind, f in enumerate(o.Shape.Faces):
                    newobj = doc.addObject("Part::Feature", "Face")
                    newobj.Shape = f
                    if preserveFaceNames:
                        newobj.Label = "{} {}".format(oLabel, newobj.Label)
                    if App.GuiUp and preserveFaceColor and voDColors:
                        # At this point, some single-color objects might have
                        # just a single value in voDColors for all faces,
                        # so we handle that
                        if ind < len(voDColors):
                            tcolor = voDColors[ind]
                        else:
                            tcolor = voDColors[0]
                        # does is not applied visually on its own
                        # just in case
                        newobj.ViewObject.DiffuseColor[0] = tcolor
                        # this gets applied, works by itself too
                        newobj.ViewObject.ShapeColor = tcolor
                    add_list.append(newobj)
                result = True
                delete_list.append(o)
        return result

    def subtr(objects):
        """Subtract faces of all following objects from the first one's."""
        faces = []
        for o in objects:
            if o.Shape.Faces:
                faces.extend(o.Shape.Faces)
                delete_list.append(o)
        u = faces.pop(0)
        for f in faces:
            u = u.cut(f)
        if not u.isNull():
            newobj = doc.addObject("Part::Feature", "Subtraction")
            newobj.Shape = u
            add_list.append(newobj)
            return newobj
        return None

    def getWire(obj):
        """Get the wire(s) from the first face of a face object."""
        result = False
        for w in obj.Shape.Faces[0].Wires:
            newobj = doc.addObject("Part::Feature", "Wire")
            newobj.Shape = w
            add_list.append(newobj)
            result = True
        delete_list.append(obj)
        return result

    def splitWires(objects):
        """Split the wires contained in objects into edges."""
        result = False
        for o in objects:
            if o.Shape.Edges:
                for e in o.Shape.Edges:
                    newobj = doc.addObject("Part::Feature", "Edge")
                    newobj.Shape = e
                    add_list.append(newobj)
                delete_list.append(o)
                result = True
        return result

    def delete_object(obj):
        """Remove *obj* from the document, handling PartDesign bodies."""
        if obj.FullName == "?":  # Already deleted.
            return
        # special case: obj is a body or belongs to a body:
        if obj.TypeId == "PartDesign::Body":
            obj.removeObjectsFromDocument()
            if hasattr(obj, "_Body") and obj._Body is not None:
                obj = obj._Body
                obj.removeObjectsFromDocument()
        else:
            for parent in obj.InList:
                if parent.TypeId == "PartDesign::Body" and obj in parent.Group:
                    obj = parent
                    obj.removeObjectsFromDocument()
                    break
        doc.removeObject(obj.Name)

    # analyzing objects: collect solids, faces and edges of all inputs
    faces = []
    edges = []
    onlyedges = True
    parts = []
    solids = []
    result = None

    for o in objects:
        if hasattr(o, "Shape"):
            for s in o.Shape.Solids:
                solids.append(s)
            for f in o.Shape.Faces:
                faces.append(f)
            for e in o.Shape.Edges:
                edges.append(e)
            if o.Shape.ShapeType != "Edge":
                onlyedges = False
            parts.append(o)
    objects = parts

    if force:
        # Explicit dispatch table instead of eval(force): only the listed
        # names are callable, which removes the code-injection risk noted
        # in the previous TODO.  'splitCompounds' was documented as a valid
        # force method but previously not accepted; it is included here.
        force_methods = {
            "explode": explode,
            "shapify": utils.shapify,
            "subtr": subtr,
            "splitFaces": splitFaces,
            "cut2": cut2,
            "getWire": getWire,
            "splitWires": splitWires,
            "splitCompounds": splitCompounds,
        }
        if force in force_methods:
            result = force_methods[force](objects)
        else:
            # NOTE(review): message says "Upgrade:"; kept verbatim so existing
            # translation catalogs still match — confirm before rewording.
            _msg(translate("draft", "Upgrade: Unknown force method:") + " " + force)
            result = None
    else:
        # applying transformation automatically
        # we have a block, we explode it
        if len(objects) == 1 and utils.get_type(objects[0]) == "Block":
            result = explode(objects[0])
            if result:
                _msg(translate("draft", "Found 1 block: exploding it"))
        # we have one multi-solids compound object: extract its solids
        elif len(objects) == 1 and hasattr(objects[0], "Shape") and len(solids) > 1:
            result = splitCompounds(objects)
            # print(result)
            if result:
                _msg(translate("draft", "Found 1 multi-solids compound: exploding it"))
        # special case, we have one parametric object: we "de-parametrize" it
        elif (
            len(objects) == 1
            and hasattr(objects[0], "Shape")
            and hasattr(objects[0], "Base")
            and not objects[0].isDerivedFrom("PartDesign::Feature")
        ):
            result = utils.shapify(objects[0])
            if result:
                _msg(
                    translate(
                        "draft", "Found 1 parametric object: breaking its dependencies"
                    )
                )
                add_list.append(result)
                # delete_list.append(objects[0])
        # we have only 2 objects: cut 2nd from 1st
        elif len(objects) == 2:
            result = cut2(objects)
            if result:
                _msg(translate("draft", "Found 2 objects: subtracting them"))
        elif len(faces) > 1:
            # one object with several faces: split it
            if len(objects) == 1:
                result = splitFaces(objects)
                if result:
                    _msg(translate("draft", "Found several faces: splitting them"))
            # several objects: remove all the faces from the first one
            else:
                result = subtr(objects)
                if result:
                    _msg(
                        translate(
                            "draft",
                            "Found several objects: subtracting them from the first one",
                        )
                    )
        # only one face: we extract its wires
        elif len(faces) > 0:
            result = getWire(objects[0])
            if result:
                _msg(translate("draft", "Found 1 face: extracting its wires"))
        # no faces: split wire into single edges
        elif not onlyedges:
            result = splitWires(objects)
            if result:
                _msg(translate("draft", "Found only wires: extracting their edges"))
        # no result has been obtained
        if not result:
            _msg(translate("draft", "No more downgrade possible"))

    if delete:
        for o in delete_list:
            delete_object(o)
        delete_list = []

    gui_utils.select(add_list)
    return add_list, delete_list
## @}
|
extractor | hotstar | # coding: utf-8
from __future__ import unicode_literals
import hashlib
import hmac
import json
import re
import time
import uuid
from ..compat import compat_HTTPError, compat_str
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
str_or_none,
try_get,
url_or_none,
)
from .common import InfoExtractor
class HotStarBaseIE(InfoExtractor):
    """Shared helpers for Hotstar extractors: signed access to the v1/v2 API."""

    # Secret used to HMAC-sign the Akamai token sent as "hotstarauth".
    _AKAMAI_ENCRYPTION_KEY = (
        b"\x05\xfc\x1a\x01\xca\xc9\x4b\xc4\x12\xfc\x53\x12\x07\x75\xf9\xee"
    )

    def _call_api_impl(self, path, video_id, headers, query, data=None):
        """Perform a signed JSON request against api.hotstar.com."""
        st = int(time.time())
        # Token valid for 100 minutes and covering every path ("acl=/*").
        exp = st + 6000
        auth = "st=%d~exp=%d~acl=/*" % (st, exp)
        auth += (
            "~hmac="
            + hmac.new(
                self._AKAMAI_ENCRYPTION_KEY, auth.encode(), hashlib.sha256
            ).hexdigest()
        )
        h = {"hotstarauth": auth}
        h.update(headers)
        return self._download_json(
            "https://api.hotstar.com/" + path,
            video_id,
            headers=h,
            query=query,
            data=data,
        )

    def _call_api(self, path, video_id, query_name="contentId"):
        """Call a v1 endpoint and return its "results" payload.

        Raises ExtractorError when the API reports a non-OK status code.
        """
        response = self._call_api_impl(
            path,
            video_id,
            {
                "x-country-code": "IN",
                "x-platform-code": "JIO",
            },
            {
                query_name: video_id,
                "tas": 10000,
            },
        )
        if response["statusCode"] != "OK":
            raise ExtractorError(response["body"]["message"], expected=True)
        return response["body"]["results"]

    def _call_api_v2(self, path, video_id, headers, query=None, data=None):
        """Call a v2 endpoint, translating HTTP errors into extractor errors.

        402 -> login required; known region messages -> geo restriction;
        any other API message -> plain ExtractorError.
        """
        h = {"X-Request-Id": compat_str(uuid.uuid4())}
        h.update(headers)
        try:
            return self._call_api_impl(path, video_id, h, query, data)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError):
                if e.cause.code == 402:
                    self.raise_login_required()
                message = self._parse_json(e.cause.read().decode(), video_id)["message"]
                if message in (
                    "Content not available in region",
                    "Country is not supported",
                ):
                    raise self.raise_geo_restricted(message)
                raise ExtractorError(message)
            raise e
class HotStarIE(HotStarBaseIE):
    """Extractor for individual Hotstar videos (identified by a 10-digit id)."""

    IE_NAME = "hotstar"
    _VALID_URL = r"https?://(?:www\.)?hotstar\.com/(?:.+[/-])?(?P<id>\d{10})"
    _TESTS = [
        {
            # contentData
            "url": "https://www.hotstar.com/can-you-not-spread-rumours/1000076273",
            "info_dict": {
                "id": "1000076273",
                "ext": "mp4",
                "title": "Can You Not Spread Rumours?",
                "description": "md5:c957d8868e9bc793ccb813691cc4c434",
                "timestamp": 1447248600,
                "upload_date": "20151111",
                "duration": 381,
            },
            "params": {
                # m3u8 download
                "skip_download": True,
            },
        },
        {
            # contentDetail
            "url": "https://www.hotstar.com/movies/radha-gopalam/1000057157",
            "only_matching": True,
        },
        {
            "url": "http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583",
            "only_matching": True,
        },
        {
            "url": "http://www.hotstar.com/1000000515",
            "only_matching": True,
        },
        {
            # only available via api v2
            "url": "https://www.hotstar.com/tv/ek-bhram-sarvagun-sampanna/s-2116/janhvi-targets-suman/1000234847",
            "only_matching": True,
        },
        {
            "url": "https://www.hotstar.com/in/tv/start-music/1260005217/cooks-vs-comalis/1100039717",
            "only_matching": True,
        },
    ]
    _GEO_BYPASS = False
    # Cached on the class so repeated extractions reuse one guest session.
    _DEVICE_ID = None
    _USER_TOKEN = None

    def _real_extract(self, url):
        """Extract metadata and formats for a single video.

        Video metadata comes from the embedded APP_STATE JSON; stream URLs
        come from the v2 playback API using a throwaway guest user token.
        """
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        app_state = self._parse_json(
            self._search_regex(
                r"<script>window\.APP_STATE\s*=\s*({.+?})</script>",
                webpage,
                "app state",
            ),
            video_id,
        )
        video_data = {}
        # The content block may live under either "contentData" or
        # "contentDetail" depending on the page type; try both.
        getters = list(
            lambda x, k=k: x["initialState"]["content%s" % k]["content"]
            for k in ("Data", "Detail")
        )
        for v in app_state.values():
            content = try_get(v, getters, dict)
            if content and content.get("contentId") == video_id:
                video_data = content
                break
        title = video_data["title"]
        if video_data.get("drmProtected"):
            raise ExtractorError("This video is DRM protected.", expected=True)
        headers = {"Referer": url}
        formats = []
        geo_restricted = False
        # Register a guest device/user once; the token is reused afterwards.
        if not self._USER_TOKEN:
            self._DEVICE_ID = compat_str(uuid.uuid4())
            self._USER_TOKEN = self._call_api_v2(
                "um/v3/users",
                video_id,
                {
                    "X-HS-Platform": "PCTV",
                    "Content-Type": "application/json",
                },
                data=json.dumps(
                    {
                        "device_ids": [
                            {
                                "id": self._DEVICE_ID,
                                "type": "device_id",
                            }
                        ],
                    }
                ).encode(),
            )["user_identity"]
        playback_sets = self._call_api_v2(
            "play/v2/playback/content/" + video_id,
            video_id,
            {
                "X-HS-Platform": "web",
                "X-HS-AppVersion": "6.99.1",
                "X-HS-UserToken": self._USER_TOKEN,
            },
            query={
                "device-id": self._DEVICE_ID,
                "desired-config": "encryption:plain",
                "os-name": "Windows",
                "os-version": "10",
            },
        )["data"]["playBackSets"]
        for playback_set in playback_sets:
            if not isinstance(playback_set, dict):
                continue
            format_url = url_or_none(playback_set.get("playbackUrl"))
            if not format_url:
                continue
            # NOTE(review): rewrites staragvod<N> hosts to staragvodweb<N> —
            # presumably a CDN host that serves web clients; confirm upstream.
            format_url = re.sub(r"(?<=//staragvod)(\d)", r"web\1", format_url)
            tags = str_or_none(playback_set.get("tagsCombination")) or ""
            # Encrypted variants are skipped; only plain streams are usable.
            if tags and "encryption:plain" not in tags:
                continue
            ext = determine_ext(format_url)
            try:
                if "package:hls" in tags or ext == "m3u8":
                    formats.extend(
                        self._extract_m3u8_formats(
                            format_url,
                            video_id,
                            "mp4",
                            entry_protocol="m3u8_native",
                            m3u8_id="hls",
                            headers=headers,
                        )
                    )
                elif "package:dash" in tags or ext == "mpd":
                    formats.extend(
                        self._extract_mpd_formats(
                            format_url, video_id, mpd_id="dash", headers=headers
                        )
                    )
                elif ext == "f4m":
                    # produce broken files
                    pass
                else:
                    formats.append(
                        {
                            "url": format_url,
                            "width": int_or_none(playback_set.get("width")),
                            "height": int_or_none(playback_set.get("height")),
                        }
                    )
            except ExtractorError as e:
                # A 403 on one playback set may just mean geo blocking;
                # keep trying the remaining sets.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                    geo_restricted = True
                continue
        if not formats and geo_restricted:
            self.raise_geo_restricted(countries=["IN"])
        self._sort_formats(formats)
        for f in formats:
            f.setdefault("http_headers", {}).update(headers)
        image = try_get(video_data, lambda x: x["image"]["h"], compat_str)
        return {
            "id": video_id,
            "title": title,
            "thumbnail": "https://img1.hotstarext.com/image/upload/" + image
            if image
            else None,
            "description": video_data.get("description"),
            "duration": int_or_none(video_data.get("duration")),
            "timestamp": int_or_none(
                video_data.get("broadcastDate") or video_data.get("startDate")
            ),
            "formats": formats,
            "channel": video_data.get("channelName"),
            "channel_id": str_or_none(video_data.get("channelId")),
            "series": video_data.get("showName"),
            "season": video_data.get("seasonName"),
            "season_number": int_or_none(video_data.get("seasonNo")),
            "season_id": str_or_none(video_data.get("seasonId")),
            "episode": title,
            "episode_number": int_or_none(video_data.get("episodeNo")),
        }
class HotStarPlaylistIE(HotStarBaseIE):
    """Expands a Hotstar tray playlist into individual video entries."""

    IE_NAME = "hotstar:playlist"
    _VALID_URL = r"https?://(?:www\.)?hotstar\.com/(?:[a-z]{2}/)?tv/[^/]+/s-\w+/list/[^/]+/t-(?P<id>\w+)"
    _TESTS = [
        {
            "url": "https://www.hotstar.com/tv/savdhaan-india/s-26/list/popular-clips/t-3_2_26",
            "info_dict": {
                "id": "3_2_26",
            },
            "playlist_mincount": 20,
        },
        {
            "url": "https://www.hotstar.com/tv/savdhaan-india/s-26/list/extras/t-2480",
            "only_matching": True,
        },
        {
            "url": "https://www.hotstar.com/us/tv/masterchef-india/s-830/list/episodes/t-1_2_830",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        """Fetch the tray items and emit one entry per video with an id."""
        playlist_id = self._match_id(url)
        collection = self._call_api("o/v1/tray/find", playlist_id, "uqId")
        entries = []
        for item in collection["assets"]["items"]:
            content_id = item.get("contentId")
            if not content_id:
                continue
            entries.append(
                self.url_result(
                    "https://www.hotstar.com/%s" % content_id,
                    ie=HotStarIE.ie_key(),
                    video_id=content_id,
                )
            )
        return self.playlist_result(entries, playlist_id)
|
accounts | FastshareCz | # -*- coding: utf-8 -*-
import re
import time
from ..base.account import BaseAccount
from ..helpers import set_cookie
class FastshareCz(BaseAccount):
    __name__ = "FastshareCz"
    __type__ = "account"
    __version__ = "0.18"
    __status__ = "testing"

    __description__ = """Fastshare.cz account plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zoidberg", "zoidberg@mujmail.cz"),
        ("stickell", "l.stickell@yahoo.it"),
        ("ondrej", "git@ondrej.it"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    TRAFFICLEFT_PATTERN = r"<td>([\d\.]+) ([KMGT]B)\s*</td>"
    VALID_UNTILL_PATTERN = r">Active until ([\d.]+)<"

    def grab_info(self, user, password, data):
        """Scrape the account page for premium status, expiry and traffic."""
        html = self.load("https://www.fastshare.cz/user")

        expiry = re.search(self.VALID_UNTILL_PATTERN, html)
        if expiry is not None:
            # Time-limited premium: expiry date shown, traffic is unlimited.
            validuntil = time.mktime(
                time.strptime(expiry.group(1) + " 23:59:59", "%d.%m.%Y %H:%M:%S")
            )
            return {"validuntil": validuntil, "trafficleft": -1, "premium": True}

        # Credit-based account: premium only while traffic remains.
        validuntil = -1
        premium = False
        trafficleft = None
        credit = re.search(self.TRAFFICLEFT_PATTERN, html)
        if credit is not None:
            trafficleft = self.parse_traffic(credit.group(1), credit.group(2))
            premium = bool(trafficleft)
            if not premium:
                trafficleft = None

        return {
            "validuntil": validuntil,
            "trafficleft": trafficleft,
            "premium": premium,
        }

    def signin(self, user, password, data):
        """Log in, skipping when an active session cookie already exists."""
        set_cookie(self.req.cj, "fastshare.cz", "lang", "en")

        html = self.load("https://www.fastshare.cz/user")
        if 'href="/logout.php"' in html:
            self.skip_login()

        html = self.load(
            "https://www.fastshare.cz/sql.php",
            post={"login": user, "heslo": password},
        )
        if 'href="/logout.php"' not in html:
            self.fail_login()
|
backfill | gilded_user_comments | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
"""Fill in the gilded comment listing for users.
This listing is stored in get_gilded_user_comments and seen on
/user/<username>/gilded.
"""
import datetime
from pylons import app_globals as g
from r2.lib.db.queries import get_gilded_user_comments
from r2.lib.utils import Storage
from r2.models import Comment, GildingsByDay, Thing
from r2.models.query_cache import CachedQueryMutator
# Walk backwards day by day from today to the earliest gilding we care about,
# inserting each gilded comment into its author's cached "gilded" listing.
date = datetime.datetime.now(g.tz)
# Fixed: the previous literal ``01`` (leading zero) is a SyntaxError on
# Python 3; the value is unchanged.
earliest_date = datetime.datetime(2012, 10, 1, tzinfo=g.tz)
# Guard against a comment gilded more than once on the same day.
already_seen = set()

with CachedQueryMutator() as m:
    while date > earliest_date:
        gildings = GildingsByDay.get_gildings(date)
        fullnames = [x["thing"] for x in gildings]
        things = Thing._by_fullname(fullnames, data=True, return_dict=False)
        # Only comments get backfilled; map fullname -> Comment for lookup.
        comments = {t._fullname: t for t in things if isinstance(t, Comment)}

        for gilding in gildings:
            fullname = gilding["thing"]
            if fullname in comments and fullname not in already_seen:
                # Replace the fullname with the Thing so the cached query
                # stores a usable object.
                thing = gilding["thing"] = comments[fullname]
                gilding_object = Storage(gilding)
                m.insert(get_gilded_user_comments(thing.author_id),
                         [gilding_object])
                already_seen.add(fullname)
        date -= datetime.timedelta(days=1)
|
landing | register | """ class views for login/register views """
import pytz
from bookwyrm import emailing, forms, models
from bookwyrm.settings import DOMAIN
from django.contrib.auth import login
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.debug import sensitive_post_parameters, sensitive_variables
# pylint: disable=no-self-use
class Register(View):
    """register a user"""

    def get(self, request):  # pylint: disable=unused-argument
        """whether or not you're logged in, just go to the home view"""
        return redirect("/")

    @sensitive_variables("password")
    @method_decorator(sensitive_post_parameters("password"))
    def post(self, request):
        """join the server"""
        settings = models.SiteSettings.get()
        # no registration allowed when the site is being installed
        if settings.install_mode:
            raise PermissionDenied()

        # closed registration requires a valid, unexpired invite code
        if not settings.allow_registration:
            invite_code = request.POST.get("invite_code")
            if not invite_code:
                raise PermissionDenied()
            invite = get_object_or_404(models.SiteInvite, code=invite_code)
            if not invite.valid():
                raise PermissionDenied()
        else:
            invite = None

        form = forms.RegisterForm(request.POST)
        if not form.is_valid():
            # re-render the landing page with the bound (error-carrying) form
            data = {
                "login_form": forms.LoginForm(),
                "register_form": form,
                "invite": invite,
                "valid": invite.valid() if invite else True,
            }
            if invite:
                return TemplateResponse(request, "landing/invite.html", data)
            return TemplateResponse(request, "landing/login.html", data)

        localname = form.data["localname"].strip()
        email = form.data["email"]
        password = form.data["password"]
        try:
            preferred_timezone = pytz.timezone(form.data.get("preferred_timezone"))
        except pytz.exceptions.UnknownTimeZoneError:
            preferred_timezone = pytz.utc

        # make sure the email isn't blocked as spam
        email_domain = email.split("@")[-1]
        if models.EmailBlocklist.objects.filter(domain=email_domain).exists():
            # treat this like a successful registration, but don't do anything
            return redirect("confirm-email")

        # federated username is localname@<this server's domain>
        username = f"{localname}@{DOMAIN}"
        # when email confirmation is required, the account starts deactivated
        # ("pending") and is activated by ConfirmEmailCode
        user = models.User.objects.create_user(
            username,
            email,
            password,
            localname=localname,
            local=True,
            allow_reactivation=settings.require_confirm_email,
            deactivation_reason="pending" if settings.require_confirm_email else None,
            is_active=not settings.require_confirm_email,
            preferred_timezone=preferred_timezone,
        )
        if invite:
            invite.times_used += 1
            invite.invitees.add(user)
            invite.save()

        if settings.require_confirm_email:
            emailing.email_confirmation_email(user)
            return redirect("confirm-email")

        login(request, user)
        return redirect("get-started-profile")
class ConfirmEmailCode(View):
    """confirm email address"""

    def get(self, request, code):  # pylint: disable=unused-argument
        """you got the code! good work"""
        site_settings = models.SiteSettings.get()
        if request.user.is_authenticated:
            return redirect("/")
        if not site_settings.require_confirm_email:
            return redirect("login")

        # the code only works for accounts still pending activation
        try:
            user = models.User.objects.get(
                confirmation_code=code, deactivation_reason="pending"
            )
        except models.User.DoesNotExist:
            context = {"valid": False}
            return TemplateResponse(
                request, "confirm_email/confirm_email.html", context
            )

        # activate the account and send the user off to log in
        user.reactivate()
        return redirect("login", confirmed="confirmed")
class ConfirmEmail(View):
    """enter code to confirm email address"""

    def get(self, request):  # pylint: disable=unused-argument
        """you need a code! keep looking"""
        site_settings = models.SiteSettings.get()
        if request.user.is_authenticated or not site_settings.require_confirm_email:
            return redirect("/")
        context = {"valid": True}
        return TemplateResponse(request, "confirm_email/confirm_email.html", context)

    def post(self, request):
        """same as clicking the link"""
        code = request.POST.get("code")
        return ConfirmEmailCode().get(request, code)
class ResendConfirmEmail(View):
    """you probably didn't get the email because celery is slow but you can try this"""

    def get(self, request):
        """resend link landing page"""
        return TemplateResponse(request, "confirm_email/resend.html")

    def post(self, request):
        """resend confirmation link"""
        email = request.POST.get("email")
        try:
            account = models.User.objects.get(email=email)
            emailing.email_confirmation_email(account)
        except models.User.DoesNotExist:
            # don't reveal whether the address is registered
            pass
        context = {"valid": True}
        return TemplateResponse(
            request, "confirm_email/confirm_email.html", context
        )
|
core | rpcserver | #
# Copyright (C) 2008,2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
"""RPCServer Module"""
import logging
import os
import sys
import traceback
from collections import namedtuple
from types import FunctionType
from typing import Callable, TypeVar, overload
import deluge.component as component
import deluge.configmanager
from deluge.core.authmanager import (
AUTH_LEVEL_ADMIN,
AUTH_LEVEL_DEFAULT,
AUTH_LEVEL_NONE,
)
from deluge.crypto_utils import check_ssl_keys, get_context_factory
from deluge.error import (
DelugeError,
IncompatibleClient,
NotAuthorizedError,
WrappedException,
_ClientSideRecreateError,
)
from deluge.event import ClientDisconnectedEvent
from deluge.transfer import DelugeTransferProtocol
from twisted.internet import defer, reactor
from twisted.internet.protocol import Factory, connectionDone
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
log = logging.getLogger(__name__)
TCallable = TypeVar("TCallable", bound=Callable)
@overload
def export(func: TCallable) -> TCallable:
    ...


@overload
def export(auth_level: int) -> Callable[[TCallable], TCallable]:
    ...


def export(auth_level=AUTH_LEVEL_DEFAULT):
    """
    Decorator marking an object's method as an RPC-exported method. The
    object still needs to be registered with an :class:`RPCServer` for the
    export to take effect.

    Usable either bare (``@export``) or parametrized
    (``@export(AUTH_LEVEL_ADMIN)``).

    :param func: the function to export
    :type func: function
    :param auth_level: the auth level required to call this method
    :type auth_level: int
    """

    def wrap(func, *args, **kwargs):
        func._rpcserver_export = True
        func._rpcserver_auth_level = auth_level
        rpc_text = "**RPC exported method** (*Auth level: %s*)" % auth_level
        # Append the RPC marker to the docstring, keeping the existing
        # indentation so rendered documentation stays aligned.
        doc = func.__doc__
        if not doc:
            func.__doc__ = rpc_text
        elif doc.endswith(" "):
            indent = doc.split("\n")[-1]
            func.__doc__ = doc + f"\n{indent}" + rpc_text
        else:
            func.__doc__ = doc + "\n\n" + rpc_text
        return func

    # Bare usage: @export passes the function itself in as `auth_level`.
    if isinstance(auth_level, FunctionType):
        target = auth_level
        auth_level = AUTH_LEVEL_DEFAULT
        return wrap(target)
    return wrap
def format_request(call):
    """
    Render an RPCRequest tuple as a human-readable call string for debugging.

    :param call: the request, shaped ``(request_id, method, args, kwargs)``
    :type call: a RPCRequest
    :returns: a ``method(arg1, arg2, kw=val)`` style string for printing
    :rtype: str
    """
    try:
        pieces = [str(arg) for arg in call[2]] if call[2] else []
        if call[3]:
            pieces.extend(key + "=" + str(value) for key, value in call[3].items())
        return call[1] + "(" + ", ".join(pieces) + ")"
    except UnicodeEncodeError:
        return "UnicodeEncodeError, call: %s" % call
class DelugeRPCProtocol(DelugeTransferProtocol):
    """Server-side protocol for one client connection.

    Receives RPC Request tuples from the wire, dispatches them against the
    methods registered on the factory, and replies with RPC_RESPONSE,
    RPC_ERROR or RPC_EVENT messages.
    """
    def __init__(self):
        super().__init__()
        # namedtuple subclass with auth_level, username for the connected session.
        self.AuthLevel = namedtuple("SessionAuthlevel", "auth_level, username")
    def message_received(self, request):
        """
        This method is called whenever a message is received from a client. The
        only message that a client sends to the server is a RPC Request message.
        If the RPC Request message is valid, then the method is called in
        :meth:`dispatch`.
        :param request: the request from the client.
        :type data: tuple
        """
        if not isinstance(request, tuple):
            log.debug("Received invalid message: type is not tuple")
            return
        if len(request) < 1:
            log.debug("Received invalid message: there are no items")
            return
        # A request may batch several calls; each call is a 4-tuple of
        # (request_id, method, args, kwargs).
        for call in request:
            if len(call) != 4:
                log.debug(
                    "Received invalid rpc request: number of items " "in request is %s",
                    len(call),
                )
                continue
            # log.debug('RPCRequest: %s', format_request(call))
            # Defer each call to the next reactor iteration instead of
            # executing it inline while still reading the message.
            reactor.callLater(0, self.dispatch, *call)
    def sendData(self, data):  # NOQA: N802
        """
        Sends the data to the client.
        :param data: the object that is to be sent to the client. This should
        be one of the RPC message types.
        :type data: object
        """
        try:
            self.transfer_message(data)
        except Exception as ex:
            # Log and re-raise so callers (e.g. dispatch) can turn this
            # failure into an RPC_ERROR for the client.
            log.warning("Error occurred when sending message: %s.", ex)
            log.exception(ex)
            raise
    def connectionMade(self):  # NOQA: N802
        """
        This method is called when a new client connects.
        """
        peer = self.transport.getPeer()
        log.info("Deluge Client connection made from: %s:%s", peer.host, peer.port)
        # Set the initial auth level of this session to AUTH_LEVEL_NONE
        self.factory.authorized_sessions[self.transport.sessionno] = self.AuthLevel(
            AUTH_LEVEL_NONE, ""
        )
    def connectionLost(self, reason=connectionDone):  # NOQA: N802
        """
        This method is called when the client is disconnected.
        :param reason: the reason the client disconnected.
        :type reason: str
        """
        # We need to remove this session from various dicts
        del self.factory.authorized_sessions[self.transport.sessionno]
        if self.transport.sessionno in self.factory.session_protocols:
            del self.factory.session_protocols[self.transport.sessionno]
        if self.transport.sessionno in self.factory.interested_events:
            del self.factory.interested_events[self.transport.sessionno]
        # Only emit the event while the server is still running (stop()
        # flips factory.state to "stopping").
        if self.factory.state == "running":
            component.get("EventManager").emit(
                ClientDisconnectedEvent(self.factory.session_id)
            )
        log.info("Deluge client disconnected: %s", reason.value)
    def valid_session(self):
        # A session is valid while it has an entry in authorized_sessions
        # (created in connectionMade, removed in connectionLost).
        return self.transport.sessionno in self.factory.authorized_sessions
    def dispatch(self, request_id, method, args, kwargs):
        """
        This method is run when a RPC Request is made. It will run the local method
        and will send either a RPC Response or RPC Error back to the client.
        :param request_id: the request_id from the client (sent in the RPC Request)
        :type request_id: int
        :param method: the local method to call. It must be registered with
        the :class:`RPCServer`.
        :type method: str
        :param args: the arguments to pass to `method`
        :type args: list
        :param kwargs: the keyword-arguments to pass to `method`
        :type kwargs: dict
        """
        def send_error():
            """
            Sends an error response with the contents of the exception that was raised.
            """
            exc_type, exc_value, dummy_exc_trace = sys.exc_info()
            formated_tb = traceback.format_exc()
            try:
                # Deluge exceptions carry _args/_kwargs for client-side
                # reconstruction; anything else triggers the AttributeError
                # fallback below.
                self.sendData(
                    (
                        RPC_ERROR,
                        request_id,
                        exc_type.__name__,
                        exc_value._args,
                        exc_value._kwargs,
                        formated_tb,
                    )
                )
            except AttributeError:
                # This is not a deluge exception (object has no attribute '_args), let's wrap it
                log.warning(
                    "An exception occurred while sending RPC_ERROR to "
                    "client. Wrapping it and resending. Error to "
                    "send(causing exception goes next):\n%s",
                    formated_tb,
                )
                try:
                    # Raise (not just construct) so sys.exc_info() inside the
                    # recursive send_error() call sees the WrappedException.
                    raise WrappedException(
                        str(exc_value), exc_type.__name__, formated_tb
                    )
                except WrappedException:
                    send_error()
            except Exception as ex:
                log.error(
                    "An exception occurred while sending RPC_ERROR to client: %s", ex
                )
        if method == "daemon.info":
            # This is a special case and used in the initial connection process
            # NOTE(review): relies on deluge.common being reachable via the
            # imported deluge package — it is not imported explicitly here.
            self.sendData((RPC_RESPONSE, request_id, deluge.common.get_version()))
            return
        elif method == "daemon.login":
            # This is a special case and used in the initial connection process
            # We need to authenticate the user here
            log.debug("RPC dispatch daemon.login")
            try:
                client_version = kwargs.pop("client_version", None)
                if client_version is None:
                    raise IncompatibleClient(deluge.common.get_version())
                ret = component.get("AuthManager").authorize(*args, **kwargs)
                if ret:
                    # Successful login: record auth level + username and keep
                    # the protocol object around for event delivery.
                    self.factory.authorized_sessions[
                        self.transport.sessionno
                    ] = self.AuthLevel(ret, args[0])
                    self.factory.session_protocols[self.transport.sessionno] = self
            except Exception as ex:
                send_error()
                if not isinstance(ex, _ClientSideRecreateError):
                    log.exception(ex)
            else:
                self.sendData((RPC_RESPONSE, request_id, (ret)))
                # Failed authorization drops the connection.
                if not ret:
                    self.transport.loseConnection()
            return
        # Anything below requires a valid session
        if not self.valid_session():
            return
        if method == "daemon.set_event_interest":
            log.debug("RPC dispatch daemon.set_event_interest")
            # This special case is to allow clients to set which events they are
            # interested in receiving.
            # We are expecting a sequence from the client.
            try:
                if self.transport.sessionno not in self.factory.interested_events:
                    self.factory.interested_events[self.transport.sessionno] = []
                self.factory.interested_events[self.transport.sessionno].extend(args[0])
            except Exception:
                send_error()
            else:
                self.sendData((RPC_RESPONSE, request_id, (True)))
            return
        if method not in self.factory.methods:
            try:
                # Raise exception to be sent back to client
                raise AttributeError("RPC call on invalid function: %s" % method)
            except AttributeError:
                send_error()
                return
        log.debug("RPC dispatch %s", method)
        try:
            method_auth_requirement = self.factory.methods[method]._rpcserver_auth_level
            auth_level = self.factory.authorized_sessions[
                self.transport.sessionno
            ].auth_level
            if auth_level < method_auth_requirement:
                # This session is not allowed to call this method
                log.debug(
                    "Session %s is attempting an unauthorized method call!",
                    self.transport.sessionno,
                )
                raise NotAuthorizedError(auth_level, method_auth_requirement)
            # Set the session_id in the factory so that methods can know
            # which session is calling it.
            self.factory.session_id = self.transport.sessionno
            ret = self.factory.methods[method](*args, **kwargs)
        except Exception as ex:
            send_error()
            # Don't bother printing out DelugeErrors, because they are just
            # for the client
            if not isinstance(ex, DelugeError):
                log.exception("Exception calling RPC request: %s", ex)
        else:
            # Check if the return value is a deferred, since we'll need to
            # wait for it to fire before sending the RPC_RESPONSE
            if isinstance(ret, defer.Deferred):
                def on_success(result):
                    try:
                        self.sendData((RPC_RESPONSE, request_id, result))
                    except Exception:
                        send_error()
                    return result
                def on_fail(failure):
                    try:
                        failure.raiseException()
                    except Exception:
                        send_error()
                    return failure
                ret.addCallbacks(on_success, on_fail)
            else:
                self.sendData((RPC_RESPONSE, request_id, ret))
class RPCServer(component.Component):
    """
    This class is used to handle rpc requests from the client. Objects are
    registered with this class and their methods are exported using the export
    decorator.
    :param port: the port the RPCServer will listen on
    :type port: int
    :param interface: the interface to listen on, this may override the `allow_remote` setting
    :type interface: str
    :param allow_remote: set True if the server should allow remote connections
    :type allow_remote: bool
    :param listen: if False, will not start listening.. This is only useful in Classic Mode
    :type listen: bool
    """

    def __init__(self, port=58846, interface="", allow_remote=False, listen=True):
        component.Component.__init__(self, "RPCServer")
        self.factory = Factory()
        self.factory.protocol = DelugeRPCProtocol
        self.factory.session_id = -1
        self.factory.state = "running"
        # Holds the registered methods
        self.factory.methods = {}
        # Holds the session_ids and auth levels
        self.factory.authorized_sessions = {}
        # Holds the protocol objects with the session_id as key
        self.factory.session_protocols = {}
        # Holds the interested event list for the sessions
        self.factory.interested_events = {}
        self.listen = listen
        if not listen:
            return
        if allow_remote:
            hostname = ""
        else:
            hostname = "localhost"
        # An explicit interface always overrides the allow_remote default.
        if interface:
            hostname = interface
        log.info("Starting DelugeRPC server %s:%s", hostname, port)
        # Check for SSL keys and generate some if needed
        check_ssl_keys()
        cert = os.path.join(deluge.configmanager.get_config_dir("ssl"), "daemon.cert")
        pkey = os.path.join(deluge.configmanager.get_config_dir("ssl"), "daemon.pkey")
        try:
            reactor.listenSSL(
                port, self.factory, get_context_factory(cert, pkey), interface=hostname
            )
        except Exception as ex:
            log.debug("Daemon already running or port not available.: %s", ex)
            raise

    def register_object(self, obj, name=None):
        """
        Registers an object to export its rpc methods. These methods should
        be exported with the export decorator prior to registering the object.
        :param obj: the object that we want to export
        :type obj: object
        :param name: the name to use, if None, it will be the class name of the object
        :type name: str
        """
        if not name:
            name = obj.__class__.__name__.lower()
        for d in dir(obj):
            # Skip private/protected attributes.
            if d[0] == "_":
                continue
            if getattr(getattr(obj, d), "_rpcserver_export", False):
                log.debug("Registering method: %s", name + "." + d)
                self.factory.methods[name + "." + d] = getattr(obj, d)

    def deregister_object(self, obj):
        """
        Deregisters an objects exported rpc methods.
        :param obj: the object that was previously registered
        """
        # Iterate over a snapshot: deleting entries while iterating the live
        # dict view raises RuntimeError on Python 3.
        for key, value in list(self.factory.methods.items()):
            if value.__self__ == obj:
                del self.factory.methods[key]

    def get_object_method(self, name):
        """
        Returns a registered method.
        :param name: the name of the method, usually in the form of 'object.method'
        :type name: str
        :returns: method
        :raises KeyError: if `name` is not registered
        """
        return self.factory.methods[name]

    def get_method_list(self):
        """
        Returns a list of the exported methods.
        :returns: the exported methods
        :rtype: list
        """
        return list(self.factory.methods)

    def get_session_id(self):
        """
        Returns the session id of the current RPC.
        :returns: the session id, this will be -1 if no connections have been made
        :rtype: int
        """
        return self.factory.session_id

    def get_session_user(self):
        """
        Returns the username calling the current RPC.
        :returns: the username of the user calling the current RPC
        :rtype: string
        """
        if not self.listen:
            # Classic mode: there is no real session, so use a fixed name.
            return "localclient"
        session_id = self.get_session_id()
        if session_id > -1 and session_id in self.factory.authorized_sessions:
            return self.factory.authorized_sessions[session_id].username
        else:
            # No connections made yet
            return ""

    def get_session_auth_level(self):
        """
        Returns the auth level of the user calling the current RPC.
        :returns: the auth level
        :rtype: int
        """
        # Classic mode / no valid session means the local client, which has
        # full admin rights.
        if not self.listen or not self.is_session_valid(self.get_session_id()):
            return AUTH_LEVEL_ADMIN
        return self.factory.authorized_sessions[self.get_session_id()].auth_level

    def get_rpc_auth_level(self, rpc):
        """
        Returns the auth level requirement for an exported rpc.
        :returns: the auth level
        :rtype: int
        """
        return self.factory.methods[rpc]._rpcserver_auth_level

    def is_session_valid(self, session_id):
        """
        Checks if the session is still valid, eg, if the client is still connected.
        :param session_id: the session id
        :type session_id: int
        :returns: True if the session is valid
        :rtype: bool
        """
        return session_id in self.factory.authorized_sessions

    def emit_event(self, event):
        """
        Emits the event to interested clients.
        :param event: the event to emit
        :type event: :class:`deluge.event.DelugeEvent`
        """
        log.debug("intevents: %s", self.factory.interested_events)
        # Use copy of `interested_events` since it can mutate while iterating.
        for session_id, interest in self.factory.interested_events.copy().items():
            if event.name in interest:
                log.debug("Emit Event: %s %s", event.name, event.args)
                # This session is interested so send a RPC_EVENT
                self.factory.session_protocols[session_id].sendData(
                    (RPC_EVENT, event.name, event.args)
                )

    def emit_event_for_session_id(self, session_id, event):
        """
        Emits the event to specified session_id.
        :param session_id: the event to emit
        :type session_id: int
        :param event: the event to emit
        :type event: :class:`deluge.event.DelugeEvent`
        """
        if not self.is_session_valid(session_id):
            log.debug(
                'Session ID %s is not valid. Not sending event "%s".',
                session_id,
                event.name,
            )
            return
        if session_id not in self.factory.interested_events:
            log.debug(
                'Session ID %s is not interested in any events. Not sending event "%s".',
                session_id,
                event.name,
            )
            return
        if event.name not in self.factory.interested_events[session_id]:
            log.debug(
                'Session ID %s is not interested in event "%s". Not sending it.',
                session_id,
                event.name,
            )
            return
        log.debug(
            'Sending event "%s" with args "%s" to session id "%s".',
            event.name,
            event.args,
            session_id,
        )
        self.factory.session_protocols[session_id].sendData(
            (RPC_EVENT, event.name, event.args)
        )

    def stop(self):
        # Mark the server as stopping so connectionLost handlers skip
        # emitting ClientDisconnectedEvent during shutdown.
        self.factory.state = "stopping"
|
sunflower | mounts | from __future__ import absolute_import
from gi.repository import Gio, GLib, Gtk, Pango
from sunflower.gui.mounts_manager_window import MountsManagerWindow
from sunflower.widgets.location_menu import GenericHeader, Location
class MountsManager:
    """Class used for monitoring and managing mounts menu"""

    def __init__(self, application):
        self._application = application
        self._mounts = {}
        self._location_menu = None
        self._window = MountsManagerWindow(self)
        # create volume monitor
        self._volume_monitor = Gio.VolumeMonitor.get()
        self._volume_monitor.connect("volume-added", self._handle_add_volume)

    def _show_warning(self, message):
        """Show a modal warning dialog with an already-translated message."""
        dialog = Gtk.MessageDialog(
            self._application,
            Gtk.DialogFlags.DESTROY_WITH_PARENT,
            Gtk.MessageType.WARNING,
            Gtk.ButtonsType.OK,
            message,
        )
        dialog.run()
        dialog.destroy()

    def show(self, widget, data=None):
        """Show the mounts manager window."""
        self._window.show_all()

    def attach_location_menu(self, location_menu):
        """Use notification from location menu to populate list with mounts and volumes."""
        self._location_menu = location_menu
        self._location_menu.add_header(Volume, GenericHeader(_("Mounts")))
        automount = self._application.options.section("operations").get(
            "automount_start"
        )
        for volume in self._volume_monitor.get_volumes():
            self._location_menu.add_location(Volume(self, volume))
            # optionally mount volumes already present at startup
            if automount and volume.can_mount() and volume.get_mount() is None:
                volume.mount(
                    Gio.MountMountFlags.NONE,
                    None,
                    None,
                    self._handle_mount_finish,
                    None,
                )

    def _handle_add_volume(self, monitor, volume):
        """Event called when new volume is connected."""
        self._location_menu.add_location(Volume(self, volume))
        # automount volume if needed
        automount_insert = self._application.options.section("operations").get(
            "automount_insert"
        )
        if automount_insert and volume.can_mount() and volume.get_mount() is None:
            volume.mount(
                Gio.MountMountFlags.NONE, None, None, self._handle_mount_finish, None
            )

    def _handle_remove_volume(self, widget, volume):
        """Event called when volume is removed."""
        self._location_menu.remove_location(widget)

    def _handle_mount_finish(self, mount, result, data=None):
        """Callback for mount events."""
        try:
            mount.mount_finish(result)
        except GLib.Error as error:
            self._show_warning(
                _("Unable to finish mounting:\n{}".format(error.message))
            )

    def _handle_unmount_finish(self, mount, result, data=None):
        """Callback for unmount events."""
        try:
            mount.unmount_finish(result)
        except GLib.Error as error:
            self._show_warning(
                _("Unable to finish unmounting:\n{}".format(error.message))
            )

    def _handle_eject_finish(self, volume, result, data=None):
        """Callback for eject event."""
        try:
            volume.eject_finish(result)
        except GLib.Error as error:
            self._show_warning(
                _("Unable to finish ejecting:\n{}".format(error.message))
            )

    def mount(self, volume):
        """Perform volume mount."""
        if volume.can_mount() and volume.get_mount() is None:
            volume.mount(
                Gio.MountMountFlags.NONE, None, None, self._handle_mount_finish, None
            )
        else:
            self._show_warning(_("Selected volume can not be mounted."))

    def unmount(self, mount):
        """Perform unmounting."""
        if mount.can_unmount():
            mount.unmount(
                Gio.MountUnmountFlags.FORCE, None, self._handle_unmount_finish, None
            )
        else:
            self._show_warning(_("Selected mount can not be unmounted."))

    def eject(self, volume):
        """Perform volume ejection."""
        if volume.can_eject():
            volume.eject(
                Gio.MountUnmountFlags.FORCE, None, self._handle_eject_finish, None
            )
        else:
            self._show_warning(_("Selected volume can not be ejected."))

    def create_extensions(self):
        """Create mounts manager extensions"""
        self._window.create_extensions()

    def is_mounted(self, path):
        """Check if specified path is mounted"""
        # TODO(review): stub in the original code — intentionally unimplemented.
        pass

    def mount_path(self, path):
        """Mount specified path if extensions know how"""
        # TODO(review): stub in the original code — intentionally unimplemented.
        pass
class Volume(Location):
    """Generic volume handling class.

    One row in the location menu: icon + name plus mount/unmount/eject
    buttons whose visibility tracks the underlying Gio volume state.
    """
    def __init__(self, manager, volume):
        Location.__init__(self)
        # back-reference to the MountsManager and the wrapped Gio volume
        self._manager = manager
        self._volume = volume
        # interface elements
        self._icon = None
        self._title = None
        # NOTE(review): `_unmount` is set here but never used elsewhere in
        # this class — the actual widget is `_unmount_button`; confirm intent.
        self._unmount = None
        # create user interface
        self._create_interface()
        self.show_all()
        # connect events
        self._volume.connect("changed", self.__handle_change)
        self._volume.connect("removed", self.__handle_remove)
    def __handle_change(self, volume):
        """Handle volume change."""
        # Show only the buttons that make sense for the current mount state.
        mount = self._volume.get_mount()
        self._unmount_button.set_visible(mount is not None and mount.can_unmount())
        self._mount_button.set_visible(mount is None and self._volume.can_mount())
        self._eject_button.set_visible(self._volume.can_eject())
    def __handle_remove(self, volume):
        """Handle volume remove event."""
        self._manager._handle_remove_volume(self, volume)
    def __handle_mount_click(self, widget, data=None):
        """Handle clicking on mount button."""
        self._manager.mount(self._volume)
    def __handle_unmount_click(self, widget, data=None):
        """Handle clicking on unmount button."""
        mount = self._volume.get_mount()
        if mount:
            self._manager.unmount(mount)
    def __handle_eject_click(self, widget, data=None):
        """Handle clicking on eject button."""
        self._manager.eject(self._volume)
    def _create_interface(self):
        """Create interface for the widget to display."""
        container = Gtk.HBox.new(False, 5)
        container.set_border_width(5)
        # create volume icon
        self._icon = Gtk.Image.new_from_gicon(
            self._volume.get_icon(), Gtk.IconSize.LARGE_TOOLBAR
        )
        # create volume name label
        self._title = Gtk.Label.new(self._volume.get_name())
        self._title.set_alignment(0, 0.5)
        self._title.set_ellipsize(Pango.EllipsizeMode.END)
        # pack interface
        container.pack_start(self._icon, False, False, 0)
        container.pack_start(self._title, True, True, 0)
        # create buttons
        # 'no-show-all' keeps show_all() from overriding the visibility
        # that __handle_change computes per button.
        self._unmount_button = Gtk.Button.new_from_icon_name(
            "media-playback-stop-symbolic", Gtk.IconSize.BUTTON
        )
        self._unmount_button.connect("clicked", self.__handle_unmount_click)
        self._unmount_button.set_tooltip_text(_("Unmount"))
        self._unmount_button.set_property("no-show-all", True)
        container.pack_start(self._unmount_button, False, False, 0)
        self._mount_button = Gtk.Button.new_from_icon_name(
            "media-playback-start-symbolic", Gtk.IconSize.BUTTON
        )
        self._mount_button.connect("clicked", self.__handle_mount_click)
        self._mount_button.set_tooltip_text(_("Mount"))
        self._mount_button.set_property("no-show-all", True)
        container.pack_start(self._mount_button, False, False, 0)
        self._eject_button = Gtk.Button.new_from_icon_name(
            "media-eject-symbolic", Gtk.IconSize.BUTTON
        )
        self._eject_button.connect("clicked", self.__handle_eject_click)
        self._eject_button.set_tooltip_text(_("Eject"))
        self._eject_button.set_property("no-show-all", True)
        container.pack_start(self._eject_button, False, False, 0)
        # apply button visibility
        self.__handle_change(self._volume)
        self.add(container)
    def get_location(self):
        """Return location path."""
        # Only mounted volumes have a filesystem root; otherwise None.
        result = None
        mount = self._volume.get_mount()
        if mount:
            root = mount.get_root()
            result = root.get_path()
        return result
|
webengine | certificateerror | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Wrapper over a QWebEngineCertificateError."""
from typing import Any
from qutebrowser.qt import machinery
from qutebrowser.qt.core import QUrl
from qutebrowser.qt.webenginecore import QWebEngineCertificateError
from qutebrowser.utils import debug, usertypes, utils
class CertificateErrorWrapper(usertypes.AbstractCertificateErrorWrapper):

    """A wrapper over a QWebEngineCertificateError.

    Support both Qt 5 and 6.
    """

    def __init__(self, error: QWebEngineCertificateError) -> None:
        super().__init__()
        self._error = error
        self.ignore = False

    def __str__(self) -> str:
        # Qt 6 renamed errorDescription() to description().
        if machinery.IS_QT5:
            return self._error.errorDescription()
        return self._error.description()

    def _type(self) -> Any:  # QWebEngineCertificateError.Type or .Error
        # Qt 6 renamed error() to type().
        return self._error.error() if machinery.IS_QT5 else self._error.type()

    def reject_certificate(self) -> None:
        super().reject_certificate()
        self._error.rejectCertificate()

    def accept_certificate(self) -> None:
        super().accept_certificate()
        # Qt 6 renamed ignoreCertificateError() to acceptCertificate().
        accept = (
            self._error.ignoreCertificateError
            if machinery.IS_QT5
            else self._error.acceptCertificate
        )
        accept()

    def __repr__(self) -> str:
        type_key = debug.qenum_key(QWebEngineCertificateError, self._type())
        return utils.get_repr(self, error=type_key, string=str(self))

    def url(self) -> QUrl:
        return self._error.url()

    def is_overridable(self) -> bool:
        return self._error.isOverridable()

    def defer(self) -> None:
        # WORKAROUND for https://www.riverbankcomputing.com/pipermail/pyqt/2022-April/044585.html
        # (PyQt 5.15.6, 6.2.3, 6.3.0)
        raise usertypes.UndeferrableError("PyQt bug")
|
OLD | patrec | import random as pyrandom
from collections import Counter
import improc
import mlinear
from pylab import *
from scipy.ndimage import filters, interpolation
from toplevel import *
if 0:
from scipy.spatial.distance import cdist
else:
from ocrolib.distance import cdist
sidenote = "\t\t\t\t\t"
def method(cls):
    """Decorator factory: attach the decorated function to *cls* as a method.

    The inner decorator deliberately returns None, so the module-level name
    is consumed and the function lives on only as ``cls.<name>``.
    (Python 2 only: uses the `new` module and `func_name`.)
    """
    import new
    def _wrap(f):
        # Use setattr: assigning into cls.__dict__ directly fails for
        # new-style classes, whose __dict__ is a read-only dictproxy.
        setattr(cls, f.func_name, new.instancemethod(f, None, cls))
        return None
    return _wrap
###
### Helper classes.
###
class Err:
    """Exponentially-weighted running average of an error signal.

    The smoothing window covers roughly the last `n` samples."""
    def __init__(self, n=10000):
        self.n = n
        self.total = 0.0
        self.count = 0
    def add(self, x):
        """Fold one observation into the running average."""
        weight = 1.0 / self.n
        self.total = self.total * (1.0 - weight) + x * weight
        self.count += 1
    def value(self):
        """Return the current smoothed value."""
        return self.total
@checks(ndarray)
def make2d(data):
    """Convert any input array into a 2D array by flattening axes 1 and over."""
    if data.ndim == 1:
        return array([data])
    if data.ndim == 2:
        return data
    return data.reshape(data.shape[0], -1)
@checks(AFLOAT2,{int,NoneType},{int,NoneType})
def cshow(im,h=None,w=None):
    """Display the vector `im` as an h-by-w grayscale image.

    If neither h nor w is given the image is assumed square;
    if only h is given, a square h-by-h shape is used.
    """
    if h is None:
        h = w = int(sqrt(im.size))
    elif w is None:
        w=h
    # figsize(4,4)
    ion(); gray()  # pylab: interactive mode + grayscale colormap
    imshow(im.reshape(h,w),cmap=cm.gray,interpolation='nearest')
def showgrid(l,h=None,w=None):
    """Display the list of vectors `l` as a roughly-square grid of
    h-by-w grayscale images (shapes default as in cshow)."""
    # figsize(min(12,c),min(12,c)); gray()
    if h is None:
        h = w = int(sqrt(l[0].size))
    elif w is None:
        w=h
    ion()
    xticks([]); yticks([])
    n = len(l)
    # choose a grid with c columns and enough rows for all n images
    c = int(sqrt(n))
    r = (n+c-1)//c
    # print r,c
    for i in range(n):
        subplot(r,c,i+1)
        imshow(l[i].reshape(h,w),cmap=cm.gray,interpolation='nearest')
###
### Dataset abstraction, useful for dealing with large
### data sets stored on disk (e.g., in HDF5 files).
###
class Dataset:
    """A wrapper for datasets that allows individual items to
    be transformed using a feature extractor `f`, and subsets
    to be selected. This somewhat insulates pattern recognition
    algorithms from the idiosyncracies of HDF5 tables and
    prevents to some degree accidentally loading too much data
    into memory at once."""
    def __init__(self,a,f=lambda x:x,subset=None,maxsize=None):
        """Wrap dataset `a`; `f` maps each raveled item, `subset` selects
        indices, `maxsize` caps the number of items used."""
        if subset is None: subset = range(len(a))
        if maxsize is None: maxsize = len(a)
        subset = subset[:min(len(subset),maxsize)]
        self.a = a
        self.f = f
        self.subset = subset
    def __len__(self):
        return len(self.subset)
    def __getitem__(self,i):
        if type(i)==slice:
            # Normalize the slice so open-ended slices like d[:5] or d[:]
            # work (the original passed a possibly-None start/stop to range).
            start, stop, step = i.indices(len(self))
            return [self.f(self.a[self.subset[j]].ravel())
                    for j in range(start, stop, step)]
        else:
            assert i>=0 and i<len(self)
            return self.f(self.a[self.subset[i]].ravel())
    def __iter__(self):
        # NOTE(review): iteration yields raw items without applying `f` or
        # ravel(), unlike __getitem__ — looks inconsistent; confirm intent
        # before relying on it.
        for i in range(len(self)):
            yield self.a[self.subset[i]]
###
### probability distributions
###
@checks(AINT1)
def distribution(classes,n=-1):
    """Return the empirical distribution of the integer labels `classes`
    as a length-`n` probability vector (n defaults to max(classes)+1)."""
    c = Counter(classes)
    if n<0: n = max(classes)+1
    p = zeros(n)
    # list() is required under Python 3, where dict views cannot be used
    # directly as numpy fancy indices; keys and values iterate in the same
    # order, so the assignment stays aligned.
    p[list(c.keys())] = list(c.values())
    return p/maximum(0.1,sum(p))
###
### vector "sorting" and selecting
###
def minsert(x,l):
    """Insert vector `x` into list `l` immediately before its nearest
    neighbor (by Euclidean distance)."""
    if len(l) < 2:
        return l + [x]
    distances = array(cdist([x], l))[0]
    # dists2 = dists+roll(dists,-1)
    nearest = argmin(distances)
    return l[:nearest] + [x] + l[nearest:]
def vecsort(l):
    """Greedy "sort" of vectors: each vector is inserted next to its
    nearest neighbor among those placed so far."""
    items = list(l)
    result = items[:3]
    for vec in items[3:]:
        result = minsert(vec, result)
    return result
def rselect(data,n,s=1000,f=0.99):
    """Randomly select `n` well-spread representatives from `data`,
    repeatedly picking a far-away candidate (quantile `f` of distances)
    from a random sample of size `s`."""
    # N = len(data)
    selected = pyrandom.sample(data, 1)
    while len(selected) < n:
        if len(selected) % 100 == 0:
            print(len(selected))  # progress indicator
        candidates = pyrandom.sample(data, s)
        dists = cdist(selected, candidates)
        dists = amin(dists, axis=0)
        order = argsort(dists)
        pick = order[int(f * len(order))]
        selected = minsert(candidates[pick], selected)
    return selected
###
### PCA
###
@checks(DATASET(fixedshape=1,vrank=1),True,min_k=RANGE(2,100000),whiten=BOOL)
def pca(data,k,min_k=2,whiten=0):
    """Computes a PCA and a whitening. The number of
    components can be specified either directly or as a fraction
    of the total sum of the eigenvalues (k in [0...1]).
    The function returns
    the transformed data, the mean, the eigenvalues, and
    the eigenvectors."""
    n,d = data.shape
    assert k>=0
    assert k<=d and k<=n
    # center the data
    mean = average(data,axis=0).reshape(1,d)
    data = data - mean.reshape(1,d)
    # eigendecomposition of the covariance matrix; eigh is used since
    # the covariance is symmetric
    cov = dot(data.T,data)/n
    evals,evecs = linalg.eigh(cov)
    top = argsort(-evals)
    if k<1:
        # fractional k: choose enough components to cover fraction k of
        # the total (absolute) eigenvalue mass, but at least min_k
        fracs = add.accumulate(sorted(abs(evals),reverse=1))
        # NOTE(review): `find` comes from pylab and was removed in modern
        # matplotlib — this branch only works with old pylab versions.
        kd = find(fracs>=k*fracs[-1])[0]
        # print sidenote+"pca",kd,k,len(evals)
        k = maximum(min_k,kd)
    # keep the k largest eigenvalue/eigenvector pairs
    evals = evals[top[:k]]
    evecs = evecs.T[top[:k]]
    assert evecs.shape==(k,d)
    # project the centered data onto the principal directions
    ys = dot(evecs,data.T)
    assert ys.shape==(k,n)
    if whiten: ys = dot(diag(sqrt(1.0/evals)),ys)
    return (ys.T,mean.ravel(),evals,evecs)
class PCA:
    """A class wrapper for the pca function that makes it a little easier
    to use in some contexts."""
    def __init__(self,k):
        self.k = k
    def fit(self,data):
        """Estimate the mean and top-k principal directions from `data`."""
        data = data.reshape(len(data),-1)
        _,mu,evals,P = pca(data,self.k)
        self.mu = mu
        self.evals = evals
        self.P = P
    def transform(self,data):
        """Project `data` into the k-dimensional PCA space."""
        data = data.reshape(len(data),-1)
        ys = dot(data-self.mu[newaxis,:],self.P.T)
        return ys
    def residual(self,data):
        """Energy of `data` outside the PCA subspace.

        NOTE(review): computed as ||x||^2 - ||P(x-mu)||^2, which is the
        true reconstruction residual only when mu is ~0 — confirm intent."""
        data = data.reshape(len(data),-1)
        return sum(data**2,axis=1)-sum(self.transform(data)**2,axis=1)
    def inverse_transform(self,data):
        """Map PCA-space coordinates back into the original space."""
        data = data.reshape(len(data),-1)
        # bugfix: the original referenced an undefined name `out` here,
        # raising NameError on every call; `data` is the intended operand.
        xs = dot(data,self.P)+self.mu[newaxis,:]
        return xs
###
### k-means clustering
###
@checks(DATASET(fixedshape=1,vrank=1),RANGE(2,100000),maxiter=RANGE(0,10000000))
def kmeans(data,k,maxiter=100):
    """Regular k-means algorithm. Computes k means from data."""
    centers = array(pyrandom.sample(data, k), 'f')
    previous = -1
    for iteration in range(maxiter):
        assignment = argmin(cdist(data, centers), axis=1)
        # converged once no point changes cluster
        if (assignment == previous).all():
            break
        for c in range(k):
            if sum(assignment == c) < 1:
                # re-seed an empty cluster with a random data point
                centers[c] = pyrandom.sample(data, 2)[0]
            else:
                centers[c] = average(data[assignment == c], axis=0)
        previous = assignment
    return centers
class Kmeans:
    """Perform k-means clustering (thin class wrapper around kmeans())."""
    def __init__(self,k,maxiter=100,npk=1000,verbose=0):
        self.k = k
        self.maxiter = maxiter
        # bugfix: `npk` was accepted but never stored
        self.npk = npk
        self.verbose = verbose
    def fit(self,data):
        """Compute the k cluster centers from `data`.

        NOTE(review): this assigns the array to self.centers, shadowing the
        centers() method on the instance — pre-existing quirk, kept for
        compatibility."""
        self.centers = kmeans(data,self.k,maxiter=self.maxiter)
    def centers(self):
        """Return the cluster centers (shadowed by the array after fit)."""
        return self.centers
    def center(self,i):
        """Return cluster center i."""
        return self.centers[i]
    def predict(self,data,n=0):
        """Return the nearest-center index for each row of `data`
        (or the n nearest indices per row when n>0)."""
        # bugfix: the original referenced undefined `ys` and `self.Pcenters`
        # (copy-paste from PcaKmeans); use the fitted centers directly.
        data = data.reshape(len(data),-1)
        nb = knn(data,self.centers,max(1,n))
        if n==0:
            return nb[:,0]
        else:
            return nb
@checks(DATASET(fixedshape=1,vrank=1),RANGE(0,100000),RANGE(0,10000),maxiter=RANGE(0,10000000),\
        npk=RANGE(2,100000),maxsample=RANGE(3,1e9),min_norm=RANGE(0.0,1000.0))
def pca_kmeans(data,k,d,min_d=3,maxiter=100,npk=1000,verbose=0,maxsample=200000,min_norm=1e-3):
    """Run k-means on a PCA projection of (a sample of) `data`.

    Returns (km, evecs, mu): the k centers in PCA space, the principal
    directions, and the data mean — so centers in the original space are
    dot(km, evecs) + mu."""
    assert len(data)>=1
    # limit the working set: at most npk points per cluster, capped at maxsample
    n = min(len(data),k*npk,maxsample)
    if n<len(data):
        # if verbose: print sidenote+"pca_kmeans sampling",n,"samples"
        sample = pyrandom.sample(data,n)
    else:
        sample = list(data)
    # drop near-zero vectors before normalizing the sample into a 2D array
    sample = [v for v in sample if norm(v)>=min_norm]
    sample = array(sample)
    assert len(sample)>=1
    sample = sample.reshape(len(sample),-1)
    if verbose: print sidenote+"pca",len(sample),"d",d
    ys,mu,evals,evecs = pca(sample,d,min_k=min_d)
    if verbose: print sidenote+"kmeans",len(sample),"k",k,"d",ys.shape
    km = kmeans(ys,k)
    if verbose:
        print sidenote+"km",km.shape,"evecs",evecs.shape,"mu",mu.shape
    # free the large intermediates before returning
    del ys; del sample
    return km,evecs,mu
def pca_kmeans0(data,k,d=0.9,**kw):
    """Performs kmeans in PCA space, but otherwise looks like regular
    k-means (i.e., it returns the centers in the original space).
    This is useful both for speed and because it tends to give better results
    than regular k-means."""
    centers, basis, mean = pca_kmeans(data, k, d, **kw)
    # project the PCA-space centers back into the original space
    return dot(centers, basis) + mean[newaxis, :]
@checks(AFLOAT2,AFLOAT2,int,chunksize=RANGE(1,1000000000))
def knn(data,protos,k,chunksize=1000,threads=-1):
    """For every row of `data`, return the indices of its k nearest
    prototypes; processed in chunks to bound memory use."""
    chunks = []
    for start in range(0, len(data), chunksize):
        block = data[start:min(start + chunksize, len(data))]
        if type(block) is not ndarray:
            block = array(block)
        dists = cdist(block, protos, threads=threads)
        order = argsort(dists, axis=1)
        chunks.append(order[:, :k])
    return vstack(chunks)
def protosets(nb, k=None):
    """For a list of nearest neighbors to k prototypes,
    compute the set belonging to each prototype.

    :param nb: sequence of prototype indices (nearest prototype per item)
    :param k: number of prototypes; inferred as max(nb)+1 when None
        (generalized: k now defaults to None, matching the existing
        None-handling in the body)
    :returns: list of k sets; set i holds the item indices assigned to
        prototype i
    """
    if k is None: k = amax(nb)+1
    psets = [set() for _ in range(k)]
    for i,v in enumerate(nb):
        psets[v].add(i)
    return psets
class PcaKmeans:
    """Perform PCA followed by k-means.
    This code is able to deal with Datasets as input, not just arrays."""
    def __init__(self,k,d,min_d=3,maxiter=100,npk=1000,verbose=0,threads=1):
        # NOTE(review): the `threads` parameter is accepted but never stored
        # or used here — confirm whether it was meant to be kept.
        self.k = k
        self.d = d
        self.min_d = min_d
        self.maxiter = maxiter
        self.npk = npk
        self.verbose = verbose
    def fit(self,data):
        """Compute the PCA projection and the k cluster centers (stored in
        PCA space as self.Pcenters, with basis self.P and mean self.mu)."""
        self.Pcenters,self.P,self.mu = \
            pca_kmeans(data,self.k,self.d,min_d=self.min_d,
                       maxiter=self.maxiter,npk=self.npk,verbose=self.verbose)
        # order the centers so that neighbors in the list are close in space
        self.Pcenters = array(vecsort(self.Pcenters))
    def centers(self):
        """Return all cluster centers in the original space."""
        return dot(self.Pcenters,self.P)+self.mu
    def center(self,i):
        """Return cluster center i in the original space."""
        return dot(self.Pcenters[i],self.P)+self.mu
    def dist1(self,x):
        """Distance of `x` from the PCA subspace (out-of-subspace energy)."""
        y = dot(x.ravel()-self.mu.ravel(),self.P.T)
        return sqrt(norm(x.ravel()-self.mu.ravel())**2-norm(y)**2)
    def predict1(self,x,threads=1):
        """Return the index of the nearest center for a single vector."""
        # We always use multiple threads during training, but
        # only one thread by default for prediction (since
        # prediction is usually run in parallel for multiple
        # lines)
        y = dot(x.ravel()-self.mu.ravel(),self.P.T)
        c = knn(y.reshape(1,-1),self.Pcenters,1,threads=threads)
        return c[0][0]
    def predict(self,data,n=0,threads=1):
        """Return the nearest-center index per item (or the n nearest when
        n>0); accepts a 2D array or any iterable of vectors."""
        if type(data)==ndarray:
            # regular 2D array code
            data = data.reshape(len(data),-1)
            ys = dot(data-self.mu,self.P.T)
            nb = knn(ys,self.Pcenters,max(1,n),threads=threads)
        else:
            # for datasets (and other iterables), use a slower, per-row routine
            nb = []
            for i,x in enumerate(data):
                if self.verbose:
                    if i%100000==0: print sidenote+"PcaKmeans.predict",i
                nb.append(self.predict1(x))
            # NOTE(review): nb is 1-D in this branch, so nb[:,0] below will
            # raise when n==0 — looks like a pre-existing bug; confirm.
            nb = array(nb)
        if n==0:
            return nb[:,0]
        else:
            return nb
###
### A tree vector quantizer.
###
### TODO:
### - implement pruning based on distortion measure
###
class HierarchicalSplitter:
def __init__(self,**kw):
self.maxsplit = 100
self.maxdepth = 2
self.d = 0.90
self.min_d = 3
self.verbose = 0
self.depth = 0
self.splitsize = 10000
self.targetsize = 1000
self.offsets = None
self.splitter = None
self.subs = None
self.quiet = 0
self.extractor = None
assert set(kw.keys())<set(dir(self))
self.__dict__.update(kw)
if "depth" in kw: del kw["depth"]
self.kw = kw
self.offsets = None
def fit(self,data,offset=0):
assert len(data)>=3
if "extractor" in dir(self) and self.extractor is not None:
data = Dataset(data,f=self.extractor)
k = maximum(2,minimum(len(data)//self.targetsize,self.maxsplit))
d = self.d
if not self.quiet: print "\t"*self.depth,"pcakmeans",len(data),"k",k,"d",d
self.splitter = PcaKmeans(k,d)
self.splitter.fit(data)
if not self.quiet: print "\t"*self.depth,"predicting",len(data),len(data[0])
nb = self.splitter.predict(data,n=1)
sets = protosets(nb,k)
self.subs = [None]*k
self.offsets = []
for s,subset in enumerate(sets):
self.offsets.append(offset)
if self.verbose:
print "\t"*self.depth,"bucket",s,"of",k,"len",len(subset),"offset",offset
if self.depth>=self.maxdepth or len(subset)<self.splitsize:
offset += 1
else:
sub = HierarchicalSplitter(depth=self.depth+1,**self.kw)
subdata = [data[i] for i in sets[s]]
if len(subdata)>=3:
offset = sub.fit(subdata,offset=offset)
self.subs[s] = sub
else:
print "WARNING: empty split"
self.offsets.append(offset)
return offset
def predict1(self,v):
if "extractor" in dir(self) and self.extractor is not None:
v = self.extractor(v)
s = self.splitter.predict(v.reshape(1,-1))[0]
if self.subs[s] is None:
return self.offsets[s]
else:
if self.subs[s] is None: return -1
return self.subs[s].predict1(v)
def predict(self,data):
return array([self.predict1(v) for v in data],'i')
def nclusters(self):
return self.offsets[-1]
def center(self,v):
"""Returns the cluster number and cluster center associated
with this vector"""
s = self.splitter.predict(v.reshape(1,-1))[0]
if self.subs[s] is None:
result = (self.offsets[s],self.splitter.center(s))
else:
result = self.subs[s].center(v)
print result
return result
###
### A couple of trivial classifiers and cost models, used for testing.
###
class TrivialCmodel:
    """Classify using just the prior information (class frequencies)."""
    def __init__(self,limit=5):
        # maximum number of (class, probability) pairs returned
        self.limit = limit
    def fit(self,data,classes):
        # `data` is ignored; only the class prior is estimated
        self.counter = Counter(classes)
    def coutputs(self,v):
        """Return up to `limit` (class, prior probability) pairs, most
        frequent first. The input vector `v` is ignored."""
        total = sum(self.counter.values())
        # BUG FIX: the comprehension used to unpack `for k,n`, shadowing
        # the total, and divided an undefined name `c` by it (NameError).
        return [(k,c*1.0/total) for k,c in self.counter.most_common(self.limit)]
class TrivialCostModel:
    """Here, cost is simply the Euclidean distance from the mean of the
    bucket, which corresponds to a Gaussian with unit covariance."""
    def fit(self,data):
        # flatten each sample to a vector and remember the bucket mean
        flat = data.reshape(len(data),-1)
        self.avg = mean(flat,axis=0)
    def cost(self,v):
        """Euclidean distance of `v` from the bucket mean."""
        return norm(self.avg-v)
###
### Logistic character classifier
###
class LogisticCmodel:
    """Logistic (or linear) character classifier trained in PCA space.

    The PCA projection is folded into the learned weights, so at
    prediction time only a single matrix multiply on the raw vector is
    needed."""
    def __init__(self,d=0.9,min_d=2,linear=0,l=1e-4):
        # d: PCA dimension (or variance fraction); min_d: lower bound on
        # the PCA dimension; linear: use least squares instead of logistic
        # regression; l: L2 regularization for the logistic fit.
        self.d = d
        self.min_d = min_d
        self.linear = linear
        self.l = l
    def fit(self,data,classes):
        # stable class ordering and its inverse mapping
        self.reverse = sorted(Counter(classes).keys())
        self.forward = { k:i for i,k in enumerate(self.reverse) }
        outputs = array([self.forward[c] for c in classes],'i')
        # one-hot target matrix
        targets = zeros((len(data),len(self.reverse)),'f')
        for i,c in enumerate(outputs): targets[i,c] = 1
        # NOTE(review): assumes `pca` returns (projected, mean, eigenvalues,
        # projection matrix) — confirm against its definition.
        (ys,mu,vs,tr) = pca(make2d(data),k=self.d,min_k=self.min_d)
        # 1-augment for the bias term
        ys = c_[ones(len(ys)),ys]
        if self.linear:
            M2 = linalg.lstsq(ys,targets)[0]
        else:
            M2 = mlinear.logreg_l2_fp(ys,targets,l=self.l)
        # split off the bias row, then fold the PCA projection and mean
        # subtraction into (R, r) so prediction works on raw vectors
        b = M2[0,:]
        M = M2[1:,:]
        self.R = dot(M.T,tr)
        self.r = b-dot(self.R,mu.ravel())
    def coutputs(self,v,geometry=None):
        """Return (class, score) pairs sorted by decreasing score.
        `geometry` is accepted for interface compatibility and ignored."""
        assert v.ndim==1
        pred = dot(v,self.R.T)+self.r
        if not self.linear: pred = mlinear.sigmoid(pred)
        return sorted(zip(self.reverse,pred),key=lambda x:-x[1])
# obsolete, just for backwards compatibility
def normalizer_none(v):
    """No-op normalizer kept for backwards compatibility; just flattens."""
    flat = v.ravel()
    return flat
def normalizer_normal(v):
    # Backwards-compatible wrapper around the standard classifier
    # normalization in `improc`.
    return improc.classifier_normalize(v)
###
### Overall binning classifier.
###
class LocalCmodel:
    """Overall binning classifier: a splitter assigns each vector to a
    bucket, and a per-bucket character model does the classification."""
    def __init__(self,splitter):
        self.splitter = splitter
        self.nclusters = splitter.nclusters()
        self.cshape = None                  # shape of the first vector seen
        self.cmodels = [None]*self.nclusters
    def split1(self,v):
        """Return the bucket index for a single vector."""
        return self.splitter.predict(v.reshape(1,-1))[0]
    def setCmodel(self,i,cmodel):
        """Install the trained character model for bucket `i`.
        We leave the training of the individual buckets to code outside
        the class, since it is often parallelized and complicated. All
        that we care about for classification is that we have a good
        cmodel for each bucket."""
        self.cmodels[i] = cmodel
    def coutputs(self,v,geometry=None,prenormalized=0):
        """Classify `v`: route it to its bucket's cmodel. Returns [] when
        the bucket is invalid or has no model installed yet."""
        v = v.ravel()
        # after normalization, character sizes need to be consistent
        if self.cshape is None:
            self.cshape = v.shape
        else:
            assert self.cshape==v.shape
        bucket = self.splitter.predict1(v)
        if bucket<0 or self.cmodels[bucket] is None:
            return []
        return self.cmodels[bucket].coutputs(v,geometry=geometry)
class ModelWithExtractor:
    """Wrap a classifier so that a feature extractor is applied to every
    input vector before it reaches the underlying model."""
    def __init__(self,model,extractor):
        self.model = model
        self.extractor = extractor
    def fit(self,data,classes):
        transformed = [self.extractor(v) for v in data]
        self.model.fit(transformed,classes)
    def coutputs(self,v,geometry=None,prenormalized=0):
        transformed = self.extractor(v)
        # BUG FIX: this used to call self.coutputs(transformed), which
        # recursed forever; it must delegate to the wrapped model.
        return self.model.coutputs(transformed,geometry=geometry)
class Extractor0:
def __init__(self,alpha=0.5,dsigma=1.0,spread=0,tsize=(32,32)):
assert alpha>=0.0 and alpha<=1.0
assert dsigma>=0.0 and dsigma<=100.0
assert spread>=0 and spread<=100 and type(spread)==int
self.alpha = alpha
self.dsigma = dsigma
self.spread = spread
self.tsize = tsize
def __call__(self,image):
if image.ndim==1:
image = image.reshape(*self.tsize)
left = 0.0+image[:,0]
image[:,0] = 0
deriv = filters.gaussian_gradient_magnitude(image,self.dsigma,mode='constant')
if self.spread>0: deriv = filters.maximum_filter(image,(self.spread,self.spread))
deriv /= 1e-6+amax(deriv)
result = self.alpha*deriv + (1.0-self.alpha)*image
result[:,0] = left
return result
class Grad0Model(ModelWithExtractor):
    """LocalCmodel combined with the Extractor0 gradient features."""
    def __init__(self,mparams,eparams):
        # BUG FIX: the class is named LocalCmodel (lowercase m); the old
        # reference to LocalCModel raised NameError at construction time.
        ModelWithExtractor.__init__(self,LocalCmodel(**mparams),Extractor0(**eparams))
class Extractor1:
    """Feature extractor based on oriented derivatives: positive and
    negative responses of `buckets` derivative orientations are spread
    with a maximum filter and interleaved into alternating rows/columns
    of the output, then blended with the original image."""
    def __init__(self,alpha=0.5,dsigma=1.0,spread=3,buckets=2,tsize=(32,32)):
        # alpha: mixing weight between derivative features and the image;
        # dsigma: Gaussian sigma of the derivative filters; spread: size
        # of the maximum filter; buckets: number of orientations; tsize:
        # 2D shape assumed for flat input vectors.
        assert alpha>=0.0 and alpha<=1.0
        assert dsigma>=0.0 and dsigma<=100.0
        assert spread>=0 and spread<=100 and type(spread)==int
        self.alpha = alpha
        self.dsigma = dsigma
        self.spread = spread
        self.buckets = buckets
        self.tsize = tsize
    def __call__(self,image):
        if image.ndim==1:
            image = image.reshape(*self.tsize)
        # NOTE(review): column 0 is saved and restored unchanged —
        # presumably it carries non-image data; confirm with callers.
        left = 0.0+image[:,0]
        image[:,0] = 0
        # directional first derivatives along y and x
        dy = filters.gaussian_filter(image,self.dsigma,order=(1,0),mode='constant')
        dx = filters.gaussian_filter(image,self.dsigma,order=(0,1),mode='constant')
        nb = self.buckets
        deriv = zeros(image.shape)
        # NOTE(review): linspace(0,pi,nb+1) includes the endpoint pi, so
        # the loop runs nb+1 times with b up to nb; Extractor2 drops the
        # endpoint with [:-1] — confirm whether the extra pass is intended.
        for b,alpha in enumerate(linspace(0,pi,nb+1)):
            d = cos(alpha)*dx+sin(alpha)*dy
            # spread positive and negative responses separately
            dhi = filters.maximum_filter(d,self.spread)
            dlo = filters.maximum_filter(-d,self.spread)
            # interleave: even rows get positive, odd rows negative
            # responses, in column stripes b, b+nb, b+2*nb, ...
            deriv[::2,b::nb] = maximum(0,dhi[::2,b::nb])
            deriv[1::2,b::nb] = maximum(0,dlo[1::2,b::nb])
        deriv /= 1e-6+amax(deriv)
        result = self.alpha*deriv + (1.0-self.alpha)*image
        result[:,0] = left
        return result
class Grad1Model(ModelWithExtractor):
    """LocalCmodel combined with the Extractor1 oriented-derivative features."""
    def __init__(self,mparams,eparams):
        # BUG FIX: the class is named LocalCmodel (lowercase m); the old
        # reference to LocalCModel raised NameError at construction time.
        ModelWithExtractor.__init__(self,LocalCmodel(**mparams),Extractor1(**eparams))
class Extractor2:
def __init__(self,alpha=0.0,dsigma=1.0,p=3.0,buckets=4,dzoom=0.5,spread=5,tsize=(32,32)):
assert alpha>=0.0 and alpha<=1.0
assert dsigma>=0.0 and dsigma<=100.0
self.alpha = alpha
self.dsigma = dsigma
self.spread = spread
self.buckets = buckets
self.dzoom = dzoom
self.tsize = tsize
self.p = p
def __call__(self,image):
if image.ndim==1:
image = image.reshape(*self.tsize)
left = 0.0+image[:,0:1]
image[:,0] = 0
dy = filters.gaussian_filter(image,self.dsigma,order=(1,0),mode='constant')
dx = filters.gaussian_filter(image,self.dsigma,order=(0,1),mode='constant')
nb = self.buckets
dzoom = self.dzoom
derivs = []
derivs.append(interpolation.zoom(left,(dzoom,1)))
for b,alpha in enumerate(linspace(0,pi,nb+1)[:-1]):
d = cos(alpha)*dx+sin(alpha)*dy
dhi = filters.maximum_filter(maximum(d,0),self.spread)**self.p
dhi = (1-self.alpha)*dhi+self.alpha*image
derivs.append(interpolation.zoom(dhi,dzoom))
dlo = filters.maximum_filter(maximum(-d,0),self.spread)**self.p
dlo = (1-self.alpha)*dlo+self.alpha*image
derivs.append(interpolation.zoom(dlo,dzoom))
result = hstack(derivs)
result /= amax(result)
return result
class Grad2Model(ModelWithExtractor):
    """LocalCmodel combined with the Extractor2 zoomed-derivative features."""
    def __init__(self,mparams,eparams):
        # BUG FIX: the class is named LocalCmodel (lowercase m); the old
        # reference to LocalCModel raised NameError at construction time.
        ModelWithExtractor.__init__(self,LocalCmodel(**mparams),Extractor2(**eparams))
# need IPCA without covariance matrix
# maybe sectioned PCA
################################################################
### utility functions for parallelizing prediction and character
### classification (the overhead of this is too large to use
### it at a per-line level, but it is useful during training)
################################################################
import multiprocessing
import common
def datachunks(data,model=None,chunksize=1000):
    """Yield (start, end, block, model) tuples covering `data` in chunks
    of at most `chunksize` rows; non-array blocks are converted to
    float32 arrays. Used to build jobs for the parallel map helpers."""
    n = len(data)
    start = 0
    while start < n:
        end = min(start+chunksize,n)
        chunk = data[start:end]
        if type(chunk)!=ndarray: chunk = array(chunk,'float32')
        yield start,end,chunk,model
        start = end
def coutputs_chunk(job):
    """Worker: run model.coutputs on every row of one chunk.
    `job` is a (start, end, block, model) tuple as built by datachunks."""
    start,end,block,model = job
    return start,end,[model.coutputs(row) for row in block]
def parallel_coutputs(model,data,parallel=multiprocessing.cpu_count(),verbose=1):
    """Run model.coutputs over all of `data` in parallel, in chunks.

    Returns a list of per-row coutputs results in the original order.
    NOTE: the default for `parallel` is evaluated once at import time.
    """
    results = [None]*len(data)
    # chunks may complete out of order; (i,j) indices restore ordering
    for i,j,outs in common.parallel_map(coutputs_chunk,datachunks(data,model=model),parallel=parallel):
        if verbose: print "parallel_coutputs",i,j,"(%d)"%parallel
        for k in range(j-i): results[i+k] = outs[k]
    return results
def predict_chunk(job):
    """Worker: run model.predict on one chunk (see datachunks)."""
    start,end,block,model = job
    return start,end,model.predict(block)
def parallel_predict(model,data,parallel=multiprocessing.cpu_count(),verbose=1):
    """Run model.predict over all of `data` in parallel, in chunks.

    Returns a list of per-row predictions in the original order.
    NOTE: the default for `parallel` is evaluated once at import time.
    """
    results = [None]*len(data)
    # chunks may complete out of order; (i,j) indices restore ordering
    for i,j,outs in common.parallel_map(predict_chunk,datachunks(data,model=model),parallel=parallel):
        if verbose: print "parallel_predict",i,j,"(%d)"%parallel
        for k in range(j-i): results[i+k] = outs[k]
    return results
|
addons | DiscordNotifier | # -*- coding: utf-8 -*-
from pyload.core.network.request_factory import get_request
from ..base.notifier import Notifier
class DiscordNotifier(Notifier):
    """Send pyLoad push notifications to a Discord channel via a webhook."""

    __name__ = "DiscordNotifier"
    __type__ = "addon"
    __version__ = "0.11"
    __status__ = "testing"
    # Which events trigger a notification, plus rate limiting, is all
    # user-configurable.
    __config__ = [
        ("enabled", "bool", "Activated", False),
        ("webhookurl", "string", "The URL of the webhook", ""),
        ("captcha", "bool", "Notify captcha request", True),
        ("reconnection", "bool", "Notify reconnection request", True),
        ("downloadfinished", "bool", "Notify download finished", True),
        ("downloadfailed", "bool", "Notify download failed", True),
        ("alldownloadsfinished", "bool", "Notify all downloads finished", True),
        ("alldownloadsprocessed", "bool", "Notify all downloads processed", True),
        ("packagefinished", "bool", "Notify package finished", True),
        ("packagefailed", "bool", "Notify package failed", True),
        ("update", "bool", "Notify pyload update", False),
        ("exit", "bool", "Notify pyload shutdown/restart", False),
        ("sendinterval", "int", "Interval in seconds between notifications", 1),
        ("sendpermin", "int", "Max notifications per minute", 60),
        ("ignoreclient", "bool", "Send notifications if client is connected", True),
    ]
    __description__ = "Send push notifications to a Discord channel via a webhook."
    __license__ = "GPLv3"
    __authors__ = [("Jan-Olaf Becker", "job87@web.de")]

    def get_key(self):
        # The webhook URL doubles as the notification "key".
        return self.config.get("webhookurl")

    def send(self, event, msg, key):
        # NOTE(review): the `key` argument supplied by the Notifier base
        # class is ignored; the webhook URL is re-read via get_key().
        req = get_request()
        self.log_info("Sending message to discord")
        # POST the event name and message as the webhook message content.
        self.load(self.get_key(), post={"content": event + "\n" + msg}, req=req)
|
migrations | 0236_add_instance_setting_model | # Generated by Django 3.2.12 on 2022-05-18 12:39
import json
import logging
import pickle
from base64 import b64decode
from django.db import connection, migrations, models, utils
logger = logging.getLogger(__name__)
def populate_instance_settings(apps, schema_editor):
    """Copy settings from django-constance's table into InstanceSetting.

    constance stores values as base64-encoded pickles; each value is
    decoded and re-serialized as JSON into InstanceSetting.raw_value.
    """
    try:
        InstanceSetting = apps.get_model("posthog", "InstanceSetting")
        with connection.cursor() as cursor:
            # raw SQL because the constance model is not part of this app
            cursor.execute("SELECT key, value FROM constance_config")
            for key, pickled_value in cursor.fetchall():
                # NOTE(review): pickle.loads on database contents — trusted
                # here because the table is written only by this application.
                value = (
                    pickle.loads(b64decode(pickled_value.encode()))
                    if pickled_value is not None
                    else None
                )
                InstanceSetting.objects.create(key=key, raw_value=json.dumps(value))
    except utils.ProgrammingError:
        # Fresh installs never had constance, so its table may not exist;
        # in that case there is nothing to migrate.
        logger.info(
            "constance_config table did not exist, skipping populating posthog_instance_setting table"
        )
class Migration(migrations.Migration):
    """Create the InstanceSetting model and migrate constance values into it."""

    # NOTE(review): non-atomic — presumably so the data migration is not
    # wrapped in a single transaction with the schema change; confirm.
    atomic = False

    dependencies = [
        ("posthog", "0235_plugin_source_transpilation"),
    ]

    operations = [
        migrations.CreateModel(
            name="InstanceSetting",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("key", models.CharField(max_length=128)),
                ("raw_value", models.CharField(blank=True, max_length=1024)),
            ],
        ),
        # each setting key may appear only once
        migrations.AddConstraint(
            model_name="instancesetting",
            constraint=models.UniqueConstraint(fields=("key",), name="unique key"),
        ),
        # data migration; no-op on reverse, elidable when squashing
        migrations.RunPython(
            populate_instance_settings, migrations.RunPython.noop, elidable=True
        ),
    ]
|
localDrone | stacksMerge | import eos.db
import gui.mainFrame
import wx
from gui import globalEvents as GE
from gui.fitCommands.calc.drone.localChangeAmount import (
CalcChangeLocalDroneAmountCommand,
)
from gui.fitCommands.calc.drone.localRemove import CalcRemoveLocalDroneCommand
from gui.fitCommands.helpers import InternalCommandHistory
from service.fit import Fit
class GuiMergeLocalDroneStacksCommand(wx.Command):
    """Undoable GUI command that merges two stacks of the same local drone:
    the source stack's amount is added to the destination stack and the
    source stack is removed."""

    def __init__(self, fitID, srcPosition, dstPosition):
        wx.Command.__init__(self, True, "Merge Local Drone Stacks")
        # internal history lets Undo revert both calc commands as a batch
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        self.srcPosition = srcPosition
        self.dstPosition = dstPosition

    def Do(self):
        """Merge the stacks; returns False when nothing can be merged
        (same position, or different drone types)."""
        if self.srcPosition == self.dstPosition:
            return False
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        srcDrone = fit.drones[self.srcPosition]
        dstDrone = fit.drones[self.dstPosition]
        # only identical drone types can be merged
        if srcDrone.itemID != dstDrone.itemID:
            return False
        srcAmount = srcDrone.amount
        commands = []
        # grow the destination stack first, then remove the source stack
        commands.append(
            CalcChangeLocalDroneAmountCommand(
                fitID=self.fitID,
                position=self.dstPosition,
                amount=dstDrone.amount + srcAmount,
            )
        )
        commands.append(
            CalcRemoveLocalDroneCommand(
                fitID=self.fitID, position=self.srcPosition, amount=srcAmount
            )
        )
        success = self.internalHistory.submitBatch(*commands)
        # persist, recalculate the fit, and notify the UI
        eos.db.flush()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(
            gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,))
        )
        return success

    def Undo(self):
        """Revert the merge by undoing the internal command batch."""
        success = self.internalHistory.undoAll()
        eos.db.flush()
        sFit = Fit.getInstance()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(
            gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,))
        )
        return success
|
you-get | extractor | #!/usr/bin/env python
import os
import sys
from . import json_output
from .common import (
download_urls,
dry_run,
get_content,
get_filename,
match1,
maybe_print,
parse_host,
player,
)
from .common import print_more_compatible as print
from .common import set_proxy, unset_proxy
from .util import log
class Extractor:
    """Minimal base class for extractors: holds the URL, title, video id,
    and the discovered stream tables."""

    def __init__(self, *args):
        # the first positional argument, if any, is the URL
        self.url = args[0] if args else None
        self.title = None
        self.vid = None
        self.streams = {}
        self.streams_sorted = []
class VideoExtractor:
    """Base class for site-specific video extractors.

    Subclasses implement `prepare` (fill in title/vid/streams) and
    `extract`; this class handles stream selection, pretty-printing
    (`p*` methods), and downloading, including DASH streams, captions,
    danmaku, and lyrics.
    """

    def __init__(self, *args):
        self.url = None
        self.title = None
        self.vid = None
        self.m3u8_url = None
        self.streams = {}
        self.streams_sorted = []
        self.audiolang = None
        self.password_protected = False
        self.dash_streams = {}
        self.caption_tracks = {}
        self.out = False
        self.ua = None
        self.referer = None
        self.danmaku = None
        self.lyrics = None
        if args:
            self.url = args[0]

    def download_by_url(self, url, **kwargs):
        """Prepare, extract, and download the video at `url`."""
        self.url = url
        self.vid = None
        if "extractor_proxy" in kwargs and kwargs["extractor_proxy"]:
            set_proxy(parse_host(kwargs["extractor_proxy"]))
        self.prepare(**kwargs)
        if self.out:
            return
        if "extractor_proxy" in kwargs and kwargs["extractor_proxy"]:
            unset_proxy()
        try:
            self.streams_sorted = [
                dict(
                    [("id", stream_type["id"])]
                    + list(self.streams[stream_type["id"]].items())
                )
                for stream_type in self.__class__.stream_types
                if stream_type["id"] in self.streams
            ]
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); only the missing "id" key should
        # trigger the "itag"-keyed fallback.
        except KeyError:
            self.streams_sorted = [
                dict(
                    [("itag", stream_type["itag"])]
                    + list(self.streams[stream_type["itag"]].items())
                )
                for stream_type in self.__class__.stream_types
                if stream_type["itag"] in self.streams
            ]
        self.extract(**kwargs)
        self.download(**kwargs)

    def download_by_vid(self, vid, **kwargs):
        """Prepare, extract, and download the video with id `vid`."""
        self.url = None
        self.vid = vid
        if "extractor_proxy" in kwargs and kwargs["extractor_proxy"]:
            set_proxy(parse_host(kwargs["extractor_proxy"]))
        self.prepare(**kwargs)
        if "extractor_proxy" in kwargs and kwargs["extractor_proxy"]:
            unset_proxy()
        try:
            self.streams_sorted = [
                dict(
                    [("id", stream_type["id"])]
                    + list(self.streams[stream_type["id"]].items())
                )
                for stream_type in self.__class__.stream_types
                if stream_type["id"] in self.streams
            ]
        # BUG FIX: narrowed from a bare `except:`; see download_by_url.
        except KeyError:
            self.streams_sorted = [
                dict(
                    [("itag", stream_type["itag"])]
                    + list(self.streams[stream_type["itag"]].items())
                )
                for stream_type in self.__class__.stream_types
                if stream_type["itag"] in self.streams
            ]
        self.extract(**kwargs)
        self.download(**kwargs)

    def prepare(self, **kwargs):
        pass
        # raise NotImplementedError()

    def extract(self, **kwargs):
        pass
        # raise NotImplementedError()

    def p_stream(self, stream_id):
        """Pretty-print one stream (regular or DASH)."""
        if stream_id in self.streams:
            stream = self.streams[stream_id]
        else:
            stream = self.dash_streams[stream_id]
        if "itag" in stream:
            print("    - itag:          %s" % log.sprint(stream_id, log.NEGATIVE))
        else:
            print("    - format:        %s" % log.sprint(stream_id, log.NEGATIVE))
        if "container" in stream:
            print("      container:     %s" % stream["container"])
        if "video_profile" in stream:
            maybe_print("      video-profile: %s" % stream["video_profile"])
        if "quality" in stream:
            print("      quality:       %s" % stream["quality"])
        if (
            "size" in stream
            and "container" in stream
            and stream["container"].lower() != "m3u8"
        ):
            if stream["size"] != float("inf") and stream["size"] != 0:
                print(
                    "      size:          %s MiB (%s bytes)"
                    % (round(stream["size"] / 1048576, 1), stream["size"])
                )
        if "m3u8_url" in stream:
            print("      m3u8_url:      {}".format(stream["m3u8_url"]))
        if "itag" in stream:
            print(
                "    # download-with: %s"
                % log.sprint("you-get --itag=%s [URL]" % stream_id, log.UNDERLINE)
            )
        else:
            print(
                "    # download-with: %s"
                % log.sprint("you-get --format=%s [URL]" % stream_id, log.UNDERLINE)
            )
        print()

    def p_i(self, stream_id):
        """Print a short info block (title, size, url) for one stream."""
        if stream_id in self.streams:
            stream = self.streams[stream_id]
        else:
            stream = self.dash_streams[stream_id]
        maybe_print("    - title:         %s" % self.title)
        print(
            "       size:         %s MiB (%s bytes)"
            % (round(stream["size"] / 1048576, 1), stream["size"])
        )
        print("        url:         %s" % self.url)
        print()
        sys.stdout.flush()

    def p(self, stream_id=None):
        """Print site/title plus either one stream, the best stream
        (stream_id is None), or all streams (stream_id == [])."""
        maybe_print("site:                %s" % self.__class__.name)
        maybe_print("title:               %s" % self.title)
        if stream_id:
            # Print the stream
            print("stream:")
            self.p_stream(stream_id)
        elif stream_id is None:
            # Print stream with best quality
            print("stream:              # Best quality")
            stream_id = (
                self.streams_sorted[0]["id"]
                if "id" in self.streams_sorted[0]
                else self.streams_sorted[0]["itag"]
            )
            self.p_stream(stream_id)
        elif stream_id == []:
            print("streams:             # Available quality and codecs")
            # Print DASH streams
            if self.dash_streams:
                print("    [ DASH ] %s" % ("_" * 36))
                itags = sorted(
                    self.dash_streams, key=lambda i: -self.dash_streams[i]["size"]
                )
                for stream in itags:
                    self.p_stream(stream)
            # Print all other available streams
            if self.streams_sorted:
                print("    [ DEFAULT ] %s" % ("_" * 33))
                for stream in self.streams_sorted:
                    self.p_stream(stream["id"] if "id" in stream else stream["itag"])
        if self.audiolang:
            print("audio-languages:")
            for i in self.audiolang:
                print("    - lang:          {}".format(i["lang"]))
                print("      download-url:  {}\n".format(i["url"]))
        sys.stdout.flush()

    def p_playlist(self, stream_id=None):
        """Print the playlist header; individual videos follow."""
        maybe_print("site:                %s" % self.__class__.name)
        print("playlist:            %s" % self.title)
        print("videos:")

    def download(self, **kwargs):
        """Print info or download the selected (or best) stream, plus
        captions, danmaku, and lyrics where available."""
        if "json_output" in kwargs and kwargs["json_output"]:
            json_output.output(self)
        elif "info_only" in kwargs and kwargs["info_only"]:
            if "stream_id" in kwargs and kwargs["stream_id"]:
                # Display the stream
                stream_id = kwargs["stream_id"]
                if "index" not in kwargs:
                    self.p(stream_id)
                else:
                    self.p_i(stream_id)
            else:
                # Display all available streams
                if "index" not in kwargs:
                    self.p([])
                else:
                    stream_id = (
                        self.streams_sorted[0]["id"]
                        if "id" in self.streams_sorted[0]
                        else self.streams_sorted[0]["itag"]
                    )
                    self.p_i(stream_id)
        else:
            if "stream_id" in kwargs and kwargs["stream_id"]:
                # Download the stream
                stream_id = kwargs["stream_id"]
            else:
                # Download stream with the best quality
                from .processor.ffmpeg import has_ffmpeg_installed

                if (
                    has_ffmpeg_installed()
                    and player is None
                    and self.dash_streams
                    or not self.streams_sorted
                ):
                    # stream_id = list(self.dash_streams)[-1]
                    itags = sorted(
                        self.dash_streams, key=lambda i: -self.dash_streams[i]["size"]
                    )
                    stream_id = itags[0]
                else:
                    stream_id = (
                        self.streams_sorted[0]["id"]
                        if "id" in self.streams_sorted[0]
                        else self.streams_sorted[0]["itag"]
                    )
            if "index" not in kwargs:
                self.p(stream_id)
            else:
                self.p_i(stream_id)
            if stream_id in self.streams:
                urls = self.streams[stream_id]["src"]
                ext = self.streams[stream_id]["container"]
                total_size = self.streams[stream_id]["size"]
            else:
                urls = self.dash_streams[stream_id]["src"]
                ext = self.dash_streams[stream_id]["container"]
                total_size = self.dash_streams[stream_id]["size"]
            # m3u8/m4a segments are merged into an mp4 container
            if ext == "m3u8" or ext == "m4a":
                ext = "mp4"
            if not urls:
                log.wtf("[Failed] Cannot extract video source.")
            # For legacy main()
            headers = {}
            if self.ua is not None:
                headers["User-Agent"] = self.ua
            if self.referer is not None:
                headers["Referer"] = self.referer
            download_urls(
                urls,
                self.title,
                ext,
                total_size,
                headers=headers,
                output_dir=kwargs["output_dir"],
                merge=kwargs["merge"],
                av=stream_id in self.dash_streams,
                vid=self.vid,
            )
            if "caption" not in kwargs or not kwargs["caption"]:
                print("Skipping captions or danmaku.")
                return
            for lang in self.caption_tracks:
                filename = "%s.%s.srt" % (get_filename(self.title), lang)
                print("Saving %s ... " % filename, end="", flush=True)
                srt = self.caption_tracks[lang]
                with open(
                    os.path.join(kwargs["output_dir"], filename), "w", encoding="utf-8"
                ) as x:
                    x.write(srt)
                print("Done.")
            if self.danmaku is not None and not dry_run:
                filename = "{}.cmt.xml".format(get_filename(self.title))
                print("Downloading {} ...\n".format(filename))
                with open(
                    os.path.join(kwargs["output_dir"], filename), "w", encoding="utf8"
                ) as fp:
                    fp.write(self.danmaku)
            if self.lyrics is not None and not dry_run:
                filename = "{}.lrc".format(get_filename(self.title))
                print("Downloading {} ...\n".format(filename))
                with open(
                    os.path.join(kwargs["output_dir"], filename), "w", encoding="utf8"
                ) as fp:
                    fp.write(self.lyrics)
        # For main_dev()
        # download_urls(urls, self.title, self.streams[stream_id]['container'], self.streams[stream_id]['size'])
        keep_obj = kwargs.get("keep_obj", False)
        if not keep_obj:
            self.__init__()
|
ocrolib | lstm | # An implementation of LSTM networks, CTC alignment, and related classes.
#
# This code operates on sequences of vectors as inputs, and either outputs
# sequences of vectors, or symbol sequences. Sequences of vectors are
# represented as 2D arrays, with rows representing vectors at different
# time steps.
#
# The code makes liberal use of array programming, including slicing,
# both for speed and for simplicity. All arrays are actual narrays (not matrices),
# so `*` means element-wise multiplication. If you're not familiar with array
# programming style, the numerical code may be hard to follow. If you're familiar with
# MATLAB, here is a side-by-side comparison: http://wiki.scipy.org/NumPy_for_Matlab_Users
#
# Implementations follow the mathematical formulas for forward and backward
# propagation closely; these are not documented in the code, but you can find
# them in the original publications or the slides for the LSTM tutorial
# at http://lstm.iupr.com/
#
# You can find a simple example of how to use this code in this worksheet:
# https://docs.google.com/a/iupr.com/file/d/0B2VUW2Zx_hNoXzJQemFhOXlLN0U
# More complex usage is illustrated by the ocropus-rpred and ocropus-rtrain
# command line programs.
#
# Author: Thomas M. Breuel
# License: Apache 2.0
from __future__ import print_function
import unicodedata
from collections import defaultdict
import common as ocrolib
import matplotlib.pyplot as plt
import numpy as np
import utils
from ocrolib.edist import levenshtein
from ocrolib.exceptions import RecognitionError
from scipy.ndimage import filters, measurements
initial_range = 0.1
class RangeError(Exception):
    """Raised when a value falls outside the range this code can handle."""

    def __init__(self, s=None):
        super(RangeError, self).__init__(s)
def prepare_line(line, pad=16):
    """Prepare a line image for recognition: scale to [0,1], invert,
    transpose, and (if pad>0) add `pad` blank rows at both ends of the
    transposed image."""
    scaled = line * 1.0 / np.amax(line)
    inverted = (np.amax(scaled) - scaled).T
    if pad > 0:
        blank = np.zeros((pad, inverted.shape[1]))
        inverted = np.vstack([blank, inverted, blank])
    return inverted
def randu(*shape):
    """Generate uniformly random values in the range (-1,1).
    This can usually be used as a drop-in replacement for `randn`,
    yielding a different distribution for weight initializations;
    empirically, the randu/randn choice can matter for neural network
    initialization."""
    return np.random.rand(*shape) * 2 - 1
def sigmoid(x):
    """Logistic sigmoid 1/(1+exp(-x)).
    No input clipping is needed: IEEE floating point handles this
    function reasonably even for infinite inputs."""
    return 1.0 / (np.exp(-x) + 1.0)
def rownorm(a):
    """Return the vector of Euclidean norms of the rows of a 2D array."""
    return np.sqrt(np.sum(np.array(a) ** 2, axis=1))
def check_nan(*args, **kw):
    """Raise FloatingPointError if any argument array contains a NaN."""
    for candidate in args:
        if np.isnan(candidate).any():
            raise FloatingPointError()
def sumouter(us, vs, lo=-1.0, hi=1.0, out=None):
"""Sum the outer products of the `us` and `vs`.
Values are clipped into the range `[lo,hi]`.
This is mainly used for computing weight updates
in logistic regression layers."""
result = out or np.zeros((len(us[0]), len(vs[0])))
for u, v in zip(us, vs):
result += np.outer(np.clip(u, lo, hi), v)
return result
class Network:
    """General interface for networks. This mainly adds convenience
    functions for `predict` and `train`.
    For the purposes of this library, all inputs and outputs are
    in the form of (temporal) sequences of vectors. Sequences of
    vectors are represented as 2D arrays, with each row representing
    a vector at the time step given by the row index. Both activations
    and deltas are propagated that way.
    Common implementations of this are the `MLP`, `Logreg`, `Softmax`,
    and `LSTM` networks. These implementations do most of the numerical
    computation and learning.
    Networks are designed such that they can be abstracted; that is,
    you can create a network class that implements forward/backward
    methods but internally implements through calls to component networks.
    The `Stacked`, `Reversed`, and `Parallel` classes below take advantage
    of that.
    """

    def predict(self, xs):
        """Prediction is the same as forward propagation."""
        return self.forward(xs)

    def train(self, xs, ys, debug=0):
        """Training performs forward propagation, computes the output deltas
        as the difference between the predicted and desired values,
        and then propagates those deltas backwards."""
        xs = np.array(xs)
        ys = np.array(ys)
        pred = np.array(self.forward(xs))
        deltas = ys - pred
        self.backward(deltas)
        self.update()
        return pred

    def walk(self):
        # leaf networks yield only themselves; container networks
        # override this to also yield their components
        yield self

    def preSave(self):
        # hook invoked before pickling; subclasses may drop transient state
        pass

    def postLoad(self):
        # hook invoked after unpickling; subclasses may rebuild state
        pass

    def ctrain(self, xs, cs, debug=0, lo=1e-5, accelerated=1):
        """Training for classification. This handles
        the special case of just two classes. It also
        can use regular least square error training or
        accelerated training using 1/pred as the error signal."""
        assert len(cs.shape) == 1
        assert (cs == np.array(cs, "i")).all()
        xs = np.array(xs)
        pred = np.array(self.forward(xs))
        deltas = np.zeros(pred.shape)
        assert len(deltas) == len(cs)
        # NB: these deltas are such that they can be used
        # directly to update the gradient; some other libraries
        # use the negative value.
        if accelerated:
            # ATTENTION: These deltas use an "accelerated" error signal.
            if deltas.shape[1] == 1:
                # Binary class case uses just one output variable.
                for i, c in enumerate(cs):
                    if c == 0:
                        deltas[i, 0] = -1.0 / max(lo, 1.0 - pred[i, 0])
                    else:
                        deltas[i, 0] = 1.0 / max(lo, pred[i, 0])
            else:
                # For the multi-class case, we use all output variables.
                deltas[:, :] = -pred[:, :]
                for i, c in enumerate(cs):
                    deltas[i, c] = 1.0 / max(lo, pred[i, c])
        else:
            # These are the deltas from least-square error
            # updates. They are slower than `accelerated`,
            # but may give more accurate probability estimates.
            if deltas.shape[1] == 1:
                # Binary class case uses just one output variable.
                for i, c in enumerate(cs):
                    if c == 0:
                        deltas[i, 0] = -pred[i, 0]
                    else:
                        deltas[i, 0] = 1.0 - pred[i, 0]
            else:
                # For the multi-class case, we use all output variables.
                deltas[:, :] = -pred[:, :]
                for i, c in enumerate(cs):
                    deltas[i, c] = 1.0 - pred[i, c]
        self.backward(deltas)
        self.update()
        return pred

    def setLearningRate(self, r, momentum=0.9):
        """Set the learning rate and momentum for weight updates."""
        self.learning_rate = r
        self.momentum = momentum

    def weights(self):
        """Return an iterator that iterates over (W,DW,name) triples
        representing the weight matrix, the computed deltas, and the names
        of all the components of this network. This needs to be implemented
        in subclasses. The objects returned by the iterator must not be copies,
        since they are updated in place by the `update` method."""
        pass

    def allweights(self):
        """Return all weights as a single vector. This is mainly a convenience
        function for plotting."""
        aw = list(self.weights())
        weights, derivs, names = zip(*aw)
        weights = [w.ravel() for w in weights]
        derivs = [d.ravel() for d in derivs]
        return np.concatenate(weights), np.concatenate(derivs)

    def update(self):
        """Update the weights using the deltas computed in the last forward/backward pass.
        Subclasses need not implement this, they should implement the `weights` method.
        """
        if not hasattr(self, "verbose"):
            self.verbose = 0
        # lazily allocate one momentum buffer per weight matrix
        if not hasattr(self, "deltas") or self.deltas is None:
            self.deltas = [np.zeros(dw.shape) for w, dw, n in self.weights()]
        for ds, (w, dw, n) in zip(self.deltas, self.weights()):
            # classical momentum update, applied in place through ravel views
            ds.ravel()[:] = (
                self.momentum * ds.ravel()[:] + self.learning_rate * dw.ravel()[:]
            )
            w.ravel()[:] += ds.ravel()[:]
            if self.verbose:
                print(n, (np.amin(w), np.amax(w)), (np.amin(dw), np.amax(dw)))

    ''' The following are subclass responsibility:
        def forward(self,xs):
            """Propagate activations forward through the network.
            This needs to be implemented in subclasses.
            It updates the internal state of the object for an (optional)
            subsequent call to `backward`.
            """
            pass
        def backward(self,deltas):
            """Propagate error signals backward through the network.
            This needs to be implemented in subclasses.
            It assumes that activations for the input have previously
            been computed by a call to `forward`.
            It should not perform weight updates (that is handled by
            the `update` method)."""
            pass
    '''
class Logreg(Network):
    """A logistic regression layer, a straightforward implementation
    of the logistic regression equations. Uses 1-augmented vectors."""

    def __init__(self, Nh, No, initial_range=initial_range, rand=np.random.rand):
        self.Nh = Nh
        self.No = No
        # 1-augmented input, hence Nh+1 columns (bias in column 0).
        self.W2 = randu(No, Nh + 1) * initial_range
        self.DW2 = np.zeros((No, Nh + 1))

    def ninputs(self):
        return self.Nh

    def noutputs(self):
        return self.No

    def forward(self, ys):
        """Forward propagate; saves state for `backward` and returns the
        per-timestep sigmoid outputs."""
        augmented, outputs = [], []
        for y in ys:
            a = np.concatenate([np.ones(1), y])
            augmented.append(a)
            outputs.append(sigmoid(np.dot(self.W2, a)))
        self.state = (augmented, outputs)
        return outputs

    def backward(self, deltas):
        """Backward propagate deltas; accumulates DW2 and returns input deltas."""
        augmented, outputs = self.state
        n = len(outputs)
        assert len(deltas) == len(augmented)
        dzspre = [None] * n
        dys = [None] * n
        for i in range(n - 1, -1, -1):
            # Derivative of the sigmoid times the incoming delta.
            dzspre[i] = deltas[i] * outputs[i] * (1 - outputs[i])
            # Strip the bias component when propagating to the input.
            dys[i] = np.dot(dzspre[i], self.W2)[1:]
        self.dzspre = dzspre
        self.DW2 = sumouter(dzspre, augmented)
        return dys

    def info(self):
        for v in sorted(["W2"]):
            a = np.array(getattr(self, v))
            print(v, a.shape, np.amin(a), np.amax(a))

    def weights(self):
        yield self.W2, self.DW2, "Logreg"
class Softmax(Network):
    """A softmax layer, a straightforward implementation
    of the softmax equations. Uses 1-augmented vectors."""

    def __init__(self, Nh, No, initial_range=initial_range, rand=np.random.rand):
        self.Nh = Nh
        self.No = No
        # 1-augmented input, hence Nh+1 columns (bias in column 0).
        self.W2 = randu(No, Nh + 1) * initial_range
        self.DW2 = np.zeros((No, Nh + 1))

    def ninputs(self):
        return self.Nh

    def noutputs(self):
        return self.No

    def forward(self, ys):
        """Forward propagate activations. This updates the internal
        state for a subsequent call to `backward` and returns the output
        activations."""
        augmented, posteriors = [], []
        for y in ys:
            a = np.concatenate([np.ones(1), y])
            logits = np.dot(self.W2, a)
            # Clip to keep np.exp from overflowing for extreme logits.
            e = np.exp(np.clip(logits, -100, 100))
            augmented.append(a)
            posteriors.append(e / np.sum(e))
        self.state = (augmented, posteriors)
        return posteriors

    def backward(self, deltas):
        """Backward propagate deltas; accumulates DW2 and returns input deltas.
        (Deltas are passed straight through: the caller is assumed to have
        already combined softmax + loss derivatives.)"""
        augmented, posteriors = self.state
        n = len(posteriors)
        assert len(deltas) == len(augmented)
        dzspre = [None] * n
        dys = [None] * n
        for i in range(n - 1, -1, -1):
            dzspre[i] = deltas[i]
            dys[i] = np.dot(dzspre[i], self.W2)[1:]
        self.DW2 = sumouter(dzspre, augmented)
        return dys

    def info(self):
        for v in sorted(["W2"]):
            a = np.array(getattr(self, v))
            print(v, a.shape, np.amin(a), np.amax(a))

    def weights(self):
        yield self.W2, self.DW2, "Softmax"
class MLP(Network):
    """A multilayer perceptron (direct implementation). Effectively,
    two `Logreg` layers stacked on top of each other, or a simple direct
    implementation of the MLP equations. This is mainly used for testing.

    NOTE: the `rand` parameter used to be accepted but silently ignored
    (weights were always drawn with np.random.rand). It is now honored;
    its default is np.random.rand so default behavior is unchanged."""

    def __init__(self, Ni, Nh, No, initial_range=initial_range, rand=np.random.rand):
        self.Ni = Ni
        self.Nh = Nh
        self.No = No
        # 1-augmented vectors, hence the +1 columns (bias in column 0).
        self.W1 = rand(Nh, Ni + 1) * initial_range
        self.W2 = rand(No, Nh + 1) * initial_range

    def ninputs(self):
        return self.Ni

    def noutputs(self):
        return self.No

    def forward(self, xs):
        """Forward propagate; saves state for `backward` and returns the
        per-timestep output activations."""
        n = len(xs)
        inputs, ys, zs = [None] * n, [None] * n, [None] * n
        for i in range(n):
            inputs[i] = np.concatenate([np.ones(1), xs[i]])
            ys[i] = sigmoid(np.dot(self.W1, inputs[i]))
            # Re-augment the hidden activations for the second layer.
            ys[i] = np.concatenate([np.ones(1), ys[i]])
            zs[i] = sigmoid(np.dot(self.W2, ys[i]))
        self.state = (inputs, ys, zs)
        return zs

    def backward(self, deltas):
        """Backward propagate deltas through both layers; accumulates
        DW1/DW2 and returns the input deltas."""
        xs, ys, zs = self.state
        n = len(xs)
        dxs, dyspre, dzspre, dys = [None] * n, [None] * n, [None] * n, [None] * n
        for i in reversed(range(len(zs))):
            dzspre[i] = deltas[i] * zs[i] * (1 - zs[i])
            # [1:] drops the bias component when propagating downwards.
            dys[i] = np.dot(dzspre[i], self.W2)[1:]
            dyspre[i] = dys[i] * (ys[i] * (1 - ys[i]))[1:]
            dxs[i] = np.dot(dyspre[i], self.W1)[1:]
        self.DW2 = sumouter(dzspre, ys)
        self.DW1 = sumouter(dyspre, xs)
        return dxs

    def weights(self):
        yield self.W1, self.DW1, "MLP1"
        yield self.W2, self.DW2, "MLP2"
# These are the nonlinearities used by the LSTM network.
# We don't bother parameterizing them here
def ffunc(x):
    """Nonlinearity used for gates (logistic sigmoid)."""
    # Clip the exponent so np.exp cannot overflow for extreme inputs.
    z = np.clip(-x, -20, 20)
    return 1.0 / (1.0 + np.exp(z))
def fprime(x, y=None):
    """Derivative of the gate nonlinearity; pass `y` to reuse a value
    already computed by the sigmoid and skip recomputation."""
    out = sigmoid(x) if y is None else y
    return out * (1.0 - out)
def gfunc(x):
    """Nonlinearity used for input to state (tanh)."""
    return np.tanh(x)
def gprime(x, y=None):
    """Derivative of the input-to-state nonlinearity; pass `y` to reuse
    an already-computed tanh value."""
    t = np.tanh(x) if y is None else y
    return 1 - t**2
# ATTENTION: try linear for hfunc
def hfunc(x):
    """Nonlinearity used for output (tanh)."""
    return np.tanh(x)
def hprime(x, y=None):
    """Derivative of the output nonlinearity; pass `y` to reuse an
    already-computed tanh value."""
    t = np.tanh(x) if y is None else y
    return 1 - t**2
# These two routines have been factored out of the class in order to
# make their conversion to native code easy; these are the "inner loops"
# of the LSTM algorithm.
# Both functions are a straightforward implementation of the
# LSTM equations. It is possible to abstract this further and
# represent gates and memory cells as individual data structures.
# However, that is several times slower and the extra abstraction
# isn't actually all that useful.
def forward_py(
    n,
    N,
    ni,
    ns,
    na,
    xs,
    source,
    gix,
    gfx,
    gox,
    cix,
    gi,
    gf,
    go,
    ci,
    state,
    output,
    WGI,
    WGF,
    WGO,
    WCI,
    WIP,
    WFP,
    WOP,
):
    """Perform forward propagation of activations for a simple LSTM layer.

    `n` is the sequence length, `ni`/`ns` the input/state sizes, and
    `na = 1 + ni + ns` the size of the 1-augmented source vector
    (`N` is the allocated buffer length; unused in this function).
    `xs` holds the input vectors; the remaining arrays are preallocated
    per-timestep buffers filled in place, with `output` receiving the
    layer's activations. WGI/WGF/WGO/WCI are the gate and cell-input
    weight matrices and WIP/WFP/WOP the peephole weight vectors.
    """
    for t in range(n):
        # Recurrent input: zeros at t=0, previous output afterwards.
        prev = np.zeros(ns) if t == 0 else output[t - 1]
        # Source vector layout: [bias=1, inputs, previous outputs].
        source[t, 0] = 1
        source[t, 1 : 1 + ni] = xs[t]
        source[t, 1 + ni :] = prev
        # Pre-activations for input/forget/output gates and cell input,
        # written directly into the preallocated buffers.
        np.dot(WGI, source[t], out=gix[t])
        np.dot(WGF, source[t], out=gfx[t])
        np.dot(WGO, source[t], out=gox[t])
        np.dot(WCI, source[t], out=cix[t])
        if t > 0:
            # Peephole connections from the previous cell state.
            gix[t] += WIP * state[t - 1]
            gfx[t] += WFP * state[t - 1]
        gi[t] = ffunc(gix[t])
        gf[t] = ffunc(gfx[t])
        ci[t] = gfunc(cix[t])
        # New cell state: gated input plus (after t=0) the retained old state.
        state[t] = ci[t] * gi[t]
        if t > 0:
            state[t] += gf[t] * state[t - 1]
        # The output-gate peephole sees the *current* cell state.
        gox[t] += WOP * state[t]
        go[t] = ffunc(gox[t])
        output[t] = hfunc(state[t]) * go[t]
    assert not np.isnan(output[:n]).any()
def backward_py(
    n,
    N,
    ni,
    ns,
    na,
    deltas,
    source,
    gix,
    gfx,
    gox,
    cix,
    gi,
    gf,
    go,
    ci,
    state,
    output,
    WGI,
    WGF,
    WGO,
    WCI,
    WIP,
    WFP,
    WOP,
    sourceerr,
    gierr,
    gferr,
    goerr,
    cierr,
    stateerr,
    outerr,
    DWGI,
    DWGF,
    DWGO,
    DWCI,
    DWIP,
    DWFP,
    DWOP,
):
    """Perform backward propagation of deltas for a simple LSTM layer.

    Mirrors `forward_py`: the first group of arguments are the activations
    recorded during the forward pass, the `*err` arrays are per-timestep
    error buffers filled in place, and the `DW*` arrays receive the
    accumulated weight gradients. `N` (buffer capacity) is unused here.
    """
    for t in reversed(range(n)):
        outerr[t] = deltas[t]
        if t < n - 1:
            # Error flowing back through the recurrent part of source[t+1].
            outerr[t] += sourceerr[t + 1][-ns:]
        goerr[t] = fprime(None, go[t]) * hfunc(state[t]) * outerr[t]
        # Cell-state error: direct path through the output nonlinearity...
        stateerr[t] = hprime(state[t]) * go[t] * outerr[t]
        # ...plus the output-gate peephole contribution.
        stateerr[t] += goerr[t] * WOP
        if t < n - 1:
            # Contributions from the next timestep: peepholes and the
            # forget-gated carry of the cell state.
            stateerr[t] += gferr[t + 1] * WFP
            stateerr[t] += gierr[t + 1] * WIP
            stateerr[t] += stateerr[t + 1] * gf[t + 1]
        if t > 0:
            # The forget gate only matters when there is a previous state.
            gferr[t] = fprime(None, gf[t]) * stateerr[t] * state[t - 1]
        gierr[t] = fprime(None, gi[t]) * stateerr[t] * ci[t]  # gfunc(cix[t])
        cierr[t] = gprime(None, ci[t]) * stateerr[t] * gi[t]
        np.dot(gierr[t], WGI, out=sourceerr[t])
        if t > 0:
            sourceerr[t] += np.dot(gferr[t], WGF)
        sourceerr[t] += np.dot(goerr[t], WGO)
        sourceerr[t] += np.dot(cierr[t], WCI)
    # Accumulate weight gradients; sumprod/sumouter write into the provided
    # `out=` arrays and return them, so the in-place update reaches the caller.
    DWIP = utils.sumprod(gierr[1:n], state[: n - 1], out=DWIP)
    DWFP = utils.sumprod(gferr[1:n], state[: n - 1], out=DWFP)
    DWOP = utils.sumprod(goerr[:n], state[:n], out=DWOP)
    DWGI = utils.sumouter(gierr[:n], source[:n], out=DWGI)
    DWGF = utils.sumouter(gferr[1:n], source[1:n], out=DWGF)
    DWGO = utils.sumouter(goerr[:n], source[:n], out=DWGO)
    DWCI = utils.sumouter(cierr[:n], source[:n], out=DWCI)
class LSTM(Network):
    """A standard LSTM network. This is a direct implementation of all the forward
    and backward propagation formulas, mainly for speed. (There is another, more
    abstract implementation as well, but that's significantly slower in Python
    due to function call overhead.)"""

    def __init__(self, ni, ns, initial=initial_range, maxlen=5000):
        # Source vector size: bias + inputs + recurrent outputs.
        na = 1 + ni + ns
        self.dims = ni, ns, na
        self.init_weights(initial)
        self.allocate(maxlen)

    def ninputs(self):
        return self.dims[0]

    def noutputs(self):
        return self.dims[1]

    def states(self):
        """Return the internal state array for the last forward
        propagation. This is mostly used for visualizations."""
        return np.array(self.state[: self.last_n])

    def init_weights(self, initial):
        "Initialize the weight matrices and derivatives"
        ni, ns, na = self.dims
        # gate weights (matrices over the full source vector)
        for w in "WGI WGF WGO WCI".split():
            setattr(self, w, randu(ns, na) * initial)
            setattr(self, "D" + w, np.zeros((ns, na)))
        # peep weights (per-state vectors)
        for w in "WIP WFP WOP".split():
            setattr(self, w, randu(ns) * initial)
            setattr(self, "D" + w, np.zeros(ns))

    def weights(self):
        "Yields all the weight and derivative matrices"
        weights = "WGI WGF WGO WCI WIP WFP WOP"
        for w in weights.split():
            yield (getattr(self, w), getattr(self, "D" + w), w)

    def info(self):
        "Print info about the internal state"
        vars = "WGI WGF WGO WIP WFP WOP cix ci gix gi gox go gfx gf"
        vars += " source state output gierr gferr goerr cierr stateerr"
        vars = vars.split()
        vars = sorted(vars)
        for v in vars:
            a = np.array(getattr(self, v))
            print(v, a.shape, np.amin(a), np.amax(a))

    def preSave(self):
        # Shrink the state buffers before pickling; remember the old size.
        self.max_n = max(500, len(self.ci))
        self.allocate(1)

    def postLoad(self):
        # Restore working buffers after unpickling.
        self.allocate(getattr(self, "max_n", 5000))

    def allocate(self, n):
        """Allocate space for the internal state variables.
        `n` is the maximum sequence length that can be processed."""
        ni, ns, na = self.dims
        vars = "cix ci gix gi gox go gfx gf"
        vars += " state output gierr gferr goerr cierr stateerr outerr"
        for v in vars.split():
            setattr(self, v, np.nan * np.ones((n, ns)))
        self.source = np.nan * np.ones((n, na))
        self.sourceerr = np.nan * np.ones((n, na))

    def reset(self, n):
        """Reset the contents of the internal state variables to `nan`"""
        vars = "cix ci gix gi gox go gfx gf"
        vars += " state output gierr gferr goerr cierr stateerr outerr"
        vars += " source sourceerr"
        for v in vars.split():
            getattr(self, v)[:, :] = np.nan

    def forward(self, xs):
        """Perform forward propagation of activations and update the
        internal state for a subsequent call to `backward`.
        Since this performs sequence classification, `xs` is a 2D
        array, with rows representing input vectors at each time step.
        Returns a 2D array whose rows represent output vectors for
        each input vector."""
        ni, ns, na = self.dims
        assert len(xs[0]) == ni
        n = len(xs)
        self.last_n = n
        N = len(self.gi)
        if n > N:
            raise RecognitionError("input too large for LSTM model")
        self.reset(n)
        forward_py(
            n,
            N,
            ni,
            ns,
            na,
            xs,
            self.source,
            self.gix,
            self.gfx,
            self.gox,
            self.cix,
            self.gi,
            self.gf,
            self.go,
            self.ci,
            self.state,
            self.output,
            self.WGI,
            self.WGF,
            self.WGO,
            self.WCI,
            self.WIP,
            self.WFP,
            self.WOP,
        )
        assert not np.isnan(self.output[:n]).any()
        return self.output[:n]

    def backward(self, deltas):
        """Perform backward propagation of deltas. Must be called after `forward`.
        Does not perform weight updating (for that, use the generic `update` method).
        Returns the `deltas` for the input vectors."""
        ni, ns, na = self.dims
        n = len(deltas)
        self.last_n = n
        N = len(self.gi)
        if n > N:
            # BUG FIX: previously raised ocrolib.RecognitionError, but
            # `ocrolib` is not in scope here; use the same RecognitionError
            # name as `forward` for consistency.
            raise RecognitionError("input too large for LSTM model")
        backward_py(
            n,
            N,
            ni,
            ns,
            na,
            deltas,
            self.source,
            self.gix,
            self.gfx,
            self.gox,
            self.cix,
            self.gi,
            self.gf,
            self.go,
            self.ci,
            self.state,
            self.output,
            self.WGI,
            self.WGF,
            self.WGO,
            self.WCI,
            self.WIP,
            self.WFP,
            self.WOP,
            self.sourceerr,
            self.gierr,
            self.gferr,
            self.goerr,
            self.cierr,
            self.stateerr,
            self.outerr,
            self.DWGI,
            self.DWGF,
            self.DWGO,
            self.DWCI,
            self.DWIP,
            self.DWFP,
            self.DWOP,
        )
        # Strip bias and recurrent components; return only input deltas.
        return [s[1 : 1 + ni] for s in self.sourceerr[:n]]
################################################################
# combination classifiers
################################################################
class Stacked(Network):
    """Stack two networks on top of each other."""

    def __init__(self, nets):
        self.nets = nets
        # Per-layer delta statistics collected during backward passes.
        self.dstats = defaultdict(list)

    def walk(self):
        yield self
        for net in self.nets:
            for node in net.walk():
                yield node

    def ninputs(self):
        return self.nets[0].ninputs()

    def noutputs(self):
        return self.nets[-1].noutputs()

    def forward(self, xs):
        """Feed `xs` through each subnetwork in order."""
        data = xs
        for net in self.nets:
            data = net.forward(data)
        return data

    def backward(self, deltas):
        """Propagate deltas through the stack in reverse order, recording
        min/mean/max delta statistics per layer."""
        self.ldeltas = [deltas]
        for idx in range(len(self.nets) - 1, -1, -1):
            if deltas is not None:
                self.dstats[idx].append(
                    (np.amin(deltas), np.mean(deltas), np.amax(deltas))
                )
            deltas = self.nets[idx].backward(deltas)
            self.ldeltas.append(deltas)
        self.ldeltas = self.ldeltas[::-1]
        return deltas

    def lastdeltas(self):
        return self.ldeltas[-1]

    def info(self):
        for net in self.nets:
            net.info()

    def states(self):
        return self.nets[0].states()

    def weights(self):
        for idx, net in enumerate(self.nets):
            for w, dw, label in net.weights():
                yield w, dw, "Stacked%d/%s" % (idx, label)
class Reversed(Network):
    """Run a network on the time-reversed input."""

    def __init__(self, net):
        self.net = net

    def walk(self):
        yield self
        for node in self.net.walk():
            yield node

    def ninputs(self):
        return self.net.ninputs()

    def noutputs(self):
        return self.net.noutputs()

    def forward(self, xs):
        # Reverse the input, run the wrapped net, reverse the result back.
        return self.net.forward(xs[::-1])[::-1]

    def backward(self, deltas):
        result = self.net.backward(deltas[::-1])
        if result is None:
            return None
        return result[::-1]

    def info(self):
        self.net.info()

    def states(self):
        return self.net.states()[::-1]

    def weights(self):
        for w, dw, label in self.net.weights():
            yield w, dw, "Reversed/%s" % label
class Parallel(Network):
    """Run multiple networks in parallel on the same input and concatenate
    their per-timestep outputs."""

    def __init__(self, *nets):
        self.nets = nets

    def walk(self):
        yield self
        for sub in self.nets:
            for x in sub.walk():
                yield x

    def ninputs(self):
        # All subnetworks see the same input, so delegate to the first.
        # (Added so `Stacked.ninputs` works when a Parallel is the first layer.)
        return self.nets[0].ninputs()

    def noutputs(self):
        # Outputs are concatenated per timestep, so the sizes add up.
        return sum(net.noutputs() for net in self.nets)

    def forward(self, xs):
        outputs = [net.forward(xs) for net in self.nets]
        outputs = zip(*outputs)
        outputs = [np.concatenate(l) for l in outputs]
        return outputs

    def backward(self, deltas):
        # Slice the concatenated deltas back into per-subnetwork chunks.
        deltas = np.array(deltas)
        start = 0
        for i, net in enumerate(self.nets):
            k = net.noutputs()
            net.backward(deltas[:, start : start + k])
            start += k
        return None

    def info(self):
        for net in self.nets:
            net.info()

    def states(self):
        # BUG FIX: this method previously referenced an undefined `outputs`
        # variable (the line computing per-net states was commented out with
        # a FIXME) and always raised NameError. Concatenate per-net states.
        states = [net.states() for net in self.nets]
        return [np.concatenate(group) for group in zip(*states)]

    def weights(self):
        for i, net in enumerate(self.nets):
            for w, dw, n in net.weights():
                yield w, dw, "Parallel%d/%s" % (i, n)
def MLP1(Ni, Ns, No):
    """An MLP implementation by stacking two `Logreg` networks on top
    of each other."""
    return Stacked([Logreg(Ni, Ns), Logreg(Ns, No)])
def LSTM1(Ni, Ns, No):
    """An LSTM layer with a `Logreg` layer for the output.
    (A `Softmax` output is used instead when there is more than one class.)"""
    lstm = LSTM(Ni, Ns)
    output = Logreg(Ns, No) if No == 1 else Softmax(Ns, No)
    return Stacked([lstm, output])
def BIDILSTM(Ni, Ns, No):
    """A bidirectional LSTM, constructed from regular and reversed LSTMs."""
    assert No > 1
    forward = LSTM(Ni, Ns)
    backward = Reversed(LSTM(Ni, Ns))
    # The two directions run in parallel; their outputs are concatenated,
    # hence the 2*Ns input size of the softmax output layer.
    return Stacked([Parallel(forward, backward), Softmax(2 * Ns, No)])
################################################################
# LSTM classification with forward/backward alignment ("CTC")
################################################################
def make_target(cs, nc):
    """Given a list of target classes `cs` and a total
    maximum number of classes `nc`, build a (2*len(cs)+1, nc) array with
    blank rows (class 0) interleaving the one-hot target rows, ending
    with a final blank row."""
    nrows = 2 * len(cs) + 1
    target = np.zeros((nrows, nc))
    for i, cls in enumerate(cs):
        target[2 * i, 0] = 1.0
        target[2 * i + 1, cls] = 1.0
    target[nrows - 1, 0] = 1.0
    return target
def translate_back0(outputs, threshold=0.25):
    """Simple code for translating output from a classifier
    back into a list of classes. Per-timestep argmax, suppressed to the
    blank class (0) where the peak is weak, then run-length collapsed.
    TODO/ATTENTION: this can probably be improved."""
    peak = np.amax(outputs)
    strength = np.amax(outputs, axis=1)
    best = np.argmax(outputs, axis=1)
    # Weak peaks are treated as the blank/epsilon class.
    best[strength < threshold * peak] = 0
    classes = []
    for prev, cur in zip(best, best[1:]):
        if cur != prev and cur != 0:
            classes.append(cur)
    return classes
def translate_back(outputs, threshold=0.7, pos=0):
    """Translate back. Thresholds on class 0, then assigns the maximum class to
    each region. ``pos`` determines the depth of character information returned:
    * `pos=0`: Return list of recognized characters
    * `pos=1`: Return list of position-character tuples
    * `pos=2`: Return list of character-probability tuples
    """
    # Connected runs where the blank class drops below the threshold.
    components, _count = measurements.label(outputs[:, 0] < threshold)
    mask = np.tile(components.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(
        outputs, mask, np.arange(1, np.amax(mask) + 1)
    )
    if pos == 1:
        # include character position
        return maxima
    if pos == 2:
        # include character probabilities
        return [(col, outputs[row, col]) for (row, col) in maxima]
    # only recognized characters
    return [col for (_row, col) in maxima]
def log_mul(x, y):
    """Multiplication in the log domain is just addition."""
    return y + x
def log_add(x, y):
    """Perform addition in the log domain (log-sum-exp)."""
    diff = x - y
    # Well-separated values: the larger term dominates entirely.
    # Otherwise use the stable formulation with a clipped exponent.
    return np.where(
        np.abs(diff) > 10,
        np.maximum(x, y),
        np.log(np.exp(np.clip(diff, -20, 20)) + 1) + y,
    )
def forward_algorithm(match, skip=-5.0):
    """Apply the forward algorithm to an array of log state
    correspondence probabilities.

    A straightforward dynamic program in close analogy to edit distance:
    either stay in the same state or take a diagonal transition into the
    next state, both at no extra cost; the only costs come from how well
    the symbols match the network output."""
    v = skip * np.arange(len(match[0]))
    rows = []
    for i in range(len(match)):
        w = np.roll(v, 1).copy()
        # extra cost for skipping initial symbols
        w[0] = skip * i
        # combine: stay in the same state (v) or transition in (w),
        # each weighted by the match cost for this row
        v = log_add(log_mul(v, match[i]), log_mul(w, match[i]))
        rows.append(v)
    return np.array(rows, "f")
def forwardbackward(lmatch):
    """Apply the forward-backward algorithm to an array of log state
    correspondence probabilities."""
    forward = forward_algorithm(lmatch)
    # The backward pass is just the forward pass on the doubly-reversed
    # array, un-reversed afterwards.
    backward = forward_algorithm(lmatch[::-1, ::-1])[::-1, ::-1]
    return forward + backward
def ctc_align_targets(outputs, targets, threshold=100.0, verbose=0, debug=0, lo=1e-5):
    """Perform alignment between the `outputs` of a neural network
    classifier and some targets. The targets themselves are a time sequence
    of vectors, usually a unary representation of each target class (but
    possibly sequences of arbitrary posterior probability distributions
    represented as vectors).

    Returns a (len(outputs), nclasses) array of aligned posterior
    distributions over output classes per time step. `lo` floors all
    probabilities to keep the log computations finite; `debug` draws
    intermediate arrays with matplotlib.
    """
    # Floor and renormalize the outputs so each row is a proper distribution.
    outputs = np.maximum(lo, outputs)
    outputs = outputs * 1.0 / np.sum(outputs, axis=1)[:, np.newaxis]
    # first, we compute the match between the outputs and the targets
    # and put the result in the log domain
    match = np.dot(outputs, targets.T)
    lmatch = np.log(match)
    if debug:
        plt.figure("ctcalign")
        plt.clf()
        plt.subplot(411)
        plt.imshow(outputs.T, interpolation="nearest", cmap=plt.cm.hot)
        plt.subplot(412)
        plt.imshow(lmatch.T, interpolation="nearest", cmap=plt.cm.hot)
    assert not np.isnan(lmatch).any()
    # Now, we compute a forward-backward algorithm over the matches between
    # the input and the output states.
    both = forwardbackward(lmatch)
    # We need posterior probabilities for the states, so we need to normalize
    # the output. Instead of keeping track of the normalization
    # factors, we just normalize the posterior distribution directly.
    # (Subtracting the max before exponentiating avoids overflow.)
    epath = np.exp(both - np.amax(both))
    l = np.sum(epath, axis=0)[np.newaxis, :]
    epath /= np.where(l == 0.0, 1e-9, l)
    # The previous computation gives us an alignment between input time
    # and output sequence position as posteriors over states.
    # However, we actually want the posterior probability distribution over
    # output classes at each time step. This dot product gives
    # us that result. We renormalize again afterwards.
    aligned = np.maximum(lo, np.dot(epath, targets))
    l = np.sum(aligned, axis=1)[:, np.newaxis]
    aligned /= np.where(l == 0.0, 1e-9, l)
    if debug:
        plt.subplot(413)
        plt.imshow(epath.T, cmap=plt.cm.hot, interpolation="nearest")
        plt.subplot(414)
        plt.imshow(aligned.T, cmap=plt.cm.hot, interpolation="nearest")
        plt.ginput(1, 0.01)
    return aligned
def normalize_nfkc(s):
    """Return `s` in Unicode NFKC (compatibility-composed) form."""
    return unicodedata.normalize("NFKC", s)
def add_training_info(network):
    """Hook for attaching training bookkeeping to a network; currently a
    no-op passthrough."""
    return network
class SeqRecognizer:
    """Perform sequence recognition using BIDILSTM and alignment."""

    def __init__(
        self, ninput, nstates, noutput=-1, codec=None, normalize=normalize_nfkc
    ):
        self.Ni = ninput
        if codec:
            # The codec determines the number of output classes.
            noutput = codec.size()
        assert noutput > 0
        self.No = noutput
        self.lstm = BIDILSTM(ninput, nstates, noutput)
        self.setLearningRate(1e-4)
        self.debug_align = 0
        self.normalize = normalize
        self.codec = codec
        self.clear_log()

    def walk(self):
        for x in self.lstm.walk():
            yield x

    def clear_log(self):
        """Reset the per-training-run bookkeeping lists."""
        self.command_log = []
        self.error_log = []
        self.cerror_log = []
        self.key_log = []

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.upgrade()

    def upgrade(self):
        """Backfill attributes missing from objects pickled by older versions."""
        if "last_trial" not in dir(self):
            self.last_trial = 0
        if "command_log" not in dir(self):
            self.command_log = []
        if "error_log" not in dir(self):
            self.error_log = []
        if "cerror_log" not in dir(self):
            self.cerror_log = []
        if "key_log" not in dir(self):
            self.key_log = []

    def info(self):
        # BUG FIX: the network is stored as `self.lstm`; `self.net` never
        # exists on this class and always raised AttributeError.
        self.lstm.info()

    def setLearningRate(self, r, momentum=0.9):
        self.lstm.setLearningRate(r, momentum)

    def predictSequence(self, xs):
        "Predict an integer sequence of codes."
        assert xs.shape[1] == self.Ni, (
            "wrong image height (image: %d, expected: %d)" % (xs.shape[1], self.Ni)
        )
        self.outputs = np.array(self.lstm.forward(xs))
        return translate_back(self.outputs)

    def trainSequence(self, xs, cs, update=1, key=None):
        "Train with an integer sequence of codes."
        assert xs.shape[1] == self.Ni, "wrong image height"
        # forward step
        self.outputs = np.array(self.lstm.forward(xs))
        # CTC alignment
        self.targets = np.array(make_target(cs, self.No))
        self.aligned = np.array(
            ctc_align_targets(self.outputs, self.targets, debug=self.debug_align)
        )
        # propagate the deltas back
        deltas = self.aligned - self.outputs
        self.lstm.backward(deltas)
        if update:
            self.lstm.update()
        # translate back into a sequence
        result = translate_back(self.outputs)
        # compute least square error
        self.error = np.sum(deltas**2)
        self.error_log.append(self.error**0.5 / len(cs))
        # compute class error
        self.cerror = levenshtein(cs, result)
        self.cerror_log.append((self.cerror, len(cs)))
        # training keys
        self.key_log.append(key)
        return result

    # we keep track of errors within the object; this even gets
    # saved to give us some idea of the training history
    def errors(self, range=10000, smooth=0):
        """Recent per-sample LSQ errors, optionally Gaussian-smoothed."""
        result = self.error_log[-range:]
        if smooth > 0:
            result = filters.gaussian_filter(result, smooth, mode="mirror")
        return result

    def cerrors(self, range=10000, smooth=0):
        """Recent normalized class-error rates, optionally smoothed."""
        result = [e * 1.0 / max(1, n) for e, n in self.cerror_log[-range:]]
        if smooth > 0:
            result = filters.gaussian_filter(result, smooth, mode="mirror")
        return result

    def s2l(self, s):
        "Convert a unicode sequence into a code sequence for training."
        s = self.normalize(s)
        s = [c for c in s]
        return self.codec.encode(s)

    def l2s(self, l):
        "Convert a code sequence into a unicode string after recognition."
        l = self.codec.decode(l)
        return "".join(l)

    def trainString(self, xs, s, update=1):
        "Perform training with a string. This uses the codec and normalizer."
        return self.trainSequence(xs, self.s2l(s), update=update)

    def predictString(self, xs):
        "Predict output as a string. This uses codec and normalizer."
        cs = self.predictSequence(xs)
        return self.l2s(cs)
class Codec:
    """Translate between integer codes and characters."""

    def init(self, charset):
        """Build the two-way mapping from an iterable of characters.
        Duplicates are dropped and codes follow sorted order; returns self."""
        symbols = sorted(set(charset))
        self.code2char = dict(enumerate(symbols))
        self.char2code = {char: code for code, char in enumerate(symbols)}
        return self

    def size(self):
        """The total number of codes (use this for the number of output
        classes when training a classifier)."""
        return len(self.code2char)

    def encode(self, s):
        """Encode the string `s` into a code sequence; unknown characters
        map to the code for "~"."""
        fallback = self.char2code["~"]
        return [self.char2code.get(ch, fallback) for ch in s]

    def decode(self, l):
        """Decode a code sequence into a list of characters; unknown codes
        map to "~"."""
        return [self.code2char.get(code, "~") for code in l]
# Label set for ASCII-only models: blank/epsilon, space, reject ("~"), then
# the printable ASCII range. BUG FIX: `unichr` is Python 2 only and raises
# NameError under Python 3 (which this file otherwise targets); use `chr`.
ascii_labels = ["", " ", "~"] + [chr(x) for x in range(33, 126)]
def ascii_codec():
    """Build a `Codec` over just the ASCII label set."""
    codec = Codec()
    return codec.init(ascii_labels)
def ocropus_codec():
    """Create a codec containing ASCII characters plus the default
    character set from ocrolib."""
    import ocrolib

    known = set(ascii_labels)
    extra = [ch for ch in ocrolib.chars.default if ch not in known]
    return Codec().init(list(ascii_labels) + extra)
def getstates_for_display(net):
    """Get internal states of an LSTM network for making nice state
    plots. This only works on a few types of LSTM."""
    if isinstance(net, LSTM):
        return net.state[: net.last_n]
    if isinstance(net, Stacked):
        first = net.nets[0]
        if isinstance(first, LSTM):
            return first.state[: first.last_n]
    return None
|
implant | remove | import eos.db
import gui.mainFrame
import wx
from gui import globalEvents as GE
from gui.fitCommands.calc.implant.remove import CalcRemoveImplantCommand
from gui.fitCommands.helpers import InternalCommandHistory
from service.fit import Fit
from service.market import Market
class GuiRemoveImplantsCommand(wx.Command):
    """Undoable GUI command that removes implants at the given positions
    from a fit, records them in the recently-used market list, and posts a
    FitChanged event so the UI refreshes."""

    def __init__(self, fitID, positions):
        wx.Command.__init__(self, True, "Remove Implants")
        # Internal history so Undo can replay/undo the calc commands as a unit.
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        self.positions = positions

    def Do(self):
        """Remove each implant; returns True if at least one removal succeeded."""
        sMkt = Market.getInstance()
        results = []
        # Remove from highest position down so earlier removals do not
        # shift the indices of the positions still to be processed.
        for position in sorted(self.positions, reverse=True):
            cmd = CalcRemoveImplantCommand(fitID=self.fitID, position=position)
            results.append(self.internalHistory.submit(cmd))
            # NOTE(review): this runs even when submit() reported failure for
            # this cmd — presumably savedImplantInfo is still populated then;
            # confirm against CalcRemoveImplantCommand.
            sMkt.storeRecentlyUsed(cmd.savedImplantInfo.itemID)
        success = any(results)
        eos.db.flush()
        sFit = Fit.getInstance()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(
            gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,))
        )
        return success

    def Undo(self):
        """Undo all submitted removals; returns True if the rollback succeeded."""
        success = self.internalHistory.undoAll()
        eos.db.flush()
        sFit = Fit.getInstance()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(
            gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,))
        )
        return success
|
src | multiqueue | """
A queue with multiple internal subqueues.
Elements are added into a random subqueue, and retrieval rotates
"""
from collections import deque
from six.moves import queue
try:
import helper_random
except ImportError:
from . import helper_random
class MultiQueue(queue.Queue):
    """A queue.Queue variant backed by several internal subqueues: items
    are put into a random subqueue, while gets are served from whichever
    subqueue the rotation counter currently selects."""

    # pylint: disable=redefined-builtin,attribute-defined-outside-init
    defaultQueueCount = 10

    def __init__(self, maxsize=0, count=0):
        # queueCount must be set before Queue.__init__ runs, since the
        # base constructor calls _init().
        self.queueCount = count if count else MultiQueue.defaultQueueCount
        queue.Queue.__init__(self, maxsize)

    # Initialize the queue representation
    def _init(self, maxsize):
        self.iter = 0
        self.queues = [deque() for _ in range(self.queueCount)]

    def _qsize(self, len=len):
        # Size of the currently selected subqueue only (matches _get).
        return len(self.queues[self.iter])

    # Put a new item in the queue
    def _put(self, item):
        idx = helper_random.randomrandrange(self.queueCount)
        self.queues[idx].append(item)

    # Get an item from the queue
    def _get(self):
        return self.queues[self.iter].popleft()

    def iterate(self):
        """Increment the iteration counter"""
        self.iter = (self.iter + 1) % self.queueCount

    def totalSize(self):
        """Return the total number of items in all the queues"""
        return sum(len(sub) for sub in self.queues)
|
extractor | indavideo | # coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_str
from ..utils import int_or_none, parse_age_limit, parse_iso8601, update_url_query
from .common import InfoExtractor
class IndavideoEmbedIE(InfoExtractor):
    _VALID_URL = r"https?://(?:(?:embed\.)?indavideo\.hu/player/video/|assets\.indavideo\.hu/swf/player\.swf\?.*\b(?:v(?:ID|id))=)(?P<id>[\da-f]+)"
    _TESTS = [
        {
            "url": "http://indavideo.hu/player/video/1bdc3c6d80/",
            "md5": "c8a507a1c7410685f83a06eaeeaafeab",
            "info_dict": {
                "id": "1837039",
                "ext": "mp4",
                "title": "Cicatánc",
                "description": "",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "uploader": "cukiajanlo",
                "uploader_id": "83729",
                "timestamp": 1439193826,
                "upload_date": "20150810",
                "duration": 72,
                "age_limit": 0,
                "tags": ["tánc", "cica", "cuki", "cukiajanlo", "newsroom"],
            },
        },
        {
            "url": "http://embed.indavideo.hu/player/video/1bdc3c6d80?autostart=1&hide=1",
            "only_matching": True,
        },
        {
            "url": "http://assets.indavideo.hu/swf/player.swf?v=fe25e500&vID=1bdc3c6d80&autostart=1&hide=1&i=1",
            "only_matching": True,
        },
    ]
    # Some example URLs covered by generic extractor:
    # http://indavideo.hu/video/Vicces_cica_1
    # http://index.indavideo.hu/video/2015_0728_beregszasz
    # http://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko
    # http://erotika.indavideo.hu/video/Amator_tini_punci
    # http://film.indavideo.hu/video/f_hrom_nagymamm_volt
    # http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes

    @staticmethod
    def _extract_urls(webpage):
        """Find embedded player iframe URLs in an arbitrary webpage."""
        return re.findall(
            r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//embed\.indavideo\.hu/player/video/[\da-f]+)',
            webpage,
        )

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            "https://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s"
            % video_id,
            video_id,
        )["data"]

        title = video["title"]

        # Collect candidate media URLs from both list- and dict-shaped fields.
        video_urls = []

        video_files = video.get("video_files")
        if isinstance(video_files, list):
            video_urls.extend(video_files)
        elif isinstance(video_files, dict):
            video_urls.extend(video_files.values())

        video_file = video.get("video_file")
        # BUG FIX: previously this checked `if video:` (always truthy here),
        # which appended None to video_urls when "video_file" was absent.
        if video_file:
            video_urls.append(video_file)
        video_urls = list(set(video_urls))

        video_prefix = video_urls[0].rsplit("/", 1)[0]

        for flv_file in video.get("flv_files", []):
            flv_url = "%s/%s" % (video_prefix, flv_file)
            if flv_url not in video_urls:
                video_urls.append(flv_url)

        filesh = video.get("filesh")

        formats = []
        for video_url in video_urls:
            height = int_or_none(
                self._search_regex(
                    r"\.(\d{3,4})\.mp4(?:\?|$)", video_url, "height", default=None
                )
            )
            if filesh:
                # Token-protected URLs require a per-height token.
                if not height:
                    continue
                token = filesh.get(compat_str(height))
                if token is None:
                    continue
                video_url = update_url_query(video_url, {"token": token})
            formats.append(
                {
                    "url": video_url,
                    "height": height,
                }
            )
        self._sort_formats(formats)

        timestamp = video.get("date")
        if timestamp:
            # upload date is in CEST
            timestamp = parse_iso8601(timestamp + " +0200", " ")

        thumbnails = [
            {"url": self._proto_relative_url(thumbnail)}
            for thumbnail in video.get("thumbnails", [])
        ]

        tags = [tag["title"] for tag in video.get("tags") or []]

        return {
            "id": video.get("id") or video_id,
            "title": title,
            "description": video.get("description"),
            "thumbnails": thumbnails,
            "uploader": video.get("user_name"),
            "uploader_id": video.get("user_id"),
            "timestamp": timestamp,
            "duration": int_or_none(video.get("length")),
            "age_limit": parse_age_limit(video.get("age_limit")),
            "tags": tags,
            "formats": formats,
        }
|
puddlestuff | puddletag | import logging
import os
import platform
import sys
import traceback
import urllib.parse
from collections import defaultdict
from errno import EEXIST
from functools import partial
from operator import itemgetter
from PyQt5.QtCore import QDir, QSettings, QUrl, pyqtRemoveInputHook, pyqtSignal
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import (
QAction,
QApplication,
QFileDialog,
QFrame,
QLabel,
QMainWindow,
QMenu,
QMessageBox,
QSplitter,
QVBoxLayout,
QWidget,
)
from . import (
action_shortcuts,
audioinfo,
changeset,
confirmations,
constants,
findfunc,
genres,
)
from . import loadshortcuts as ls
from . import (
m3u,
mainwin,
plugins,
shortcutsettings,
tagmodel,
tagsources,
version_string,
)
from .about import versions
from .audioinfo import PATH, encode_fn, lnglength, str_filesize, strlength
from .mainwin import funcs as mainfuncs
from .masstag import dialogs
from .puddleobjects import (
PuddleConfig,
PuddleDock,
PuddleStatus,
dircmp,
errormsg,
get_icon,
progress,
winsettings,
)
from .puddlesettings import SettingsDialog, load_gen_settings, update_settings
from .tagmodel import TagTable
from .translations import translate
from .util import rename_error_msg
# Shared application-wide status object, created before any windows exist so
# the module-level wiring below can hand it to every subsystem.
status = PuddleStatus()
# Restore normal Ctrl+C handling when running from a terminal.
pyqtRemoveInputHook()
# A global variable that holds the status of
# various puddletag statuses.
# It is passed to any and all modules that ask for it.
# Feel free to read as much as you want from it, but
# modify only values that you've created or that are
# intended to be modified. This rule may be enforced
# in the future.
mainfuncs.status = status
tagmodel.status = status
mainwin.previews.set_status(status)
mainwin.tagtools.set_status(status)
plugins.status = status
# Register the user-configurable confirmation prompts.
confirmations.add(
    "preview_mode",
    True,
    translate("Confirmations", "Confirm when exiting preview mode."),
)
confirmations.add(
    "delete_files", True, translate("Confirmations", "Confirm when deleting files.")
)
def create_tool_windows(parent, extra=None):
    """Creates the dock widgets for the main window (parent) using
    the modules stored in puddlestuff/mainwin.
    Returns (the toggleViewActions of the docks, the dockWidgets
    themselves)."""
    actions = []
    docks = []
    cparser = PuddleConfig()
    cparser.filename = ls.menu_path
    # Modules that each expose a ``control`` description tuple.
    widgets = (
        mainwin.tagpanel,
        mainwin.artwork,
        mainwin.dirview,
        mainwin.patterncombo,
        mainwin.filterwin,
        mainwin.tagsources,
        mainwin.storedtags,
        mainwin.logdialog,
        dialogs,
    )
    controls = [z.control for z in widgets]
    controls.extend(mainwin.action_dialogs.controls)
    if extra:
        controls.extend(extra)
    # Each ``z`` appears to be (name, constructor, dock area[, visible
    # [, floating]]); the trailing entries are optional, hence the
    # IndexError handlers below -- TODO confirm against the control modules.
    for z in controls:
        name = z[0]
        try:
            # A falsy dock-area entry means the control is instantiated and
            # registered but not shown in a dock widget.
            if not z[2]:
                PuddleDock._controls[name] = z[1](status=status)
                continue
        except IndexError:
            pass
        p = PuddleDock(z[0], z[1], parent, status)
        parent.addDockWidget(z[2], p)
        try:
            # Optional fifth element: start the dock floating, centred on
            # the main window.
            if z[4]:
                p.setFloating(True)
                p.move(parent.rect().center())
        except IndexError:
            pass
        p.setVisible(z[3])
        docks.append(p)
        action = p.toggleViewAction()
        action.setText(name)
        # Restore any user-configured shortcut for toggling this dock.
        scut = cparser.get("winshortcuts", name, "")
        if scut:
            action.setShortcut(scut)
        actions.append(action)
    return actions, docks
def create_context_menus(controls, actions):
    """Attach configured context menus to controls.

    For every control name, ``ls.context_menu`` decides which of ``actions``
    belong in that control's menu; controls without a configured menu are
    left untouched.  See the context_menu function in the loadshortcuts
    module for details.
    """
    for name, control in controls.items():
        menu = ls.context_menu(name, actions)
        if menu:
            control.contextMenu = menu
def connect_controls(controls):
    """Connect the signals emitted by controls to any controls that
    receive them.

    Each control exposes two attributes:

    * ``emits`` -- names of the signals it emits.
    * ``receives`` -- ``(signal name, slot)`` pairs it wants to listen to.

    Every slot in a control's ``receives`` list is connected to the
    matching signal of every control that emits it.
    """
    # Map each signal name to the controls that emit it.  The original code
    # built this with a conditional expression used purely for its side
    # effects; a defaultdict is the idiomatic equivalent.
    emitters = defaultdict(list)
    for control in controls:
        for sig in control.emits:
            emitters[sig].append(control)
    for control in controls:
        for signal, slot in control.receives:
            # ``.get`` avoids creating empty entries for unknown signals.
            for emitter in emitters.get(signal, ()):
                getattr(emitter, signal).connect(slot)
def connect_control(control, controls):
    """Wire a newly added control into a set of existing controls.

    Connects slots of ``control`` to matching signals of the existing
    ``controls``, and slots of the existing controls to matching signals
    of ``control``.
    """
    # Signal name -> existing controls that emit it.
    emitted_by = defaultdict(list)
    for existing in controls:
        for sig in existing.emits:
            emitted_by[sig].append(existing)
    # Hook the new control's slots up to matching existing signals.
    for signal, slot in control.receives:
        if signal in emitted_by:
            for emitter in emitted_by[signal]:
                getattr(emitter, signal).connect(slot)
    # Hook existing controls' slots up to the new control's signals.
    for existing in controls:
        for signal, slot in existing.receives:
            if signal in control.emits:
                getattr(control, signal).connect(slot)
def action_triggered_slot(control, command):
    """Return a zero-argument slot that invokes ``command`` on ``control``.

    QAction.triggered passes a ``checked=False`` argument which the target
    methods do not expect, so the wrapper deliberately accepts no arguments.
    """

    def _invoke():
        return getattr(control, command)()

    return _invoke
def connect_actions(actions, controls):
    """Connect the triggered() signals in actions to the respective
    slot in controls if it exists. Just a message is shown if it
    doesn't."""
    # Map signal name -> list of controls that emit it.
    emits = {}
    for c in controls.values():
        for sig in c.emits:
            emits[sig].append(c) if sig in emits else emits.update({sig: [c]})
    for action in actions:
        # Enable/disable the action from the control signal named by
        # ``action.enabled``; actions without such a signal stay disabled.
        if action.enabled in emits:
            [
                getattr(c, action.enabled).connect(action.setEnabled)
                for c in emits[action.enabled]
            ]
        else:
            logging.debug("No enable signal found for " + action.text())
            action.setEnabled(False)
            continue
        if action.togglecheck and action.togglecheck in emits:
            [
                getattr(c, action.togglecheck).connect(action.setEnabled)
                for c in emits[action.togglecheck]
            ]
        command = action.command
        if action.control == "mainwin" and hasattr(mainfuncs, command):
            f = getattr(mainfuncs, command)
            if "parent" in f.__code__.co_varnames:
                # NOTE(review): ``c`` is whatever control the loops above
                # happened to bind last, not an explicitly chosen parent --
                # this looks accidental; confirm before relying on it.
                f = partial(f, parent=c)
            if action.togglecheck:
                action.toggled.connect(f)
            else:
                action.triggered.connect(f)
            continue
        elif action.control in controls:
            c = controls[action.control]
            if hasattr(c, command):
                action.triggered.connect(action_triggered_slot(c, command))
            else:
                logging.debug(action.command + " slot not found for " + action.text())
def connect_action_shortcuts(actions):
    """Apply user-configured keyboard shortcuts (keyed by action text)
    from the menu configuration file to the given actions."""
    config = PuddleConfig()
    config.filename = ls.menu_path
    for act in actions:
        stored = config.get("shortcuts", str(act.text()), "")
        if stored:
            act.setShortcut(stored)
def get_os():
    """Return a human-readable description of the underlying operating system.

    Always returns a non-empty string.  On Linux, several strategies are
    tried to obtain the distribution name before falling back to the
    generic "system release version" string.
    """

    def osrelease_dict_to_str(osrel):
        """Returns a pretty string from the given os-release dict."""
        # First try: The PRETTY_NAME usually already includes a version
        if "PRETTY_NAME" in osrel:
            return osrel["PRETTY_NAME"]
        # Second try: Combine name and version
        name = next((osrel[k] for k in ["NAME", "ID"] if k in osrel), "Linux")
        version = next(
            (
                osrel[k]
                for k in ["VERSION", "VERSION_ID", "VERSION_CODENAME"]
                if k in osrel
            ),
            "",
        )
        return "%s %s" % (name, version)

    def read_osrelease():
        """Naive re-implementation of freedesktop_os_release.

        Raises OSError if no os-release file could be read.
        """
        for filepath in ["/etc/os-release", "/usr/lib/os-release"]:
            osrelease_dict = {}
            try:
                with open(filepath, "r", -1, "utf-8") as file:
                    for line in file:
                        line = line.strip()
                        # Skip blank lines, comments and anything that is not
                        # a KEY=value pair.  os-release files may legally
                        # contain blank lines, which previously crashed the
                        # unpacking below with an uncaught ValueError.
                        if not line or line.startswith("#") or "=" not in line:
                            continue
                        name, value = line.split("=", 1)
                        osrelease_dict[name.strip()] = value.strip("\"'").strip()
            except OSError:
                pass
            else:
                return osrelease_dict
        raise OSError("No file to read found")

    def get_linux_name():
        """Returns the name of the linux distribution, or None on failure."""
        # First try: The optional distro package (should always work if available)
        try:
            import distro
        except ImportError:
            pass
        else:
            return distro.name(True)
        # Second try: the new function in platform (py v3.10+ only)
        if hasattr(platform, "freedesktop_os_release"):
            try:
                return osrelease_dict_to_str(platform.freedesktop_os_release())
            except OSError:
                pass
        # Third try: the old function in platform (until py v3.7 only)
        if hasattr(platform, "linux_distribution"):
            linux_dist = platform.linux_distribution()
            if any((x for x in linux_dist if x)):
                return "%s %s" % (linux_dist[0], linux_dist[1])
        # Fourth try: read the os-release file directly (to fill the gap btw. 3.7 and 3.10)
        try:
            return osrelease_dict_to_str(read_osrelease())
        except OSError:
            pass
        # We tried hard, but it wasn't enough.
        return None

    if "Linux" == platform.system():
        result = get_linux_name()
        if result:
            return result
        # else fall through to the fallback
    if "Windows" == platform.system():
        win_info = platform.win32_ver()
        return "Windows %s" % (win_info[0])
    if "Darwin" == platform.system():
        return "macOS %s" % (platform.mac_ver()[0])
    # In case of emergency, hope for the best
    return "%s %s %s" % (platform.system(), platform.release(), platform.version())
def create_bug_report_issue():
    """Create a new, pre-filled bug report as github issue.

    Opens the GitHub "new issue" page in the default browser with the
    bug-report template pre-selected and the system/version details filled
    into the "system" field.
    """
    puddletag_version = version_string
    if changeset:
        # Include the VCS changeset when one is known (source checkout).
        puddletag_version = f"{puddletag_version} ({changeset})"
    # (label, value) pairs rendered one-per-line in the issue form.
    version_list = [
        ("Puddletag", puddletag_version),
        ("OS", get_os().strip()),
        *versions().items(),
    ]
    params = urllib.parse.urlencode(
        {
            "template": "bug_report.yaml",
            "labels": "bug",
            "system": "\n".join(map(lambda x: f"{x[0]}: {x[1]}", version_list)),
        }
    )
    url = f"https://github.com/puddletag/puddletag/issues/new?{params}"
    QDesktopServices.openUrl(QUrl(url))
def help_menu(parent):
    """Build and return the Help menu: documentation and project links, a
    bug-report shortcut and the two About entries."""
    menu = QMenu(translate("Menus", "Help"), parent)

    def make_link(text, url):
        # Action that opens an external URL in the default browser.
        action = QAction(text, parent)
        action.triggered.connect(lambda: QDesktopServices.openUrl(QUrl(url)))
        return action

    doc_link = make_link(
        translate("Menus", "Online &Documentation"),
        "https://docs.puddletag.net/docs.html",
    )
    forum_link = make_link(
        translate("Menus", "&GitHub project"),
        "https://github.com/puddletag/puddletag",
    )
    issue_link = QAction(translate("Menus", "&Report a problem"), parent)
    issue_link.triggered.connect(create_bug_report_issue)
    about = QAction(
        get_icon("help-about"), translate("Menus", "About puddletag"), parent
    )
    about.triggered.connect(partial(mainfuncs.show_about, parent))
    about_qt = QAction(translate("Menus", "About Qt"), parent)
    about_qt.triggered.connect(QApplication.aboutQt)
    sep = QAction(parent)
    sep.setSeparator(True)
    for action in (doc_link, forum_link, issue_link, sep, about, about_qt):
        menu.addAction(action)
    return menu
def load_plugins():
    """Load all plugins and register them with the relevant subsystems.

    Returns a (dialog plugins, module plugins) pair for the caller to
    finish wiring up.
    """
    from . import functions, musiclib
    from .pluginloader import load_plugins

    loaded = load_plugins()
    findfunc.functions.update(loaded[constants.FUNCTIONS])
    functions.no_preview.extend(loaded[constants.FUNCTIONS_NO_PREVIEW])
    tagsources.tagsources.extend(loaded[constants.TAGSOURCE])
    musiclib.extralibs = loaded[constants.MUSICLIBS]
    return loaded[constants.DIALOGS], loaded[constants.MODULES]
class PreviewLabel(QLabel):
    """Status-bar label that toggles an on/off state on double-click and
    announces the new state via ``valueChanged``."""

    valueChanged = pyqtSignal(bool, name="valueChanged")

    def __init__(self, *args, **kwargs):
        super(PreviewLabel, self).__init__(*args, **kwargs)
        # Current toggle state; starts off.
        self._enabled = False

    def mouseDoubleClickEvent(self, event):
        new_state = not self._enabled
        self._enabled = new_state
        self.valueChanged.emit(new_state)
def _openFilesFilterFilename(filename):
    """Normalise a filename passed to openFiles: make it absolute, then
    filesystem-encode it if it is still a str."""
    path = os.path.abspath(filename)
    if isinstance(path, str):
        path = encode_fn(path)
    return path
# Module-level shortcut (un)registration callbacks; set in MainWin.__init__
# so plugins can add/remove menu entries without a main-window reference.
add_shortcuts = None
remove_shortcuts = None
class MainWin(QMainWindow):
    """puddletag's main window.

    Hosts the tag table, all dock-window controls, menus and toolbars, and
    routes signals between them.  Shared application state lives in the
    module-level ``status`` object.
    """

    loadFiles = pyqtSignal(object, object, object, object, object, name="loadFiles")
    always = pyqtSignal(bool, name="always")
    dirsmoved = pyqtSignal(list, name="dirsmoved")
    libfilesedited = pyqtSignal(list, name="libfilesedited")
    enable_preview_mode = pyqtSignal(name="enable_preview_mode")
    disable_preview_mode = pyqtSignal(name="disable_preview_mode")
    filesloaded = pyqtSignal(bool, name="filesloaded")
    filesselected = pyqtSignal(bool, name="filesselected")
    viewfilled = pyqtSignal(bool, name="viewfilled")

    def __init__(self):
        QMainWindow.__init__(self)
        # While False, updateDirs() queues directory renames instead of
        # applying them (set by _write during write operations).
        self.__updateDirs = True
        self.__dirsToUpdate = []
        global add_shortcuts
        global remove_shortcuts
        add_shortcuts = self.addShortcuts
        remove_shortcuts = self.removeShortcuts
        plugins.add_shortcuts = add_shortcuts
        # Signals this window emits / slots it exposes; consumed by
        # connect_controls/connect_control.
        self.emits = [
            "loadFiles",
            "always",
            "dirsmoved",
            "libfilesedited",
            "enable_preview_mode",
            "disable_preview_mode",
        ]
        self.receives = [
            ("writeselected", self.writeTags),
            ("filesloaded", self._filesLoaded),
            ("viewfilled", self._viewFilled),
            ("filesselected", self._filesSelected),
            ("renamedirs", self.renameDirs),
            ("filesloaded", self.updateTotalStats),
            ("filesselected", self.updateSelectedStats),
            ("onetomany", self.writeOneToMany),
            ("dirschanged", self._dirChanged),
            ("writepreview", self._writePreview),
            ("clearpreview", self._clearPreview),
            ("renameselected", self._renameSelected),
            ("playlistchanged", self._dirChanged),
            ("adddock", self.addDock),
            ("writeaction", self.writeAction),
            ("onetomanypreview", self.writeSinglePreview),
            ("manypreview", self.writeManyPreview),
        ]
        self.gensettings = [("&Load last folder at startup", False, 1)]
        self._playlist = None
        plugin_dialogs, plugin_modules = load_plugins()
        self.setWindowTitle("puddletag")
        self.setDockNestingEnabled(True)
        self._table = TagTable()
        self._table.dirsmoved.connect(self.updateDirs)
        win = QSplitter()
        layout = QVBoxLayout()
        layout.addWidget(self._table)
        layoutWidget = QWidget()
        layoutWidget.setLayout(layout)
        win.addWidget(layoutWidget)
        self.setCentralWidget(win)
        PuddleDock._controls = {
            "table": self._table,
            "mainwin": self,
            "funcs": mainfuncs.obj,
        }
        status["mainwin"] = self
        status["model"] = self._table.model()
        status["table"] = self._table
        ls.create_files()
        winactions, self._docks = create_tool_windows(self)
        status["dialogs"] = PuddleDock._controls
        self.createStatusBar()
        actions = ls.get_actions(self)
        menus = ls.get_menus("menu")
        previewactions = mainwin.previews.create_actions(self)
        all_actions = actions + winactions + previewactions
        controls = PuddleDock._controls
        toolgroup = ls.get_menus("toolbar")
        toolbar = ls.toolbar(toolgroup, all_actions, controls)
        toolbar.setObjectName(translate("Menus", "Toolbar"))
        self.addToolBar(toolbar)
        menubar, winmenu, self._menus = ls.menubar(menus, all_actions)
        # Make sure every dock-toggle action appears in the Windows menu.
        temp_winactions = winmenu.actions()
        [winmenu.addAction(a) for a in winactions if a not in temp_winactions]
        if winmenu:
            winmenu.addSeparator()
            self._winmenu = winmenu
        else:
            self._winmenu = QMenu(translate("Settings", "&Windows"), self)
            menubar.addMenu(self._winmenu)
        self.setMenuBar(menubar)
        menubar.addMenu(help_menu(self))
        mainfuncs.connect_status(actions)
        connect_actions(actions, controls)
        connect_action_shortcuts(all_actions)
        create_context_menus(controls, all_actions)
        status["actions"] = all_actions
        # Give plugin modules a chance to initialise; a failing plugin must
        # not keep the application from starting.
        for m in plugin_modules:
            if hasattr(m, "init"):
                try:
                    m.init(parent=self)
                except Exception:
                    traceback.print_exc()
                    continue
        for win in plugin_dialogs:
            try:
                self.addDock(*win, connect=False)
            except Exception:
                logging.exception("Error while loading Plugin dialog.")
        self.restoreSettings()
        self.always.emit(True)

    def addDock(self, name, dialog, position, visibility=True, connect=True):
        """Create a new PuddleDock for ``dialog`` and register its control."""
        controls = list(PuddleDock._controls.values())
        dock = PuddleDock(name, dialog, self, status)
        self.addDockWidget(position, dock)
        self._winmenu.addAction(dock.toggleViewAction())
        if connect:
            connect_control(PuddleDock._controls[name], controls)
        dock.setVisible(visibility)
        self.restoreDockWidget(dock)
        return PuddleDock._controls[name]

    def addShortcuts(self, menu_title, actions, toolbar=False, save=False):
        """Add ``actions`` to the menu titled ``menu_title``, creating the
        menu (before the Windows menu) if needed."""
        if not actions:
            return
        if menu_title in self._menus:
            menu = self._menus[menu_title][0]
        else:
            menu = QMenu(menu_title)
            self._menus[menu_title] = [menu] + actions
            self.menuBar().insertMenu(self._menus["&Windows"][0].menuAction(), menu)
        status["actions"].extend(actions)
        list(map(menu.addAction, actions))
        if toolbar:
            # NOTE(review): ``self.toolbar`` is never assigned in this class
            # (the toolbar is a local in __init__) -- this branch looks like
            # it would raise AttributeError; confirm before using it.
            list(map(self.toolbar.addAction, actions))
        if save:
            shortcutsettings.ActionEditorDialog.saveSettings(status["actions"])

    def _clearPreview(self):
        """Discard any pending preview values in the model."""
        self._table.model().unSetTestData()

    def createShortcut(self, text, slot, *args, **kwargs):
        """Create an action for ``slot`` and wire it to the controls."""
        action = ls.create_action(self, text, None, slot)
        connect_actions([action], PuddleDock._controls)

    def _dirChanged(self, dirs):
        """Update the window title (and ``self._lastdir``) for the currently
        loaded directories."""
        if not dirs:
            self.setWindowTitle("puddletag")
            return
        if isinstance(dirs, str):
            dirs = [dirs]
        dirs = [encode_fn(d) for d in dirs]
        if self._lastdir:
            initial = self._lastdir[0]
        else:
            initial = None
        if initial not in dirs:
            initial = dirs[0]
        if isinstance(initial, bytes):
            initial = initial.decode("utf8", "replace")
        # NOTE(review): translate() results are used with QString-style
        # .arg() here -- presumably the project's translate returns an
        # .arg-capable wrapper; confirm in puddlestuff.translations.
        if len(dirs) > 1:
            self.setWindowTitle(
                translate("Main Window", "puddletag: %1 + others").arg(initial)
            )
        else:
            self.setWindowTitle(translate("Main Window", "puddletag: %1").arg(initial))
        self._lastdir = dirs

    def _getDir(self):
        """Show a directory chooser and return the chosen path ('' if
        cancelled), starting in the last used folder."""
        dirname = self._lastdir[0] if self._lastdir else QDir.homePath()
        # Only the static dialog below is used; the previously constructed
        # (and never shown) QFileDialog instance has been removed.
        filename = str(
            QFileDialog.getExistingDirectory(
                self,
                translate("Main Window", "Import directory..."),
                dirname,
                QFileDialog.Option.ShowDirsOnly
                | QFileDialog.Option.DontUseNativeDialog
                | QFileDialog.Option.DontResolveSymlinks,
            )
        )
        return filename

    def appendDir(self, filename=None):
        """Open a directory, appending it to the current file list."""
        self.openDir(filename, append=True)

    def _filesLoaded(self, val):
        # Re-emit for controls connected to this window's signals.
        self.filesloaded.emit(val)

    def _filesSelected(self, val):
        # Re-emit for controls connected to this window's signals.
        self.filesselected.emit(val)

    def applyGenSettings(self, settings, level=None):
        """General settings hook; the main window has nothing to apply."""
        pass

    def closeEvent(self, e):
        """Persist window/control state on close; veto the close if there
        are unwritten previews the user wants to keep."""
        preview_msg = translate(
            "Previews",
            "Some files have uncommited previews. "
            "These changes will be lost once you exit puddletag. <br />"
            "Do you want to exit without writing those changes?<br />",
        )
        if tagmodel.has_previews(parent=self, msg=preview_msg):
            e.ignore()
            return False
        for control in PuddleDock._controls.values():
            if hasattr(control, "saveSettings"):
                try:
                    control.saveSettings(self)
                except TypeError:
                    # Older controls take no argument.
                    control.saveSettings()
        cparser = PuddleConfig()
        settings = QSettings(constants.QT_CONFIG, QSettings.Format.IniFormat)
        if self._lastdir:
            cparser.set("main", "lastfolder", self._lastdir[0])
        cparser.set("main", "maximized", self.isMaximized())
        settings.setValue("main/state", self.saveState())
        headstate = self._table.horizontalHeader().saveState()
        settings.setValue("table/header", headstate)
        genres.save_genres(status["genres"])
        e.accept()

    def createStatusBar(self):
        """Build the status bar: message label, preview-mode toggle and the
        selected/total file statistics labels."""
        statusbar = self.statusBar()
        statuslabel = QLabel()
        statuslabel.setFrameStyle(QFrame.Shape.NoFrame)
        statusbar.addPermanentWidget(statuslabel, 1)
        self._totalstats = QLabel("00 (00:00:00 | 00 MB)")
        self._selectedstats = QLabel("00 (00:00:00 | 00 MB)")
        preview_status = PreviewLabel(translate("Previews", "Preview Mode: Off"))
        statusbar.addPermanentWidget(preview_status, 0)
        statusbar.addPermanentWidget(self._selectedstats, 0)
        statusbar.addPermanentWidget(self._totalstats, 0)

        def set_preview_status(value):
            # Reflect the model's preview mode in the label text.
            if value:
                preview_status.setText(translate("Previews", "<b>Preview Mode: On</b>"))
            else:
                preview_status.setText(translate("Previews", "Preview Mode: Off"))

        def change_preview(value):
            # Double-clicking the label toggles preview mode.
            if value:
                self.enable_preview_mode.emit()
            else:
                self.disable_preview_mode.emit()

        preview_status.valueChanged.connect(change_preview)
        self._table.model().previewModeChanged.connect(set_preview_status)
        statusbar.setMaximumHeight(statusbar.height())
        statusbar.messageChanged.connect(statuslabel.setText)

    def loadPlayList(self):
        """Ask for an m3u playlist and load the files it references."""
        # NOTE(review): ``dirname`` is computed but never passed to the file
        # dialog -- presumably it was meant as the start directory; confirm.
        dirname = self._lastdir[0] if self._lastdir else QDir.homePath()
        selectedFile = QFileDialog.getOpenFileName(
            self,
            translate("Playlist", translate("Playlist", "Select m3u file...")),
        )
        filename = selectedFile[0]
        if not filename:
            return
        try:
            files = m3u.readm3u(filename)
            self.loadFiles.emit(files, None, None, None, filename)
        except (OSError, IOError) as e:
            QMessageBox.information(
                self._table,
                translate("Defaults", "Error"),
                translate("Playlist", "An error occured while reading <b>%1</b> (%2)")
                .arg(filename)
                .arg(e.strerror),
            )
        except UnicodeError as e:
            QMessageBox.information(
                self._table,
                translate("Defaults", "Error"),
                translate("Playlist", "The playlist is not encoded in UTF-8"),
            )
        except Exception as e:
            QMessageBox.information(
                self._table,
                translate("Defaults", "Error"),
                translate("Playlist", "An error occured while reading <b>%1</b> (%2)")
                .arg(filename)
                .arg(str(e)),
            )

    def openDir(self, filename=None, append=False):
        """Opens a folder. If filename != None, then
        the table is filled with the folder.

        If filename is None, show the open folder dialog and open that.
        If append is True, the folder is appended.
        Otherwise, the folder is just loaded."""
        if filename is None:
            filename = self._getDir()
            if not filename:
                return
        else:
            if not isinstance(filename, str):
                filename = filename[0]
        filename = os.path.abspath(filename)
        if isinstance(filename, str):
            filename = encode_fn(filename)
        self.loadFiles.emit(None, [filename], append, None, None)

    def openFiles(self, filenames, append=False):
        """Load the given filenames (normalised and fs-encoded first)."""
        filenames = map(_openFilesFilterFilename, filenames)
        self.loadFiles.emit(None, filenames, append, None, None)

    def openPrefs(self):
        """Show the settings dialog."""
        win = SettingsDialog(list(PuddleDock._controls.values()), self, status)
        win.show()

    def removeShortcuts(self, menu_title, actions):
        """Remove ``actions`` (objects or action-text strings) from the menu
        titled ``menu_title`` and from the global action list."""
        if menu_title in self._menus:
            menu = self._menus[menu_title][0]
            if actions:
                children = dict([(str(z.text()), z) for z in menu.actions()])
                for action in actions:
                    if isinstance(action, str):
                        action = children[action]
                    menu.removeAction(action)
                    try:
                        status["actions"].remove(action)
                    except ValueError:
                        pass

    def restoreSettings(self):
        """Load persisted configuration and restore all window, control and
        tag-mapping state."""
        scts = action_shortcuts.create_action_shortcuts(mainwin.funcs.applyaction, self)
        self.addShortcuts("&Actions", scts)
        connect_actions(scts, PuddleDock._controls)
        cparser = PuddleConfig()
        settings = QSettings(constants.QT_CONFIG, QSettings.Format.IniFormat)
        gensettings = {}
        controls = list(PuddleDock._controls.values())
        for control in controls:
            if hasattr(control, "loadSettings"):
                control.loadSettings()
            if hasattr(control, "gensettings"):
                t = load_gen_settings(control.gensettings)
                gensettings[control] = dict(t)
        for control, val in gensettings.items():
            control.applyGenSettings(val, 0)
        self._lastdir = [
            encode_fn(cparser.get("main", "lastfolder", constants.HOMEDIR))
        ]
        # Default tag-name mapping per tag format, merged with the user's
        # "mappings" file below.
        mapping = {
            "VorbisComment": {
                "date": "year",
                "tracknumber": "track",
                "musicbrainz_albumid": "mbrainz_album_id",
                "musicbrainz_artistid": "mbrainz_artist_id",
                "musicbrainz_trackid": "mbrainz_track_id",
            },
            "MP4": {
                "MusicBrainz Track Id": "mbrainz_track_id",
                "MusicBrainz Artist Id": "mbrainz_artist_id",
                "MusicBrainz Album Id": "mbrainz_album_id",
            },
            "ID3": {
                "ufid:http://musicbrainz.org": "mbrainz_track_id",
                "MusicBrainz Album Id": "mbrainz_album_id",
                "MusicBrainz Artist Id": "mbrainz_artist_id",
            },
            "APEv2": {
                "musicbrainz_albumid": "mbrainz_album_id",
                "musicbrainz_artistid": "mbrainz_artist_id",
                "musicbrainz_trackid": "mbrainz_track_id",
            },
        }
        filepath = os.path.join(cparser.savedir, "mappings")
        audioinfo.setmapping(audioinfo.loadmapping(filepath, mapping))
        status["genres"] = genres.load_genres()
        connect_controls(controls + [mainwin.previews.obj])
        cover_pattern = cparser.get("tags", "cover_pattern", "folder")
        status["cover_pattern"] = cover_pattern
        winsettings("mainwin", self)
        if cparser.get("main", "maximized", True):
            self.showMaximized()
        QApplication.processEvents()
        if constants.FS_ENC == "ascii":
            QMessageBox.warning(
                self,
                "puddletag",
                translate(
                    "Errors",
                    "Your filesystem encoding was detected as <b>ASCII</b>. <br />"
                    "You won't be able to rename files using accented, <br />"
                    " cyrillic or any characters outside the ASCII alphabet.",
                ),
            )
        for control, val in gensettings.items():
            control.applyGenSettings(val, 1)
        h = self._table.horizontalHeader()
        if settings.value("table/header"):
            h.restoreState(settings.value("table/header"))
        if settings.value("main/state"):
            self.restoreState(settings.value("main/state"))
        confirmations.load()
        shortcutsettings.ActionEditorDialog._loadSettings(status["actions"])
        update_settings()
        QApplication.processEvents()

    def savePlayList(self):
        """Export the selected (or all) files to an m3u playlist."""
        tags = status["selectedfiles"]
        if not tags:
            tags = status["alltags"]
        settings = PuddleConfig()
        try:
            dirname = self._lastdir[0]
        except IndexError:
            dirname = constants.HOMEDIR
        filepattern = settings.get("playlist", "filepattern", "puddletag.m3u")
        default = encode_fn(findfunc.tagtofilename(filepattern, tags[0]))
        selectedFile = QFileDialog.getSaveFileName(
            self,
            translate("Playlist", "Save Playlist..."),
            os.path.join(dirname, default),
        )
        f = selectedFile[0]
        if f:
            if settings.get("playlist", "extinfo", 1, True):
                pattern = settings.get("playlist", "extpattern", "%artist% - %title%")
            else:
                pattern = None
            reldir = settings.get("playlist", "reldir", 0, True)
            windows_separator = settings.get("playlist", "windows_separator", 0, False)
            m3u.exportm3u(tags, f, pattern, reldir, windows_separator)

    def _viewFilled(self, val):
        # Re-emit for controls connected to this window's signals.
        self.viewfilled.emit(val)

    def _updateStatus(self, files):
        """Return the "count (length | size)" summary string for ``files``."""
        if not files:
            return "00 (00:00:00 | 00 KB)"
        numfiles = len(files)
        stats = [(int(z.size), lnglength(z.length)) for z in files]
        totalsize = sum([z[0] for z in stats])
        totallength = strlength(sum([z[1] for z in stats]))
        sizetext = str_filesize(totalsize)
        return "%d (%s | %s)" % (numfiles, totallength, sizetext)

    def lockLayout(self):
        """Hide all dock title bars so the layout cannot be rearranged."""
        for dw in self._docks:
            dw.setTitleBarWidget(QWidget())

    def updateSelectedStats(self, *args):
        self._selectedstats.setText(self._updateStatus(status["selectedfiles"]))

    def updateTotalStats(self, *args):
        self._totalstats.setText(self._updateStatus(status["alltags"]))

    def _write(self, tagiter, rows=None, previews=None):
        """Prepare writing ``tagiter`` values to ``rows``.

        In preview mode the data is applied immediately and None is
        returned; otherwise returns (generator func, finished callback,
        rows) for the progress dialog.
        """
        self.__updateDirs = False
        if not rows:
            rows = status["selectedrows"]
        model = self._table.model()
        setRowData = model.setRowData

        def fin():
            # Close off the undo level and refresh selection-derived state.
            model.undolevel += 1
            self._table.selectionChanged()
            if not model.previewMode:
                self.libfilesedited.emit(lib_updates)

        lib_updates = []
        failed_rows = [rows[0]]  # First element=last row used.
        # Rest, rows that failed to write.
        if model.previewMode:
            [setRowData(row, f, undo=True) for row, f in zip(rows, tagiter)]
            fin()
            return

        def func():
            # Generator driven by the progress dialog; yields None per row,
            # or (message, count) on errors.
            for row, f in zip(rows, tagiter):
                failed_rows[0] = row
                try:
                    update = setRowData(row, f, undo=True)
                    if update:
                        lib_updates.append(update)
                    yield None
                except PermissionError as e:
                    failed_rows.append(row)
                    filename = model.taginfo[row][PATH]
                    m = rename_error_msg(e, filename)
                    if row == rows[-1]:
                        yield m, 1
                    else:
                        yield m, len(rows)
                except EnvironmentError as e:
                    failed_rows.append(row)
                    filename = model.taginfo[row][PATH]
                    m = rename_error_msg(e, filename)
                    if row == rows[-1]:
                        yield m, 1
                    else:
                        yield m, len(rows)

        def finished():
            self.__updateDirs = True
            self.updateDirs([])
            if previews and failed_rows[1:]:
                # Some rows failed: restore their previews so the user can
                # retry instead of losing the pending values.
                model.previewMode = True
                last_row = failed_rows[0]
                [failed_rows.append(r) for r in previews if r > last_row]
                taginfo = model.taginfo
                for row in failed_rows[1:]:
                    if row not in previews:
                        continue
                    taginfo[row].preview = previews[row]
                last_row = failed_rows[0]
                model.updateTable(failed_rows)
            return fin()

        return func, finished, rows

    def writeTags(self, tagiter, rows=None, previews=None):
        """Write tag values to rows, with a progress dialog."""
        ret = self._write(tagiter, rows, previews)
        if ret is None:
            return
        func, fin, rows = ret
        s = progress(func, translate("Defaults", "Writing "), len(rows), fin)
        s(self)

    def writeAction(self, tagiter, rows=None, state=None):
        """Write tag values produced by an action; afterwards rename any
        directories the action requested via ``state['rename_dirs']``."""
        if state is None:
            state = {}
        ret = self._write(tagiter, rows)
        if ret is None:
            return
        func, fin, rows = ret

        def finished():
            fin()
            if "rename_dirs" in state:
                self.renameDirs(list(state["rename_dirs"].items()))

        s = progress(func, translate("Defaults", "Writing "), len(rows), finished)
        s(self)

    def writeOneToMany(self, d):
        """Write the same tag dict ``d`` to every selected row."""
        rows = status["selectedrows"]
        ret = self._write((d.copy() for r in rows), rows)
        if ret is None:
            return
        func, fin, rows = ret
        s = progress(func, translate("Defaults", "Writing "), len(rows), fin)
        s(self)

    def writeSinglePreview(self, d):
        """Preview the tag dict ``d`` on every selected row."""
        if not status["previewmode"]:
            return
        model = self._table.model()
        rows = status["selectedrows"]
        if not rows:
            return
        setRowData = model.setRowData
        [setRowData(row, d, undo=False, temp=True) for row in rows]
        columns = [_f for _f in map(model.columns.get, d) if _f]
        if columns:
            # Repaint only the affected cell range.
            start = model.index(min(rows), min(columns))
            end = model.index(max(rows), max(columns))
            model.dataChanged.emit(start, end)

    def writeManyPreview(self, tags):
        """Preview one tag dict per selected row (zipped in order)."""
        if not status["previewmode"]:
            return
        model = self._table.model()
        rows = status["selectedrows"]
        setRowData = model.setRowData
        [setRowData(row, d, undo=False, temp=True) for row, d in zip(rows, tags)]
        columns = set(
            [
                model.columns.get(tagname)
                for tag in tags
                for tagname in tag
                if tagname in model.columns
            ]
        )
        if columns:
            # Repaint only the affected cell range.
            start = model.index(min(rows), min(columns))
            end = model.index(max(rows), max(columns))
            model.dataChanged.emit(start, end)

    def _writePreview(self):
        """Commit all pending previews to the files."""
        taginfo = self._table.model().taginfo
        previews = {}

        def get(audio, row):
            # Detach the preview from the audio object, remembering it so a
            # failed write can restore it (see _write.finished).
            preview = audio.preview
            audio.preview = {}
            previews[row] = preview
            return row, preview

        data = [get(audio, row) for row, audio in enumerate(taginfo) if audio.preview]
        if not data:
            return
        self._table.model().previewMode = False
        self.writeTags((z[1] for z in data), [z[0] for z in data], previews)

    def _renameSelected(self, filenames):
        """Rename the selected files to ``filenames`` (zipped in order)."""
        rows = status["selectedrows"]
        files = status["selectedfiles"]
        model = self._table.model()
        setRowData = model.setRowData

        def fin():
            model.undolevel += 1
            self._table.selectionChanged()

        if model.previewMode:
            for row, audio, filename in zip(rows, files, filenames):
                tag = PATH
                if tag in audio.mapping:
                    tag = audio.mapping[tag]
                setRowData(row, {tag: filename}, True, True)
            fin()
            return

        def func():
            # Generator for the progress dialog; yields (message, count) on
            # rename errors.
            for row, audio, filename in zip(rows, files, filenames):
                tag = PATH
                if tag in audio.mapping:
                    tag = audio.mapping[tag]
                try:
                    setRowData(row, {tag: filename}, True, True)
                    yield None
                except EnvironmentError as e:
                    m = (
                        translate(
                            "Dir Renaming",
                            "An error occured while renaming <b>%1</b> to "
                            "<b>%2</b>. (%3)",
                        )
                        .arg(audio[PATH])
                        .arg(filename)
                        .arg(e.strerror)
                    )
                    if row == rows[-1]:
                        yield m, 1
                    else:
                        yield m, len(rows)

        s = progress(func, translate("Dir Renaming", "Renaming "), len(rows), fin)
        s(self)

    def renameDirs(self, dirs):
        """Rename directories on disk, given (old dir, new dir) pairs, and
        update the table and window state accordingly."""
        from functools import cmp_to_key

        self._table.saveSelection()
        showmessage = True
        # FIX: Python 3's sorted() has no ``cmp`` parameter, so the old
        # ``sorted(dirs, dircmp, itemgetter(0))`` call raised TypeError on
        # every invocation.  Preserve the Python 2 semantics: sort by the
        # old directory name (element 0) using dircmp as the comparator.
        dirs = sorted(dirs, key=cmp_to_key(lambda a, b: dircmp(a[0], b[0])))
        for index, (olddir, newdir) in enumerate(dirs):
            try:
                if os.path.exists(newdir) and (olddir != newdir):
                    # Refuse to clobber an existing directory.
                    raise IOError(EEXIST, os.strerror(EEXIST), newdir)
                os.rename(olddir, newdir)
                self._table.changeFolder(olddir, newdir)
                if self._lastdir and olddir in self._lastdir:
                    self._lastdir[self._lastdir.index(olddir)] = newdir
            except (IOError, OSError) as detail:
                msg = (
                    translate(
                        "Dir Renaming", "I couldn't rename: <i>%1</i> to <b>%2</b> (%3)"
                    )
                    .arg(olddir)
                    .arg(newdir)
                    .arg(detail.strerror)
                )
                if index == len(dirs) - 1:
                    dirlen = 1
                else:
                    dirlen = len(dirs)
                if showmessage:
                    ret = errormsg(self, msg, dirlen)
                    if ret is True:
                        showmessage = False
                    elif ret is False:
                        break
        self.dirsmoved.emit(dirs)
        self._dirChanged(self._lastdir)
        self._table.restoreSelection()

    def updateDirs(self, dirs):
        """Apply (or queue, while a write is running) directory renames in
        the table without touching the disk."""
        if self.__updateDirs:
            if self.__dirsToUpdate:
                # Merge in renames queued while writing was in progress.
                dirs = self.__dirsToUpdate + dirs
                self.__dirsToUpdate = []
        else:
            self.__dirsToUpdate.extend(dirs)
            return
        # Drop duplicate renames of the same source directory (first wins).
        old_dirs = set()
        new_dirs = []
        for old_dir, new_dir in dirs:
            if old_dir not in old_dirs:
                new_dirs.append([old_dir, new_dir])
                old_dirs.add(old_dir)
        dirs = new_dirs
        if self._lastdir:
            last = self._lastdir[::]
        else:
            last = None
        for index, (olddir, newdir) in enumerate(dirs):
            self._table.changeFolder(olddir, newdir, False)
            if last and olddir in last:
                last[last.index(olddir)] = newdir
        self.dirsmoved.emit(dirs)
        self._dirChanged(last)
# Allow running this module directly for a quick manual test.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = MainWin()
    win.show()
    app.exec_()
|
extractor | screencastomatic | # coding: utf-8
from __future__ import unicode_literals
from ..utils import (
get_element_by_class,
int_or_none,
remove_start,
strip_or_none,
unified_strdate,
)
from .common import InfoExtractor
class ScreencastOMaticIE(InfoExtractor):
    """Extractor for screencast-o-matic.com watch/player/embed pages."""

    _VALID_URL = r"https?://screencast-o-matic\.com/(?:(?:watch|player)/|embed\?.*?\bsc=)(?P<id>[0-9a-zA-Z]+)"
    _TESTS = [
        {
            "url": "http://screencast-o-matic.com/watch/c2lD3BeOPl",
            "md5": "483583cb80d92588f15ccbedd90f0c18",
            "info_dict": {
                "id": "c2lD3BeOPl",
                "ext": "mp4",
                "title": "Welcome to 3-4 Philosophy @ DECV!",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "description": "as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.",
                "duration": 369,
                "upload_date": "20141216",
            },
        },
        {
            "url": "http://screencast-o-matic.com/player/c2lD3BeOPl",
            "only_matching": True,
        },
        {
            "url": "http://screencast-o-matic.com/embed?ff=true&sc=cbV2r4Q5TL&fromPH=true&a=1",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        """Download the player page and build the info dict from its HTML5
        media element plus the page's overlay metadata."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            "https://screencast-o-matic.com/player/" + video_id, video_id
        )
        # The player page embeds a standard HTML5 <video>; reuse the generic
        # parser for formats/thumbnails and layer the metadata on top.
        info = self._parse_html5_media_entries(url, webpage, video_id)[0]
        info.update(
            {
                "id": video_id,
                "title": get_element_by_class("overlayTitle", webpage),
                "description": strip_or_none(
                    get_element_by_class("overlayDescription", webpage)
                )
                or None,
                # Duration is only present in the player's JS, not the markup.
                "duration": int_or_none(
                    self._search_regex(
                        r"player\.duration\s*=\s*function\(\)\s*{\s*return\s+(\d+);\s*};",
                        webpage,
                        "duration",
                        default=None,
                    )
                ),
                "upload_date": unified_strdate(
                    remove_start(
                        get_element_by_class("overlayPublished", webpage), "Published: "
                    )
                ),
            }
        )
        return info
|
heartbeat | legacy_alertmanager | from pathlib import PurePath
from apps.integrations.metadata.heartbeat._heartbeat_text_creator import (
HeartBeatTextCreatorForTitleGrouping,
)
# Integration name derived from this file's name; used to build the texts.
integration_verbal = PurePath(__file__).stem
creator = HeartBeatTextCreatorForTitleGrouping(integration_verbal)
heartbeat_text = creator.get_heartbeat_texts()
heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
# Alertmanager-style alert posted when the heartbeat expires (firing).
heartbeat_expired_payload = {
    "endsAt": "",
    "labels": {"alertname": heartbeat_expired_title},
    "status": "firing",
    "startsAt": "",
    "annotations": {
        "message": heartbeat_expired_message,
    },
    "generatorURL": None,
}
heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
# Matching alert posted when the heartbeat comes back (resolved).
heartbeat_restored_payload = {
    "endsAt": "",
    "labels": {"alertname": heartbeat_restored_title},
    "status": "resolved",
    "startsAt": "",
    "annotations": {"message": heartbeat_restored_message},
    "generatorURL": None,
}
|
administration | views | # This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
import collections
import logging
import os
import shutil
import subprocess
import components.administration.views_processing as processing_views
import components.decorators as decorators
import components.helpers as helpers
import storageService as storage_service
from components.administration.forms import (
AgentForm,
ChecksumSettingsForm,
GeneralSettingsForm,
HandleForm,
StorageSettingsForm,
TaxonomyTermForm,
)
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.db.models import Max, Min
from django.http import Http404, HttpResponseNotAllowed, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.template.defaultfilters import filesizeformat
from django.urls import reverse
from django.utils.translation import ugettext as _
from installer.steps import setup_pipeline_in_ss
from main import models
from version import get_full_version, get_preservation_system_identifier
logger = logging.getLogger("archivematica.dashboard")
""" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Administration
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ """
def administration(request):
    """Administration landing page: forward to the processing configuration."""
    target = "administration:processing"
    return redirect(target)
def failure_report(request, report_id=None):
    """Show one failure report, or a paginated list when no id is given.

    The templates receive their context via ``locals()``, so the local
    variable names below are part of the template contract.
    """
    if report_id is None:
        current_page_number = request.GET.get("page", "1")
        items_per_page = 10
        reports = models.Report.objects.all().order_by("-created")
        page = helpers.pager(reports, items_per_page, current_page_number)
        return render(request, "administration/reports/failures.html", locals())
    report = models.Report.objects.get(pk=report_id)
    return render(request, "administration/reports/failure_detail.html", locals())
def delete_context(request, report_id):
    """Build the confirmation-dialog context for deleting a failure report."""
    report = models.Report.objects.get(pk=report_id)
    return {
        "action": "Delete",
        "prompt": "Delete failure report for %s?" % report.unitname,
        "cancel_url": reverse("administration:reports_failures_index"),
    }
@decorators.confirm_required("simple_confirm.html", delete_context)
def failure_report_delete(request, report_id):
    """Delete a failure report after confirmation and return to the index."""
    report = models.Report.objects.get(pk=report_id)
    report.delete()
    messages.info(request, _("Deleted."))
    return redirect("administration:failure_report_index")
def failure_report_detail(request):
    """Render the failure report detail template.

    NOTE(review): no report is looked up here; the template only receives
    ``request`` via ``locals()`` -- presumably the template needs nothing
    else, verify against the template.
    """
    return render(
        request, "administration/reports/failure_report_detail.html", locals()
    )
def atom_levels_of_description(request):
    """List and edit AtoM levels of description.

    POST operations: ``promote``/``demote`` move a level one position in the
    sort order; ``delete`` removes it.  Always re-renders the full list with
    the current min/max sortorder so the template can suppress arrows at the
    ends of the list.
    """
    if request.method == "POST":
        level_operation = request.POST.get("operation")
        level_id = request.POST.get("id")
        if level_operation == "promote":
            if _atom_levels_of_description_sort_adjust(level_id, "promote"):
                messages.info(request, _("Promoted."))
            else:
                messages.error(
                    request, _("Error attempting to promote level of description.")
                )
        elif level_operation == "demote":
            if _atom_levels_of_description_sort_adjust(level_id, "demote"):
                messages.info(request, _("Demoted."))
            else:
                messages.error(
                    request, _("Error attempting to demote level of description.")
                )
        elif level_operation == "delete":
            try:
                level = models.LevelOfDescription.objects.get(id=level_id)
                level.delete()
                messages.info(request, _("Deleted."))
            except models.LevelOfDescription.DoesNotExist:
                messages.error(request, _("Level of description not found."))
    # Min/max sortorder let the template hide promote/demote at the extremes.
    levels = models.LevelOfDescription.objects.order_by("sortorder")
    sortorder_min = models.LevelOfDescription.objects.aggregate(min=Min("sortorder"))[
        "min"
    ]
    sortorder_max = models.LevelOfDescription.objects.aggregate(max=Max("sortorder"))[
        "max"
    ]
    return render(
        request,
        "administration/atom_levels_of_description.html",
        {
            "levels": levels,
            "sortorder_min": sortorder_min,
            "sortorder_max": sortorder_max,
        },
    )
def _atom_levels_of_description_sort_adjust(level_id, sortorder="promote"):
    """
    Move LevelOfDescription with level_id up or down one position.

    :param int level_id: ID of LevelOfDescription to adjust
    :param string sortorder: 'promote' to move the level up (smaller
        sortorder), 'demote' to move it down (larger sortorder)
    :returns: True if success, False otherwise.
    """
    try:
        level = models.LevelOfDescription.objects.get(id=level_id)
        # Find the adjacent level to swap sort positions with.
        if sortorder == "demote":
            neighbour = models.LevelOfDescription.objects.order_by(
                "sortorder"
            ).filter(sortorder__gt=level.sortorder)[:1][0]
        elif sortorder == "promote":
            neighbour = models.LevelOfDescription.objects.order_by(
                "-sortorder"
            ).filter(sortorder__lt=level.sortorder)[:1][0]
        else:
            # Previously an unknown operation fell through and raised
            # UnboundLocalError below; fail cleanly instead.
            return False
    except (models.LevelOfDescription.DoesNotExist, IndexError):
        return False
    # Swap the two sort orders and persist both rows.
    level.sortorder, neighbour.sortorder = neighbour.sortorder, level.sortorder
    level.save()
    neighbour.save()
    return True
def storage(request):
    """Return storage service locations related with this pipeline.
    Exclude locations for currently processing, AIP recovery and SS internal
    purposes and disabled locations. Format used, quota and purpose values to
    human readable form.
    """
    try:
        response_locations = storage_service.get_location()
    except Exception:
        # Was a bare ``except:`` which also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the best-effort behaviour without that.
        messages.warning(
            request,
            _(
                "Error retrieving locations: is the storage server running? "
                "Please contact an administrator."
            ),
        )
        return render(request, "administration/locations.html")
    # Currently processing, AIP recovery and SS internal locations
    # are intentionally not included to not display them in the table.
    purposes = {
        "AS": _("AIP Storage"),
        "DS": _("DIP Storage"),
        "SD": _("FEDORA Deposits"),
        "BL": _("Transfer Backlog"),
        "TS": _("Transfer Source"),
        "RP": _("Replicator"),
    }
    # Filter and format locations
    locations = []
    for loc in response_locations:
        # Skip disabled locations
        if not loc["enabled"]:
            continue
        # Skip unwanted purposes (membership test directly on the dict).
        if not loc["purpose"] or loc["purpose"] not in purposes:
            continue
        # Only show usage of AS and DS locations
        loc["show_usage"] = loc["purpose"] in ["AS", "DS"]
        if loc["show_usage"]:
            # Show unlimited for unset quotas
            if not loc["quota"]:
                loc["quota"] = _("unlimited")
            # Format bytes to human readable filesize
            else:
                loc["quota"] = filesizeformat(loc["quota"])
            if loc["used"]:
                loc["used"] = filesizeformat(loc["used"])
        # Format purpose
        loc["purpose"] = purposes[loc["purpose"]]
        locations.append(loc)
    # Sort by purpose
    locations.sort(key=lambda loc: loc["purpose"])
    return render(request, "administration/locations.html", {"locations": locations})
def usage(request):
    """Return page summarizing storage usage.
    To avoid timeouts on the first load of this page, the usage data is only
    calculated when the `calculate` get parameter is evaluated as `True` (true
    values are "true", "yes", "on" and "1", case insensitive). When the usage data
    is not calculated, the page shows a description and a button to reload the page
    calculating the usage. When the usage is calculated, the page will display a
    general information section and a table of clearable directories from within
    the shared path.
    """
    calculate = request.GET.get("calculate", "")
    calculate_usage = calculate.lower() in ["true", "yes", "on", "1"]
    if calculate_usage:
        # Volume-level stats for the filesystem that holds the shared directory.
        root_path = _get_mount_point_path(django_settings.SHARED_DIRECTORY)
        root = {
            "path": root_path,
            "size": _usage_check_directory_volume_size(root_path),
            "used": _usage_get_directory_used_bytes(root_path),
        }
        shared = {
            "path": django_settings.SHARED_DIRECTORY,
            "used": _usage_get_directory_used_bytes(django_settings.SHARED_DIRECTORY),
        }
        usage_dirs = _get_shared_dirs(calculate_usage=True)
    # Template context comes from locals(); the names above are part of the
    # template contract.
    return render(request, "administration/usage.html", locals())
def _get_shared_dirs(calculate_usage=False):
    """Get shared directories information.
    Get information about directories that can be cleared manually within the
    SHARED_DIRECTORY setting path.
    :param bool calculate_usage: True if usage should be calculated.
    :returns OrderedDict: Dict where key is a descriptive handle and value is a
        dict with the path, description and optionally usage.
    """
    shared_path = django_settings.SHARED_DIRECTORY
    dirs = collections.OrderedDict(
        (
            (
                "transfers",
                {
                    "description": "Transfers",
                    "path": os.path.join(shared_path, "completed", "transfers"),
                },
            ),
            (
                "dip_backups",
                {
                    "description": "DIP backups",
                    "path": os.path.join(shared_path, "DIPbackups"),
                },
            ),
            (
                "failed",
                {"description": "Failed", "path": os.path.join(shared_path, "failed")},
            ),
            (
                "rejected",
                {
                    "description": "Rejected",
                    "path": os.path.join(shared_path, "rejected"),
                },
            ),
            (
                "sip_backups",
                {
                    "description": "SIP backups",
                    "path": os.path.join(shared_path, "SIPbackups"),
                },
            ),
            (
                "tmp",
                {
                    "description": "Temporary file storage",
                    "path": os.path.join(shared_path, "tmp"),
                },
            ),
            (
                "uploaded_dips",
                {
                    "description": "Uploaded DIPs",
                    "path": os.path.join(
                        shared_path, "watchedDirectories", "uploadedDIPs"
                    ),
                },
            ),
        )
    )
    if calculate_usage:
        # Loop variable renamed from ``id``, which shadowed the builtin.
        for dir_id, dir_spec in dirs.items():
            dir_spec["used"] = _usage_get_directory_used_bytes(dir_spec["path"])
    return dirs
def _get_mount_point_path(path):
"""Get the mount point path from a directory.
:param str path: path to check.
:returns: mount point path.
"""
path = os.path.realpath(os.path.abspath(path))
while path != os.path.sep:
if os.path.ismount(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
return path
def _usage_check_directory_volume_size(path):
"""Check the size of the volume containing a given path.
:param str path: path to check.
:returns: size in bytes, or 0 on error.
"""
try:
# Get volume size (in 1K blocks)
output = subprocess.check_output(["df", "--block-size", "1024", path]).decode(
"utf8"
)
# Second line returns disk usage-related values
usage_summary = output.split("\n")[1]
# Split value by whitespace and size (in blocks)
size = usage_summary.split()[1]
return int(size) * 1024
except OSError:
logger.exception("No such directory: %s", path)
return 0
except subprocess.CalledProcessError:
logger.exception("Unable to determine size of %s", path)
return 0
def _usage_get_directory_used_bytes(path):
"""Check the space used at a given path.
:param string path: path to check.
:returns: usage in bytes.
"""
try:
output = subprocess.check_output(
["du", "--one-file-system", "--bytes", "--summarize", path]
).decode("utf8")
return output.split("\t")[0]
except OSError:
logger.exception("No such directory: %s", path)
return 0
except subprocess.CalledProcessError as err:
# CalledProcessErrors are typically the result of du returning
# 1 because there were some directories in the path for which
# the archivematica user doesn't have permissions. In this case
# du still prints an output to stdout so we try to catch it
# from CalledProcessError.output.
byte_count = 0
try:
byte_count = int(err.output.decode("utf8").split("\t")[0])
logger.warning(
"Non-zero exit code while determining usage of %s. "
"Some directories may be missing from total.",
path,
)
except (AttributeError, ValueError):
logger.exception("Unable to determine usage of %s.", path)
return byte_count
def _usage_clear_context(request, dir_id):
    """Confirmation context for emptying a directory from _get_shared_dirs.
    :param dir_id: Key for the directory in _get_shared_dirs.
    """
    dir_info = _get_shared_dirs().get(dir_id)
    if not dir_info:
        raise Http404
    return {
        "action": _("Clear"),
        "prompt": _("Clear %(dir)s?") % {"dir": dir_info["description"]},
        "cancel_url": reverse("administration:usage"),
    }
@user_passes_test(lambda u: u.is_superuser, login_url="/forbidden/")
@decorators.confirm_required("simple_confirm.html", _usage_clear_context)
def usage_clear(request, dir_id):
    """Clears a directory from _get_shared_dirs.
    :param dir_id: Descriptive shorthand for the dir, key for _get_shared_dirs.
    """
    if request.method != "POST":
        # Fix: HttpResponseNotAllowed requires the list of permitted methods;
        # calling it with no arguments raised TypeError at runtime.
        return HttpResponseNotAllowed(["POST"])
    usage_dirs = _get_shared_dirs()
    dir_info = usage_dirs.get(dir_id, None)
    if not dir_info:
        raise Http404
    try:
        # Remove every entry inside the directory, keeping the directory itself.
        for entry in os.listdir(dir_info["path"]):
            entry_path = os.path.join(dir_info["path"], entry)
            if os.path.isfile(entry_path):
                os.unlink(entry_path)
            else:
                shutil.rmtree(entry_path)
        message = _("Cleared: %(path)s") % {"path": dir_info["path"]}
        messages.info(request, message)
    except OSError:
        message = _("No such file or directory: %(path)s") % {"path": dir_info["path"]}
        messages.error(request, message)
        logger.exception(message)
    return redirect("administration:usage")
def processing(request):
    """Delegate the processing-configuration screen to the processing views."""
    return processing_views.index(request)
def handle_config(request):
    """Display or save the Handle configuration form, which allows for the
    specification of configuration values for Handle PID creation and binding
    using the ``bindpid`` module. State is stored in DashboardSettings table.
    """
    if request.method == "POST":
        form = HandleForm(request.POST)
        if form.is_valid():
            models.DashboardSetting.objects.set_dict("handle", form.cleaned_data)
            messages.info(request, _("Saved."))
    else:
        settings_dict = models.DashboardSetting.objects.get_dict("handle")
        # Stored values are strings; only the literal "False" maps to boolean
        # False, anything else (including a missing key) defaults to True.
        settings_dict["pid_request_verify_certs"] = {"False": False}.get(
            settings_dict.get("pid_request_verify_certs", True), True
        )
        form = HandleForm(initial=settings_dict)
    return render(request, "administration/handle_config.html", {"form": form})
def premis_agent(request):
    """Display or update the default organization PREMIS agent.

    Template context is supplied via ``locals()`` (``agent`` and ``form``).
    """
    agent = models.Agent.objects.default_organization_agent()
    # Check the HTTP method, not request.POST truthiness, for consistency
    # with the other views here; an empty POST body is still a POST.
    if request.method == "POST":
        form = AgentForm(request.POST, instance=agent)
        if form.is_valid():
            # Persist before announcing success so a failing save is not
            # reported as "Saved.".
            form.save()
            messages.info(request, _("Saved."))
    else:
        form = AgentForm(instance=agent)
    return render(request, "administration/premis_agent.html", locals())
def api(request):
    """Display or save the API access allowlist setting.

    Template context comes from ``locals()`` (``allowlist``).
    """
    if request.method == "POST":
        allowlist = request.POST.get("allowlist", "")
        helpers.set_api_allowlist(allowlist)
        messages.info(request, _("Saved."))
    else:
        allowlist = helpers.get_api_allowlist()
    return render(request, "administration/api.html", locals())
def taxonomy(request):
    """Paginated list of all taxonomies ordered by name (context via locals())."""
    taxonomies = models.Taxonomy.objects.all().order_by("name")
    page = helpers.pager(taxonomies, 20, request.GET.get("page", 1))
    return render(request, "administration/taxonomy.html", locals())
def terms(request, taxonomy_uuid):
    """Paginated list of one taxonomy's terms (context via locals())."""
    taxonomy = models.Taxonomy.objects.get(pk=taxonomy_uuid)
    terms = taxonomy.taxonomyterm_set.order_by("term")
    page = helpers.pager(terms, 20, request.GET.get("page", 1))
    return render(request, "administration/terms.html", locals())
def term_detail(request, term_uuid):
    """Display or update a single taxonomy term (context via locals())."""
    term = models.TaxonomyTerm.objects.get(pk=term_uuid)
    taxonomy = term.taxonomy
    # Check the HTTP method, not request.POST truthiness, for consistency
    # with the other views here; an empty POST body is still a POST.
    if request.method == "POST":
        form = TaxonomyTermForm(request.POST, instance=term)
        if form.is_valid():
            form.save()
            messages.info(request, _("Saved."))
    else:
        form = TaxonomyTermForm(instance=term)
    return render(request, "administration/term_detail.html", locals())
def term_delete_context(request, term_uuid):
    """Build the confirmation-dialog context for deleting a taxonomy term."""
    term = models.TaxonomyTerm.objects.get(pk=term_uuid)
    return {
        "action": "Delete",
        "prompt": "Delete term %s?" % term.term,
        "cancel_url": reverse("administration:term", args=[term_uuid]),
    }
@decorators.confirm_required("simple_confirm.html", term_delete_context)
def term_delete(request, term_uuid):
    """Delete a term (POST only) and return to its taxonomy's term list."""
    if request.method != "POST":
        # Mirrors the original's implicit None for non-POST requests.
        return None
    term = models.TaxonomyTerm.objects.get(pk=term_uuid)
    term.delete()
    return HttpResponseRedirect(
        reverse("administration:terms", args=[term.taxonomy_id])
    )
def _intial_settings_data():
    """Return all DashboardSetting rows as a {name: value} dict.

    NOTE(review): the name keeps the historical 'intial' typo because
    callers reference it.
    """
    pairs = models.DashboardSetting.objects.all().values_list("name", "value")
    return dict(pairs)
def general(request):
    """Display or save the general, storage and checksum settings forms.

    Also verifies that this pipeline is registered with the Storage Service,
    offering to register it when the SS reports it unknown (404).  Template
    context comes from ``locals()``.
    """
    initial_data = _intial_settings_data()
    # Stored values are strings; only the literal "False" maps to False.
    initial_data["storage_service_use_default_config"] = {"False": False}.get(
        initial_data.get("storage_service_use_default_config", True), True
    )
    general_form = GeneralSettingsForm(
        request.POST or None, prefix="general", initial=initial_data
    )
    storage_form = StorageSettingsForm(
        request.POST or None, prefix="storage", initial=initial_data
    )
    checksum_form = ChecksumSettingsForm(
        request.POST or None, prefix="checksum algorithm", initial=initial_data
    )
    forms = (general_form, storage_form, checksum_form)
    if all([form.is_valid() for form in forms]):
        for item in forms:
            item.save()
        messages.info(request, _("Saved."))
    dashboard_uuid = helpers.get_setting("dashboard_uuid")
    not_created_yet = False
    try:
        pipeline = storage_service.get_pipeline(dashboard_uuid)
    except Exception as err:
        # Fix: not every exception carries a ``response`` attribute; getattr
        # keeps unrelated errors from raising AttributeError in this handler.
        if getattr(err, "response", None) is not None and err.response.status_code == 404:
            # The server has returned a 404, we're going to assume that this is
            # the Storage Service telling us that the pipeline is unknown.
            not_created_yet = True
        else:
            messages.warning(
                request,
                # Fix: translate first, then interpolate; interpolating inside
                # _() produces a msgid that never matches the catalog.
                _(
                    "Storage Service inaccessible. Please"
                    " contact an administrator or update"
                    " the Storage Sevice URL below."
                    "<hr />%(error)s"
                )
                % {"error": err},
            )
    if not_created_yet:
        if storage_form.is_valid():
            try:
                setup_pipeline_in_ss(
                    storage_form.cleaned_data["storage_service_use_default_config"]
                )
            except Exception as err:
                messages.warning(
                    request,
                    _(
                        "Storage Service failed to create the"
                        " pipeline. This can happen if"
                        " the pipeline exists but it is"
                        " disabled. Please contact an"
                        " administrator."
                        "<hr />%(error)s"
                    )
                    % {"error": err},
                )
        else:
            messages.warning(
                request,
                _(
                    "Storage Service returned a 404 error."
                    " Has the pipeline been disabled or is"
                    " it not registered yet? Submitting"
                    " form will attempt to register the"
                    " pipeline."
                ),
            )
    return render(request, "administration/general.html", locals())
def version(request):
    """Render the dashboard version and preservation system identifier."""
    context = {
        "version": get_full_version(),
        "agent_code": get_preservation_system_identifier(),
    }
    return render(request, "administration/version.html", context)
|
benchmarks | helpers | import os
import sys
from contextlib import contextmanager
from functools import wraps
from os.path import dirname
from django.utils.timezone import now
# Point Django at the test database and settings before django.setup() runs.
os.environ["POSTHOG_DB_NAME"] = "posthog_test"
os.environ["DJANGO_SETTINGS_MODULE"] = "posthog.settings"
# Make the repository root importable when this helper is run standalone.
sys.path.append(dirname(dirname(dirname(__file__))))
import django  # noqa: E402
# Initialise Django so the posthog imports below work.
django.setup()
from ee.clickhouse.materialized_columns.columns import get_materialized_columns # noqa: E402
from posthog import client # noqa: E402
from posthog.clickhouse.query_tagging import reset_query_tags # noqa: E402
from posthog.clickhouse.query_tagging import tag_queries
from posthog.models.utils import UUIDT # noqa: E402
def get_column(rows, index):
    """Return the *index*-th element of every row (one column of a result set).

    Defined with ``def`` rather than a lambda assignment (PEP 8 E731) so the
    function has a proper name and docstring.
    """
    return [row[index] for row in rows]
def run_query(fn, *args):
    """Run *fn* with ClickHouse query tagging enabled and return its stats.

    A fresh UUID tags every query issued by *fn* so the statistics can be
    recovered from system.query_log afterwards; tags are always reset, even
    when *fn* raises.
    """
    uuid = str(UUIDT())
    # NOTE(review): the '$' before fn.__name__ looks accidental -- the matcher
    # in get_clickhouse_query_stats only searches for 'benchmark:<uuid>';
    # verify against the query_tagging comment format.
    tag_queries(kind="benchmark", id=f"{uuid}::${fn.__name__}")
    try:
        fn(*args)
        return get_clickhouse_query_stats(uuid)
    finally:
        reset_query_tags()
def get_clickhouse_query_stats(uuid):
    """Aggregate duration, rows, bytes and memory for queries tagged *uuid*.

    Flushes ClickHouse logs first so queries that just finished are visible
    in system.query_log.
    """
    client.sync_execute("SYSTEM FLUSH LOGS")
    rows = client.sync_execute(
        f"""
    SELECT
        query_duration_ms,
        read_rows,
        read_bytes,
        memory_usage
    FROM system.query_log
    WHERE
        query NOT LIKE '%%query_log%%'
        AND query LIKE %(matcher)s
        AND type = 'QueryFinish'
    """,
        {"matcher": f"%benchmark:{uuid}%"},
    )
    return {
        "query_count": len(rows),
        "ch_query_time": int(sum(get_column(rows, 0))),
        "read_rows": sum(get_column(rows, 1)),
        "read_bytes": sum(get_column(rows, 2)),
        "memory_usage": sum(get_column(rows, 3)),
    }
def benchmark_clickhouse(fn):
    """Decorator: run *fn* four times and report ClickHouse query-time samples."""
    @wraps(fn)
    def inner(*args):
        samples = []
        for _ in range(4):
            stats = run_query(fn, *args)
            samples.append(stats["ch_query_time"])
        return {"samples": samples, "number": len(samples)}
    return inner
@contextmanager
def no_materialized_columns():
    """Run the enclosed block without any materialized columns being used in
    queries, by priming the column cache with empty results."""
    get_materialized_columns._cache = {
        ("events",): (now(), {}),
        ("person",): (now(), {}),
    }
    try:
        yield
    finally:
        # Fix: always invalidate the cache, even if the block raises;
        # previously an exception left the empty column set in place.
        get_materialized_columns._cache = {}
|
uhd | replaymsgpushbutton | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2023 Ettus Research, a National Instruments Brand.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import pmt
from gnuradio import gr
from PyQt5 import Qt
class ReplayMsgPushButton(gr.sync_block, Qt.QPushButton):
    """
    This block creates a variable push button that creates a message
    when clicked. The message will be formatted as a dictionary to pass
    to the RFNoC Replay block.

    lbl -- button label text.
    relBackColor/relFontColor -- CSS colors for the button ("default" /
        falsy keeps the theme defaults).
    command, port, offset, size, time, repeat -- fields of the replay
        command dictionary published when the button is clicked.
    """
    def __init__(
        self,
        lbl,
        relBackColor,
        relFontColor,
        command,
        port,
        offset=-1,
        size=-1,
        time=-1,
        repeat=False,
    ):
        # Initialise both bases explicitly: a message-only GNU Radio block
        # (no stream I/O) and a Qt push button showing *lbl*.
        gr.sync_block.__init__(
            self, name="ReplayMsgPushButton", in_sig=None, out_sig=None
        )
        Qt.QPushButton.__init__(self, lbl)
        self.lbl = lbl
        # Dictionary published on each click; optional fields added below.
        self.replayDict = {"command": command}
        self.replayDict["port"] = port
        self.replayDict["repeat"] = repeat
        # If the user does not specify a command offset, size, or time,
        # don't add to the command. The properties will be used for
        # offset and size, and the command will happen immediately.
        if offset != -1:
            self.replayDict["offset"] = offset
        if size != -1:
            self.replayDict["size"] = size
        if time != -1:
            self.replayDict["time"] = time
        # Build an inline stylesheet from the requested colors, if any.
        styleStr = ""
        if relBackColor != "default":
            styleStr = "background-color: " + relBackColor + "; "
        if relFontColor:
            styleStr += "color: " + relFontColor + "; "
        self.setStyleSheet(styleStr)
        self.clicked[bool].connect(self.onBtnClicked)
        self.message_port_register_out(pmt.intern("pressed"))
    def set_command(self, command):
        """Update the 'command' field of the published dictionary."""
        self.replayDict["command"] = command
    def set_port(self, port):
        """Update the 'port' field of the published dictionary."""
        self.replayDict["port"] = port
    def set_size(self, size):
        """Update the 'size' field of the published dictionary."""
        self.replayDict["size"] = size
    def set_offset(self, offset):
        """Update the 'offset' field of the published dictionary."""
        self.replayDict["offset"] = offset
    def set_time(self, time):
        """Update the 'time' field of the published dictionary."""
        self.replayDict["time"] = time
    def set_repeat(self, repeat):
        """Update the 'repeat' field of the published dictionary."""
        self.replayDict["repeat"] = repeat
    def onBtnClicked(self, pressed):
        """Publish the replay dictionary on the 'pressed' message port."""
        self.message_port_pub(pmt.intern("pressed"), pmt.to_pmt(self.replayDict))
|
core | preferencesmanager | #
# Copyright (C) 2008-2010 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
import os
import platform
import random
import threading
from urllib.parse import quote_plus
from urllib.request import urlopen
import deluge.common
import deluge.component as component
import deluge.configmanager
from deluge._libtorrent import lt
from deluge.event import ConfigValueChangedEvent
from twisted.internet.task import LoopingCall
# Optional GeoIP support: prefer the C-based GeoIP bindings and fall back to
# the pure-python pygeoip implementation; GeoIP stays None when neither is
# installed.
GeoIP = None
try:
    from GeoIP import GeoIP
except ImportError:
    try:
        from pygeoip import GeoIP
    except ImportError:
        pass
log = logging.getLogger(__name__)
# Defaults for every preference key managed by PreferencesManager.
DEFAULT_PREFS = {
    # Anonymous usage statistics
    "send_info": False,
    "info_sent": 0.0,
    # Daemon / remote access
    "daemon_port": 58846,
    "allow_remote": False,
    "pre_allocate_storage": False,
    "download_location": deluge.common.get_default_download_dir(),
    # Network listening
    "listen_ports": [6881, 6891],
    "listen_interface": "",
    "outgoing_interface": "",
    "random_port": True,
    "listen_random_port": None,
    "listen_use_sys_port": False,
    "listen_reuse_port": True,
    "outgoing_ports": [0, 0],
    "random_outgoing_ports": True,
    "copy_torrent_file": False,
    "del_copy_torrent_file": False,
    "torrentfiles_location": deluge.common.get_default_download_dir(),
    "plugins_location": os.path.join(deluge.configmanager.get_config_dir(), "plugins"),
    "prioritize_first_last_pieces": False,
    "sequential_download": False,
    # Peer discovery / protocol extensions
    "dht": True,
    "upnp": True,
    "natpmp": True,
    "utpex": True,
    "lsd": True,
    # Protocol encryption
    "enc_in_policy": 1,
    "enc_out_policy": 1,
    "enc_level": 2,
    # Bandwidth / connection limits
    "max_connections_global": 200,
    "max_upload_speed": -1.0,
    "max_download_speed": -1.0,
    "max_upload_slots_global": 4,
    # Windows Vista caps concurrent half-open TCP connections; use a
    # conservative limit there.  (Replaces an equivalent but convoluted
    # immediately-invoked nested-lambda expression.)
    "max_half_open_connections": (
        (4 if deluge.common.vista_check() else 8)
        if deluge.common.windows_check()
        else 50
    ),
    "max_connections_per_second": 20,
    "ignore_limits_on_local_network": True,
    "max_connections_per_torrent": -1,
    "max_upload_slots_per_torrent": -1,
    "max_upload_speed_per_torrent": -1,
    "max_download_speed_per_torrent": -1,
    "enabled_plugins": [],
    "add_paused": False,
    # Queueing
    "max_active_seeding": 5,
    "max_active_downloading": 3,
    "max_active_limit": 8,
    "dont_count_slow_torrents": False,
    "queue_new_to_top": False,
    "stop_seed_at_ratio": False,
    "remove_seed_at_ratio": False,
    "stop_seed_ratio": 2.00,
    "share_ratio_limit": 2.00,
    "seed_time_ratio_limit": 7.00,
    "seed_time_limit": 180,
    "auto_managed": True,
    "move_completed": False,
    "move_completed_path": deluge.common.get_default_download_dir(),
    "move_completed_paths_list": [],
    "download_location_paths_list": [],
    # Path chooser UI
    "path_chooser_show_chooser_button_on_localhost": True,
    "path_chooser_auto_complete_enabled": True,
    "path_chooser_accelerator_string": "Tab",
    "path_chooser_max_popup_rows": 20,
    "path_chooser_show_hidden_files": False,
    "new_release_check": True,
    # Proxy configuration (a single nested dict)
    "proxy": {
        "type": 0,
        "hostname": "",
        "username": "",
        "password": "",
        "port": 8080,
        "proxy_hostnames": True,
        "proxy_peer_connections": True,
        "proxy_tracker_connections": True,
        "force_proxy": False,
        "anonymous_mode": False,
    },
    "peer_tos": "0x00",
    "rate_limit_ip_overhead": True,
    "geoip_db_location": "/usr/share/GeoIP/GeoIP.dat",
    "cache_size": 512,
    "cache_expiry": 60,
    "auto_manage_prefer_seeds": False,
    "shared": False,
    "super_seeding": False,
}
class PreferencesManager(component.Component):
    def __init__(self):
        component.Component.__init__(self, "PreferencesManager")
        self.config = deluge.configmanager.ConfigManager("core.conf", DEFAULT_PREFS)
        # One-time migrations from older config layouts: fold the legacy
        # "proxies", "i2p_proxy" and "anonymous_mode" keys into "proxy".
        if "proxies" in self.config:
            log.warning(
                'Updating config file for proxy, using "peer" values to fill new "proxy" setting'
            )
            self.config["proxy"].update(self.config["proxies"]["peer"])
            log.warning("New proxy config is: %s", self.config["proxy"])
            del self.config["proxies"]
        if "i2p_proxy" in self.config and self.config["i2p_proxy"]["hostname"]:
            self.config["proxy"].update(self.config["i2p_proxy"])
            # Proxy type 6 is I2P (matches lt.proxy_type_t.i2p_proxy).
            self.config["proxy"]["type"] = 6
            del self.config["i2p_proxy"]
        if "anonymous_mode" in self.config:
            self.config["proxy"]["anonymous_mode"] = self.config["anonymous_mode"]
            del self.config["anonymous_mode"]
        # Backfill proxy sub-keys introduced by newer releases.
        if "proxy" in self.config:
            for key in DEFAULT_PREFS["proxy"]:
                if key not in self.config["proxy"]:
                    self.config["proxy"][key] = DEFAULT_PREFS["proxy"][key]
        self.core = component.get("Core")
        # LoopingCall for the periodic new-release check; created on demand.
        self.new_release_timer = None
    def start(self):
        """Apply all stored preferences once, then watch for config changes."""
        # Set the initial preferences on start-up
        for key in DEFAULT_PREFS:
            self.do_config_set_func(key, self.config[key])
        self.config.register_change_callback(self._on_config_value_change)
def stop(self):
if self.new_release_timer and self.new_release_timer.running:
self.new_release_timer.stop()
# Config set functions
def do_config_set_func(self, key, value):
on_set_func = getattr(self, "_on_set_" + key, None)
if on_set_func:
if log.isEnabledFor(logging.DEBUG):
log.debug("Config key: %s set to %s..", key, value)
on_set_func(key, value)
    def _on_config_value_change(self, key, value):
        # Only apply changes once the component is running, but always
        # broadcast the change event so other components can react.
        if self.get_state() == "Started":
            self.do_config_set_func(key, value)
        component.get("EventManager").emit(ConfigValueChangedEvent(key, value))
def _on_set_torrentfiles_location(self, key, value):
if self.config["copy_torrent_file"]:
try:
os.makedirs(value)
except OSError as ex:
log.debug("Unable to make directory: %s", ex)
    def _on_set_listen_ports(self, key, value):
        """Reapply listen settings when the port range changes."""
        self.__set_listen_on()
    def _on_set_listen_interface(self, key, value):
        """Reapply listen settings when the bind interface changes."""
        self.__set_listen_on()
    def _on_set_outgoing_interface(self, key, value):
        """Set interface name or IP address for outgoing BitTorrent connections."""
        value = value.strip() if value else ""
        self.core.apply_session_settings({"outgoing_interfaces": value})
    def _on_set_random_port(self, key, value):
        """Reapply listen settings when random-port mode is toggled."""
        self.__set_listen_on()
    def __set_listen_on(self):
        """Set the ports and interface address to listen for incoming connections on."""
        if self.config["random_port"]:
            # Pick (and remember) one random port so restarts reuse it.
            if not self.config["listen_random_port"]:
                self.config["listen_random_port"] = random.randrange(49152, 65525)
            listen_ports = [
                self.config["listen_random_port"]
            ] * 2  # use single port range
        else:
            # Leaving random mode clears the remembered random port.
            self.config["listen_random_port"] = None
            listen_ports = self.config["listen_ports"]
        if self.config["listen_interface"]:
            interface = self.config["listen_interface"].strip()
        else:
            interface = "0.0.0.0"
        log.debug(
            "Listen Interface: %s, Ports: %s with use_sys_port: %s",
            interface,
            listen_ports,
            self.config["listen_use_sys_port"],
        )
        # libtorrent expects a comma-separated "interface:port" list.
        interfaces = [
            f"{interface}:{port}"
            for port in range(listen_ports[0], listen_ports[1] + 1)
        ]
        self.core.apply_session_settings(
            {
                "listen_system_port_fallback": self.config["listen_use_sys_port"],
                "listen_interfaces": ",".join(interfaces),
            }
        )
    def _on_set_outgoing_ports(self, key, value):
        """Reapply outgoing port settings when the configured range changes."""
        self.__set_outgoing_ports()
    def _on_set_random_outgoing_ports(self, key, value):
        """Reapply outgoing port settings when random mode is toggled."""
        self.__set_outgoing_ports()
def __set_outgoing_ports(self):
port = (
0
if self.config["random_outgoing_ports"]
else self.config["outgoing_ports"][0]
)
if port:
num_ports = (
self.config["outgoing_ports"][1] - self.config["outgoing_ports"][0]
)
num_ports = num_ports if num_ports > 1 else 5
else:
num_ports = 0
log.debug("Outgoing port set to %s with range: %s", port, num_ports)
self.core.apply_session_settings(
{"outgoing_port": port, "num_outgoing_ports": num_ports}
)
    def _on_set_peer_tos(self, key, value):
        """Apply the peer type-of-service byte; *value* is a hex string like '0x00'."""
        try:
            self.core.apply_session_setting("peer_tos", int(value, 16))
        except ValueError as ex:
            log.error("Invalid tos byte: %s", ex)
    def _on_set_dht(self, key, value):
        """Enable/disable DHT, merging the well-known bootstrap routers into
        whatever bootstrap nodes libtorrent already has configured."""
        lt_bootstraps = self.core.session.get_settings()["dht_bootstrap_nodes"]
        # Update list of lt bootstraps, using set to remove duplicates.
        dht_bootstraps = set(
            lt_bootstraps.split(",")
            + [
                "router.bittorrent.com:6881",
                "router.utorrent.com:6881",
                "router.bitcomet.com:6881",
                "dht.transmissionbt.com:6881",
                "dht.aelitis.com:6881",
            ]
        )
        self.core.apply_session_settings(
            {"dht_bootstrap_nodes": ",".join(dht_bootstraps), "enable_dht": value}
        )
    def _on_set_upnp(self, key, value):
        """Enable/disable UPnP port mapping."""
        self.core.apply_session_setting("enable_upnp", value)
    def _on_set_natpmp(self, key, value):
        """Enable/disable NAT-PMP port mapping."""
        self.core.apply_session_setting("enable_natpmp", value)
    def _on_set_lsd(self, key, value):
        """Enable/disable local service discovery."""
        self.core.apply_session_setting("enable_lsd", value)
    def _on_set_utpex(self, key, value):
        """Enable uTorrent peer exchange.

        NOTE(review): the extension is only ever added, never removed, so
        turning this off has no effect until restart -- verify intended.
        """
        if value:
            self.core.session.add_extension("ut_pex")
    def _on_set_enc_in_policy(self, key, value):
        """Delegate to the combined encryption-settings handler."""
        self._on_set_encryption(key, value)
    def _on_set_enc_out_policy(self, key, value):
        """Delegate to the combined encryption-settings handler."""
        self._on_set_encryption(key, value)
    def _on_set_enc_level(self, key, value):
        """Delegate to the combined encryption-settings handler."""
        self._on_set_encryption(key, value)
    def _on_set_encryption(self, key, value):
        """Apply all three encryption settings together.

        The passed key/value are ignored; the current values are re-read
        from config so the three settings always go out consistently.
        """
        # Convert Deluge enc_level values to libtorrent enc_level values.
        pe_enc_level = {
            0: lt.enc_level.plaintext,
            1: lt.enc_level.rc4,
            2: lt.enc_level.both,
        }
        self.core.apply_session_settings(
            {
                "out_enc_policy": lt.enc_policy(self.config["enc_out_policy"]),
                "in_enc_policy": lt.enc_policy(self.config["enc_in_policy"]),
                "allowed_enc_level": lt.enc_level(
                    pe_enc_level[self.config["enc_level"]]
                ),
                "prefer_rc4": True,
            }
        )
    def _on_set_max_connections_global(self, key, value):
        """Global connection limit."""
        self.core.apply_session_setting("connections_limit", value)
    def _on_set_max_upload_speed(self, key, value):
        """Global upload limit; -1 means unlimited."""
        # We need to convert Kb/s to B/s
        value = -1 if value < 0 else int(value * 1024)
        self.core.apply_session_setting("upload_rate_limit", value)
    def _on_set_max_download_speed(self, key, value):
        """Global download limit; -1 means unlimited."""
        # We need to convert Kb/s to B/s
        value = -1 if value < 0 else int(value * 1024)
        self.core.apply_session_setting("download_rate_limit", value)
    def _on_set_max_upload_slots_global(self, key, value):
        """Global unchoke (upload) slot limit."""
        self.core.apply_session_setting("unchoke_slots_limit", value)
    def _on_set_max_half_open_connections(self, key, value):
        """Limit on simultaneously connecting (half-open) peers."""
        self.core.apply_session_setting("half_open_limit", value)
    def _on_set_max_connections_per_second(self, key, value):
        """How many new peer connections may be opened per second."""
        self.core.apply_session_setting("connection_speed", value)
    def _on_set_ignore_limits_on_local_network(self, key, value):
        """Exempt LAN peers from the rate limits."""
        self.core.apply_session_setting("ignore_limits_on_local_network", value)
    def _on_set_share_ratio_limit(self, key, value):
        """Share ratio above which seeding may stop."""
        # This value is a float percentage in deluge, but libtorrent needs int percentage.
        self.core.apply_session_setting("share_ratio_limit", int(value * 100))
    def _on_set_seed_time_ratio_limit(self, key, value):
        """Seed-time to download-time ratio limit."""
        # This value is a float percentage in deluge, but libtorrent needs int percentage.
        self.core.apply_session_setting("seed_time_ratio_limit", int(value * 100))
    def _on_set_seed_time_limit(self, key, value):
        """Maximum seeding time."""
        # This value is stored in minutes in deluge, but libtorrent wants seconds
        self.core.apply_session_setting("seed_time_limit", int(value * 60))
    def _on_set_max_active_downloading(self, key, value):
        """Queueing: max simultaneously downloading torrents."""
        self.core.apply_session_setting("active_downloads", value)
    def _on_set_max_active_seeding(self, key, value):
        """Queueing: max simultaneously seeding torrents."""
        self.core.apply_session_setting("active_seeds", value)
    def _on_set_max_active_limit(self, key, value):
        """Queueing: overall cap on active torrents."""
        self.core.apply_session_setting("active_limit", value)
    def _on_set_dont_count_slow_torrents(self, key, value):
        """Queueing: whether slow torrents count toward the active limits."""
        self.core.apply_session_setting("dont_count_slow_torrents", value)
    def _on_set_send_info(self, key, value):
        """sends anonymous stats home"""
        log.debug("Sending anonymous stats..")
        class SendInfoThread(threading.Thread):
            # Background thread so the HTTP request never blocks the daemon.
            def __init__(self, config):
                self.config = config
                threading.Thread.__init__(self)
            def run(self):
                import time
                now = time.time()
                # check if we've done this within the last week or never
                if (now - self.config["info_sent"]) >= (60 * 60 * 24 * 7):
                    try:
                        url = (
                            "http://deluge-torrent.org/stats_get.php?processor="
                            + platform.machine()
                            + "&python="
                            + platform.python_version()
                            + "&deluge="
                            + deluge.common.get_version()
                            + "&os="
                            + platform.system()
                            + "&plugins="
                            + quote_plus(":".join(self.config["enabled_plugins"]))
                        )
                        urlopen(url)
                    except OSError as ex:
                        log.debug("Network error while trying to send info: %s", ex)
                    else:
                        # Only record success so a failed attempt retries later.
                        self.config["info_sent"] = now
        if value:
            SendInfoThread(self.config).start()
def _on_set_new_release_check(self, key, value):
if value:
log.debug("Checking for new release..")
threading.Thread(target=self.core.get_new_release).start()
if self.new_release_timer and self.new_release_timer.running:
self.new_release_timer.stop()
# Set a timer to check for a new release every 3 days
self.new_release_timer = LoopingCall(
self._on_set_new_release_check, "new_release_check", True
)
self.new_release_timer.start(72 * 60 * 60, False)
else:
if self.new_release_timer and self.new_release_timer.running:
self.new_release_timer.stop()
def _on_set_proxy(self, key, value):
    """Translate the deluge proxy config dict into libtorrent session settings.

    ``value`` is the deluge "proxy" config dict (type, hostname, port,
    username, password plus pass-through flags). I2P proxies use the
    dedicated i2p_* settings; every other non-none type uses proxy_*.
    """
    # Initialise with type none and blank hostnames.
    proxy_settings = {
        "proxy_type": lt.proxy_type_t.none,
        "i2p_hostname": "",
        "proxy_hostname": "",
        "proxy_hostnames": value["proxy_hostnames"],
        "proxy_peer_connections": value["proxy_peer_connections"],
        "proxy_tracker_connections": value["proxy_tracker_connections"],
        "force_proxy": value["force_proxy"],
        "anonymous_mode": value["anonymous_mode"],
    }
    if value["type"] == lt.proxy_type_t.i2p_proxy:
        # I2P gets its own hostname/port settings in libtorrent.
        proxy_settings.update(
            {
                "proxy_type": lt.proxy_type_t.i2p_proxy,
                "i2p_hostname": value["hostname"],
                "i2p_port": value["port"],
            }
        )
    elif value["type"] != lt.proxy_type_t.none:
        # Regular (SOCKS/HTTP) proxy with optional credentials.
        proxy_settings.update(
            {
                "proxy_type": value["type"],
                "proxy_hostname": value["hostname"],
                "proxy_port": value["port"],
                "proxy_username": value["username"],
                "proxy_password": value["password"],
            }
        )
    self.core.apply_session_settings(proxy_settings)
def _on_set_rate_limit_ip_overhead(self, key, value):
self.core.apply_session_setting("rate_limit_ip_overhead", value)
def _on_set_geoip_db_location(self, key, geoipdb_path):
    """(Re)load the GeoIP database used for per-peer country look-ups."""
    if not os.path.exists(geoipdb_path):
        log.warning("Unable to find GeoIP database file: %s", geoipdb_path)
        return
    try:
        self.core.geoip_instance = GeoIP(geoipdb_path, 0)
    except Exception as ex:
        # GeoIP loading is best-effort; country lookups simply stay off.
        log.warning("GeoIP Unavailable: %s", ex)
def _on_set_cache_size(self, key, value):
self.core.apply_session_setting("cache_size", value)
def _on_set_cache_expiry(self, key, value):
self.core.apply_session_setting("cache_expiry", value)
def _on_auto_manage_prefer_seeds(self, key, value):
self.core.apply_session_setting("auto_manage_prefer_seeds", value)
|
core | scheduler | # -*- coding: utf-8 -*-
import time
from _thread import start_new_thread
from heapq import heappop, heappush
from threading import Lock
from .utils.struct.lock import lock
class AlreadyCalled(Exception):
    """Raised when a Deferred receives a result after it already fired."""
class Deferred:
    """A tiny deferred: collects callbacks and fires them once with a result.

    There is no chaining or errback support; every registered callback
    receives the callback() arguments extended by the extra arguments
    given at registration time.
    """

    def __init__(self):
        self.call = []
        self.result = ()

    def add_callback(self, f, *cargs, **ckwargs):
        """Register *f* to be invoked on callback() with extra arguments."""
        self.call.append((f, cargs, ckwargs))

    def callback(self, *args, **kwargs):
        """Fire the deferred once; raises AlreadyCalled on re-invocation.

        Bug fix: the original invoked callbacks as ``f(*args**kwargs)``
        (missing comma — a TypeError at runtime) and let one callback's
        extra arguments leak into the next callback's argument list.
        Each callback now gets its own merged argument set.
        """
        if self.result:
            raise AlreadyCalled
        self.result = (args, kwargs)
        for f, cargs, ckwargs in self.call:
            merged_kwargs = dict(kwargs)
            merged_kwargs.update(ckwargs)
            f(*(args + tuple(cargs)), **merged_kwargs)
class Scheduler:
    """Runs queued jobs at their scheduled time via a priority queue."""

    def __init__(self, core):
        self.pyload = core
        self._ = core._
        self.queue = PriorityQueue()

    def add_job(self, t, call, args=None, kwargs=None, threaded=True):
        """Schedule *call* to fire *t* seconds from now.

        :param t: delay in seconds
        :param call: callable to invoke
        :param args: positional arguments for *call*
        :param kwargs: keyword arguments for *call*
        :param threaded: run the job in its own thread
        :return: Deferred that fires with the call's result

        Fix: mutable default arguments (``args=[], kwargs={}``) replaced
        with ``None`` sentinels to avoid sharing state between calls.
        """
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        d = Deferred()
        t += time.time()
        j = Job(t, call, args, kwargs, d, threaded)
        self.queue.put((t, j))
        return d

    def remove_job(self, d):
        """
        :param d: deferred object
        :return: if job was deleted
        """
        # Linear scan over (time, Job) tuples; last match wins.
        index = -1
        for i, j in enumerate(self.queue):
            if j[1].deferred == d:
                index = i
        if index >= 0:
            del self.queue[index]
            return True
        return False

    def run(self):
        """Main loop: start due jobs; re-queue the first not-yet-due job."""
        while True:
            t, j = self.queue.get()
            if not j:
                # Empty queue: PriorityQueue.get() returned (None, None).
                break
            else:
                if t <= time.time():
                    j.start()
                else:
                    # Not due yet: push it back and end this pass.
                    self.queue.put((t, j))
                    break
class Job:
    """A scheduled callable plus its firing time and optional deferred.

    Fix: mutable default arguments (``args=[], kwargs={}``) replaced with
    ``None`` sentinels to avoid accidental sharing between instances.
    """

    def __init__(self, time, call, args=None, kwargs=None, deferred=None, threaded=True):
        # NOTE: the parameter name ``time`` shadows the module; kept for
        # backward compatibility with keyword callers.
        self.time = float(time)
        self.call = call
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.deferred = deferred
        self.threaded = threaded

    def __lt__(self, other):
        # Arbitrary but total ordering so heap tie-breaking on equal
        # timestamps never compares Job attributes.
        return id(self) < id(other)

    def run(self):
        """Invoke the callable and forward its result to the deferred."""
        ret = self.call(*self.args, **self.kwargs)
        if self.deferred is not None:
            self.deferred.callback(ret)

    def start(self):
        """Run the job, in a new thread when ``threaded`` is set."""
        if self.threaded:
            start_new_thread(self.run, ())
        else:
            self.run()
class PriorityQueue:
    """
    a non blocking priority queue.

    Backed by a heapq list; put/get are serialized by the @lock decorator.
    """

    def __init__(self):
        self.queue = []
        self.lock = Lock()

    def __iter__(self):
        return iter(self.queue)

    def __delitem__(self, key):
        # Fix: deleting an arbitrary element breaks the heap invariant
        # that heappush/heappop rely on, so restore it after removal.
        from heapq import heapify

        del self.queue[key]
        heapify(self.queue)

    @lock
    def put(self, element):
        heappush(self.queue, element)

    @lock
    def get(self):
        """
        return element or None.
        """
        try:
            return heappop(self.queue)
        except IndexError:
            # Sentinel pair so callers can unpack ``t, j`` unconditionally.
            return None, None
|
archiver | compact_cmd | import argparse
from ..constants import * # NOQA
from ..helpers import EXIT_SUCCESS
from ..logger import create_logger
from ..manifest import Manifest
from ._common import Highlander, with_repository
logger = create_logger()
class CompactMixIn:
    """Mixin providing the ``borg compact`` command and its CLI parser."""

    @with_repository(manifest=False, exclusive=True)
    def do_compact(self, args, repository):
        """compact segment files in the repository"""
        # see the comment in do_with_lock about why we do it like this:
        data = repository.get(Manifest.MANIFEST_ID)
        repository.put(Manifest.MANIFEST_ID, data)
        # CLI gives an integer percent; the repository expects a 0..1 fraction.
        threshold = args.threshold / 100
        repository.commit(compact=True, threshold=threshold)
        return EXIT_SUCCESS

    def build_parser_compact(self, subparsers, common_parser, mid_common_parser):
        # Registers the "compact" subcommand; the epilog below is shown in
        # --help output, so its wording must stay user-facing quality.
        from ._common import process_epilog

        compact_epilog = process_epilog(
            """
        This command frees repository space by compacting segments.

        Use this regularly to avoid running out of space - you do not need to use this
        after each borg command though. It is especially useful after deleting archives,
        because only compaction will really free repository space.

        borg compact does not need a key, so it is possible to invoke it from the
        client or also from the server.

        Depending on the amount of segments that need compaction, it may take a while,
        so consider using the ``--progress`` option.

        A segment is compacted if the amount of saved space is above the percentage value
        given by the ``--threshold`` option. If omitted, a threshold of 10% is used.
        When using ``--verbose``, borg will output an estimate of the freed space.

        See :ref:`separate_compaction` in Additional Notes for more details.
        """
        )
        subparser = subparsers.add_parser(
            "compact",
            parents=[common_parser],
            add_help=False,
            description=self.do_compact.__doc__,
            epilog=compact_epilog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            help="compact segment files / free space in repo",
        )
        subparser.set_defaults(func=self.do_compact)
        subparser.add_argument(
            "--threshold",
            metavar="PERCENT",
            dest="threshold",
            type=int,
            default=10,
            action=Highlander,
            help="set minimum threshold for saved space in PERCENT (Default: 10)",
        )
|
friture | ringbuffer | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
# FIXME problem when self.offset overflows the MAXINT limit !
import logging
from numpy import zeros
class RingBuffer:
    """Circular buffer for audio samples with contiguous-window reads.

    Storage is (channels, 2 * buffer_length): every write is mirrored
    into both halves so that any window of up to ``buffer_length``
    samples can be returned as one contiguous numpy slice.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # buffer length is dynamic based on the needs
        self.buffer_length = 10000
        # mirrored storage (see class docstring); starts as single channel
        self.buffer = zeros((1, 2 * self.buffer_length))
        # total samples ever pushed; used modulo buffer_length for indexing
        self.offset = 0

    def push(self, floatdata):
        """Append ``floatdata`` (shape: channels x samples) to the buffer."""
        # update the circular buffer
        dim = floatdata.shape[0]
        l = floatdata.shape[1]
        if dim != self.buffer.shape[0]:
            # switched from single to dual channels or vice versa
            self.buffer = zeros((dim, 2 * self.buffer_length))
        self.grow_if_needed(l)
        # first copy, always complete
        offset = self.offset % self.buffer_length
        self.buffer[:, offset : offset + l] = floatdata[:, :]
        # second copy, can be folded
        direct = min(l, self.buffer_length - offset)
        folded = l - direct
        self.buffer[
            :, offset + self.buffer_length : offset + self.buffer_length + direct
        ] = floatdata[:, 0:direct]
        self.buffer[:, :folded] = floatdata[:, direct:]
        self.offset += l

    def data(self, length):
        """Return the most recent ``length`` samples as a contiguous slice."""
        self.grow_if_needed(length)
        stop = self.offset % self.buffer_length + self.buffer_length
        start = stop - length
        # Growing changes buffer_length, so recompute until indices fit.
        while stop > 2 * self.buffer_length:
            self.grow_if_needed(stop)
            stop = self.offset % self.buffer_length + self.buffer_length
            start = stop - length
        if start > 2 * self.buffer_length or start < 0:
            raise ArithmeticError(
                "Start index is wrong %d %d" % (start, self.buffer_length)
            )
        if stop > 2 * self.buffer_length:
            raise ArithmeticError(
                "Stop index is larger than buffer size: %d > %d"
                % (stop, 2 * self.buffer_length)
            )
        return self.buffer[:, start:stop]

    def data_older(self, length, delay_samples):
        """Return ``length`` samples ending ``delay_samples`` ago."""
        self.grow_if_needed(length + delay_samples)
        start = (
            self.offset - length - delay_samples
        ) % self.buffer_length + self.buffer_length
        stop = start + length
        return self.buffer[:, start:stop]

    def data_indexed(self, start, length):
        """Return ``length`` samples ending at absolute sample index ``start``."""
        delay = self.offset - start
        self.grow_if_needed(length + delay)
        stop0 = start % self.buffer_length + self.buffer_length
        start0 = stop0 - length
        if start0 > 2 * self.buffer_length or start0 < 0:
            raise ArithmeticError(
                "Start index is wrong %d %d" % (start0, self.buffer_length)
            )
        if stop0 > 2 * self.buffer_length:
            raise ArithmeticError(
                "Stop index is larger than buffer size: %d > %d"
                % (stop0, 2 * self.buffer_length)
            )
        return self.buffer[:, start0:stop0]

    def grow_if_needed(self, length):
        """Grow the buffer (x1.5 headroom) when a request exceeds its length."""
        if length > self.buffer_length:
            # let the buffer grow according to our needs
            old_length = self.buffer_length
            new_length = int(1.5 * length)
            self.logger.info("Ringbuffer: growing buffer for length %d", new_length)
            # create new buffer
            newbuffer = zeros((self.buffer.shape[0], 2 * new_length))
            # copy existing data so that self.offset does not have to be changed
            old_offset_mod = self.offset % old_length
            new_offset_mod = self.offset % new_length
            shift = new_offset_mod - old_offset_mod
            # shift can be negative, computing modulo again
            shift %= new_length
            # first copy, always complete
            newbuffer[:, shift : shift + old_length] = self.buffer[:, :old_length]
            # second copy, can be folded
            direct = min(old_length, new_length - shift)
            folded = old_length - direct
            newbuffer[
                :, new_length + shift : new_length + shift + direct
            ] = self.buffer[:, :direct]
            newbuffer[:, :folded] = self.buffer[:, direct : direct + folded]
            # assign self.buffer to the new larger buffer
            self.buffer = newbuffer
            self.buffer_length = new_length
|
commands | send_usage_report | from django.core.management.base import BaseCommand
from posthog.tasks.usage_report import send_all_org_usage_reports
class Command(BaseCommand):
    """Management command that triggers the organization usage report."""

    help = "Send the usage report for a given day"

    def add_arguments(self, parser):
        # All options share the same (flag, type, help) shape, so register
        # them from one table.
        for flag, arg_type, help_text in (
            ("--dry-run", bool, "Print information instead of sending it"),
            ("--date", str, "The date to be ran in format YYYY-MM-DD"),
            ("--event-name", str, "Override the event name to be sent - for testing"),
            (
                "--skip-capture-event",
                str,
                "Skip the posthog capture events - for retrying to billing service",
            ),
            ("--organization-id", str, "Only send the report for this organization ID"),
            ("--async", bool, "Run the task asynchronously"),
        ):
            parser.add_argument(flag, type=arg_type, help=help_text)

    def handle(self, *args, **options):
        dry_run = options["dry_run"]
        report_args = (dry_run, options["date"], options["event_name"])
        report_kwargs = {
            "skip_capture_event": options["skip_capture_event"],
            "only_organization_id": options["organization_id"],
        }
        # Celery .delay() and a direct call take the same arguments.
        if options["async"]:
            send_all_org_usage_reports.delay(*report_args, **report_kwargs)
        else:
            send_all_org_usage_reports(*report_args, **report_kwargs)
        if dry_run:
            print("Dry run so not sent.")  # noqa T201
        print("Done!")  # noqa T201
|
cmd | split | from __future__ import absolute_import, division, print_function
import sys
import time
from binascii import hexlify
from bup import client, compat, git, hashsplit, options
from bup.compat import argv_bytes, environ, nullcontext
from bup.hashsplit import HashSplitter
from bup.helpers import (
add_error,
hostname,
log,
parse_date_or_fatal,
parse_num,
qprogress,
reprogress,
saved_errors,
valid_save_name,
)
from bup.io import byte_stream
from bup.pwdgrp import userfullname, username
optspec = """
bup split [-t] [-c] [-n name] OPTIONS [--git-ids | filenames...]
bup split -b OPTIONS [--git-ids | filenames...]
bup split --copy OPTIONS [--git-ids | filenames...]
bup split --noop [-b|-t] OPTIONS [--git-ids | filenames...]
--
Modes:
b,blobs output a series of blob ids. Implies --fanout=0.
t,tree output a tree id
c,commit output a commit id
n,name= save the result under the given name
noop split the input, but throw away the result
copy split the input, copy it to stdout, don't save to repo
Options:
r,remote= remote repository path
d,date= date for the commit (seconds since the epoch)
q,quiet don't print progress messages
v,verbose increase log output (can be used more than once)
git-ids read a list of git object ids from stdin and split their contents
keep-boundaries don't let one chunk span two input files
bench print benchmark timings to stderr
max-pack-size= maximum bytes in a single pack
max-pack-objects= maximum number of objects in a single pack
fanout= average number of blobs in a single tree
bwlimit= maximum bytes/sec to transmit to server
#,compress= set compression level to # (0-9, 9 is highest) [1]
"""
class NoOpPackWriter:
    """Pack-writer stand-in that hashes objects but stores nothing.

    Used for --noop/--copy modes where no repository write should occur.
    """

    def __init__(self):
        self.closed = False

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        """Mark the writer closed; there is nothing to flush."""
        self.closed = True

    def __del__(self):
        # Catch callers that forget to close (mirrors real PackWriter).
        assert self.closed

    def new_blob(self, content):
        """Return the blob oid without writing anything."""
        return git.calc_hash(b"blob", content)

    def new_tree(self, shalist):
        """Return the tree oid without writing anything."""
        return git.calc_hash(b"tree", git.tree_encode(shalist))
def opts_from_cmdline(argv):
    """Parse bup split's argv into a validated options object.

    Exits via o.fatal() on conflicting options; returns options with
    byte-converted names, parsed numeric fields and a commit date.
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    opt.sources = extra
    # Command-line values arrive as str; repo-facing values must be bytes.
    if opt.name:
        opt.name = argv_bytes(opt.name)
    if opt.remote:
        opt.remote = argv_bytes(opt.remote)
    if opt.verbose is None:
        opt.verbose = 0
    # Mode-conflict checks; order decides which fatal message wins,
    # so do not reorder.
    if not (opt.blobs or opt.tree or opt.commit or opt.name or opt.noop or opt.copy):
        o.fatal("use one or more of -b, -t, -c, -n, --noop, --copy")
    if opt.copy and (opt.blobs or opt.tree):
        o.fatal("--copy is incompatible with -b, -t")
    if (opt.noop or opt.copy) and (opt.commit or opt.name):
        o.fatal("--noop and --copy are incompatible with -c, -n")
    if opt.blobs and (opt.tree or opt.commit or opt.name):
        o.fatal("-b is incompatible with -t, -c, -n")
    if extra and opt.git_ids:
        o.fatal("don't provide filenames when using --git-ids")
    if opt.verbose >= 2:
        # -vv also enables git tracing and benchmark timing output.
        git.verbose = opt.verbose - 1
        opt.bench = 1
    # Normalize human-readable sizes/counts to integers.
    if opt.max_pack_size:
        opt.max_pack_size = parse_num(opt.max_pack_size)
    if opt.max_pack_objects:
        opt.max_pack_objects = parse_num(opt.max_pack_objects)
    if opt.fanout:
        opt.fanout = parse_num(opt.fanout)
    if opt.bwlimit:
        opt.bwlimit = parse_num(opt.bwlimit)
    if opt.date:
        opt.date = parse_date_or_fatal(opt.date, o.fatal)
    else:
        opt.date = time.time()
    opt.is_reverse = environ.get(b"BUP_SERVER_REVERSE")
    if opt.is_reverse and opt.remote:
        o.fatal("don't use -r in reverse mode; it's automatic")
    if opt.name and not valid_save_name(opt.name):
        o.fatal("'%r' is not a valid branch name." % opt.name)
    return opt
def split(opt, files, parent, out, pack_writer):
    """Split *files* according to *opt*; return the new commit oid or None.

    :param opt: parsed command-line options (mode flags select behavior)
    :param files: iterable of file-like byte sources to split
    :param parent: parent commit oid (or None) when committing
    :param out: binary output stream for oids / copied data
    :param pack_writer: provides new_blob/new_tree (and new_commit)
    """
    # Hack around lack of nonlocal vars in python 2
    total_bytes = [0]

    def prog(filenum, nbytes):
        # Progress callback invoked by the hashsplitters.
        total_bytes[0] += nbytes
        if filenum > 0:
            qprogress(
                "Splitting: file #%d, %d kbytes\r"
                % (filenum + 1, total_bytes[0] // 1024)
            )
        else:
            qprogress("Splitting: %d kbytes\r" % (total_bytes[0] // 1024))

    new_blob = pack_writer.new_blob
    new_tree = pack_writer.new_tree
    if opt.blobs:
        # -b: emit one blob oid per chunk.
        shalist = hashsplit.split_to_blobs(
            new_blob, files, keep_boundaries=opt.keep_boundaries, progress=prog
        )
        for sha, size, level in shalist:
            out.write(hexlify(sha) + b"\n")
        reprogress()
    elif opt.tree or opt.commit or opt.name:
        if opt.name:  # insert dummy_name which may be used as a restore target
            mode, sha = hashsplit.split_to_blob_or_tree(
                new_blob,
                new_tree,
                files,
                keep_boundaries=opt.keep_boundaries,
                progress=prog,
            )
            splitfile_name = git.mangle_name(b"data", hashsplit.GIT_MODE_FILE, mode)
            shalist = [(mode, splitfile_name, sha)]
        else:
            shalist = hashsplit.split_to_shalist(
                new_blob,
                new_tree,
                files,
                keep_boundaries=opt.keep_boundaries,
                progress=prog,
            )
        tree = new_tree(shalist)
    else:
        # --noop/--copy: just run the splitter, optionally copying data out.
        last = 0
        for blob, level in HashSplitter(
            files,
            progress=prog,
            keep_boundaries=opt.keep_boundaries,
            bits=hashsplit.BUP_BLOBBITS,
            fanbits=hashsplit.fanbits(),
        ):
            hashsplit.total_split += len(blob)
            if opt.copy:
                # Bug fix: the original wrote str(blob) to the text-mode
                # sys.stdout, which under Python 3 emits the bytes' repr
                # (b'...') instead of the data. Write the raw bytes to the
                # binary output stream instead.
                out.write(blob)
            megs = hashsplit.total_split // 1024 // 1024
            if not opt.quiet and last != megs:
                last = megs
        if opt.verbose:
            log("\n")
    if opt.tree:
        out.write(hexlify(tree) + b"\n")
    commit = None
    if opt.commit or opt.name:
        msg = b"bup split\n\nGenerated by command:\n%r\n" % compat.get_argvb()
        userline = b"%s <%s@%s>" % (userfullname(), username(), hostname())
        commit = pack_writer.new_commit(
            tree, parent, userline, opt.date, None, userline, opt.date, None, msg
        )
        if opt.commit:
            out.write(hexlify(commit) + b"\n")
    return commit
def main(argv):
    """Entry point for bup split: set up input/output/writers, then split."""
    opt = opts_from_cmdline(argv)
    if opt.verbose >= 2:
        git.verbose = opt.verbose - 1
    if opt.fanout:
        hashsplit.fanout = opt.fanout
    if opt.blobs:
        # -b implies --fanout=0 (no intermediate trees).
        hashsplit.fanout = 0
    if opt.bwlimit:
        client.bwlimit = opt.bwlimit
    start_time = time.time()
    # Flush the text layer before wrapping stdout/stdin as byte streams.
    sys.stdout.flush()
    out = byte_stream(sys.stdout)
    stdin = byte_stream(sys.stdin)
    if opt.git_ids:
        # the input is actually a series of git object ids that we should retrieve
        # and split.
        #
        # This is a bit messy, but basically it converts from a series of
        # CatPipe.get() iterators into a series of file-type objects.
        # It would be less ugly if either CatPipe.get() returned a file-like object
        # (not very efficient), or split_to_shalist() expected an iterator instead
        # of a file.
        cp = git.CatPipe()

        class IterToFile:
            # Minimal file-like adapter over an iterator of byte chunks.
            def __init__(self, it):
                self.it = iter(it)

            def read(self, size):
                # Ignores ``size``: returns the next chunk, or b"" at EOF.
                v = next(self.it, None)
                return v or b""

        def read_ids():
            # Yield an IterToFile for each object id read from stdin.
            while 1:
                line = stdin.readline()
                if not line:
                    break
                if line:
                    line = line.strip()
                try:
                    it = cp.get(line.strip())
                    next(it, None)  # skip the file info
                except KeyError as e:
                    add_error("error: %s" % e)
                    continue
                yield IterToFile(it)

        files = read_ids()
    else:
        # the input either comes from a series of files or from stdin.
        if opt.sources:
            files = (open(argv_bytes(fn), "rb") for fn in opt.sources)
        else:
            files = [stdin]
    writing = not (opt.noop or opt.copy)
    remote_dest = opt.remote or opt.is_reverse
    if writing:
        git.check_repo_or_die()
    if remote_dest and writing:
        cli = repo = client.Client(opt.remote)
    else:
        cli = nullcontext()
        repo = git
    # cli creation must be last nontrivial command in each if clause above
    with cli:
        if opt.name and writing:
            refname = opt.name and b"refs/heads/%s" % opt.name
            oldref = repo.read_ref(refname)
        else:
            refname = oldref = None
        if not writing:
            pack_writer = NoOpPackWriter()
        elif not remote_dest:
            pack_writer = git.PackWriter(
                compression_level=opt.compress,
                max_pack_size=opt.max_pack_size,
                max_pack_objects=opt.max_pack_objects,
            )
        else:
            pack_writer = cli.new_packwriter(
                compression_level=opt.compress,
                max_pack_size=opt.max_pack_size,
                max_pack_objects=opt.max_pack_objects,
            )
        # packwriter creation must be last command in each if clause above
        with pack_writer:
            commit = split(opt, files, oldref, out, pack_writer)
        # pack_writer must be closed before we can update the ref
        if refname:
            repo.update_ref(refname, commit, oldref)
    secs = time.time() - start_time
    size = hashsplit.total_split
    if opt.bench:
        log(
            "bup: %.2f kbytes in %.2f secs = %.2f kbytes/sec\n"
            % (size / 1024, secs, size / 1024 / secs)
        )
    if saved_errors:
        log("WARNING: %d errors encountered while saving.\n" % len(saved_errors))
        sys.exit(1)
|
versions | 060_31ad11c518fc_add_system_info_table | # encoding: utf-8
"""060 Add system info table
Revision ID: 31ad11c518fc
Revises: 9291bb46f352
Create Date: 2018-09-04 18:49:09.587220
"""
import sqlalchemy as sa
from alembic import op
from ckan.migration import skip_based_on_legacy_engine_version
# revision identifiers, used by Alembic.
revision = "31ad11c518fc"
down_revision = "9291bb46f352"
# This migration belongs to no named branch and declares no extra dependencies.
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``system_info`` table and its revisioning companion."""
    # Databases migrated with the legacy (pre-alembic) tooling may already
    # contain these tables; in that case do nothing.
    if skip_based_on_legacy_engine_version(op, __name__):
        return
    op.create_table(
        "system_info",
        sa.Column("id", sa.Integer, primary_key=True, nullable=False),
        sa.Column("key", sa.Unicode(100), unique=True, nullable=False),
        sa.Column("value", sa.UnicodeText),
        sa.Column("revision_id", sa.UnicodeText, sa.ForeignKey("revision.id")),
    )
    # Companion table storing one row per (key, revision) pair; continuity_id
    # links each historical row back to the live system_info record.
    op.create_table(
        "system_info_revision",
        sa.Column("id", sa.Integer, primary_key=True, nullable=False),
        sa.Column("key", sa.Unicode(100), unique=True, nullable=False),
        sa.Column("value", sa.UnicodeText),
        sa.Column(
            "revision_id",
            sa.UnicodeText,
            sa.ForeignKey("revision.id"),
            primary_key=True,
        ),
        sa.Column("continuity_id", sa.Integer, sa.ForeignKey("system_info.id")),
    )
def downgrade():
    """Drop both system_info tables (revision table first: it holds the FK)."""
    for table_name in ("system_info_revision", "system_info"):
        op.drop_table(table_name)
|
connectors | abstract_connector | """ functionality outline for a book data connector """
from __future__ import annotations
import asyncio
import imghdr
import logging
import re
from abc import ABC, abstractmethod
from typing import Any, Callable, Iterator, Optional, TypedDict, Union
from urllib.parse import quote_plus
import aiohttp
import requests
from bookwyrm import activitypub, models, settings
from bookwyrm.settings import USER_AGENT
from django.core.files.base import ContentFile
from django.db import transaction
from requests.exceptions import RequestException
from ..book_search import SearchResult
from .connector_manager import ConnectorException, load_more_data, raise_not_valid_url
from .format_mappings import format_mappings
logger = logging.getLogger(__name__)
JsonDict = dict[str, Any]
class ConnectorResults(TypedDict):
    """TypedDict for results returned by connector"""

    # the connector instance that produced these results
    connector: AbstractMinimalConnector
    # search results, already parsed and capped by the connector
    results: list[SearchResult]
class AbstractMinimalConnector(ABC):
    """just the bare bones, for other bookwyrm instances

    Subclasses implement the parse_* hooks; this base handles loading
    connector settings, building search URLs, and async result fetching.
    """

    def __init__(self, identifier: str):
        # load connector settings
        info = models.Connector.objects.get(identifier=identifier)
        self.connector = info
        # the things in the connector model to copy over
        self.base_url = info.base_url
        self.books_url = info.books_url
        self.covers_url = info.covers_url
        self.search_url = info.search_url
        self.isbn_search_url = info.isbn_search_url
        self.name = info.name
        self.identifier = info.identifier

    def get_search_url(self, query: str) -> str:
        """format the query url"""
        # Check if the query resembles an ISBN
        if maybe_isbn(query) and self.isbn_search_url and self.isbn_search_url != "":
            # Up-case the ISBN string to ensure any 'X' check-digit is correct
            # If the ISBN has only 9 characters, prepend missing zero
            normalized_query = query.strip().upper().rjust(10, "0")
            return f"{self.isbn_search_url}{normalized_query}"
        # NOTE: previously, we tried searching isbn and if that produces no results,
        # searched as free text. This, instead, only searches isbn if it's isbn-y
        return f"{self.search_url}{quote_plus(query)}"

    def process_search_response(
        self, query: str, data: Any, min_confidence: float
    ) -> list[SearchResult]:
        """Format the search results based on the format of the query"""
        # Both styles of result are capped to the first ten matches.
        if maybe_isbn(query):
            return list(self.parse_isbn_search_data(data))[:10]
        return list(self.parse_search_data(data, min_confidence))[:10]

    async def get_results(
        self,
        session: aiohttp.ClientSession,
        url: str,
        min_confidence: float,
        query: str,
    ) -> Optional[ConnectorResults]:
        """try this specific connector

        Returns None (after logging) on any network, timeout or
        content-type failure so callers can try other connectors.
        """
        # pylint: disable=line-too-long
        headers = {
            "Accept": (
                'application/json, application/activity+json, application/ld+json; profile="https://www.w3.org/ns/activitystreams"; charset=utf-8'
            ),
            "User-Agent": USER_AGENT,
        }
        params = {"min_confidence": min_confidence}
        try:
            async with session.get(url, headers=headers, params=params) as response:
                if not response.ok:
                    logger.info("Unable to connect to %s: %s", url, response.reason)
                    return None
                try:
                    raw_data = await response.json()
                except aiohttp.client_exceptions.ContentTypeError as err:
                    # Endpoint responded with something that isn't JSON.
                    logger.exception(err)
                    return None
                return ConnectorResults(
                    connector=self,
                    results=self.process_search_response(
                        query, raw_data, min_confidence
                    ),
                )
        except asyncio.TimeoutError:
            logger.info("Connection timed out for url: %s", url)
        except aiohttp.ClientError as err:
            logger.info(err)
        return None

    @abstractmethod
    def get_or_create_book(self, remote_id: str) -> Optional[models.Book]:
        """pull up a book record by whatever means possible"""

    @abstractmethod
    def parse_search_data(
        self, data: Any, min_confidence: float
    ) -> Iterator[SearchResult]:
        """turn the result json from a search into a list"""

    @abstractmethod
    def parse_isbn_search_data(self, data: Any) -> Iterator[SearchResult]:
        """turn the result json from a search into a list"""
class AbstractConnector(AbstractMinimalConnector):
    """generic book data connector

    Adds work/edition/author creation from remote JSON on top of the
    minimal connector; subclasses supply the mappings and the work/edition
    navigation hooks declared abstract below.
    """

    generated_remote_link_field = ""

    def __init__(self, identifier: str):
        super().__init__(identifier)
        # fields we want to look for in book data to copy over
        # title we handle separately.
        self.book_mappings: list[Mapping] = []
        self.author_mappings: list[Mapping] = []

    def get_or_create_book(self, remote_id: str) -> Optional[models.Book]:
        """translate arbitrary json into an Activitypub dataclass"""
        # first, check if we have the origin_id saved
        existing = models.Edition.find_existing_by_remote_id(
            remote_id
        ) or models.Work.find_existing_by_remote_id(remote_id)
        if existing:
            # Prefer a work's default edition when one exists.
            if hasattr(existing, "default_edition") and isinstance(
                existing.default_edition, models.Edition
            ):
                return existing.default_edition
            return existing
        # load the json data from the remote data source
        data = self.get_book_data(remote_id)
        if self.is_work_data(data):
            try:
                edition_data = self.get_edition_from_work_data(data)
            except (KeyError, ConnectorException):
                # hack: re-use the work data as the edition data
                # this is why remote ids aren't necessarily unique
                edition_data = data
            work_data = data
        else:
            edition_data = data
            try:
                work_data = self.get_work_from_edition_data(data)
            except (KeyError, ConnectorException) as err:
                logger.info(err)
                # Fall back to treating the edition data as its own work.
                work_data = data
        if not work_data or not edition_data:
            raise ConnectorException(f"Unable to load book data: {remote_id}")
        with transaction.atomic():
            # create activitypub object
            work_activity = activitypub.Work(
                **dict_from_mappings(work_data, self.book_mappings)
            )
            # this will dedupe automatically
            work = work_activity.to_model(model=models.Work, overwrite=False)
            if not work:
                return None
            for author in self.get_authors_from_data(work_data):
                work.authors.add(author)
            edition = self.create_edition_from_data(work, edition_data)
        # Kick off a background task to fetch the rest of the data lazily.
        load_more_data.delay(self.connector.id, work.id)
        return edition

    def get_book_data(self, remote_id: str) -> JsonDict:  # pylint: disable=no-self-use
        """this allows connectors to override the default behavior"""
        return get_data(remote_id)

    def create_edition_from_data(
        self,
        work: models.Work,
        edition_data: Union[str, JsonDict],
        instance: Optional[models.Edition] = None,
    ) -> Optional[models.Edition]:
        """if we already have the work, we're ready"""
        if isinstance(edition_data, str):
            # We don't expect a string here
            return None
        mapped_data = dict_from_mappings(edition_data, self.book_mappings)
        mapped_data["work"] = work.remote_id
        edition_activity = activitypub.Edition(**mapped_data)
        edition = edition_activity.to_model(
            model=models.Edition, overwrite=False, instance=instance
        )
        if not edition:
            return None
        # if we're updating an existing instance, we don't need to load authors
        if instance:
            return edition
        if not edition.connector:
            edition.connector = self.connector
            edition.save(broadcast=False, update_fields=["connector"])
        for author in self.get_authors_from_data(edition_data):
            edition.authors.add(author)
        # use the authors from the work if none are found for the edition
        if not edition.authors.exists() and work.authors.exists():
            edition.authors.set(work.authors.all())
        return edition

    def get_or_create_author(
        self, remote_id: str, instance: Optional[models.Author] = None
    ) -> Optional[models.Author]:
        """load that author"""
        # Skip the remote fetch when we already know this author, unless a
        # specific instance was handed in to refresh.
        if not instance:
            existing = models.Author.find_existing_by_remote_id(remote_id)
            if existing:
                return existing
        data = self.get_book_data(remote_id)
        mapped_data = dict_from_mappings(data, self.author_mappings)
        try:
            activity = activitypub.Author(**mapped_data)
        except activitypub.ActivitySerializerError:
            return None
        # this will dedupe
        return activity.to_model(
            model=models.Author, overwrite=False, instance=instance
        )

    def get_remote_id_from_model(self, obj: models.BookDataModel) -> Optional[str]:
        """given the data stored, how can we look this up"""
        remote_id: Optional[str] = getattr(obj, self.generated_remote_link_field)
        return remote_id

    def update_author_from_remote(self, obj: models.Author) -> Optional[models.Author]:
        """load the remote data from this connector and add it to an existing author"""
        remote_id = self.get_remote_id_from_model(obj)
        if not remote_id:
            return None
        return self.get_or_create_author(remote_id, instance=obj)

    def update_book_from_remote(self, obj: models.Edition) -> Optional[models.Edition]:
        """load the remote data from this connector and add it to an existing book"""
        remote_id = self.get_remote_id_from_model(obj)
        if not remote_id:
            return None
        data = self.get_book_data(remote_id)
        return self.create_edition_from_data(obj.parent_work, data, instance=obj)

    @abstractmethod
    def is_work_data(self, data: JsonDict) -> bool:
        """differentiate works and editions"""

    @abstractmethod
    def get_edition_from_work_data(self, data: JsonDict) -> JsonDict:
        """every work needs at least one edition"""

    @abstractmethod
    def get_work_from_edition_data(self, data: JsonDict) -> JsonDict:
        """every edition needs a work"""

    @abstractmethod
    def get_authors_from_data(self, data: JsonDict) -> Iterator[models.Author]:
        """load author data"""

    @abstractmethod
    def expand_book_data(self, book: models.Book) -> None:
        """get more info on a book"""
def dict_from_mappings(data: JsonDict, mappings: list[Mapping]) -> JsonDict:
    """Build an Activitypub-format dict from *data* using the given mappings.

    When several mappings target the same local field, the first mapping
    that yields a truthy value wins.
    """
    formatted: JsonDict = {}
    for mapping in mappings:
        field = mapping.local_field
        if formatted.get(field):
            # an earlier mapping already produced a usable value
            continue
        formatted[field] = mapping.get_value(data)
    return formatted
def get_data(
    url: str,
    params: Optional[dict[str, str]] = None,
    timeout: int = settings.QUERY_TIMEOUT,
) -> JsonDict:
    """wrapper for request.get

    Fetches *url* and returns the parsed JSON dict. Raises
    ConnectorException on network errors, non-OK responses (except 401,
    which re-raises the underlying HTTPError) and non-dict payloads.
    """
    # check if the url is blocked
    raise_not_valid_url(url)
    try:
        resp = requests.get(
            url,
            params=params,
            headers={  # pylint: disable=line-too-long
                "Accept": (
                    'application/json, application/activity+json, application/ld+json; profile="https://www.w3.org/ns/activitystreams"; charset=utf-8'
                ),
                "User-Agent": settings.USER_AGENT,
            },
            timeout=timeout,
        )
    except RequestException as err:
        logger.info(err)
        raise ConnectorException(err)
    if not resp.ok:
        if resp.status_code == 401:
            # this is probably an AUTHORIZED_FETCH issue
            resp.raise_for_status()
        else:
            raise ConnectorException()
    try:
        data = resp.json()
    except ValueError as err:
        logger.info(err)
        raise ConnectorException(err)
    if not isinstance(data, dict):
        raise ConnectorException("Unexpected data format")
    return data
def get_image(
    url: str, timeout: int = 10
) -> Union[tuple[ContentFile[bytes], str], tuple[None, None]]:
    """wrapper for requesting an image

    Returns ``(file, extension)`` on success, ``(None, None)`` on any
    download or image-detection failure (invalid/blocked URLs raise via
    ``raise_not_valid_url`` instead).
    """
    raise_not_valid_url(url)
    try:
        resp = requests.get(
            url,
            headers={
                "User-Agent": settings.USER_AGENT,
            },
            timeout=timeout,
        )
    except RequestException as err:
        # network failure: treat as "no image" rather than raising
        logger.info(err)
        return None, None

    if not resp.ok:
        return None, None

    image_content = ContentFile(resp.content)
    # NOTE(review): imghdr is deprecated since Python 3.11 and removed in
    # 3.13 — this needs a replacement (e.g. Pillow or the 'filetype'
    # package) before upgrading; confirm the project's supported versions.
    extension = imghdr.what(None, image_content.read())
    if not extension:
        logger.info("File requested was not an image: %s", url)
        return None, None
    return image_content, extension
class Mapping:
    """Associate a local database field with a field in an external dataset.

    ``remote_field`` defaults to the local field name and ``formatter``
    defaults to the identity function.
    """

    def __init__(
        self,
        local_field: str,
        remote_field: Optional[str] = None,
        formatter: Optional[Callable[[Any], Any]] = None,
    ):
        def identity(value):
            return value

        self.local_field = local_field
        self.remote_field = remote_field if remote_field else local_field
        self.formatter = formatter if formatter else identity

    def get_value(self, data: JsonDict) -> Optional[Any]:
        """Pull a field from incoming json and return the formatted version.

        Missing or falsy values — and any formatter failure on malformed
        remote data — yield None.
        """
        raw = data.get(self.remote_field)
        if not raw:
            return None
        try:
            return self.formatter(raw)
        except:  # pylint: disable=bare-except
            return None
def infer_physical_format(format_text: str) -> Optional[str]:
    """Map a free-text format description onto a standardized format.

    Tries an exact (case-insensitive) match first, then falls back to the
    first mapping whose key occurs as a substring; None when nothing fits.
    """
    lowered = format_text.lower()
    if lowered in format_mappings:
        # exact hit
        return format_mappings[lowered]
    # substring fallback: first matching mapping wins
    return next(
        (value for key, value in format_mappings.items() if key in lowered),
        None,
    )
def unique_physical_format(format_text: str) -> Optional[str]:
    """Return the lowercased format text unless it is a known mapping key.

    A direct hit would be redundant to store, because
    infer_physical_format already captures it.
    """
    lowered = format_text.lower()
    return None if lowered in format_mappings else lowered
def maybe_isbn(query: str) -> bool:
    """Heuristically decide whether *query* looks like an ISBN."""
    stripped = re.sub(r"[\W_]", "", query)  # drop hyphens/spaces/filler
    # Must be all digits, except a trailing ISBN-10 check digit may be 'X'.
    if not stripped.upper().rstrip("X").isnumeric():
        return False
    # ISBN-10, ISBN-13, or an ISBN-10 that lost a leading zero.
    return len(stripped) in {9, 10, 13}
|
PyObjCTest | test_keyvalue | """
Tests for the Key-Value Coding for hybrid python objects.
NOTE: Testcases here should be synchronized with the Key-Value Coding tests
in PyObjCTools.test.test_keyvalue and objc.test.test_keyvalue.
TODO:
- This test uses C code, that code should be added to this package!
- Tests that access properties in the parent Objective-C class!
- More key-error tests, the tests don't cover all relevant code yet.
"""
import sys
import objc
from Foundation import *
from PyObjCTest.testhelper import PyObjC_TestClass3 as STUB
from PyObjCTools.TestSupport import *
class KeyValueClass1(NSObject):
    """NSObject subclass exercising KVC against plain Python attributes,
    getter methods (getKey1/key2), setter selectors (setKey4_/setKey5_),
    and a name-mangled private attribute that KVC must not reach."""

    def init(self):
        self = super(KeyValueClass1, self).init()
        self.key3 = 3
        self._key4 = b"4".decode("ascii")
        # name-mangled to _KeyValueClass1__private; testNoPrivateVars
        # checks that KVC cannot see it
        self.__private = b"private".decode("ascii")
        return self

    def addMultiple(self):
        # Build a three-level object graph for the key-path tests.
        self.multiple = KeyValueClass1.alloc().init()
        self.multiple.level2 = KeyValueClass1.alloc().init()
        self.multiple.level2.level3 = KeyValueClass1.alloc().init()
        self.multiple.level2.level3.keyA = b"hello".decode("ascii")
        self.multiple.level2.level3.keyB = b"world".decode("ascii")

    def getKey1(self):
        return 1

    def key2(self):
        return 2

    def setKey4_(self, value):
        self._key4 = value * 4

    def setKey5_(self, value):
        self.key5 = value * 5

    def keyRaisingValueError(self):
        raise ValueError("42")

    def keyRaisingNSUnknownKeyException(self):
        return self.valueForKey_("thisKeyDoesNotExist")

    # NOTE(review): 'Slector' looks like a typo for 'Selector' — the body
    # and testValueForKey both use the correctly-spelled attribute name.
    # Not renamed here because the selector name is part of the ObjC
    # interface; confirm upstream intent before fixing.
    def keyReturningSameSlector(self):
        return self.keyReturningSameSelector

    def keyReturningOtherSelector(self):
        # returns a different accessor (a bound selector), not itself
        return self.getKey1
class KeyValueClass1Explicit(NSObject):
    """Variant of KeyValueClass1 that implements the KVC accessor methods
    explicitly, backing all keys with a private ``_values`` dict."""

    def init(self):
        self = super(KeyValueClass1Explicit, self).init()
        self._values = {}
        self._values[b"key3".decode("ascii")] = 3
        self._values[b"key4".decode("ascii")] = b"4".decode("ascii")
        self._values["_private"] = b"private".decode("ascii")
        return self

    def addMultiple(self):
        # Build a three-level object graph for the key-path tests.
        self._values["multiple"] = KeyValueClass1Explicit.alloc().init()
        self._values["multiple"]._values[
            "level2"
        ] = KeyValueClass1Explicit.alloc().init()
        self._values["multiple"]._values["level2"]._values[
            "level3"
        ] = KeyValueClass1Explicit.alloc().init()
        self._values["multiple"]._values["level2"]._values["level3"]._values[
            "keyA"
        ] = b"hello".decode("ascii")
        self._values["multiple"]._values["level2"]._values["level3"]._values[
            "keyB"
        ] = b"world".decode("ascii")

    def valueForKey_(self, key):
        # key1/key2 are computed; everything else comes from the dict
        # (KeyError propagates for unknown keys).
        if key == "key1":
            return 1
        elif key == "key2":
            return 2

        return self._values[key]

    def storedValueForKey_(self, key):
        return self.valueForKey_(key)

    def setValue_forKey_(self, value, key):
        # mirror the implicit class: key4 quadruples, key5 quintuples
        if key == "key4":
            value = value * 4
        elif key == "key5":
            value = value * 5

        self._values[key] = value

    def takeStoredValue_forKey_(self, value, key):
        self.setValue_forKey_(value, key)

    def takeValue_forKey_(self, value, key):
        self.setValue_forKey_(value, key)
class KeyValueClass4(NSObject):
    """Slotted NSObject subclass exposing a Python property ('bar') and a
    read-only property ('roprop') for KVC property-access tests."""

    # restricts instances to a single attribute
    __slots__ = ("foo",)

    def init(self):
        self = super(KeyValueClass4, self).init()
        self.foo = b"foobar".decode("ascii")
        return self

    # Definition for property 'bar'. Use odd names for the methods
    # because the KeyValue support recognizes the usual names.
    def read_bar(self):
        return self.foo + self.foo

    def write_bar(self, value):
        self.foo = value + value

    bar = property(read_bar, write_bar)

    # read-only property: setting it through KVC must raise
    roprop = property(lambda self: b"read-only".decode("ascii"))
class KVOClass(NSObject):
    """Observable object that opts out of automatic KVO notifications."""

    # NOTE(review): in Cocoa this is normally a class method; it is defined
    # here as an instance method — presumably sufficient for these tests.
    def automaticallyNotifiesObserversForKey_(self, aKey):
        return objc.NO

    def test(self):
        return b"test".decode("ascii")
class KeyValueObserver(NSObject):
    """Records every KVO callback it receives in ``self.observed``."""

    def init(self):
        # NOTE(review): does not call super().init() — the tests only use
        # the Python-level 'observed' list; confirm this is intentional.
        self.observed = []
        return self

    def observeValueForKeyPath_ofObject_change_context_(
        self, keyPath, object, change, context
    ):
        # context is deliberately ignored; the tests inspect the 3-tuple
        self.observed.append((keyPath, object, change))
class PyKeyValueCoding(TestCase):
    # Exercises the ObjC Key-Value Coding bridge against python-defined
    # NSObject subclasses.  STUB's first argument selects the accessor
    # variant; from the test names it appears to map as:
    #   0 -> valueForKey: / takeValue:forKey:
    #   1 -> valueForKeyPath: / takeValue:forKeyPath:
    #   2 -> storedValueForKey: / takeStoredValue:forKey:
    #   3 -> valuesForKeys: / takeValuesFromDictionary:
    # (inferred — confirm in the C test helper).

    def testNoPrivateVars(self):
        # Private instance variables ('anObject.__value') are not accessible using
        # key-value coding.
        o = KeyValueClass1.alloc().init()
        self.assertRaises(
            KeyError, STUB.keyValue_forObject_key_, 0, o, b"private".decode("ascii")
        )

    def testValueForKey(self):
        o = KeyValueClass1.alloc().init()
        o.addMultiple()

        self.assertEqual(STUB.keyValue_forObject_key_(0, o, b"key1".decode("ascii")), 1)
        self.assertEqual(STUB.keyValue_forObject_key_(0, o, b"key2".decode("ascii")), 2)
        self.assertEqual(STUB.keyValue_forObject_key_(0, o, b"key3".decode("ascii")), 3)
        self.assertEqual(
            STUB.keyValue_forObject_key_(0, o, b"key4".decode("ascii")), "4"
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(0, o, b"multiple".decode("ascii")), o.multiple
        )

        self.assertRaises(
            KeyError, STUB.keyValue_forObject_key_, 0, o, b"nokey".decode("ascii")
        )
        # python-level exceptions must surface through the bridge
        self.assertRaises(
            ValueError,
            STUB.keyValue_forObject_key_,
            0,
            o,
            b"keyRaisingValueError".decode("ascii"),
        )
        self.assertRaises(
            KeyError,
            STUB.keyValue_forObject_key_,
            0,
            o,
            b"keyRaisingNSUnknownKeyException".decode("ascii"),
        )
        self.assertRaises(
            KeyError,
            STUB.keyValue_forObject_key_,
            0,
            o,
            b"keyReturningSameSelector".decode("ascii"),
        )

        obj = STUB.keyValue_forObject_key_(
            0, o, b"keyReturningOtherSelector".decode("ascii")
        )
        self.assertIsInstance(obj, objc.selector)
        self.assertEqual(obj.selector, b"getKey1")
        self.assertIs(obj.self, o)

    def testValueForKey2(self):
        # property access on a slotted class
        o = KeyValueClass4.alloc().init()

        self.assertEqual(
            STUB.keyValue_forObject_key_(0, o, b"foo".decode("ascii")),
            b"foobar".decode("ascii"),
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(0, o, b"bar".decode("ascii")),
            b"foobarfoobar".decode("ascii"),
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(0, o, b"roprop".decode("ascii")),
            b"read-only".decode("ascii"),
        )

    def testStoredValueForKey(self):
        o = KeyValueClass1.alloc().init()
        o.addMultiple()

        self.assertEqual(STUB.keyValue_forObject_key_(2, o, b"key1".decode("ascii")), 1)
        self.assertEqual(STUB.keyValue_forObject_key_(2, o, b"key2".decode("ascii")), 2)
        self.assertEqual(STUB.keyValue_forObject_key_(2, o, b"key3".decode("ascii")), 3)
        self.assertEqual(
            STUB.keyValue_forObject_key_(2, o, b"key4".decode("ascii")), "4"
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(2, o, b"multiple".decode("ascii")), o.multiple
        )

        self.assertRaises(
            KeyError, STUB.keyValue_forObject_key_, 2, o, b"nokey".decode("ascii")
        )

    def testStoredValueForKey2(self):
        o = KeyValueClass4.alloc().init()

        self.assertEqual(
            STUB.keyValue_forObject_key_(2, o, b"foo".decode("ascii")),
            b"foobar".decode("ascii"),
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(2, o, b"bar".decode("ascii")),
            b"foobarfoobar".decode("ascii"),
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(2, o, b"roprop".decode("ascii")),
            b"read-only".decode("ascii"),
        )

    def testValueForKeyPath(self):
        # dotted key paths traverse the multiple.level2.level3 graph
        o = KeyValueClass1.alloc().init()
        o.addMultiple()

        self.assertEqual(
            STUB.keyValue_forObject_key_(1, o, b"multiple".decode("ascii")), o.multiple
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(1, o, b"multiple.level2".decode("ascii")),
            o.multiple.level2,
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(
                1, o, b"multiple.level2.level3.keyA".decode("ascii")
            ),
            o.multiple.level2.level3.keyA,
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(
                1, o, b"multiple.level2.level3.keyB".decode("ascii")
            ),
            o.multiple.level2.level3.keyB,
        )

        self.assertRaises(
            KeyError,
            STUB.keyValue_forObject_key_,
            1,
            o,
            b"multiple.level2.nokey".decode("ascii"),
        )

    @max_os_level("10.5")
    def testValuesForKeys(self):
        o = KeyValueClass1.alloc().init()

        self.assertEqual(
            STUB.keyValue_forObject_key_(
                3,
                o,
                [
                    b"key1".decode("ascii"),
                    b"key2".decode("ascii"),
                    b"key3".decode("ascii"),
                    b"key4".decode("ascii"),
                ],
            ),
            {
                b"key1".decode("ascii"): 1,
                b"key2".decode("ascii"): 2,
                b"key3".decode("ascii"): 3,
                b"key4".decode("ascii"): b"4".decode("ascii"),
            },
        )

        self.assertRaises(
            KeyError,
            STUB.keyValue_forObject_key_,
            3,
            o,
            [
                b"key1".decode("ascii"),
                b"key3".decode("ascii"),
                b"nosuchkey".decode("ascii"),
            ],
        )

    @max_os_level("10.5")
    def testTakeValueForKey(self):
        o = KeyValueClass1.alloc().init()

        self.assertEqual(o.key3, 3)
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key3".decode("ascii"), b"drie".decode("ascii")
        )
        self.assertEqual(o.key3, b"drie".decode("ascii"))

        self.assertEqual(o._key4, b"4".decode("ascii"))
        # goes through setKey4_, which multiplies by 4
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key4".decode("ascii"), b"vier".decode("ascii")
        )
        self.assert_(not hasattr(o, b"key4".decode("ascii")))
        self.assertEqual(o._key4, b"viervierviervier".decode("ascii"))

        o.key5 = 1
        # goes through setKey5_, which multiplies by 5
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key5".decode("ascii"), b"V".decode("ascii")
        )
        self.assertEqual(o.key5, b"VVVVV".decode("ascii"))

        # unknown keys are created as plain attributes
        self.assert_(not hasattr(o, b"key9".decode("ascii")))
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key9".decode("ascii"), b"IX".decode("ascii")
        )
        self.assert_(hasattr(o, b"key9".decode("ascii")))
        self.assertEqual(o.key9, b"IX".decode("ascii"))

    @max_os_level("10.5")
    def testTakeValueForKey2(self):
        o = KeyValueClass4.alloc().init()

        self.assertEqual(o.foo, b"foobar".decode("ascii"))
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"foo".decode("ascii"), b"FOO".decode("ascii")
        )
        self.assertEqual(o.foo, b"FOO".decode("ascii"))

        # __slots__ prevents creating new attributes
        self.assertRaises(
            KeyError,
            STUB.setKeyValue_forObject_key_value_,
            0,
            o,
            b"key9".decode("ascii"),
            b"IX".decode("ascii"),
        )

    def testTakeStoredValueForKey(self):
        o = KeyValueClass1.alloc().init()

        self.assertEqual(o.key3, 3)
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key3".decode("ascii"), b"drie".decode("ascii")
        )
        self.assertEqual(o.key3, b"drie".decode("ascii"))

        self.assertEqual(o._key4, b"4".decode("ascii"))
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key4".decode("ascii"), b"vier".decode("ascii")
        )
        self.assertEqual(o._key4, b"viervierviervier".decode("ascii"))

        o.key5 = 1
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key5".decode("ascii"), b"V".decode("ascii")
        )
        self.assertEqual(o.key5, b"VVVVV".decode("ascii"))

        self.assert_(not hasattr(o, b"key9".decode("ascii")))
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key9".decode("ascii"), b"IX".decode("ascii")
        )
        self.assert_(hasattr(o, b"key9".decode("ascii")))
        self.assertEqual(o.key9, b"IX".decode("ascii"))

    def testStoredTakeValueForKey2(self):
        o = KeyValueClass4.alloc().init()

        self.assertEqual(o.foo, b"foobar".decode("ascii"))
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"foo".decode("ascii"), b"FOO".decode("ascii")
        )
        self.assertEqual(o.foo, b"FOO".decode("ascii"))

        self.assertRaises(
            KeyError,
            STUB.setKeyValue_forObject_key_value_,
            2,
            o,
            b"key9".decode("ascii"),
            b"IX".decode("ascii"),
        )
        # read-only property must reject writes
        self.assertRaises(
            KeyError,
            STUB.setKeyValue_forObject_key_value_,
            2,
            o,
            b"roprop".decode("ascii"),
            b"IX".decode("ascii"),
        )

    @max_os_level("10.5")
    def testTakeValuesFromDictionary(self):
        o = KeyValueClass1.alloc().init()

        self.assertEqual(o.key3, 3)
        self.assertEqual(o._key4, b"4".decode("ascii"))
        o.key5 = 1
        self.assert_(not hasattr(o, b"key9".decode("ascii")))

        STUB.setKeyValue_forObject_key_value_(
            3,
            o,
            None,
            {
                b"key3".decode("ascii"): b"drie".decode("ascii"),
                b"key4".decode("ascii"): b"vier".decode("ascii"),
                b"key5".decode("ascii"): b"V".decode("ascii"),
                b"key9".decode("ascii"): b"IX".decode("ascii"),
            },
        )

        self.assertEqual(o.key3, b"drie".decode("ascii"))
        self.assertEqual(o._key4, b"viervierviervier".decode("ascii"))
        self.assertEqual(o.key5, b"VVVVV".decode("ascii"))
        self.assert_(hasattr(o, b"key9".decode("ascii")))
        self.assertEqual(o.key9, b"IX".decode("ascii"))

    @max_os_level("10.5")
    def testTakeValuesFromDictionary2(self):
        o = KeyValueClass4.alloc().init()

        self.assertEqual(o.foo, b"foobar".decode("ascii"))
        STUB.setKeyValue_forObject_key_value_(
            3, o, None, {b"foo".decode("ascii"): b"FOO".decode("ascii")}
        )
        self.assertEqual(o.foo, b"FOO".decode("ascii"))

        self.assertRaises(
            KeyError,
            STUB.setKeyValue_forObject_key_value_,
            3,
            o,
            None,
            {b"key9".decode("ascii"): b"IX".decode("ascii")},
        )
        self.assertRaises(
            KeyError,
            STUB.setKeyValue_forObject_key_value_,
            3,
            o,
            None,
            {b"roprop".decode("ascii"): b"IX".decode("ascii")},
        )

    @max_os_level("10.5")
    def testTakeValueForKeyPath(self):
        o = KeyValueClass1.alloc().init()
        o.addMultiple()

        self.assertEqual(o.multiple.level2.level3.keyA, b"hello".decode("ascii"))
        self.assertEqual(o.multiple.level2.level3.keyB, b"world".decode("ascii"))

        STUB.setKeyValue_forObject_key_value_(
            1,
            o,
            b"multiple.level2.level3.keyA".decode("ascii"),
            b"KeyAValue".decode("ascii"),
        )
        self.assertEqual(o.multiple.level2.level3.keyA, b"KeyAValue".decode("ascii"))

        STUB.setKeyValue_forObject_key_value_(
            1, o, b"multiple.level2.level3.keyB".decode("ascii"), 9.999
        )
        self.assertEqual(o.multiple.level2.level3.keyB, 9.999)

    if hasattr(NSObject, b"willChangeValueForKey_".decode("ascii")):
        # NSKeyValueObserving is only available on Panther and beyond
        def testKVO1(self):
            # observer registration/removal round trip must not raise
            o = KVOClass.alloc().init()
            o.addObserver_forKeyPath_options_context_(
                self, b"test".decode("ascii"), 0, None
            )
            o.removeObserver_forKeyPath_(self, b"test".decode("ascii"))

        def testKVO2(self):
            """
            Check if observations work for python-based keys on ObjC classes
            """
            observer = KeyValueObserver.alloc().init()
            self.assertEqual(observer.observed, [])

            o = KeyValueClass1.alloc().init()
            o.addObserver_forKeyPath_options_context_(
                observer, b"key3".decode("ascii"), 0, 0
            )
            try:
                STUB.setKeyValue_forObject_key_value_(
                    2, o, b"key3".decode("ascii"), b"drie".decode("ascii")
                )
                self.assertEqual(o.key3, b"drie".decode("ascii"))

                self.assertEqual(len(observer.observed), 1)

                keyPath, object, change = observer.observed[0]
                self.assertEqual(keyPath, b"key3".decode("ascii"))
                self.assert_(object is o)
                self.assertEqual(change, {NSKeyValueChangeKindKey: 1})

            finally:
                o.removeObserver_forKeyPath_(observer, b"key3".decode("ascii"))

        def testKVO3(self):
            """
            Check if observations work for python-based keys on ObjC classes
            """
            observer = KeyValueObserver.alloc().init()
            self.assertEqual(observer.observed, [])

            o = KeyValueClass1.alloc().init()
            STUB.setKeyValue_forObject_key_value_(
                2, o, b"key3".decode("ascii"), b"three".decode("ascii")
            )

            # ask for both old and new values in the change dict
            o.addObserver_forKeyPath_options_context_(
                observer,
                b"key3".decode("ascii"),
                NSKeyValueObservingOptionNew | NSKeyValueObservingOptionOld,
                0,
            )
            try:
                STUB.setKeyValue_forObject_key_value_(
                    2, o, b"key3".decode("ascii"), b"drie".decode("ascii")
                )
                self.assertEqual(o.key3, b"drie".decode("ascii"))

                self.assertEqual(len(observer.observed), 1)

                keyPath, object, change = observer.observed[0]
                self.assertEqual(keyPath, b"key3".decode("ascii"))
                self.assert_(object is o)
                self.assertEqual(
                    change,
                    {
                        NSKeyValueChangeKindKey: 1,
                        NSKeyValueChangeNewKey: b"drie".decode("ascii"),
                        NSKeyValueChangeOldKey: b"three".decode("ascii"),
                    },
                )

            finally:
                o.removeObserver_forKeyPath_(observer, b"key3".decode("ascii"))
class PyKeyValueCodingExplicit(TestCase):
    # Same checks as PyKeyValueCoding, but against KeyValueClass1Explicit,
    # which implements the KVC accessor methods itself and stores
    # everything in its private '_values' dict.

    def testValueForKey(self):
        o = KeyValueClass1Explicit.alloc().init()
        o.addMultiple()

        self.assertEqual(STUB.keyValue_forObject_key_(0, o, b"key1".decode("ascii")), 1)
        self.assertEqual(STUB.keyValue_forObject_key_(0, o, b"key2".decode("ascii")), 2)
        self.assertEqual(STUB.keyValue_forObject_key_(0, o, b"key3".decode("ascii")), 3)
        self.assertEqual(
            STUB.keyValue_forObject_key_(0, o, b"key4".decode("ascii")), "4"
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(0, o, b"multiple".decode("ascii")),
            o._values["multiple"],
        )

        self.assertRaises(
            KeyError, STUB.keyValue_forObject_key_, 0, o, b"nokey".decode("ascii")
        )

    def testStoredValueForKey(self):
        o = KeyValueClass1Explicit.alloc().init()
        o.addMultiple()

        self.assertEqual(STUB.keyValue_forObject_key_(2, o, b"key1".decode("ascii")), 1)
        self.assertEqual(STUB.keyValue_forObject_key_(2, o, b"key2".decode("ascii")), 2)
        self.assertEqual(STUB.keyValue_forObject_key_(2, o, b"key3".decode("ascii")), 3)
        self.assertEqual(
            STUB.keyValue_forObject_key_(2, o, b"key4".decode("ascii")), "4"
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(2, o, b"multiple".decode("ascii")),
            o._values["multiple"],
        )

        self.assertRaises(
            KeyError, STUB.keyValue_forObject_key_, 2, o, b"nokey".decode("ascii")
        )

    def testValueForKeyPath(self):
        o = KeyValueClass1Explicit.alloc().init()
        o.addMultiple()

        self.assertEqual(
            STUB.keyValue_forObject_key_(1, o, b"multiple".decode("ascii")),
            o._values["multiple"],
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(1, o, b"multiple.level2".decode("ascii")),
            o._values["multiple"]._values["level2"],
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(
                1, o, b"multiple.level2.level3.keyA".decode("ascii")
            ),
            o._values["multiple"]._values["level2"]._values["level3"]._values["keyA"],
        )
        self.assertEqual(
            STUB.keyValue_forObject_key_(
                1, o, b"multiple.level2.level3.keyB".decode("ascii")
            ),
            o._values["multiple"]._values["level2"]._values["level3"]._values["keyB"],
        )

        self.assertRaises(
            KeyError,
            STUB.keyValue_forObject_key_,
            1,
            o,
            b"multiple.level2.nokey".decode("ascii"),
        )

    @max_os_level("10.5")
    def testValuesForKeys(self):
        o = KeyValueClass1Explicit.alloc().init()

        self.assertEqual(
            STUB.keyValue_forObject_key_(
                3,
                o,
                [
                    b"key1".decode("ascii"),
                    b"key2".decode("ascii"),
                    b"key3".decode("ascii"),
                    b"key4".decode("ascii"),
                ],
            ),
            {
                b"key1".decode("ascii"): 1,
                b"key2".decode("ascii"): 2,
                b"key3".decode("ascii"): 3,
                b"key4".decode("ascii"): b"4".decode("ascii"),
            },
        )

        self.assertRaises(
            KeyError,
            STUB.keyValue_forObject_key_,
            3,
            o,
            [
                b"key1".decode("ascii"),
                b"key3".decode("ascii"),
                b"nosuchkey".decode("ascii"),
            ],
        )

    def testTakeValueForKey(self):
        o = KeyValueClass1Explicit.alloc().init()

        self.assertEqual(o._values["key3"], 3)
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key3".decode("ascii"), b"drie".decode("ascii")
        )
        self.assertEqual(o._values["key3"], b"drie".decode("ascii"))

        self.assertEqual(o._values["key4"], b"4".decode("ascii"))
        # routed through setValue_forKey_, which quadruples key4
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key4".decode("ascii"), b"vier".decode("ascii")
        )
        self.assert_(not hasattr(o, b"key4".decode("ascii")))
        self.assertEqual(o._values["key4"], b"viervierviervier".decode("ascii"))

        o._values["key5"] = 1
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key5".decode("ascii"), b"V".decode("ascii")
        )
        self.assertEqual(o._values["key5"], b"VVVVV".decode("ascii"))

        # new keys land in _values, never as real attributes
        self.assert_(not hasattr(o, b"key9".decode("ascii")))
        self.assert_("key9" not in o._values)
        STUB.setKeyValue_forObject_key_value_(
            0, o, b"key9".decode("ascii"), b"IX".decode("ascii")
        )
        self.assert_(not hasattr(o, b"key9".decode("ascii")))
        self.assert_("key9" in o._values)
        self.assertEqual(o._values["key9"], b"IX".decode("ascii"))

    def testTakeStoredValueForKey(self):
        o = KeyValueClass1Explicit.alloc().init()

        self.assertEqual(o._values["key3"], 3)
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key3".decode("ascii"), b"drie".decode("ascii")
        )
        self.assertEqual(o._values["key3"], b"drie".decode("ascii"))

        self.assertEqual(o._values["key4"], b"4".decode("ascii"))
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key4".decode("ascii"), b"vier".decode("ascii")
        )
        self.assertEqual(o._values["key4"], b"viervierviervier".decode("ascii"))

        o.key5 = 1
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key5".decode("ascii"), b"V".decode("ascii")
        )
        self.assertEqual(o._values["key5"], b"VVVVV".decode("ascii"))

        self.assert_("key9" not in o._values)
        STUB.setKeyValue_forObject_key_value_(
            2, o, b"key9".decode("ascii"), b"IX".decode("ascii")
        )
        self.assert_("key9" in o._values)
        self.assertEqual(o._values["key9"], b"IX".decode("ascii"))

    @max_os_level("10.5")
    def testTakeValuesFromDictionary(self):
        o = KeyValueClass1Explicit.alloc().init()

        self.assertEqual(o._values["key3"], 3)
        self.assertEqual(o._values["key4"], b"4".decode("ascii"))
        o._values["key5"] = 1
        self.assert_("key9" not in o._values)

        STUB.setKeyValue_forObject_key_value_(
            3,
            o,
            None,
            {
                b"key3".decode("ascii"): b"drie".decode("ascii"),
                b"key4".decode("ascii"): b"vier".decode("ascii"),
                b"key5".decode("ascii"): b"V".decode("ascii"),
                b"key9".decode("ascii"): b"IX".decode("ascii"),
            },
        )

        self.assertEqual(o._values["key3"], b"drie".decode("ascii"))
        self.assertEqual(o._values["key4"], b"viervierviervier".decode("ascii"))
        self.assertEqual(o._values["key5"], b"VVVVV".decode("ascii"))
        self.assertEqual(o._values["key9"], b"IX".decode("ascii"))

    @max_os_level("10.5")
    def testTakeValueForKeyPath(self):
        o = KeyValueClass1Explicit.alloc().init()
        o.addMultiple()

        self.assertEqual(
            o._values["multiple"]._values["level2"]._values["level3"]._values["keyA"],
            b"hello".decode("ascii"),
        )
        self.assertEqual(
            o._values["multiple"]._values["level2"]._values["level3"]._values["keyB"],
            b"world".decode("ascii"),
        )

        STUB.setKeyValue_forObject_key_value_(
            1,
            o,
            b"multiple.level2.level3.keyA".decode("ascii"),
            b"KeyAValue".decode("ascii"),
        )
        self.assertEqual(
            o._values["multiple"]._values["level2"]._values["level3"]._values["keyA"],
            b"KeyAValue".decode("ascii"),
        )

        STUB.setKeyValue_forObject_key_value_(
            1, o, b"multiple.level2.level3.keyB".decode("ascii"), 9.999
        )
        self.assertEqual(
            o._values["multiple"]._values["level2"]._values["level3"]._values["keyB"],
            9.999,
        )
class TestBaseExceptions(TestCase):
    """
    Check that NSObject implementation of Key-Value coding raises the
    exception that we expect it to raise.
    """

    def testValueForKey(self):
        # unknown key on a bare NSObject -> KeyError through the bridge
        o = NSObject.alloc().init()

        self.assertRaises(KeyError, o.valueForKey_, b"unknownKey".decode("ascii"))

    def testStoredValueForKey(self):
        o = NSObject.alloc().init()

        self.assertRaises(KeyError, o.storedValueForKey_, b"unknownKey".decode("ascii"))

    def testTakeStoredValue(self):
        # writes to unknown keys must fail the same way
        o = NSObject.alloc().init()

        self.assertRaises(
            KeyError,
            o.takeStoredValue_forKey_,
            b"value".decode("ascii"),
            b"unknownKey".decode("ascii"),
        )
if __name__ == "__main__":
    # main() is star-imported from PyObjCTools.TestSupport above.
    main()
|
archiver | extract_cmd | import argparse
import logging
import os
import stat
import sys
from ..archive import BackupError, BackupOSError
from ..constants import * # NOQA
from ..helpers import (
HardLinkManager,
ProgressIndicatorPercent,
archivename_validator,
remove_surrogates,
)
from ..logger import create_logger
from ..manifest import Manifest
from ._common import build_filter, build_matcher, with_archive, with_repository
logger = create_logger()
class ExtractMixIn:
    """Implements ``borg extract``: the command itself and its CLI parser."""

    @with_repository(compatibility=(Manifest.Operation.READ,))
    @with_archive
    def do_extract(self, args, repository, manifest, archive):
        # Do not edit this docstring: it doubles as the argparse description
        # (see build_parser_extract: description=self.do_extract.__doc__).
        """Extract archive contents"""
        # be restrictive when restoring files, restore permissions later
        if sys.getfilesystemencoding() == "ascii":
            logger.warning(
                'Warning: File system encoding is "ascii", extracting non-ascii filenames will not be supported.'
            )
            if sys.platform.startswith(("linux", "freebsd", "netbsd", "openbsd", "darwin")):
                logger.warning(
                    "Hint: You likely need to fix your locale setup. E.g. install locales and use: LANG=en_US.UTF-8"
                )
        matcher = build_matcher(args.patterns, args.paths)

        progress = args.progress
        output_list = args.output_list
        dry_run = args.dry_run
        stdout = args.stdout
        sparse = args.sparse
        strip_components = args.strip_components
        continue_extraction = args.continue_extraction
        dirs = []
        hlm = HardLinkManager(id_type=bytes, info_type=str)  # hlid -> path

        # renamed from 'filter' to avoid shadowing the builtin
        item_filter = build_filter(matcher, strip_components)
        if progress:
            pi = ProgressIndicatorPercent(msg="%5.1f%% Extracting: %s", step=0.1, msgid="extract")
            pi.output("Calculating total archive size for the progress indicator (might take long for large archives)")
            extracted_size = sum(item.get_size() for item in archive.iter_items(item_filter))
            pi.total = extracted_size
        else:
            pi = None

        for item in archive.iter_items(item_filter, preload=True):
            orig_path = item.path
            if strip_components:
                item.path = os.sep.join(orig_path.split(os.sep)[strip_components:])
            # use the local 'dry_run' consistently (was args.dry_run)
            if not dry_run:
                # Directories whose subtree we have left get their (deferred)
                # permissions restored now.
                # NOTE(review): plain startswith() here can also match a
                # sibling sharing a name prefix ("foo" vs "foobar") —
                # confirm whether item paths make that impossible.
                while dirs and not item.path.startswith(dirs[-1].path):
                    dir_item = dirs.pop(-1)
                    try:
                        archive.extract_item(dir_item, stdout=stdout)
                    except BackupOSError as e:
                        self.print_warning("%s: %s", remove_surrogates(dir_item.path), e)
            if output_list:
                logging.getLogger("borg.output.list").info(remove_surrogates(item.path))
            try:
                if dry_run:
                    archive.extract_item(item, dry_run=True, hlm=hlm, pi=pi)
                else:
                    if stat.S_ISDIR(item.mode):
                        # defer directory attribute restoration until the
                        # subtree has been fully extracted
                        dirs.append(item)
                        archive.extract_item(item, stdout=stdout, restore_attrs=False)
                    else:
                        archive.extract_item(
                            item,
                            stdout=stdout,
                            sparse=sparse,
                            hlm=hlm,
                            pi=pi,
                            continue_extraction=continue_extraction,
                        )
            except (BackupOSError, BackupError) as e:
                self.print_warning("%s: %s", remove_surrogates(orig_path), e)

        if pi:
            pi.finish()

        if not dry_run:
            # restore the attributes of the remaining (outermost) directories
            pi = ProgressIndicatorPercent(
                total=len(dirs),
                msg="Setting directory permissions %3.0f%%",
                msgid="extract.permissions",
            )
            while dirs:
                pi.show()
                dir_item = dirs.pop(-1)
                try:
                    archive.extract_item(dir_item, stdout=stdout)
                except BackupOSError as e:
                    self.print_warning("%s: %s", remove_surrogates(dir_item.path), e)
        for pattern in matcher.get_unmatched_include_patterns():
            self.print_warning("Include pattern '%s' never matched.", pattern)
        if pi:
            # clear progress output
            pi.finish()
        return self.exit_code

    def build_parser_extract(self, subparsers, common_parser, mid_common_parser):
        """Register the ``extract`` subcommand and all of its options."""
        from ._common import define_exclusion_group, process_epilog

        extract_epilog = process_epilog(
            """
        This command extracts the contents of an archive. By default the entire
        archive is extracted but a subset of files and directories can be selected
        by passing a list of ``PATHs`` as arguments. The file selection can further
        be restricted by using the ``--exclude`` option.

        For more help on include/exclude patterns, see the :ref:`borg_patterns` command output.

        By using ``--dry-run``, you can do all extraction steps except actually writing the
        output data: reading metadata and data chunks from the repo, checking the hash/hmac,
        decrypting, decompressing.

        ``--progress`` can be slower than no progress display, since it makes one additional
        pass over the archive metadata.

        .. note::

            Currently, extract always writes into the current working directory ("."),
            so make sure you ``cd`` to the right place before calling ``borg extract``.

            When parent directories are not extracted (because of using file/directory selection
            or any other reason), borg can not restore parent directories' metadata, e.g. owner,
            group, permission, etc.
        """
        )
        subparser = subparsers.add_parser(
            "extract",
            parents=[common_parser],
            add_help=False,
            description=self.do_extract.__doc__,
            epilog=extract_epilog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            help="extract archive contents",
        )
        subparser.set_defaults(func=self.do_extract)
        subparser.add_argument(
            "--list",
            dest="output_list",
            action="store_true",
            help="output verbose list of items (files, dirs, ...)",
        )
        subparser.add_argument(
            "-n",
            "--dry-run",
            dest="dry_run",
            action="store_true",
            help="do not actually change any files",
        )
        subparser.add_argument(
            "--numeric-ids",
            dest="numeric_ids",
            action="store_true",
            help="only obey numeric user and group identifiers",
        )
        subparser.add_argument(
            "--noflags",
            dest="noflags",
            action="store_true",
            help="do not extract/set flags (e.g. NODUMP, IMMUTABLE)",
        )
        subparser.add_argument(
            "--noacls",
            dest="noacls",
            action="store_true",
            help="do not extract/set ACLs",
        )
        subparser.add_argument(
            "--noxattrs",
            dest="noxattrs",
            action="store_true",
            help="do not extract/set xattrs",
        )
        subparser.add_argument(
            "--stdout",
            dest="stdout",
            action="store_true",
            help="write all extracted data to stdout",
        )
        subparser.add_argument(
            "--sparse",
            dest="sparse",
            action="store_true",
            help="create holes in output sparse file from all-zero chunks",
        )
        subparser.add_argument(
            "--continue",
            dest="continue_extraction",
            action="store_true",
            help="continue a previously interrupted extraction of same archive",
        )
        subparser.add_argument(
            "name",
            metavar="NAME",
            type=archivename_validator,
            help="specify the archive name",
        )
        subparser.add_argument(
            "paths",
            metavar="PATH",
            nargs="*",
            type=str,
            help="paths to extract; patterns are supported",
        )
        define_exclusion_group(subparser, strip_components=True)
|
extractor | iwara | # coding: utf-8
from __future__ import unicode_literals
from ..compat import compat_urllib_parse_urlparse
from ..utils import int_or_none, mimetype2ext, remove_end, url_or_none
from .common import InfoExtractor
class IwaraIE(InfoExtractor):
    # Extractor for iwara.tv video pages (including the ecchi. subdomain,
    # which carries adult content and sets age_limit=18).  Videos may also
    # be hosted externally (Google Drive / YouTube), in which case the
    # extraction is delegated via a url_transparent result.
    _VALID_URL = r"https?://(?:www\.|ecchi\.)?iwara\.tv/videos/(?P<id>[a-zA-Z0-9]+)"
    _TESTS = [
        {
            "url": "http://iwara.tv/videos/amVwUl1EHpAD9RD",
            # md5 is unstable
            "info_dict": {
                "id": "amVwUl1EHpAD9RD",
                "ext": "mp4",
                "title": "【MMD R-18】ガールフレンド carry_me_off",
                "age_limit": 18,
            },
        },
        {
            "url": "http://ecchi.iwara.tv/videos/Vb4yf2yZspkzkBO",
            "md5": "7e5f1f359cd51a027ba4a7b7710a50f0",
            "info_dict": {
                "id": "0B1LvuHnL-sRFNXB1WHNqbGw4SXc",
                "ext": "mp4",
                "title": "[3D Hentai] Kyonyu × Genkai × Emaki Shinobi Girls.mp4",
                "age_limit": 18,
            },
            "add_ie": ["GoogleDrive"],
        },
        {
            "url": "http://www.iwara.tv/videos/nawkaumd6ilezzgq",
            # md5 is unstable
            "info_dict": {
                "id": "6liAP9s2Ojc",
                "ext": "mp4",
                "age_limit": 18,
                "title": "[MMD] Do It Again Ver.2 [1080p 60FPS] (Motion,Camera,Wav+DL)",
                "description": "md5:590c12c0df1443d833fbebe05da8c47a",
                "upload_date": "20160910",
                "uploader": "aMMDsork",
                "uploader_id": "UCVOFyOSCyFkXTYYHITtqB7A",
            },
            "add_ie": ["Youtube"],
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # follow redirects: the final hostname decides the age limit
        webpage, urlh = self._download_webpage_handle(url, video_id)

        hostname = compat_urllib_parse_urlparse(urlh.geturl()).hostname
        # ecchi is 'sexy' in Japanese
        age_limit = 18 if hostname.split(".")[0] == "ecchi" else 0

        video_data = self._download_json(
            "http://www.iwara.tv/api/video/%s" % video_id, video_id
        )

        if not video_data:
            # empty API response: the video is hosted externally; hand the
            # embedded iframe URL to the matching extractor
            iframe_url = self._html_search_regex(
                r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1',
                webpage,
                "iframe URL",
                group="url",
            )
            return {
                "_type": "url_transparent",
                "url": iframe_url,
                "age_limit": age_limit,
            }

        title = remove_end(
            self._html_search_regex(r"<title>([^<]+)</title>", webpage, "title"),
            " | Iwara",
        )

        formats = []
        for a_format in video_data:
            format_uri = url_or_none(a_format.get("uri"))
            if not format_uri:
                continue
            format_id = a_format.get("resolution")
            height = int_or_none(
                self._search_regex(r"(\d+)p", format_id, "height", default=None)
            )
            formats.append(
                {
                    "url": self._proto_relative_url(format_uri, "https:"),
                    "format_id": format_id,
                    "ext": mimetype2ext(a_format.get("mime")) or "mp4",
                    "height": height,
                    # width estimated assuming a 16:9 aspect ratio
                    "width": int_or_none(height / 9.0 * 16.0 if height else None),
                    "quality": 1 if format_id == "Source" else 0,
                }
            )

        self._sort_formats(formats)

        return {
            "id": video_id,
            "title": title,
            "age_limit": age_limit,
            "formats": formats,
        }
|
PyObjCTest | test_nsprogressindicator | from AppKit import *
from PyObjCTools.TestSupport import *
class TestNSProgressIndicator(TestCase):
    """Validate NSProgressIndicator constant values and BOOL accessor
    metadata exposed by the PyObjC bridge."""

    def testConstants(self):
        expected_values = [
            (NSProgressIndicatorPreferredThickness, 14),
            (NSProgressIndicatorPreferredSmallThickness, 10),
            (NSProgressIndicatorPreferredLargeThickness, 18),
            (NSProgressIndicatorPreferredAquaThickness, 12),
            (NSProgressIndicatorBarStyle, 0),
            (NSProgressIndicatorSpinningStyle, 1),
        ]
        for actual, expected in expected_values:
            self.assertEqual(actual, expected)

    def testMethods(self):
        # Each (getter, setter) pair wraps an Objective-C BOOL property.
        bool_accessors = [
            (NSProgressIndicator.isIndeterminate,
             NSProgressIndicator.setIndeterminate_),
            (NSProgressIndicator.isBezeled,
             NSProgressIndicator.setBezeled_),
            (NSProgressIndicator.usesThreadedAnimation,
             NSProgressIndicator.setUsesThreadedAnimation_),
            (NSProgressIndicator.isDisplayedWhenStopped,
             NSProgressIndicator.setDisplayedWhenStopped_),
        ]
        for getter, setter in bool_accessors:
            self.assertResultIsBOOL(getter)
            self.assertArgIsBOOL(setter, 0)
# Allow running this test module directly.
if __name__ == "__main__":
    main()
|
extractors | qq_egame | import json
import re
from ..common import *
from ..extractors import VideoExtractor
from ..util import log
from ..util.strings import unescape_html
__all__ = ["qq_egame_download"]
def qq_egame_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
    """Download (or probe) a live stream from egame.qq.com.

    Args:
        url: A live-room URL containing the numeric anchor id.
        output_dir: Directory where the stream is saved.
        merge: Whether to merge stream segments (passed to ffmpeg helper).
        info_only: If True, only print stream info without downloading.
    """
    # Anchor ids are 3+ digit numbers; use a raw string so `\d` is not
    # an invalid string escape (SyntaxWarning on modern Python).
    uid = re.search(r"\d\d\d+", url)
    # Fail loudly instead of crashing with AttributeError on .group()
    # when the URL contains no anchor id.
    if uid is None:
        log.wtf("Can not find anchor id in URL")
    an_url = "https://m.egame.qq.com/live?anchorid={}&".format(uid.group(0))
    page = get_content(an_url)
    server_data = re.search(r"window\.serverData\s*=\s*({.+?});", page)
    if server_data is None:
        log.wtf("Can not find window.server_data")
    json_data = json.loads(server_data.group(1))
    if json_data["anchorInfo"]["data"]["isLive"] == 0:
        log.wtf("Offline...")
    live_info = json_data["liveInfo"]["data"]
    title = "{}_{}".format(
        live_info["profileInfo"]["nickName"], live_info["videoInfo"]["title"]
    )
    # First stream info entry is the default-quality FLV stream.
    real_url = live_info["videoInfo"]["streamInfos"][0]["playUrl"]
    print_info(site_info, title, "flv", float("inf"))
    if not info_only:
        download_url_ffmpeg(
            real_url, title, "flv", params={}, output_dir=output_dir, merge=merge
        )
# you-get extractor registration hooks.
site_info = "egame.qq.com"
download = qq_egame_download
# Live streams have no playlist concept.
download_playlist = playlist_not_supported("qq_egame")
|
migrations | 0002_person | # Generated by Django 2.2.7 on 2020-01-24 18:38
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Person model with JSONB distinct_ids/properties columns,
    linked to a Team."""

    dependencies = [
        ("posthog", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            name="Person",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "distinct_ids",
                    django.contrib.postgres.fields.jsonb.JSONField(default=list),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "properties",
                    django.contrib.postgres.fields.jsonb.JSONField(default=dict),
                ),
                (
                    "team",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="posthog.Team"
                    ),
                ),
            ],
        ),
    ]
|
models | report | """ flagged for moderation """
from bookwyrm.settings import DOMAIN
from django.core.exceptions import PermissionDenied
from django.db import models
from django.utils.translation import gettext_lazy as _
from .base_model import BookWyrmModel
# Report action enums
# Report action enums
# Identifiers stored in ReportAction.action_type (that field's
# max_length is 20, so keep new values within that limit).
COMMENT = "comment"
RESOLVE = "resolve"
REOPEN = "reopen"
MESSAGE_REPORTER = "message_reporter"
MESSAGE_OFFENDER = "message_offender"
USER_SUSPENSION = "user_suspension"
USER_UNSUSPENSION = "user_unsuspension"
USER_DELETION = "user_deletion"
USER_PERMS = "user_perms"
BLOCK_DOMAIN = "block_domain"
APPROVE_DOMAIN = "approve_domain"
DELETE_ITEM = "delete_item"
class Report(BookWyrmModel):
    """reported status or user"""

    # Who filed the report.
    reporter = models.ForeignKey(
        "User", related_name="reporter", on_delete=models.PROTECT
    )
    note = models.TextField(null=True, blank=True)
    # The user being reported; optional because a report may instead
    # target a status or link(s).
    user = models.ForeignKey("User", on_delete=models.PROTECT, null=True, blank=True)
    status = models.ForeignKey(
        "Status",
        null=True,
        blank=True,
        on_delete=models.PROTECT,
    )
    links = models.ManyToManyField("Link", blank=True)
    resolved = models.BooleanField(default=False)

    def raise_not_editable(self, viewer):
        """instead of user being the owner field, it's reporter"""
        if self.reporter == viewer or viewer.has_perm("bookwyrm.moderate_user"):
            return
        raise PermissionDenied()

    def get_remote_id(self):
        # Reports are local moderation objects; their canonical id is an
        # admin settings URL on this instance.
        return f"https://{DOMAIN}/settings/reports/{self.id}"

    def comment(self, user, note):
        """comment on a report"""
        ReportAction.objects.create(
            action_type=COMMENT, user=user, note=note, report=self
        )

    def resolve(self, user):
        """Mark a report as complete"""
        self.resolved = True
        self.save()
        ReportAction.objects.create(action_type=RESOLVE, user=user, report=self)

    def reopen(self, user):
        """Wait! This report isn't complete after all"""
        self.resolved = False
        self.save()
        ReportAction.objects.create(action_type=REOPEN, user=user, report=self)

    @classmethod
    def record_action(cls, report_id: int, action: str, user):
        """Note that someone did something"""
        # Silently no-op when no report is associated with the action.
        if not report_id:
            return
        report = cls.objects.get(id=report_id)
        ReportAction.objects.create(action_type=action, user=user, report=report)

    class Meta:
        """set order by default"""

        ordering = ("-created_date",)
# (value, localized label) choices for ReportAction.action_type.
ReportActionTypes = [
    (COMMENT, _("Comment")),
    (RESOLVE, _("Resolved report")),
    (REOPEN, _("Re-opened report")),
    (MESSAGE_REPORTER, _("Messaged reporter")),
    (MESSAGE_OFFENDER, _("Messaged reported user")),
    (USER_SUSPENSION, _("Suspended user")),
    (USER_UNSUSPENSION, _("Un-suspended user")),
    (USER_PERMS, _("Changed user permission level")),
    (USER_DELETION, _("Deleted user account")),
    (BLOCK_DOMAIN, _("Blocked domain")),
    (APPROVE_DOMAIN, _("Approved domain")),
    (DELETE_ITEM, _("Deleted item")),
]
class ReportAction(BookWyrmModel):
    """updates on a report"""

    # Moderator (or reporter) who performed the action.
    user = models.ForeignKey("User", on_delete=models.PROTECT)
    action_type = models.CharField(
        max_length=20, blank=False, default="comment", choices=ReportActionTypes
    )
    note = models.TextField()
    report = models.ForeignKey(Report, on_delete=models.PROTECT)

    class Meta:
        """sort comments"""

        ordering = ("created_date",)
|
utils | test_utils | """Support utilities for testing scripts.
"""
__copyright__ = "Copyright (C) 2014-2016 Martin Blais"
__license__ = "GNU GPLv2"
import builtins
import collections
import contextlib
import functools
import io
import os
import re
import shutil
import sys
import tempfile
import textwrap
import unittest
from os import path
import click.testing
def nottest(func):
    """Mark *func* so test collectors skip it (sets ``__test__ = False``)."""
    func.__test__ = False
    return func
def find_repository_root(filename=None):
    """Return the path to the repository root.

    Args:
      filename: A string, the name of a file within the repository.
    Returns:
      A string, the root directory.
    """
    if filename is None:
        filename = __file__
    # Support root directory under Bazel.
    bazel_match = re.match(r"(.*\.runfiles/beancount)/", filename)
    if bazel_match:
        return bazel_match.group(1)
    # Walk up the directory tree until a pyproject.toml marker is found.
    current = filename
    while True:
        if path.exists(path.join(current, "pyproject.toml")):
            return current
        parent = path.dirname(current)
        if parent == current:
            # Reached the filesystem root without finding the marker.
            raise ValueError("Failed to find the root directory.")
        current = parent
def find_python_lib():
    """Return the path to the root of the Python libraries.

    Returns:
      A string, the root directory.
    """
    # Three directory levels above this module.
    root = __file__
    for _ in range(3):
        root = path.dirname(root)
    return root
def subprocess_env():
    """Return a dict to use as environment for running subprocesses.

    Returns:
      A dict with PATH and PYTHONPATH configured so subprocesses can find
      our Python interpreter and the repository's runnable binaries.
    """
    path_components = [
        path.dirname(sys.executable),
        path.join(find_repository_root(__file__), "bin"),
        os.environ.get("PATH", "").strip(":"),
    ]
    # Strip leading/trailing colons left by empty components.
    binpath = ":".join(path_components).strip(":")
    return {"PATH": binpath, "PYTHONPATH": find_python_lib()}
@contextlib.contextmanager
def tempdir(delete=True, **kw):
    """A context manager that creates a temporary directory and deletes its
    contents unconditionally once done.

    Args:
      delete: A boolean, true if we want to delete the directory after running.
      **kw: Keyword arguments for mkdtemp.
    Yields:
      A string, the name of the temporary directory created.
    """
    dirname = tempfile.mkdtemp(prefix="beancount-test-tmpdir.", **kw)
    try:
        yield dirname
    finally:
        if delete:
            # ignore_errors: best-effort cleanup, never mask test failures.
            shutil.rmtree(dirname, ignore_errors=True)
def create_temporary_files(root, contents_map):
    """Create a number of temporary files under 'root'.

    This routine is used to initialize the contents of multiple files under a
    temporary directory.

    Args:
      root: A string, the name of the directory under which to create the files.
      contents_map: A dict of relative filenames to their contents. The content
        strings will be automatically dedented for convenience. In addition, the
        string '{root}' in the contents will be automatically replaced by the
        root directory name.
    """
    os.makedirs(root, exist_ok=True)
    for relative_filename, contents in contents_map.items():
        assert not path.isabs(relative_filename)
        filename = path.join(root, relative_filename)
        # Ensure intermediate directories exist before writing the file.
        os.makedirs(path.dirname(filename), exist_ok=True)
        clean_contents = textwrap.dedent(contents.replace("{root}", root))
        with open(filename, "w") as f:
            f.write(clean_contents)
# TODO(blais): Improve this with kwargs instead.
def capture(*attributes):
    """A context manager that captures what's printed to stdout.

    Args:
      *attributes: A tuple of strings, the name of the sys attributes to override
        with StringIO instances.
    Yields:
      A StringIO string accumulator.
    """
    # Default to capturing stdout.  A single name is passed through as a
    # bare string so that patch() yields the accumulator directly rather
    # than a one-element list.
    if len(attributes) == 1:
        target = attributes[0]
    elif not attributes:
        target = "stdout"
    else:
        target = attributes
    return patch(sys, target, io.StringIO)
@contextlib.contextmanager
def patch(obj, attributes, replacement_type):
    """A context manager that temporarily patches an object's attributes.

    All attributes in 'attributes' are saved and replaced by new instances
    of type 'replacement_type'.

    Args:
      obj: The object to patch up.
      attributes: A string or a sequence of strings, the names of attributes to replace.
      replacement_type: A callable to build replacement objects.
    Yields:
      An instance of a list of sequences of 'replacement_type'.
    """
    single = isinstance(attributes, str)
    names = [attributes] if single else attributes
    originals = []
    replacements = []
    for name in names:
        new_value = replacement_type()
        replacements.append(new_value)
        originals.append(getattr(obj, name))
        setattr(obj, name, new_value)
    yield replacements[0] if single else replacements
    # NOTE: intentionally no try/finally, matching the original semantics:
    # attributes stay patched if the managed body raises.
    for name, original in zip(names, originals):
        setattr(obj, name, original)
def docfile(function, **kwargs):
    """A decorator that write the function's docstring to a temporary file
    and calls the decorated function with the temporary filename.  This is
    useful for writing tests.

    Args:
      function: A function to decorate.
    Returns:
      The decorated function.
    """
    contents = kwargs.pop("contents", None)
    allowed = ("buffering", "encoding", "newline", "dir", "prefix", "suffix")

    @functools.wraps(function)
    def new_function(self):
        # Validate NamedTemporaryFile keyword arguments at call time.
        for key in kwargs:
            if key not in allowed:
                raise ValueError("Invalid kwarg to docfile_extra")
        with tempfile.NamedTemporaryFile("w", **kwargs) as file:
            # Explicit contents take precedence over the docstring.
            file.write(textwrap.dedent(contents or function.__doc__))
            file.flush()
            return function(self, file.name)

    # Prevent the docstring (the file payload) from showing as test doc.
    new_function.__doc__ = None
    return new_function
def docfile_extra(**kwargs):
    """
    A decorator identical to @docfile,
    but it also takes kwargs for the temporary file,

    Kwargs:
      e.g. buffering, encoding, newline, dir, prefix, and suffix.
    Returns:
      docfile
    """
    # Pre-bind the temporary-file kwargs; docfile validates them on call.
    return functools.partial(docfile, **kwargs)
def search_words(words, line):
    """Search for a sequence of words in a line.

    Args:
      words: A list of strings, the words to look for, or a space-separated string.
      line: A string, the line to search into.
    Returns:
      A MatchObject, or None.
    """
    word_list = words.split() if isinstance(words, str) else words
    # Require the words to appear in order, each on a word boundary.
    pattern = ".*".join(r"\b{}\b".format(word) for word in word_list)
    return re.search(pattern, line)
class TestTempdirMixin:
    """Mixin for unittest.TestCase classes that provides a fresh temporary
    directory in self.tempdir for each test, removed on teardown."""

    def setUp(self):
        super().setUp()
        # Create a temporary directory.
        self.prefix = self.__class__.__name__
        self.tempdir = tempfile.mkdtemp(prefix="{}.".format(self.prefix))

    def tearDown(self):
        super().tearDown()
        # Clean up the temporary directory.
        shutil.rmtree(self.tempdir)
class TmpFilesTestBase(unittest.TestCase):
    """A test utility base class that creates and cleans up a directory hierarchy.

    This convenience is useful for testing functions that work on files, such as the
    documents tests, or the accounts walk.
    """

    # The list of strings, documents to create.
    # Filenames ending with a '/' will be created as directories.
    TEST_DOCUMENTS = None

    def setUp(self):
        self.tempdir, self.root = self.create_file_hierarchy(self.TEST_DOCUMENTS)

    def tearDown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)

    @staticmethod
    def create_file_hierarchy(test_files, subdir="root"):
        """A test utility that creates a hierarchy of files.

        Args:
          test_files: A list of strings, relative filenames to a temporary root
            directory. If the filename ends with a '/', we create a directory;
            otherwise, we create a regular file.
          subdir: A string, the subdirectory name under the temporary directory
            location, to create the hierarchy under.
        Returns:
          A pair of strings, the temporary directory, and the subdirectory under
          that which hosts the root of the tree.
        """
        tempdir = tempfile.mkdtemp(prefix="beancount-test-tmpdir.")
        root = path.join(tempdir, subdir)
        for filename in test_files:
            abs_filename = path.join(tempdir, filename)
            if filename.endswith("/"):
                # A trailing slash denotes a directory.
                os.makedirs(abs_filename)
            else:
                # Create intermediate directories, then an empty file.
                parent_dir = path.dirname(abs_filename)
                if not path.exists(parent_dir):
                    os.makedirs(parent_dir)
                open(abs_filename, "w").close()
        return tempdir, root
class TestCase(unittest.TestCase):
    """unittest.TestCase with whitespace-tolerant text comparison helpers."""

    def assertLines(self, text1, text2, message=None):
        """Compare the lines of text1 and text2, ignoring whitespace.

        Args:
          text1: A string, the expected text.
          text2: A string, the actual text.
          message: An optional string message in case the assertion fails.
        Raises:
          AssertionError: If the exception fails.
        """
        def normalize(text):
            # Dedent, strip each line, and collapse internal runs of
            # spaces/tabs down to a single space so the comparison is
            # loose about alignment.
            stripped_lines = [
                line.strip()
                for line in textwrap.dedent(text.strip()).splitlines()
            ]
            return [re.sub(" [ \t]*", " ", line) for line in stripped_lines]

        self.assertEqual(normalize(text1), normalize(text2), message)

    @contextlib.contextmanager
    def assertOutput(self, expected_text):
        """Expect text printed to stdout.

        Args:
          expected_text: A string, the text that should have been printed to stdout.
        Raises:
          AssertionError: If the text differs.
        """
        with capture() as oss:
            yield oss
        self.assertLines(textwrap.dedent(expected_text), oss.getvalue())
class ClickTestCase(TestCase):
    """Base class for command-line program test cases."""

    def run_with_args(self, function, *args):
        """Invoke a click command and assert it exits successfully.

        Args:
          function: The click command callable to invoke.
          *args: Command-line arguments to pass to the command.
        Returns:
          The click.testing.Result of the invocation.
        """
        runner = click.testing.CliRunner()
        # Let exceptions propagate so failures show real tracebacks.
        result = runner.invoke(function, args, catch_exceptions=False)
        self.assertEqual(result.exit_code, 0)
        return result
@contextlib.contextmanager
def skipIfRaises(*exc_types):
    """A context manager (or decorator) that skips a test if an exception is raised.

    Args:
      *exc_types: Exception classes that should translate into a skip.
    Yields:
      Nothing, for you to execute the function code.
    Raises:
      SkipTest: if the test raised the expected exception.
    """
    try:
        yield
    except exc_types as exc:
        # Convert the expected failure into a skip; other exceptions
        # propagate unchanged.
        raise unittest.SkipTest(exc)
def make_failing_importer(*removed_module_names):
    """Make an importer that raise an ImportError for some modules.

    Use it like this:

      @mock.patch('builtins.__import__', make_failing_importer('setuptools'))
      def test_...

    Args:
      removed_module_name: The name of the module import that should raise an exception.
    Returns:
      A decorated test decorator.
    """
    blocked = frozenset(removed_module_names)

    def failing_import(name, *args, **kwargs):
        if name in blocked:
            raise ImportError("Could not import {}".format(name))
        # Fall through to the real import machinery for everything else.
        return builtins.__import__(name, *args, **kwargs)

    return failing_import
@contextlib.contextmanager
def environ(varname, newvalue):
    """A context manager which pushes varname's value and restores it later.

    Args:
      varname: A string, the environ variable name.
      newvalue: A string, the desired value.
    """
    oldvalue = os.environ.get(varname, None)
    os.environ[varname] = newvalue
    try:
        yield
    finally:
        # Restore even if the managed body raised, so a failing test
        # cannot leak a modified environment into subsequent tests.
        if oldvalue is not None:
            os.environ[varname] = oldvalue
        else:
            del os.environ[varname]
# A function call's arguments, including its return value.
# This is an improvement onto what mock.call provides.
# That has not the return value normally.
# You can use this to build internal call interceptors.
RCall = collections.namedtuple("RCall", "args kwargs return_value")


def record(fun):
    """Decorates the function to intercept and record all calls and return values.

    Args:
      fun: A callable to be decorated.
    Returns:
      A wrapper function with a .calls attribute, a list of RCall instances.
    """

    @functools.wraps(fun)
    def recorder(*args, **kwargs):
        result = fun(*args, **kwargs)
        # Append after the call so exceptions leave no partial record.
        recorder.calls.append(RCall(args, kwargs, result))
        return result

    recorder.calls = []
    return recorder
|
widgets | channelsmenulistwidget | from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QBrush, QColor, QIcon, QPixmap
from PyQt5.QtWidgets import (
QAbstractItemView,
QAbstractScrollArea,
QAction,
QListWidget,
QListWidgetItem,
)
from tribler.core.components.metadata_store.db.serialization import CHANNEL_TORRENT
from tribler.core.utilities.simpledefs import CHANNEL_STATE
from tribler.gui.network.request_manager import request_manager
from tribler.gui.tribler_action_menu import TriblerActionMenu
from tribler.gui.utilities import connect, get_image_path, tr
def entry_to_tuple(entry):
    """Project a channel entry dict onto a hashable identity/state tuple.

    Used for cheap set-based change detection between query results.
    """
    subscribed = entry.get("subscribed", False)
    state = entry.get("state")
    progress = entry.get("progress")
    return entry["public_key"], entry["id"], subscribed, state, progress
class ChannelListItem(QListWidgetItem):
    """List item for a single channel; renders incomplete (still
    downloading) channels in a dimmed color."""

    # Brush used to gray out channels that are not yet complete/personal.
    loading_brush = QBrush(Qt.darkGray)

    def __init__(self, parent=None, channel_info=None):
        self.channel_info = channel_info
        title = channel_info.get("name")
        QListWidgetItem.__init__(self, title, parent=parent)
        # This is necessary to increase vertical height of the items
        self.setSizeHint(QSize(50, 25))
        if channel_info.get("state") not in (
            CHANNEL_STATE.COMPLETE.value,
            CHANNEL_STATE.PERSONAL.value,
        ):
            self.setForeground(self.loading_brush)

    def setData(self, role, new_value):
        # TODO: call higher-level signal to propagate the change to other widgets
        if role == Qt.EditRole:
            item = self.channel_info
            if item["name"] != new_value:
                # Persist the rename through the metadata REST endpoint.
                request_manager.patch(
                    f"metadata/{item['public_key']}/{item['id']}",
                    data={"title": new_value},
                )
        return super().setData(role, new_value)
class ChannelsMenuListWidget(QListWidget):
    """Sidebar list of subscribed and personal channels, with per-type
    context menus and set-based change detection for refreshes."""

    def __init__(self, parent=None):
        QListWidget.__init__(self, parent=parent)
        self.base_url = "channels"
        self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
        # Items set, used for checking changes
        self.items_set = frozenset()
        self.personal_channel_icon = QIcon(get_image_path("share.png"))
        empty_transparent_image = QPixmap(15, 15)
        empty_transparent_image.fill(QColor(0, 0, 0, 0))
        self.empty_image = QIcon(empty_transparent_image)
        self.foreign_channel_menu = self.create_foreign_menu()
        self.personal_channel_menu = self.create_personal_menu()
        self.setSelectionMode(QAbstractItemView.NoSelection)

    def sizeHint(self):
        count = self.count()
        height = self.sizeHintForRow(0) * count if count else 0
        # !!!ACHTUNG!!!
        # !!! Qt Bug !!!
        # Qt never shrinks QListWidget vertically to less than the size
        # that is required to contain list three items. Even if there a no items.
        # sizeHint is ignored completely, the real minimum size is always at least
        # three items. Also, Qt ignores the overloaded self.maximumHeight method.
        # So, the only way to shrink it is to call setMaximumHeight manually.
        # Qt, I hate you! Why are you doing this to me!?
        self.setMaximumHeight(height)
        return QSize(self.width(), height)

    def contextMenuEvent(self, event):
        # Show the menu matching the type of channel under the cursor.
        item = self.itemAt(event.pos())
        if item is None:
            return
        if item.channel_info["state"] == CHANNEL_STATE.PERSONAL.value:
            self.personal_channel_menu.exec_(self.mapToGlobal(event.pos()))
        else:
            self.foreign_channel_menu.exec_(self.mapToGlobal(event.pos()))

    def create_foreign_menu(self):
        """Context menu for channels the user merely subscribes to."""
        menu = TriblerActionMenu(self)
        unsubscribe_action = QAction(tr("Unsubscribe"), self)
        connect(unsubscribe_action.triggered, self._on_unsubscribe_action)
        menu.addAction(unsubscribe_action)
        return menu

    def create_personal_menu(self):
        """Context menu for the user's own (editable) channels."""
        menu = TriblerActionMenu(self)
        delete_action = QAction(tr("Delete channel"), self)
        connect(delete_action.triggered, self._on_delete_action)
        menu.addAction(delete_action)
        rename_action = QAction(tr("Rename channel"), self)
        connect(rename_action.triggered, self._trigger_name_editor)
        menu.addAction(rename_action)
        return menu

    def _trigger_name_editor(self, checked):
        # Start inline editing; ChannelListItem.setData persists the rename.
        self.editItem(self.currentItem())

    def _on_unsubscribe_action(self, checked):
        self.window().on_channel_unsubscribe(self.currentItem().channel_info)

    def _on_delete_action(self, checked):
        self.window().on_channel_delete(self.currentItem().channel_info)

    def on_query_results(self, response):
        """Rebuild the list from a channels query response dict."""
        channels = response.get("results")
        if channels is None:
            return
        self.clear()
        # Personal channels sort before foreign ones.
        for channel_info in sorted(
            channels, key=lambda x: x.get("state") != "Personal"
        ):
            item = ChannelListItem(channel_info=channel_info)
            self.addItem(item)
            # ACHTUNG! Qt bug prevents moving this thing into ChannelListItem !
            if channel_info.get("state") == CHANNEL_STATE.PERSONAL.value:
                item.setFlags(
                    Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
                )
                item.setIcon(self.personal_channel_icon)
            else:
                # We assign a transparent icon to foreign channels to align
                # their text with the personal ones
                item.setIcon(self.empty_image)
            tooltip_text = channel_info["name"] + "\n" + channel_info["state"]
            if channel_info.get("progress"):
                tooltip_text += f" {int(float(channel_info['progress']) * 100)}%"
            item.setToolTip(tooltip_text)
        # Remember the identity/state tuples for reload_if_necessary().
        self.items_set = frozenset(
            entry_to_tuple(channel_info) for channel_info in channels
        )

    def load_channels(self):
        """Fetch subscribed channels from the REST API."""
        request_manager.get(
            self.base_url,
            self.on_query_results,
            url_params={"subscribed": True, "last": 1000},
        )

    def reload_if_necessary(self, changed_entries):
        # Compare the state changes in the changed entries list to our current list
        # and update the list if necessary
        changeset = frozenset(
            entry_to_tuple(entry)
            for entry in changed_entries
            if entry.get("state") == "Deleted" or entry.get("type") == CHANNEL_TORRENT
        )
        need_update = not self.items_set.issuperset(changeset)
        if need_update:
            self.load_channels()
|
upload | views | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from io import BytesIO
import PyPDF2
import requests
import wand.exceptions
import wand.image
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.files.base import ContentFile
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from jsonview.decorators import json_view
from papers.user import is_authenticated
from PyPDF2.utils import PyPdfError
from ratelimit.decorators import ratelimit
from requests.packages.urllib3.exceptions import HTTPError, ReadTimeoutError
from upload.forms import AjaxUploadForm, UrlDownloadForm
from upload.models import MAX_ORIG_NAME_LENGTH, THUMBNAIL_MAX_WIDTH, UploadedPDF
# AJAX upload
@json_view
@require_POST
@user_passes_test(is_authenticated)
@ratelimit(key="ip", rate="300/d")
def handleAjaxUpload(request):
    """Handle an AJAX PDF upload.

    Returns the save_pdf() status dict on success, or (errors, 403)
    when the form or the PDF file is invalid.
    """
    form = AjaxUploadForm(request.POST, request.FILES)
    if form.is_valid():
        # We read the whole file in memory, which
        # is reasonably safe because we know it's not too big
        pdf_file = request.FILES["upl"].read()
        orig_name = request.FILES["upl"].name
        status = save_pdf(request.user, orig_name, pdf_file)
        if status["status"] == "error":
            # Surface the error under the field name the upload widget expects.
            status["upl"] = status["message"]
            return status, 403
        return status
    else:
        return form.errors, 403
def make_thumbnail(pdf_blob):
    """
    Takes a PDF file (represented as a string) and returns a pair:
    - the number of pages
    - a thumbnail of its first page in PNG (as a string again),
    or None if anything failed.
    """
    try:
        # Resolution (DPI) needed for an A4-width page (21 cm) to render
        # at least THUMBNAIL_MAX_WIDTH pixels wide.
        resolution = int(THUMBNAIL_MAX_WIDTH / (21 / 2.54)) + 1
        num_pages = None
        try:  # We try to extract the first page of the PDF
            orig_pdf = BytesIO(pdf_blob)
            reader = PyPDF2.PdfFileReader(orig_pdf)
            num_pages = reader.getNumPages()
            # An unencrypted PDF with zero pages is invalid: give up.
            if not reader.isEncrypted and num_pages == 0:
                return
            writer = PyPDF2.PdfFileWriter()
            writer.addPage(reader.getPage(0))
            first_page = BytesIO()
            writer.write(first_page)
        except PyPdfError:
            # PyPDF2 failed (maybe it believes the file is encrypted…)
            # We try to convert the file with ImageMagick (wand) anyway,
            # rendering the whole PDF as we have not been able to
            # select the first page
            pass
        # We render the PDF
        # NOTE(review): the full blob is rendered here even when PyPDF2
        # succeeded above — `first_page` is never passed to wand; confirm
        # whether that is intended.
        with wand.image.Image(
            blob=pdf_blob, format="pdf", resolution=resolution
        ) as image:
            if image.height == 0 or image.width == 0:
                return
            if num_pages is None:
                # PyPDF2 could not count pages; use wand's page count.
                num_pages = len(image.sequence)
            if num_pages == 0:
                return
            image = wand.image.Image(image=image.sequence[0])
            image.format = "png"
            return (num_pages, image.make_blob())
    except wand.exceptions.WandException:
        # Wand failed: we consider the PDF file as invalid
        pass
    except ValueError:
        pass
def save_pdf(user, orig_name, pdf_blob):
    """
    Given a User and a PDF file represented as a stream,
    create the UploadedPDF object.

    :returns: the status context telling whether the operation has succeded.
    """
    response = {"status": "error"}
    # Check that the file is a valid PDF by extracting the first page
    res = make_thumbnail(pdf_blob)
    if res is None:
        response["message"] = _("Invalid PDF file.")
        return response
    num_pages, png_blob = res
    # Otherwise we save the file!
    upload = UploadedPDF(
        user=user, num_pages=num_pages, orig_name=orig_name[:MAX_ORIG_NAME_LENGTH]
    )
    f = ContentFile(pdf_blob)
    thumbnail_file = ContentFile(png_blob)
    upload.file.save("document.pdf", f)
    upload.thumbnail.save("thumbnail.png", thumbnail_file)
    upload.save()
    response = {
        "status": "success",
        "size": round(len(pdf_blob) / 1024 / 1024, 2),  # Size in MB
        "num_pages": num_pages,
        "thumbnail": upload.thumbnail.url,
        "file_id": upload.id,
    }
    return response
@json_view
@require_POST
@user_passes_test(is_authenticated)
@ratelimit(key="ip", rate="300/d")
def handleUrlDownload(request):
    """Download a PDF from a user-supplied URL and store it.

    Returns the save_pdf() status dict on success, or (response, 4xx)
    when validation or the download fails.
    """
    response = {"status": "error"}
    form = UrlDownloadForm(request.POST)
    if not form.is_valid():
        response["message"] = _("Invalid form.")
        return response, 400
    content = None
    try:
        r = requests.get(
            form.cleaned_data["url"],
            timeout=settings.URL_DEPOSIT_DOWNLOAD_TIMEOUT,
            stream=True,
        )
        r.raise_for_status()
        # Read one byte more than the limit so an over-sized file is
        # detectable without downloading it entirely.
        content = r.raw.read(settings.DEPOSIT_MAX_FILE_SIZE + 1, decode_content=False)
        if len(content) > settings.DEPOSIT_MAX_FILE_SIZE:
            response["message"] = _("File too large.")
        # BUGFIX: the Content-Type header may be absent; default to ""
        # so the membership test below cannot raise TypeError on None.
        content_type = r.headers.get("content-type") or ""
        if "text/html" in content_type:
            response["message"] = _(  # Left as one line for compatibility purposes
                "Invalid content type: this link points to a web page, we need a direct link to a PDF file."
            )
    except requests.exceptions.SSLError:
        response["message"] = _("Invalid SSL certificate on the remote server.")
    except requests.exceptions.Timeout:
        response["message"] = _("Invalid URL (server timed out).")
    except requests.exceptions.RequestException:
        response["message"] = _("Invalid URL.")
    except ReadTimeoutError:
        response["message"] = _("Invalid URL (server timed out).")
    except HTTPError:
        response["message"] = _("Invalid URL.")
    if "message" in response:
        return response, 403
    orig_name = form.cleaned_data["url"]
    response = save_pdf(request.user, orig_name, content)
    if response["status"] == "error":
        return response, 403
    return response
|
wiki | actions | from __future__ import annotations
from typing import Any, cast
from abilian.sbe.apps.communities.actions import CommunityEndpoint
from abilian.sbe.apps.communities.security import is_manager
from abilian.services import get_service
from abilian.services.security import Admin, SecurityService
from abilian.web.action import Action, FAIcon, ModalActionMixin, actions
from flask import url_for
from flask.blueprints import BlueprintSetupState
from flask_babel import lazy_gettext as _l
from flask_login import current_user
class WikiPageAction(Action):
    """Action bound to a single wiki page; requires a page object in context."""

    Endpoint = CommunityEndpoint

    def pre_condition(self, context: dict[str, Any]) -> bool:
        # Only offer page-level actions when a page object is present.
        page = context.get("object")
        return bool(page)

    def url(self, context=None):
        # A statically configured URL (e.g. "#modal-delete") wins.
        if self._url:
            return self._url
        else:
            # Build the community endpoint URL for the current page title.
            page = context.get("object")
            kw = self.endpoint.get_kwargs()
            kw["title"] = page.title
            return url_for(self.endpoint.name, **kw)
def is_admin(context):
    """Return True if the current user has the Admin role on the
    context's object."""
    security = cast(SecurityService, get_service("security"))
    return security.has_role(current_user, Admin, object=context.get("object"))
class WikiPageModalAction(ModalActionMixin, WikiPageAction):
    """WikiPageAction rendered as a modal dialog trigger."""

    pass
class WikiAction(Action):
    """Wiki-wide action (not tied to a specific page)."""

    Endpoint = CommunityEndpoint
# Declarative list of all wiki actions, registered by register_actions().
# "wiki:page" actions operate on a single page; "wiki:global" actions
# apply to the wiki as a whole.
_actions = (
    WikiPageAction(
        "wiki:page",
        "page_viewers",
        _l("Readers list"),
        icon="user",
        # Only community managers may see the readers list.
        condition=lambda ctx: is_manager(context=ctx),
        endpoint=".page_viewers",
    ),
    WikiPageAction("wiki:page", "view", _l("View"), endpoint=".page", icon="eye-open"),
    WikiPageAction(
        "wiki:page", "edit", _l("Edit"), endpoint=".page_edit", icon="pencil"
    ),
    WikiPageModalAction(
        "wiki:page",
        "upload_attachment",
        _l("Upload an attachment"),
        url="#upload-files",
        icon="plus",
    ),
    WikiPageAction(
        "wiki:page",
        "source",
        _l("Source"),
        endpoint=".page_source",
        icon=FAIcon("code"),
    ),
    WikiPageAction(
        "wiki:page", "changes", _l("Changes"), endpoint=".page_changes", icon="time"
    ),
    WikiPageModalAction(
        "wiki:page", "delete", _l("Delete"), url="#modal-delete", icon="trash"
    ),
    WikiAction("wiki:global", "new", _l("New page"), endpoint=".page_new", icon="plus"),
    WikiAction(
        "wiki:global", "pages", _l("All pages"), endpoint=".wiki_pages", icon="list"
    ),
    WikiAction(
        "wiki:global",
        "help",
        _l("Syntax help"),
        endpoint=".wiki_help",
        icon="info-sign",
    ),
)
def register_actions(state: BlueprintSetupState):
    """Register the wiki actions with the app's action registry, if installed."""
    if not actions.installed(state.app):
        return
    # actions.register needs an application context to resolve endpoints.
    with state.app.app_context():
        actions.register(*_actions)
|
update | update_engine_descriptions | #!/usr/bin/env python
import json
import sys
from urllib.parse import quote, urlparse
import detect_language
import searx
import searx.poolrequests
import searx.search
from lxml.html import fromstring
from searx.engines.wikidata import send_wikidata_query
from searx.utils import extract_text
# SPARQL query: for each given wikidata item, find the titles of the
# Wikipedia articles about it in the configured languages.
SPARQL_WIKIPEDIA_ARTICLE = """
SELECT DISTINCT ?item ?name
WHERE {
VALUES ?item { %IDS% }
?article schema:about ?item ;
schema:inLanguage ?lang ;
schema:name ?name ;
schema:isPartOf [ wikibase:wikiGroup "wikipedia" ] .
FILTER(?lang in (%LANGUAGES_SPARQL%)) .
FILTER (!CONTAINS(?name, ':')) .
}
"""

# SPARQL query: fetch the wikidata descriptions of the given items in
# the configured languages.
SPARQL_DESCRIPTION = """
SELECT DISTINCT ?item ?itemDescription
WHERE {
VALUES ?item { %IDS% }
?item schema:description ?itemDescription .
FILTER (lang(?itemDescription) in (%LANGUAGES_SPARQL%))
}
ORDER BY ?itemLang
"""

# Locales configured for the searx UI.
LANGUAGES = searx.settings["locales"].keys()
# The same languages rendered as quoted SPARQL literals, e.g. "'en', 'fr'"
# (region suffixes like "_US" are dropped).
LANGUAGES_SPARQL = ", ".join(set(map(lambda l: repr(l.split("_")[0]), LANGUAGES)))
# Filled in later with the wikidata ids to query (see initialize()).
IDS = None

# engine name -> {lang: [description, source]}
descriptions = {}
# wikidata id -> engine name, populated in initialize()
wd_to_engine_name = {}
def normalize_description(description):
    """Replace ASCII control characters 0-30 with spaces and collapse
    all whitespace runs into single spaces."""
    for code in range(0, 31):
        description = description.replace(chr(code), " ")
    # split() drops leading/trailing whitespace and splits on runs.
    return " ".join(description.split())
def update_description(engine_name, lang, description, source, replace=True):
    """Store a normalized description for an engine/language pair.

    When replace is False, an existing entry for the language is kept.
    """
    entry = descriptions[engine_name]
    if replace or lang not in entry:
        entry[lang] = [normalize_description(description), source]
def get_wikipedia_summary(language, pageid):
    """Fetch the plain-text summary of a Wikipedia page.

    Uses the REST v1 "page/summary" endpoint and returns its "extract" field,
    or None when the request or the JSON decoding fails.
    """
    search_url = "https://{language}.wikipedia.org/api/rest_v1/page/summary/{title}"
    url = search_url.format(title=quote(pageid), language=language)
    try:
        response = searx.poolrequests.get(url)
        response.raise_for_status()
        api_result = json.loads(response.text)
        return api_result.get("extract")
    # BUGFIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; best-effort behavior is kept for real errors.
    except Exception:
        return None
def detect_language(text):
    """Return the detected language code of *text*, or None when unreliable.

    NOTE(review): ``cld3`` is never imported in this file (the top-level
    import is ``import detect_language``), so calling this raises NameError
    unless cld3 is provided elsewhere — the import line likely should be
    ``import cld3``. Also note this function shadows that imported module
    name. TODO confirm the intended import.
    """
    r = cld3.get_language(str(text))  # pylint: disable=E1101
    # Only trust high-confidence, reliable detections.
    if r is not None and r.probability >= 0.98 and r.is_reliable:
        return r.language
    return None
def get_website_description(url, lang1, lang2=None):
    """Fetch *url* and extract a (language, description) pair from its HTML.

    The description comes from <meta name="description">, falling back to
    <meta property="og:description"> and then to <title>. The language is
    detected from the description text, falling back to the page's html@lang
    attribute, then *lang1*, then "en".

    Args:
        url: website URL to fetch.
        lang1: preferred language, sent as Accept-Language (may be None).
        lang2: secondary language for Accept-Language (may be None).

    Returns:
        (lang, description); (None, None) when the request fails.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "DNT": "1",
        "Upgrade-Insecure-Requests": "1",
        "Sec-GPC": "1",
        "Cache-Control": "max-age=0",
    }
    if lang1 is not None:
        lang_list = [lang1]
        if lang2 is not None:
            lang_list.append(lang2)
        headers["Accept-Language"] = f'{",".join(lang_list)};q=0.8'
    try:
        response = searx.poolrequests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception:
        return (None, None)
    try:
        html = fromstring(response.text)
    except ValueError:
        # lxml refuses str input that carries an explicit encoding declaration.
        html = fromstring(response.content)
    description = extract_text(
        html.xpath('/html/head/meta[@name="description"]/@content')
    )
    if not description:
        description = extract_text(
            html.xpath('/html/head/meta[@property="og:description"]/@content')
        )
    if not description:
        description = extract_text(html.xpath("/html/head/title"))
    lang = extract_text(html.xpath("/html/@lang"))
    # BUGFIX: this function is called with lang1=None (see
    # fetch_website_description); "len(lang1)" raised TypeError in that case.
    if lang is None and lang1:
        lang = lang1
    lang = detect_language(description) or lang or "en"
    lang = lang.split("_")[0]
    lang = lang.split("-")[0]
    return (lang, description)
def initialize():
    """Initialize the searx engines and build the wikidata-id lookup tables."""
    global descriptions, wd_to_engine_name, IDS
    searx.search.initialize()
    for engine_name, engine in searx.engines.engines.items():
        descriptions[engine_name] = {}
        wikidata_id = getattr(engine, "about", {}).get("wikidata_id")
        if wikidata_id is not None:
            wd_to_engine_name.setdefault(wikidata_id, set()).add(engine_name)
    # SPARQL "VALUES" list of all collected wikidata ids.
    IDS = " ".join("wd:" + wd_id for wd_id in wd_to_engine_name)
def fetch_wikidata_descriptions():
    """Fetch engine descriptions from Wikidata.

    Runs SPARQL_DESCRIPTION for all collected wikidata ids and records every
    multi-word description via update_description().
    """
    # NOTE: the previous "global IDS" was removed — IDS is only read here,
    # so the statement had no effect and was misleading.
    result = send_wikidata_query(
        SPARQL_DESCRIPTION.replace("%IDS%", IDS).replace(
            "%LANGUAGES_SPARQL%", LANGUAGES_SPARQL
        )
    )
    if result is not None:
        for binding in result["results"]["bindings"]:
            wikidata_id = binding["item"]["value"].replace(
                "http://www.wikidata.org/entity/", ""
            )
            lang = binding["itemDescription"]["xml:lang"]
            description = binding["itemDescription"]["value"]
            if " " in description:  # skip unique word description (like "website")
                for engine_name in wd_to_engine_name[wikidata_id]:
                    update_description(engine_name, lang, description, "wikidata")
def fetch_wikipedia_descriptions():
    """Fetch engine descriptions from Wikipedia article summaries.

    Runs SPARQL_WIKIPEDIA_ARTICLE to map wikidata ids to article titles and
    records every multi-word summary via update_description().
    """
    # NOTE: the previous "global IDS" was removed — IDS is only read here,
    # so the statement had no effect and was misleading.
    result = send_wikidata_query(
        SPARQL_WIKIPEDIA_ARTICLE.replace("%IDS%", IDS).replace(
            "%LANGUAGES_SPARQL%", LANGUAGES_SPARQL
        )
    )
    if result is not None:
        for binding in result["results"]["bindings"]:
            wikidata_id = binding["item"]["value"].replace(
                "http://www.wikidata.org/entity/", ""
            )
            lang = binding["name"]["xml:lang"]
            pageid = binding["name"]["value"]
            description = get_wikipedia_summary(lang, pageid)
            # Skip failed fetches and one-word summaries.
            if description is not None and " " in description:
                for engine_name in wd_to_engine_name[wikidata_id]:
                    update_description(engine_name, lang, description, "wikipedia")
def normalize_url(url):
    """Reduce a search URL to the root URL of its website.

    "{language}" placeholders are pinned to "en", path/params/query/fragment
    are stripped, and a leading "api." subdomain is dropped.
    """
    parsed = urlparse(url.replace("{language}", "en"))
    root = parsed._replace(path="/", params="", query="", fragment="").geturl()
    return root.replace("https://api.", "https://")
def fetch_website_description(engine_name, website):
    """Derive descriptions for one engine by scraping its website.

    First fetches the site without language preference, then probes a fixed
    set of request languages until the site stops returning localized
    descriptions.
    """
    default_lang, default_description = get_website_description(website, None, None)
    if default_lang is None or default_description is None:
        return
    engine_descriptions = descriptions[engine_name]
    if default_lang not in engine_descriptions:
        engine_descriptions[default_lang] = [
            normalize_description(default_description),
            website,
        ]
    for request_lang in ("en-US", "es-US", "fr-FR", "zh", "ja", "ru", "ar", "ko"):
        short_lang = request_lang.split("-")[0]
        if short_lang in engine_descriptions:
            continue
        lang, desc = get_website_description(website, request_lang, short_lang)
        if desc is None or desc == default_description:
            # The site does not localize its description; stop probing.
            break
        update_description(engine_name, lang, desc, website, replace=False)
def fetch_website_descriptions():
    """Derive descriptions from the engines' websites.

    Uses engine.about["website"] when present, otherwise falls back to the
    normalized search_url / base_url of the engine.
    """
    for engine_name, engine in searx.engines.engines.items():
        website = getattr(engine, "about", {}).get("website")
        if website is None:
            # BUGFIX: getattr() without a default raised AttributeError for
            # engines that define no search_url/base_url, and normalize_url
            # would crash on a None value.
            search_url = getattr(engine, "search_url", None)
            if search_url is not None:
                website = normalize_url(search_url)
        if website is None:
            base_url = getattr(engine, "base_url", None)
            if base_url is not None:
                website = normalize_url(base_url)
        if website is not None:
            fetch_website_description(engine_name, website)
def main():
    """Collect engine descriptions from all sources and dump them as JSON."""
    initialize()
    fetch_wikidata_descriptions()
    fetch_wikipedia_descriptions()
    fetch_website_descriptions()
    output = json.dumps(
        descriptions, indent=1, separators=(",", ":"), ensure_ascii=False
    )
    sys.stdout.write(output)


if __name__ == "__main__":
    main()
|
misc | dmg_settings | import os.path
import plistlib
# dmgbuild -s settings.py -D app=QuodLibet.app "Quod Libet" QuodLibet.dmg
application = defines["app"]
appname = os.path.basename(application)
def icon_from_app(app_path):
    """Return the path of the .icns icon inside the given .app bundle.

    Reads CFBundleIconFile from the bundle's Info.plist; when the plist value
    has no extension, ".icns" is assumed.
    """
    contents = os.path.join(app_path, "Contents")
    with open(os.path.join(contents, "Info.plist"), "rb") as fileobj:
        info = plistlib.load(fileobj)
    name, ext = os.path.splitext(info["CFBundleIconFile"])
    return os.path.join(contents, "Resources", name + (ext or ".icns"))
format = "UDBZ"
size = "250M"
files = [application]
symlinks = {"Applications": "/Applications"}
badge_icon = icon_from_app(application)
icon_locations = {
appname: (140, 120),
"Applications": (500, 120),
}
background = "builtin-arrow"
window_rect = ((100, 100), (640, 280))
default_view = "icon-view"
show_icon_preview = False
include_icon_view_settings = "auto"
include_list_view_settings = "auto"
arrange_by = None
grid_offset = (0, 0)
grid_spacing = 100
scroll_position = (0, 0)
label_pos = "bottom"
text_size = 16
icon_size = 128
|
api | tagged_item | from django.db.models import Prefetch, Q, QuerySet
from posthog.api.routing import StructuredViewSetMixin
from posthog.constants import AvailableFeature
from posthog.models import Tag, TaggedItem, User
from posthog.models.tag import tagify
from rest_framework import response, serializers, status, viewsets
from rest_framework.viewsets import GenericViewSet
class TaggedItemSerializerMixin(serializers.Serializer):
    """
    Serializer mixin that resolves appropriate response for tags depending on license.
    """

    tags = serializers.ListField(required=False)

    def _is_licensed(self):
        """Whether the requesting user's organization may use tagging."""
        return (
            "request" in self.context
            and not self.context["request"].user.is_anonymous
            and self.context["request"].user.organization.is_feature_available(
                AvailableFeature.TAGGING
            )
        )

    def _attempt_set_tags(self, tags, obj, force_create=False):
        """Sync ``obj``'s tagged items so they match ``tags`` exactly.

        Creates missing Tag/TaggedItem rows, removes stale ones, garbage
        collects team tags that are no longer referenced, and caches the
        result on ``obj.prefetched_tags`` for to_representation().
        """
        if not force_create and not self._is_licensed() and tags is not None:
            # Silently fail on updating tags so that entire request isn't blocked
            return
        if not obj or tags is None:
            # If the object hasn't been created yet, this method will be called again on the create method.
            return

        # Normalize and dedupe tags
        deduped_tags = list({tagify(t) for t in tags})
        tagged_item_objects = []

        # Create tags
        for tag in deduped_tags:
            tag_instance, _ = Tag.objects.get_or_create(name=tag, team_id=obj.team_id)
            tagged_item_instance, _ = obj.tagged_items.get_or_create(
                tag_id=tag_instance.id
            )
            tagged_item_objects.append(tagged_item_instance)

        # Delete tags that are missing
        obj.tagged_items.exclude(tag__name__in=deduped_tags).delete()

        # Cleanup tags that aren't used by team
        Tag.objects.filter(
            Q(team_id=obj.team_id) & Q(tagged_items__isnull=True)
        ).delete()

        obj.prefetched_tags = tagged_item_objects

    def to_representation(self, obj):
        """Add the "tags" key; always [] for unlicensed organizations."""
        # MODERNIZED: zero-argument super() instead of super(Class, self).
        ret = super().to_representation(obj)
        ret["tags"] = []
        if self._is_licensed():
            if hasattr(obj, "prefetched_tags"):
                ret["tags"] = [p.tag.name for p in obj.prefetched_tags]
            else:
                ret["tags"] = (
                    list(obj.tagged_items.values_list("tag__name", flat=True))
                    if obj.tagged_items
                    else []
                )
        return ret

    def create(self, validated_data):
        """Create the object, then attach the requested tags."""
        validated_data.pop("tags", None)
        instance = super().create(validated_data)
        self._attempt_set_tags(self.initial_data.get("tags"), instance)
        return instance

    def update(self, instance, validated_data):
        """Update the object, then sync the requested tags."""
        instance = super().update(instance, validated_data)
        self._attempt_set_tags(self.initial_data.get("tags"), instance)
        return instance
def is_licensed_for_tagged_items(user: User) -> bool:
    """Whether *user* belongs to an organization licensed for tagging."""
    if user.is_anonymous:
        return False
    # Resolving the organization triggers an extra query.
    organization = user.organization
    if organization is None:
        return False
    return organization.is_feature_available(AvailableFeature.TAGGING)
class TaggedItemViewSetMixin(viewsets.GenericViewSet):
    """Viewset mixin that prefetches tags for licensed organizations."""

    def is_licensed(self):
        """Whether the requesting user's organization may use tagging."""
        return is_licensed_for_tagged_items(self.request.user)  # type: ignore

    def prefetch_tagged_items_if_available(self, queryset: QuerySet) -> QuerySet:
        """Attach tags as ``prefetched_tags`` when the user is licensed."""
        if not self.is_licensed():
            return queryset
        tag_queryset = TaggedItem.objects.select_related("tag")
        return queryset.prefetch_related(
            Prefetch("tagged_items", queryset=tag_queryset, to_attr="prefetched_tags")
        )

    def get_queryset(self):
        base_queryset = super().get_queryset()
        return self.prefetch_tagged_items_if_available(base_queryset)
class TaggedItemSerializer(serializers.Serializer):
    """Serializes a TaggedItem as the name of its tag."""

    tag = serializers.SerializerMethodField()

    def get_tag(self, obj: TaggedItem) -> str:
        """Return the related tag's name."""
        tag_object = obj.tag
        return tag_object.name
class TaggedItemViewSet(StructuredViewSetMixin, GenericViewSet):
    """Read-only endpoint listing the distinct tag names of a team."""

    serializer_class = TaggedItemSerializer
    queryset = Tag.objects.none()

    def list(self, request, *args, **kwargs) -> response.Response:
        """Return the team's tag names, or 402 when tagging is unlicensed."""
        if not is_licensed_for_tagged_items(self.request.user):  # type: ignore
            return response.Response([], status=status.HTTP_402_PAYMENT_REQUIRED)
        tag_names = (
            Tag.objects.filter(team=self.team)
            .values_list("name", flat=True)
            .distinct()
        )
        return response.Response(tag_names)
|
PyObjCTest | test_nsfilecoordinator | import Foundation
from PyObjCTools.TestSupport import *
class TestSFileCoordinator(TestCase):
    """Checks NSFileCoordinator constants and method argument metadata."""

    @min_os_level("10.7")
    def testConstants(self):
        # (constant, expected bitmask) pairs, in declaration order.
        expected = [
            (Foundation.NSFileCoordinatorReadingWithoutChanges, 1 << 0),
            (Foundation.NSFileCoordinatorReadingResolvesSymbolicLink, 1 << 1),
            (Foundation.NSFileCoordinatorWritingForDeleting, 1 << 0),
            (Foundation.NSFileCoordinatorWritingForMoving, 1 << 1),
            (Foundation.NSFileCoordinatorWritingForMerging, 1 << 2),
            (Foundation.NSFileCoordinatorWritingForReplacing, 1 << 3),
        ]
        for value, expected_value in expected:
            self.assertEqual(value, expected_value)

    @min_os_level("10.7")
    def testMethods(self):
        cls = Foundation.NSFileCoordinator
        # (method, out-argument index, block-argument index, block signature)
        cases = [
            (cls.coordinateReadingItemAtURL_options_error_byAccessor_, 2, 3, b"v@"),
            (cls.coordinateWritingItemAtURL_options_error_byAccessor_, 2, 3, b"v@"),
            (
                cls.coordinateReadingItemAtURL_options_writingItemAtURL_options_error_byAccessor_,
                4,
                5,
                b"v@@",
            ),
            (
                cls.coordinateWritingItemAtURL_options_writingItemAtURL_options_error_byAccessor_,
                4,
                5,
                b"v@@",
            ),
            # FIXME: Cannot represent this completion handler!
            (
                cls.prepareForReadingItemsAtURLs_options_writingItemsAtURLs_options_error_byAccessor_,
                4,
                5,
                b"v@?",
            ),
        ]
        for method, out_index, block_index, signature in cases:
            self.assertArgIsOut(method, out_index)
            self.assertArgIsBlock(method, block_index, signature)


if __name__ == "__main__":
    main()
|
femexamples | rc_wall_2d | # ***************************************************************************
# * Copyright (c) 2019 Bernd Hahnebach <bernd@bimstatik.org> *
# * Copyright (c) 2020 Sudhanshu Dubey <sudhanshu.thethunder@gmail.com *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import Fem
import FreeCAD
import ObjectsFem
import Part
from FreeCAD import Vector as vec
from Part import makeLine as ln
from . import manager
from .manager import get_meshname, init_doc
def get_information():
    """Return the metadata describing this FEM example."""
    info = {
        "name": "RC Wall 2D",
        "meshtype": "face",
        "meshelement": "Tria6",
        "constraints": ["fixed", "force", "displacement"],
        "solvers": ["calculix", "ccxtools"],
        "material": "reinforced",
        "equations": ["mechanical"],
    }
    return info
def get_explanation(header=""):
return (
header
+ """
To run the example from Python console use:
from femexamples.rc_wall_2d import setup
setup()
See forum topic post:
https://forum.freecad.org/viewtopic.php?f=18&t=33106&start=80#p296469
example from Harry's epic topic: Concrete branch ready for testing
"""
)
def setup(doc=None, solvertype="ccxtools"):
# init FreeCAD document
if doc is None:
doc = init_doc()
# explanation object
# just keep the following line and change text string in get_explanation method
manager.add_explanation_obj(
doc, get_explanation(manager.get_header(get_information()))
)
# geometric object
v1 = vec(0, -2000, 0)
v2 = vec(500, -2000, 0)
v3 = vec(500, 0, 0)
v4 = vec(3500, 0, 0)
v5 = vec(3500, -2000, 0)
v6 = vec(4000, -2000, 0)
v7 = vec(4000, 2000, 0)
v8 = vec(0, 2000, 0)
l1 = ln(v1, v2)
l2 = ln(v2, v3)
l3 = ln(v3, v4)
l4 = ln(v4, v5)
l5 = ln(v5, v6)
l6 = ln(v6, v7)
l7 = ln(v7, v8)
l8 = ln(v8, v1)
geom_obj = doc.addObject("Part::Feature", "FIB_Wall")
geom_obj.Shape = Part.Face(Part.Wire([l1, l2, l3, l4, l5, l6, l7, l8]))
doc.recompute()
if FreeCAD.GuiUp:
geom_obj.ViewObject.Document.activeView().viewAxonometric()
geom_obj.ViewObject.Document.activeView().fitAll()
# analysis
analysis = ObjectsFem.makeAnalysis(doc, "Analysis")
# solver
if solvertype == "calculix":
solver_obj = ObjectsFem.makeSolverCalculix(doc, "SolverCalculiX")
elif solvertype == "ccxtools":
solver_obj = ObjectsFem.makeSolverCalculixCcxTools(doc, "CalculiXccxTools")
solver_obj.WorkingDir = ""
else:
FreeCAD.Console.PrintWarning(
"Unknown or unsupported solver type: {}. "
"No solver object was created.\n".format(solvertype)
)
if solvertype == "calculix" or solvertype == "ccxtools":
solver_obj.SplitInputWriter = False
solver_obj.AnalysisType = "static"
solver_obj.GeometricalNonlinearity = "linear"
solver_obj.ThermoMechSteadyState = False
solver_obj.MatrixSolverType = "default"
solver_obj.IterationsControlParameterTimeUse = False
analysis.addObject(solver_obj)
# shell thickness
thickness_obj = ObjectsFem.makeElementGeometry2D(doc, 150.0, "ShellThickness")
analysis.addObject(thickness_obj)
# material
matrixprop = {}
matrixprop["Name"] = "Concrete-EN-C35/45"
matrixprop["YoungsModulus"] = "32000 MPa"
matrixprop["PoissonRatio"] = "0.17"
matrixprop["CompressiveStrength"] = "15.75 MPa"
# make some hint on the possible angle units in material system
matrixprop["AngleOfFriction"] = "30 deg"
reinfoprop = {}
reinfoprop["Name"] = "Reinforcement-FIB-B500"
reinfoprop["YieldStrength"] = "315 MPa"
# not an official FreeCAD material property
reinfoprop["ReinforcementRatio"] = "0.0"
material_reinforced = ObjectsFem.makeMaterialReinforced(doc, "MaterialReinforced")
material_reinforced.Material = matrixprop
material_reinforced.Reinforcement = reinfoprop
analysis.addObject(material_reinforced)
# constraint fixed
con_fixed = ObjectsFem.makeConstraintFixed(doc, "ConstraintFixed")
con_fixed.References = [(geom_obj, "Edge1"), (geom_obj, "Edge5")]
analysis.addObject(con_fixed)
# constraint force
con_force = ObjectsFem.makeConstraintForce(doc, "ConstraintForce")
con_force.References = [(geom_obj, "Edge7")]
con_force.Force = 1000000.0
con_force.Direction = (geom_obj, ["Edge8"])
con_force.Reversed = False
analysis.addObject(con_force)
# constraint displacement
con_disp = ObjectsFem.makeConstraintDisplacement(
doc, "ConstraintDisplacmentPrescribed"
)
con_disp.References = [(geom_obj, "Face1")]
con_disp.zFree = False
con_disp.zFix = True
analysis.addObject(con_disp)
# mesh
from .meshes.mesh_rc_wall_2d_tria6 import create_elements, create_nodes
fem_mesh = Fem.FemMesh()
control = create_nodes(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating nodes.\n")
control = create_elements(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating elements.\n")
femmesh_obj = analysis.addObject(ObjectsFem.makeMeshGmsh(doc, get_meshname()))[0]
femmesh_obj.FemMesh = fem_mesh
femmesh_obj.Part = geom_obj
femmesh_obj.SecondOrderLinear = False
doc.recompute()
return doc
|
PyObjCTest | test_cgeventtypes | from PyObjCTools.TestSupport import *
from Quartz.CoreGraphics import *
class TestCGEventTypes(TestCase):
    """Checks the CoreGraphics event types, constants and inline helpers."""

    def testTypes(self):
        """CGEventRef/CGEventSourceRef must be wrapped as CFType objects."""
        self.assertIsCFType(CGEventRef)
        self.assertIsCFType(CGEventSourceRef)

    def testConstants(self):
        """Constant values must match the CGEventTypes.h definitions."""
        self.assertEqual(kCGMouseButtonLeft, 0)
        self.assertEqual(kCGMouseButtonRight, 1)
        self.assertEqual(kCGMouseButtonCenter, 2)
        self.assertEqual(kCGScrollEventUnitPixel, 0)
        self.assertEqual(kCGScrollEventUnitLine, 1)
        self.assertEqual(kCGEventFlagMaskAlphaShift, 0x00010000)
        self.assertEqual(kCGEventFlagMaskShift, 0x00020000)
        self.assertEqual(kCGEventFlagMaskControl, 0x00040000)
        self.assertEqual(kCGEventFlagMaskAlternate, 0x00080000)
        self.assertEqual(kCGEventFlagMaskCommand, 0x00100000)
        self.assertEqual(kCGEventFlagMaskHelp, 0x00400000)
        self.assertEqual(kCGEventFlagMaskSecondaryFn, 0x00800000)
        self.assertEqual(kCGEventFlagMaskNumericPad, 0x00200000)
        self.assertEqual(kCGEventFlagMaskNonCoalesced, 0x00000100)
        self.assertEqual(kCGEventNull, 0)
        self.assertEqual(kCGEventLeftMouseDown, 1)
        self.assertEqual(kCGEventLeftMouseUp, 2)
        self.assertEqual(kCGEventRightMouseDown, 3)
        self.assertEqual(kCGEventRightMouseUp, 4)
        self.assertEqual(kCGEventMouseMoved, 5)
        self.assertEqual(kCGEventLeftMouseDragged, 6)
        self.assertEqual(kCGEventRightMouseDragged, 7)
        self.assertEqual(kCGEventKeyDown, 10)
        self.assertEqual(kCGEventKeyUp, 11)
        self.assertEqual(kCGEventFlagsChanged, 12)
        self.assertEqual(kCGEventScrollWheel, 22)
        self.assertEqual(kCGEventTabletPointer, 23)
        self.assertEqual(kCGEventTabletProximity, 24)
        self.assertEqual(kCGEventOtherMouseDown, 25)
        self.assertEqual(kCGEventOtherMouseUp, 26)
        self.assertEqual(kCGEventOtherMouseDragged, 27)
        self.assertEqual(kCGEventTapDisabledByTimeout, 0xFFFFFFFE)
        self.assertEqual(kCGEventTapDisabledByUserInput, 0xFFFFFFFF)
        self.assertEqual(kCGMouseEventNumber, 0)
        self.assertEqual(kCGMouseEventClickState, 1)
        self.assertEqual(kCGMouseEventPressure, 2)
        self.assertEqual(kCGMouseEventButtonNumber, 3)
        self.assertEqual(kCGMouseEventDeltaX, 4)
        self.assertEqual(kCGMouseEventDeltaY, 5)
        self.assertEqual(kCGMouseEventInstantMouser, 6)
        self.assertEqual(kCGMouseEventSubtype, 7)
        self.assertEqual(kCGKeyboardEventKeycode, 9)
        self.assertEqual(kCGKeyboardEventKeyboardType, 10)
        self.assertEqual(kCGScrollWheelEventDeltaAxis1, 11)
        self.assertEqual(kCGScrollWheelEventDeltaAxis2, 12)
        self.assertEqual(kCGScrollWheelEventDeltaAxis3, 13)
        self.assertEqual(kCGScrollWheelEventFixedPtDeltaAxis1, 93)
        self.assertEqual(kCGScrollWheelEventFixedPtDeltaAxis2, 94)
        self.assertEqual(kCGScrollWheelEventFixedPtDeltaAxis3, 95)
        self.assertEqual(kCGScrollWheelEventPointDeltaAxis1, 96)
        self.assertEqual(kCGScrollWheelEventPointDeltaAxis2, 97)
        self.assertEqual(kCGScrollWheelEventPointDeltaAxis3, 98)
        self.assertEqual(kCGScrollWheelEventInstantMouser, 14)
        self.assertEqual(kCGTabletEventPointX, 15)
        self.assertEqual(kCGTabletEventPointY, 16)
        self.assertEqual(kCGTabletEventPointZ, 17)
        self.assertEqual(kCGTabletEventPointButtons, 18)
        self.assertEqual(kCGTabletEventPointPressure, 19)
        self.assertEqual(kCGTabletEventTiltX, 20)
        self.assertEqual(kCGTabletEventTiltY, 21)
        self.assertEqual(kCGTabletEventRotation, 22)
        self.assertEqual(kCGTabletEventTangentialPressure, 23)
        self.assertEqual(kCGTabletEventDeviceID, 24)
        self.assertEqual(kCGTabletEventVendor1, 25)
        self.assertEqual(kCGTabletEventVendor2, 26)
        self.assertEqual(kCGTabletEventVendor3, 27)
        self.assertEqual(kCGTabletProximityEventVendorID, 28)
        self.assertEqual(kCGTabletProximityEventTabletID, 29)
        self.assertEqual(kCGTabletProximityEventPointerID, 30)
        self.assertEqual(kCGTabletProximityEventDeviceID, 31)
        self.assertEqual(kCGTabletProximityEventSystemTabletID, 32)
        self.assertEqual(kCGTabletProximityEventVendorPointerType, 33)
        self.assertEqual(kCGTabletProximityEventVendorPointerSerialNumber, 34)
        self.assertEqual(kCGTabletProximityEventVendorUniqueID, 35)
        self.assertEqual(kCGTabletProximityEventCapabilityMask, 36)
        self.assertEqual(kCGTabletProximityEventPointerType, 37)
        self.assertEqual(kCGTabletProximityEventEnterProximity, 38)
        self.assertEqual(kCGEventTargetProcessSerialNumber, 39)
        self.assertEqual(kCGEventTargetUnixProcessID, 40)
        self.assertEqual(kCGEventSourceUnixProcessID, 41)
        self.assertEqual(kCGEventSourceUserData, 42)
        self.assertEqual(kCGEventSourceUserID, 43)
        self.assertEqual(kCGEventSourceGroupID, 44)
        self.assertEqual(kCGEventSourceStateID, 45)
        self.assertEqual(kCGScrollWheelEventIsContinuous, 88)
        self.assertEqual(kCGEventMouseSubtypeDefault, 0)
        self.assertEqual(kCGEventMouseSubtypeTabletPoint, 1)
        self.assertEqual(kCGEventMouseSubtypeTabletProximity, 2)
        self.assertEqual(kCGHIDEventTap, 0)
        self.assertEqual(kCGSessionEventTap, 1)
        self.assertEqual(kCGAnnotatedSessionEventTap, 2)
        self.assertEqual(kCGHeadInsertEventTap, 0)
        self.assertEqual(kCGTailAppendEventTap, 1)
        self.assertEqual(kCGEventTapOptionDefault, 0x00000000)
        self.assertEqual(kCGEventTapOptionListenOnly, 0x00000001)
        self.assertEqual(
            kCGNotifyEventTapAdded, b"com.apple.coregraphics.eventTapAdded"
        )
        self.assertEqual(
            kCGNotifyEventTapRemoved, b"com.apple.coregraphics.eventTapRemoved"
        )
        self.assertEqual(kCGEventSourceStatePrivate, -1)
        self.assertEqual(kCGEventSourceStateCombinedSessionState, 0)
        self.assertEqual(kCGEventSourceStateHIDSystemState, 1)
        self.assertEqual(kCGAnyInputEventType, 0xFFFFFFFF)

    def testStructs(self):
        """CGEventTapInformation must expose all documented fields."""
        v = CGEventTapInformation()
        self.assertTrue(hasattr(v, "eventTapID"))
        self.assertTrue(hasattr(v, "tapPoint"))
        self.assertTrue(hasattr(v, "options"))
        self.assertTrue(hasattr(v, "eventsOfInterest"))
        self.assertTrue(hasattr(v, "tappingProcess"))
        self.assertTrue(hasattr(v, "processBeingTapped"))
        self.assertTrue(hasattr(v, "enabled"))
        self.assertTrue(hasattr(v, "minUsecLatency"))
        self.assertTrue(hasattr(v, "avgUsecLatency"))
        self.assertTrue(hasattr(v, "maxUsecLatency"))

    def testInline(self):
        """CGEventMaskBit must be wrapped as the (1 << type) macro."""
        self.assertEqual(CGEventMaskBit(10), 1 << 10)


if __name__ == "__main__":
    main()
|
object-detection | trainer_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import tensorflow as tf
from app.object_detection import trainer
from app.object_detection.core import losses, model
from app.object_detection.core import standard_fields as fields
from app.object_detection.protos import train_pb2
from google.protobuf import text_format
NUMBER_OF_CLASSES = 2


def get_input_function():
    """A function to get test inputs. Returns an image with one box."""
    input_fields = fields.InputDataFields
    tensors = {
        input_fields.image: tf.random_uniform([32, 32, 3], dtype=tf.float32),
        input_fields.key: tf.constant("image_000000"),
        input_fields.groundtruth_classes: tf.random_uniform(
            [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32
        ),
        input_fields.groundtruth_boxes: tf.random_uniform(
            [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32
        ),
    }
    return tensors
class FakeDetectionModel(model.DetectionModel):
    """A simple (and poor) DetectionModel for use in test."""

    def __init__(self):
        # NUMBER_OF_CLASSES matches the label range of get_input_function().
        super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
        self._classification_loss = losses.WeightedSigmoidClassificationLoss(
            anchorwise_output=True
        )
        self._localization_loss = losses.WeightedSmoothL1LocalizationLoss(
            anchorwise_output=True
        )

    def preprocess(self, inputs):
        """Input preprocessing, resizes images to 28x28.
        Args:
            inputs: a [batch, height_in, width_in, channels] float32 tensor
                representing a batch of images with values between 0 and 255.0.
        Returns:
            preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
        """
        return tf.image.resize_images(inputs, [28, 28])

    def predict(self, preprocessed_inputs):
        """Prediction tensors from inputs tensor.
        Args:
            preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
        Returns:
            prediction_dict: a dictionary holding prediction tensors to be
                passed to the Loss or Postprocess functions.
        """
        # One fully-connected layer each for class scores and box encodings,
        # reshaped to a single-anchor layout: [batch, 1, ...].
        flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
        class_prediction = tf.contrib.layers.fully_connected(
            flattened_inputs, self._num_classes
        )
        box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
        return {
            "class_predictions_with_background": tf.reshape(
                class_prediction, [-1, 1, self._num_classes]
            ),
            "box_encodings": tf.reshape(box_prediction, [-1, 1, 4]),
        }

    def postprocess(self, prediction_dict, **params):
        """Convert predicted output tensors to final detections. Unused.
        Args:
            prediction_dict: a dictionary holding prediction tensors.
            **params: Additional keyword arguments for specific implementations of
                DetectionModel.
        Returns:
            detections: a dictionary with empty fields.
        """
        return {
            "detection_boxes": None,
            "detection_scores": None,
            "detection_classes": None,
            "num_detections": None,
        }

    def loss(self, prediction_dict):
        """Compute scalar loss tensors with respect to provided groundtruth.
        Calling this function requires that groundtruth tensors have been
        provided via the provide_groundtruth function.
        Args:
            prediction_dict: a dictionary holding predicted tensors
        Returns:
            a dictionary mapping strings (loss names) to scalar tensors representing
            loss values.
        """
        batch_reg_targets = tf.stack(self.groundtruth_lists(fields.BoxListFields.boxes))
        batch_cls_targets = tf.stack(
            self.groundtruth_lists(fields.BoxListFields.classes)
        )
        # Uniform weight of 1.0 per (image, anchor).
        weights = tf.constant(
            1.0,
            dtype=tf.float32,
            shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1],
        )
        location_losses = self._localization_loss(
            prediction_dict["box_encodings"], batch_reg_targets, weights=weights
        )
        cls_losses = self._classification_loss(
            prediction_dict["class_predictions_with_background"],
            batch_cls_targets,
            weights=weights,
        )
        loss_dict = {
            "localization_loss": tf.reduce_sum(location_losses),
            "classification_loss": tf.reduce_sum(cls_losses),
        }
        return loss_dict

    def restore_map(self, from_detection_checkpoint=True):
        """Returns a map of variables to load from a foreign checkpoint.
        Args:
            from_detection_checkpoint: whether to restore from a full detection
                checkpoint (with compatible variable names) or to restore from a
                classification checkpoint for initialization prior to training.
        Returns:
            A dict mapping variable names to variables.
        """
        # Restore every global variable under its own op name.
        return {var.op.name: var for var in tf.global_variables()}
class TrainerTest(tf.test.TestCase):
    """Smoke test: trainer.train() runs two optimization steps end to end."""

    def test_configure_trainer_and_train_two_steps(self):
        """Builds a TrainConfig from a text proto and trains FakeDetectionModel."""
        train_config_text_proto = """
    optimizer {
      adam_optimizer {
        learning_rate {
          constant_learning_rate {
            learning_rate: 0.01
          }
        }
      }
    }
    data_augmentation_options {
      random_adjust_brightness {
        max_delta: 0.2
      }
    }
    data_augmentation_options {
      random_adjust_contrast {
        min_delta: 0.7
        max_delta: 1.1
      }
    }
    num_steps: 2
    """
        train_config = train_pb2.TrainConfig()
        text_format.Merge(train_config_text_proto, train_config)
        train_dir = self.get_temp_dir()
        # Single-machine, single-clone, CPU-only training run.
        trainer.train(
            create_tensor_dict_fn=get_input_function,
            create_model_fn=FakeDetectionModel,
            train_config=train_config,
            master="",
            task=0,
            num_clones=1,
            worker_replicas=1,
            clone_on_cpu=True,
            ps_tasks=0,
            worker_job_name="worker",
            is_chief=True,
            train_dir=train_dir,
        )


if __name__ == "__main__":
    tf.test.main()
|
misc | ipc | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Utilities for IPC with existing instances."""
import binascii
import getpass
import hashlib
import json
import os
import time
from typing import Optional
import qutebrowser
from qutebrowser.qt import sip
from qutebrowser.qt.core import QObject, Qt, pyqtSignal, pyqtSlot
from qutebrowser.qt.network import QAbstractSocket, QLocalServer, QLocalSocket
from qutebrowser.utils import debug, error, log, qtutils, standarddir, usertypes, utils
# Socket timeouts, in milliseconds.
CONNECT_TIMEOUT = 100  # timeout for connecting/disconnecting
WRITE_TIMEOUT = 1000
READ_TIMEOUT = 5000
# Interval at which IPCServer.update_atime refreshes the socket file's atime.
ATIME_INTERVAL = 5000 * 60  # 5 minutes
# Version number of the IPC protocol.
PROTOCOL_VERSION = 1
# The ipc server instance
server = None
def _get_socketname_windows(basedir):
    """Get a socketname to use for Windows."""
    try:
        user = getpass.getuser()
    except ImportError:
        # getpass.getuser() first tries a couple of environment variables. If
        # none of those are set (i.e., USERNAME is missing), it falls back to
        # importing the "pwd" module, which does not exist on Windows.
        raise Error(
            "Could not find username. This should only happen if "
            "there is a bug in the application launching qutebrowser, "
            "preventing the USERNAME environment variable from being "
            "passed. If you know more about when this happens, please "
            "report this to mail@qutebrowser.org."
        )

    name_parts = ["qutebrowser", user]
    if basedir is not None:
        name_parts.append(hashlib.md5(basedir.encode("utf-8")).hexdigest())
    return "-".join(name_parts)
def _get_socketname(basedir):
    """Get a socketname to use."""
    if utils.is_windows:  # pragma: no cover
        return _get_socketname_windows(basedir)

    hash_parts = [getpass.getuser()]
    if basedir is not None:
        hash_parts.append(basedir)
    digest = hashlib.md5("-".join(hash_parts).encode("utf-8")).hexdigest()
    prefix = "i-" if utils.is_mac else "ipc-"
    return os.path.join(standarddir.runtime(), "{}{}".format(prefix, digest))
class Error(Exception):
    """Base exception for all IPC-related errors."""
class SocketError(Error):
    """Exception raised when there was an error with a QLocalSocket.

    Attributes:
        code: The error code.
        message: The error message.
        action: The action which was taken when the error happened.
    """

    def __init__(self, action, socket):
        """Remember the action and copy the error state from *socket*.

        Args:
            action: The action which was taken when the error happened.
            socket: The QLocalSocket which has the error set.
        """
        super().__init__()
        self.action = action
        self.code: QLocalSocket.LocalSocketError = socket.error()
        self.message: str = socket.errorString()

    def __str__(self):
        code_name = debug.qenum_key(QLocalSocket, self.code)
        return "Error while {}: {} ({})".format(self.action, self.message, code_name)
class ListenError(Error):
    """Exception raised when there was a problem with listening to IPC.
    Args:
        code: The error code.
        message: The error message.
    """
    def __init__(self, local_server):
        """Snapshot the error state of the given server.
        Args:
            local_server: The QLocalServer which has the error set.
        """
        super().__init__()
        self.code: QAbstractSocket.SocketError = local_server.serverError()
        self.message: str = local_server.errorString()
    def __str__(self):
        error_key = debug.qenum_key(QAbstractSocket, self.code)
        return f"Error while listening to IPC server: {self.message} ({error_key})"
class AddressInUseError(ListenError):
    """Emitted when the server address is already in use.
    Raised by IPCServer.listen() when the QLocalServer reports
    AddressInUseError, i.e. another instance already owns the socket.
    """
class IPCServer(QObject):
    """IPC server to which clients connect to.
    Attributes:
        ignored: Whether requests are ignored (in exception hook).
        _timer: A timer to handle timeouts.
        _server: A QLocalServer to accept new connections.
        _socket: The QLocalSocket we're currently connected to.
        _socketname: The socketname to use.
        _atime_timer: Timer to update the atime of the socket regularly.
        _old_socket: The previously connected socket, kept alive until the
                     next disconnect so stale signals still find a target.
    Signals:
        got_args: Emitted when there was an IPC connection and arguments were
                  passed.
        got_raw: Emitted with the raw data an IPC connection got.
        got_invalid_data: Emitted when there was invalid incoming data.
    """
    got_args = pyqtSignal(list, str, str)
    got_raw = pyqtSignal(bytes)
    got_invalid_data = pyqtSignal()
    def __init__(self, socketname, parent=None):
        """Start the IPC server and listen to commands.
        Args:
            socketname: The socketname to use.
            parent: The parent to be used.
        """
        super().__init__(parent)
        self.ignored = False
        self._socketname = socketname
        # Disconnects a client that has been idle for READ_TIMEOUT ms.
        self._timer = usertypes.Timer(self, "ipc-timeout")
        self._timer.setInterval(READ_TIMEOUT)
        self._timer.timeout.connect(self.on_timeout)
        if utils.is_windows:  # pragma: no cover
            # update_atime() only exists to keep the socket file alive per the
            # XDG basedir spec (see its docstring), which doesn't apply here.
            self._atime_timer = None
        else:
            self._atime_timer = usertypes.Timer(self, "ipc-atime")
            self._atime_timer.setInterval(ATIME_INTERVAL)
            self._atime_timer.timeout.connect(self.update_atime)
            self._atime_timer.setTimerType(Qt.TimerType.VeryCoarseTimer)
        self._server: Optional[QLocalServer] = QLocalServer(self)
        self._server.newConnection.connect(self.handle_connection)
        self._socket = None
        self._old_socket = None
        if utils.is_windows:  # pragma: no cover
            # As a WORKAROUND for a Qt bug, we can't use UserAccessOption on Unix. If we
            # do, we don't get an AddressInUseError anymore:
            # https://bugreports.qt.io/browse/QTBUG-48635
            #
            # Thus, we only do so on Windows, and handle permissions manually in
            # listen() on Linux.
            log.ipc.debug("Calling setSocketOptions")
            self._server.setSocketOptions(QLocalServer.SocketOption.UserAccessOption)
        else:  # pragma: no cover
            log.ipc.debug("Not calling setSocketOptions")
    def _remove_server(self):
        """Remove an existing server."""
        ok = QLocalServer.removeServer(self._socketname)
        if not ok:
            raise Error("Error while removing server {}!".format(self._socketname))
    def listen(self):
        """Start listening on self._socketname."""
        assert self._server is not None
        log.ipc.debug("Listening as {}".format(self._socketname))
        if self._atime_timer is not None:  # pragma: no branch
            self._atime_timer.start()
        # Clean up a possibly stale socket file from a crashed instance.
        self._remove_server()
        ok = self._server.listen(self._socketname)
        if not ok:
            if (
                self._server.serverError()
                == QAbstractSocket.SocketError.AddressInUseError
            ):
                raise AddressInUseError(self._server)
            raise ListenError(self._server)
        if not utils.is_windows:  # pragma: no cover
            # WORKAROUND for QTBUG-48635, see the comment in __init__ for details.
            try:
                os.chmod(self._server.fullServerName(), 0o700)
            except FileNotFoundError:
                # https://github.com/qutebrowser/qutebrowser/issues/1530
                # The server doesn't actually exist even if ok was reported as
                # True, so report this as an error.
                raise ListenError(self._server)
    @pyqtSlot("QLocalSocket::LocalSocketError")
    def on_error(self, err):
        """Raise SocketError on fatal errors."""
        if self._socket is None:
            # Sometimes this gets called from stale sockets.
            log.ipc.debug("In on_error with None socket!")
            return
        self._timer.stop()
        log.ipc.debug(
            "Socket 0x{:x}: error {}: {}".format(
                id(self._socket), self._socket.error(), self._socket.errorString()
            )
        )
        # A peer closing the connection is normal; everything else is fatal.
        if err != QLocalSocket.LocalSocketError.PeerClosedError:
            raise SocketError("handling IPC connection", self._socket)
    @pyqtSlot()
    def handle_connection(self):
        """Handle a new connection to the server."""
        if self.ignored or self._server is None:
            return
        if self._socket is not None:
            log.ipc.debug(
                "Got new connection but ignoring it because we're "
                "still handling another one (0x{:x}).".format(id(self._socket))
            )
            return
        socket = qtutils.add_optional(self._server.nextPendingConnection())
        if socket is None:
            log.ipc.debug("No new connection to handle.")
            return
        log.ipc.debug("Client connected (socket 0x{:x}).".format(id(socket)))
        self._socket = socket
        self._timer.start()
        socket.readyRead.connect(self.on_ready_read)
        # Data, errors or a disconnect may already have happened before the
        # signal connections were made, so each case is also checked once here.
        if socket.canReadLine():
            log.ipc.debug("We can read a line immediately.")
            self.on_ready_read()
        socket.errorOccurred.connect(self.on_error)
        # FIXME:v4 Ignore needed due to overloaded signal/method in Qt 5
        socket_error = socket.error()  # type: ignore[operator,unused-ignore]
        if socket_error not in [
            QLocalSocket.LocalSocketError.UnknownSocketError,
            QLocalSocket.LocalSocketError.PeerClosedError,
        ]:
            log.ipc.debug("We got an error immediately.")
            self.on_error(socket_error)
        socket.disconnected.connect(self.on_disconnected)
        if socket.state() == QLocalSocket.LocalSocketState.UnconnectedState:
            log.ipc.debug("Socket was disconnected immediately.")
            self.on_disconnected()
    @pyqtSlot()
    def on_disconnected(self):
        """Clean up socket when the client disconnected."""
        log.ipc.debug(
            "Client disconnected from socket 0x{:x}.".format(id(self._socket))
        )
        self._timer.stop()
        # Delete the socket from the previous connection and keep the current
        # one around as _old_socket, so late signals still find a live object
        # (see _get_socket()).
        if self._old_socket is not None:
            self._old_socket.deleteLater()
        self._old_socket = self._socket
        self._socket = None
        # Maybe another connection is waiting.
        self.handle_connection()
    def _handle_invalid_data(self):
        """Handle invalid data we got from a QLocalSocket."""
        assert self._socket is not None
        log.ipc.error(
            "Ignoring invalid IPC data from socket 0x{:x}.".format(id(self._socket))
        )
        self.got_invalid_data.emit()
        self._socket.errorOccurred.connect(self.on_error)
        self._socket.disconnectFromServer()
    def _handle_data(self, data):
        """Handle data (as bytes) we got from on_ready_read.
        Validates encoding, JSON shape, required keys and protocol version;
        emits got_args on success, otherwise reports via _handle_invalid_data.
        """
        try:
            decoded = data.decode("utf-8")
        except UnicodeDecodeError:
            log.ipc.error("invalid utf-8: {!r}".format(binascii.hexlify(data)))
            self._handle_invalid_data()
            return
        log.ipc.debug("Processing: {}".format(decoded))
        try:
            json_data = json.loads(decoded)
        except ValueError:
            log.ipc.error("invalid json: {}".format(decoded.strip()))
            self._handle_invalid_data()
            return
        for name in ["args", "target_arg"]:
            if name not in json_data:
                log.ipc.error("Missing {}: {}".format(name, decoded.strip()))
                self._handle_invalid_data()
                return
        try:
            protocol_version = int(json_data["protocol_version"])
        except (KeyError, ValueError):
            log.ipc.error("invalid version: {}".format(decoded.strip()))
            self._handle_invalid_data()
            return
        if protocol_version != PROTOCOL_VERSION:
            log.ipc.error(
                "incompatible version: expected {}, got {}".format(
                    PROTOCOL_VERSION, protocol_version
                )
            )
            self._handle_invalid_data()
            return
        args = json_data["args"]
        target_arg = json_data["target_arg"]
        if target_arg is None:
            # https://www.riverbankcomputing.com/pipermail/pyqt/2016-April/037375.html
            target_arg = ""
        cwd = json_data.get("cwd", "")
        assert cwd is not None
        self.got_args.emit(args, target_arg, cwd)
    def _get_socket(self, warn=True):
        """Get the current socket for on_ready_read.
        Falls back to _old_socket when the current socket is already gone.
        Arguments:
            warn: Whether to warn if no socket was found.
        """
        if self._socket is None:  # pragma: no cover
            # This happens when doing a connection while another one is already
            # active for some reason.
            if self._old_socket is None:
                if warn:
                    log.ipc.warning("In _get_socket with None socket and old_socket!")
                return None
            log.ipc.debug("In _get_socket with None socket!")
            socket = self._old_socket
        else:
            socket = self._socket
        if sip.isdeleted(socket):  # pragma: no cover
            log.ipc.warning("Ignoring deleted IPC socket")
            return None
        return socket
    @pyqtSlot()
    def on_ready_read(self):
        """Read json data from the client."""
        self._timer.stop()
        socket = self._get_socket()
        while socket is not None and socket.canReadLine():
            data = bytes(socket.readLine())
            self.got_raw.emit(data)
            log.ipc.debug("Read from socket 0x{:x}: {!r}".format(id(socket), data))
            self._handle_data(data)
            # Re-fetch: _handle_data may have disconnected the socket.
            socket = self._get_socket(warn=False)
        # Only re-arm the idle timeout while a connection is still open.
        if self._socket is not None:
            self._timer.start()
    @pyqtSlot()
    def on_timeout(self):
        """Cancel the current connection if it was idle for too long."""
        assert self._socket is not None
        log.ipc.error(
            "IPC connection timed out " "(socket 0x{:x}).".format(id(self._socket))
        )
        self._socket.disconnectFromServer()
        if self._socket is not None:  # pragma: no cover
            # on_socket_disconnected sets it to None
            self._socket.waitForDisconnected(CONNECT_TIMEOUT)
        if self._socket is not None:  # pragma: no cover
            # on_socket_disconnected sets it to None
            self._socket.abort()
    @pyqtSlot()
    def update_atime(self):
        """Update the atime of the socket file all few hours.
        From the XDG basedir spec:
        To ensure that your files are not removed, they should have their
        access time timestamp modified at least once every 6 hours of monotonic
        time or the 'sticky' bit should be set on the file.
        """
        assert self._server is not None
        path = self._server.fullServerName()
        if not path:
            log.ipc.error("In update_atime with no server path!")
            return
        log.ipc.debug("Touching {}".format(path))
        try:
            os.utime(path)
        except OSError:
            # The socket file is gone; re-create it by listening again.
            log.ipc.exception("Failed to update IPC socket, trying to " "re-listen...")
            self._server.close()
            self.listen()
    @pyqtSlot()
    def shutdown(self):
        """Shut down the IPC server cleanly."""
        if self._server is None:
            # We can get called twice when using :restart -- there, IPC is shut down
            # early to avoid processing new connections while shutting down, and then
            # we get called again when the application is about to quit.
            return
        log.ipc.debug("Shutting down IPC (socket 0x{:x})".format(id(self._socket)))
        if self._socket is not None:
            self._socket.deleteLater()
            self._socket = None
        self._timer.stop()
        if self._atime_timer is not None:  # pragma: no branch
            self._atime_timer.stop()
            try:
                self._atime_timer.timeout.disconnect(self.update_atime)
            except TypeError:
                # PyQt raises TypeError when the signal wasn't connected.
                pass
        self._server.close()
        self._server.deleteLater()
        self._remove_server()
        self._server = None
def send_to_running_instance(socketname, command, target_arg, *, socket=None):
    """Try to send a commandline to a running instance.
    Blocks for CONNECT_TIMEOUT ms.
    Args:
        socketname: The name which should be used for the socket.
        command: The command to send to the running instance.
        target_arg: --target command line argument
        socket: The socket to read data from, or None.
    Return:
        True if connecting was successful, False if no connection was made.
    Raises:
        SocketError: On any socket error other than "no instance running".
    """
    if socket is None:
        socket = QLocalSocket()
    log.ipc.debug("Connecting to {}".format(socketname))
    socket.connectToServer(socketname)
    connected = socket.waitForConnected(CONNECT_TIMEOUT)
    if connected:
        log.ipc.info("Opening in existing instance")
        json_data = {
            "args": command,
            "target_arg": target_arg,
            "version": qutebrowser.__version__,
            "protocol_version": PROTOCOL_VERSION,
        }
        try:
            cwd = os.getcwd()
        except OSError:
            # The cwd may be gone (e.g. deleted); send the message without it.
            pass
        else:
            json_data["cwd"] = cwd
        # The wire format is one JSON object per newline-terminated line.
        line = json.dumps(json_data) + "\n"
        data = line.encode("utf-8")
        log.ipc.debug("Writing: {!r}".format(data))
        socket.writeData(data)
        socket.waitForBytesWritten(WRITE_TIMEOUT)
        # UnknownSocketError is treated as the "no error happened" value here.
        if socket.error() != QLocalSocket.LocalSocketError.UnknownSocketError:
            raise SocketError("writing to running instance", socket)
        socket.disconnectFromServer()
        if socket.state() != QLocalSocket.LocalSocketState.UnconnectedState:
            socket.waitForDisconnected(CONNECT_TIMEOUT)
        return True
    else:
        # These two errors simply mean no instance is listening -- anything
        # else is a real failure.
        if socket.error() not in [
            QLocalSocket.LocalSocketError.ConnectionRefusedError,
            QLocalSocket.LocalSocketError.ServerNotFoundError,
        ]:
            raise SocketError("connecting to running instance", socket)
        log.ipc.debug(
            "No existing instance present ({})".format(
                debug.qenum_key(QLocalSocket, socket.error())
            )
        )
        return False
def display_error(exc, args):
    """Display a message box with an IPC error."""
    message = "Error while connecting to running instance!"
    error.handle_fatal_exc(exc, message, no_err_windows=args.no_err_windows)
def send_or_listen(args):
    """Send the args to a running instance or start a new IPCServer.
    Args:
        args: The argparse namespace.
    Return:
        The IPCServer instance if no running instance was detected.
        None if an instance was running and received our request.
    """
    global server
    try:
        socketname = _get_socketname(args.basedir)
        try:
            if send_to_running_instance(socketname, args.command, args.target):
                return None
            log.init.debug("Starting IPC server...")
            server = IPCServer(socketname)
            server.listen()
            return server
        except AddressInUseError:
            # This could be a race condition...
            log.init.debug("Got AddressInUseError, trying again.")
            time.sleep(0.5)
            # Another instance may have just finished starting up; retry once
            # before giving up.
            if send_to_running_instance(socketname, args.command, args.target):
                return None
            raise
    except Error as e:
        display_error(e, args)
        raise
|
sabnzbd | database | #!/usr/bin/python3 -OO
# Copyright 2007-2023 The SABnzbd-Team (sabnzbd.org)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.database - Database Support
"""
import logging
import os
import sqlite3
import sys
import threading
import time
import zlib
from sqlite3 import Connection, Cursor
from typing import Dict, List, Optional, Sequence, Union
import sabnzbd
import sabnzbd.cfg
from sabnzbd.bpsmeter import this_month, this_week
from sabnzbd.constants import DB_HISTORY_NAME, STAGES, Status
from sabnzbd.decorators import synchronized
from sabnzbd.encoding import ubtou, utob
from sabnzbd.filesystem import clip_path, remove_file
from sabnzbd.misc import caller_name, int_conv, opts_to_pp
# Guards database setup: held via @synchronized on HistoryDB.__init__ so
# concurrent threads don't race on determining the path / first connect.
DB_LOCK = threading.RLock()
def convert_search(search):
    """Convert classic wildcard search to an SQL LIKE pattern.

    ``*`` and spaces become the SQL ``%`` wildcard. A leading ``^`` anchors
    the pattern to the start of the name and a trailing ``$`` anchors it to
    the end; both may be combined for an exact match. Without anchors the
    pattern matches anywhere in the name.
    """
    if not search:
        # Default value: match everything
        return "%%"
    # Allow * for wildcard matching and space
    search = search.replace("*", "%").replace(" ", "%")
    # Allow ^ for start of string and $ for end of string.
    # Strip only the anchor characters themselves (the previous replace()-all
    # approach also dropped interior ^/$), and support using both anchors at
    # the same time ("^name$"), which previously left a stray "$%" behind.
    anchored_start = search.startswith("^")
    anchored_end = search.endswith("$")
    if anchored_start:
        search = search[1:]
    if anchored_end:
        search = search[:-1]
    if not anchored_start:
        search = "%" + search
    if not anchored_end:
        search += "%"
    return search
class HistoryDB:
    """Class to access the History database
    Each class-instance will create an access channel that
    can be used in one thread.
    Each thread needs its own class-instance!
    """

    # These class attributes will be accessed directly because
    # they need to be shared by all instances
    db_path = None  # Will contain full path to history database
    done_cleaning = False  # Ensure we only do one Vacuum per session
    @synchronized(DB_LOCK)
    def __init__(self):
        """Determine database path and create connection"""
        self.connection: Optional[Connection] = None
        self.cursor: Optional[Cursor] = None
        if not HistoryDB.db_path:
            # First instance resolves the path; later instances reuse it.
            HistoryDB.db_path = os.path.join(
                sabnzbd.cfg.admin_dir.get_path(), DB_HISTORY_NAME
            )
        self.connect()
    def connect(self):
        """Create a connection to the database"""
        create_table = not os.path.exists(HistoryDB.db_path)
        self.connection = sqlite3.connect(HistoryDB.db_path)
        self.connection.row_factory = sqlite3.Row
        self.cursor = self.connection.cursor()
        if create_table:
            self.create_history_db()
        elif not HistoryDB.done_cleaning:
            # Run VACUUM on sqlite
            # When an object (table, index, or trigger) is dropped from the database, it leaves behind empty space
            # http://www.sqlite.org/lang_vacuum.html
            HistoryDB.done_cleaning = True
            self.execute("VACUUM")
        # Schema migrations, driven by PRAGMA user_version
        self.execute("PRAGMA user_version;")
        try:
            version = self.cursor.fetchone()["user_version"]
        except IndexError:
            # NOTE(review): fetchone() returns None (-> TypeError) when there
            # is no row; presumably PRAGMA always yields one row -- confirm.
            version = 0
        if version < 1:
            # Add any missing columns added since first DB version
            # Use "and" to stop when database has been reset due to corruption
            _ = (
                self.execute("PRAGMA user_version = 1;")
                and self.execute('ALTER TABLE "history" ADD COLUMN series TEXT;')
                and self.execute('ALTER TABLE "history" ADD COLUMN md5sum TEXT;')
            )
        if version < 2:
            # Add any missing columns added since second DB version
            # Use "and" to stop when database has been reset due to corruption
            _ = self.execute("PRAGMA user_version = 2;") and self.execute(
                'ALTER TABLE "history" ADD COLUMN password TEXT;'
            )
    def execute(self, command: str, args: Sequence = (), save: bool = False) -> bool:
        """Wrapper for executing SQL commands.
        Retries on a locked database, recreates the database on corruption,
        and returns True on success (or on unrecoverable read-only errors).
        """
        for tries in range(5, 0, -1):
            try:
                self.cursor.execute(command, args)
                if save:
                    self.connection.commit()
                return True
            except:
                error = str(sys.exc_info()[1])
                # NOTE(review): "tries >= 0" is always true here, so a database
                # that stays locked exhausts the loop and implicitly returns
                # None (falsy) -- presumably intended as a final give-up.
                if tries >= 0 and "is locked" in error:
                    logging.debug("Database locked, wait and retry")
                    time.sleep(0.5)
                    continue
                elif "readonly" in error:
                    logging.error(
                        T("Cannot write to History database, check access rights!")
                    )
                    # Report back success, because there's no recovery possible
                    return True
                elif (
                    "not a database" in error
                    or "malformed" in error
                    or "duplicate column name" in error
                ):
                    # Corruption: throw the database away and start fresh.
                    logging.error(
                        T("Damaged History database, created empty replacement")
                    )
                    logging.info("Traceback: ", exc_info=True)
                    self.close()
                    try:
                        remove_file(HistoryDB.db_path)
                    except:
                        pass
                    self.connect()
                    # Return False in case of "duplicate column" error
                    # because the column addition in connect() must be terminated
                    return "duplicate column name" not in error
                else:
                    logging.error(T("SQL Command Failed, see log"))
                    logging.info("SQL: %s", command)
                    logging.info("Arguments: %s", repr(args))
                    logging.info("Traceback: ", exc_info=True)
                    try:
                        self.connection.rollback()
                    except:
                        logging.debug("Rollback Failed:", exc_info=True)
                return False
    def create_history_db(self):
        """Create a new (empty) database file"""
        self.execute(
            """
        CREATE TABLE "history" (
            "id" INTEGER PRIMARY KEY,
            "completed" INTEGER NOT NULL,
            "name" TEXT NOT NULL,
            "nzb_name" TEXT NOT NULL,
            "category" TEXT,
            "pp" TEXT,
            "script" TEXT,
            "report" TEXT,
            "url" TEXT,
            "status" TEXT,
            "nzo_id" TEXT,
            "storage" TEXT,
            "path" TEXT,
            "script_log" BLOB,
            "script_line" TEXT,
            "download_time" INTEGER,
            "postproc_time" INTEGER,
            "stage_log" TEXT,
            "downloaded" INTEGER,
            "completeness" INTEGER,
            "fail_message" TEXT,
            "url_info" TEXT,
            "bytes" INTEGER,
            "meta" TEXT,
            "series" TEXT,
            "md5sum" TEXT,
            "password" TEXT
        )
        """
        )
        # New databases start at the latest schema version (see connect()).
        self.execute("PRAGMA user_version = 2;")
    def close(self):
        """Close database connection"""
        try:
            self.cursor.close()
            self.connection.close()
        except:
            logging.error(T("Failed to close database, see log"))
            logging.info("Traceback: ", exc_info=True)
    def remove_completed(self, search=None):
        """Remove all completed jobs from the database, optional with `search` pattern"""
        search = convert_search(search)
        logging.info("Removing all completed jobs from history")
        return self.execute(
            """DELETE FROM history WHERE name LIKE ? AND status = ?""",
            (search, Status.COMPLETED),
            save=True,
        )
    def get_failed_paths(self, search=None):
        """Return list of all storage paths of failed jobs (may contain non-existing or empty paths)"""
        search = convert_search(search)
        fetch_ok = self.execute(
            """SELECT path FROM history WHERE name LIKE ? AND status = ?""",
            (search, Status.FAILED),
        )
        if fetch_ok:
            return [item["path"] for item in self.cursor.fetchall()]
        else:
            return []
    def remove_failed(self, search=None):
        """Remove all failed jobs from the database, optional with `search` pattern"""
        search = convert_search(search)
        logging.info("Removing all failed jobs from history")
        return self.execute(
            """DELETE FROM history WHERE name LIKE ? AND status = ?""",
            (search, Status.FAILED),
            save=True,
        )
    def remove_history(self, jobs=None):
        """Remove all jobs in the list `jobs`, empty list will remove all completed jobs"""
        if jobs is None:
            self.remove_completed()
        else:
            if not isinstance(jobs, list):
                jobs = [jobs]
            for job in jobs:
                self.execute(
                    """DELETE FROM history WHERE nzo_id = ?""", (job,), save=True
                )
                logging.info("[%s] Removing job %s from history", caller_name(), job)
    def auto_history_purge(self):
        """Remove history items based on the configured history-retention.
        "0" keeps everything, "-1" keeps only failed jobs, "Nd" keeps the
        last N days, a plain number keeps that many completed jobs.
        """
        if sabnzbd.cfg.history_retention() == "0":
            return
        if sabnzbd.cfg.history_retention() == "-1":
            # Delete all non-failed ones
            self.remove_completed()
        if "d" in sabnzbd.cfg.history_retention():
            # How many days to keep?
            days_to_keep = int_conv(sabnzbd.cfg.history_retention().strip()[:-1])
            seconds_to_keep = int(time.time()) - days_to_keep * 86400
            if days_to_keep > 0:
                logging.info(
                    "Removing completed jobs older than %s days from history",
                    days_to_keep,
                )
                return self.execute(
                    """DELETE FROM history WHERE status = ? AND completed < ?""",
                    (Status.COMPLETED, seconds_to_keep),
                    save=True,
                )
        else:
            # How many to keep?
            to_keep = int_conv(sabnzbd.cfg.history_retention())
            if to_keep > 0:
                logging.info(
                    "Removing all but last %s completed jobs from history", to_keep
                )
                return self.execute(
                    """DELETE FROM history WHERE status = ? AND id NOT IN (
                    SELECT id FROM history WHERE status = ? ORDER BY completed DESC LIMIT ?
                    )""",
                    (Status.COMPLETED, Status.COMPLETED, to_keep),
                    save=True,
                )
    def add_history_db(
        self, nzo, storage="", postproc_time=0, script_output="", script_line=""
    ):
        """Add a new job entry to the database"""
        t = build_history_info(
            nzo, storage, postproc_time, script_output, script_line, series_info=True
        )
        self.execute(
            """INSERT INTO history (completed, name, nzb_name, category, pp, script, report,
        url, status, nzo_id, storage, path, script_log, script_line, download_time, postproc_time, stage_log,
        downloaded, fail_message, url_info, bytes, series, md5sum, password)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
            t,
            save=True,
        )
        logging.info("Added job %s to history", nzo.final_name)
    def fetch_history(
        self,
        start: Optional[int] = None,
        limit: Optional[int] = None,
        search: Optional[str] = None,
        failed_only: int = 0,
        categories: Optional[List[str]] = None,
        nzo_ids: Optional[List[str]] = None,
    ):
        """Return records for specified jobs.
        Returns a tuple (items, total_items) where total_items is the count
        of matching rows before start/limit are applied.
        """
        # First parameter is always the name-search pattern
        command_args = [convert_search(search)]
        # Build the additional WHERE clauses, one placeholder per value
        post = ""
        if categories:
            # The "Default" category is matched as "*"
            categories = ["*" if c == "Default" else c for c in categories]
            post = " AND (CATEGORY = ?"
            post += " OR CATEGORY = ? " * (len(categories) - 1)
            post += ")"
            command_args.extend(categories)
        if nzo_ids:
            post += " AND (NZO_ID = ?"
            post += " OR NZO_ID = ? " * (len(nzo_ids) - 1)
            post += ")"
            command_args.extend(nzo_ids)
        if failed_only:
            post += " AND STATUS = ?"
            command_args.append(Status.FAILED)
        cmd = "SELECT COUNT(*) FROM history WHERE name LIKE ?"
        total_items = -1
        if self.execute(cmd + post, command_args):
            total_items = self.cursor.fetchone()["COUNT(*)"]
        if not start:
            start = 0
        if not limit:
            limit = total_items
        command_args.extend([start, limit])
        cmd = "SELECT * FROM history WHERE name LIKE ?"
        if self.execute(
            cmd + post + " ORDER BY completed desc LIMIT ?, ?", command_args
        ):
            items = self.cursor.fetchall()
        else:
            items = []
        # Unpack the single line stage log
        # Stage Name is separated by ::: stage lines by ; and stages by \r\n
        items = [unpack_history_info(item) for item in items]
        return items, total_items
    def have_episode(self, series: str, season: str, episode: str) -> bool:
        """Check whether History contains this series episode"""
        total = 0
        if series and season and episode:
            # Same "title/season/episode" key as written by build_history_info
            pattern = "%s/%s/%s" % (series.lower(), season, episode)
            if self.execute(
                """SELECT COUNT(*) FROM History WHERE series = ? AND STATUS != ?""",
                (pattern, Status.FAILED),
            ):
                total = self.cursor.fetchone()["COUNT(*)"]
        return total > 0
    def have_name_or_md5sum(self, name: str, md5sum: str) -> bool:
        """Check whether this name or md5sum is already in History"""
        total = 0
        if self.execute(
            """SELECT COUNT(*) FROM History WHERE ( LOWER(name) = LOWER(?) OR md5sum = ? ) AND STATUS != ?""",
            (name, md5sum, Status.FAILED),
        ):
            total = self.cursor.fetchone()["COUNT(*)"]
        return total > 0
    def get_history_size(self):
        """Returns the total size of the history and
        amounts downloaded in the last month and week
        """
        # Total Size of the history
        total = 0
        if self.execute("""SELECT sum(bytes) FROM history"""):
            # NOTE(review): SUM() yields NULL (-> None) on an empty table, so
            # this can replace the 0 default with None -- confirm callers cope.
            total = self.cursor.fetchone()["sum(bytes)"]
        # Amount downloaded this month
        month_timest = int(this_month(time.time()))
        month = 0
        if self.execute(
            """SELECT sum(bytes) FROM history WHERE completed > ?""", (month_timest,)
        ):
            month = self.cursor.fetchone()["sum(bytes)"]
        # Amount downloaded this week
        week_timest = int(this_week(time.time()))
        week = 0
        if self.execute(
            """SELECT sum(bytes) FROM history WHERE completed > ?""", (week_timest,)
        ):
            week = self.cursor.fetchone()["sum(bytes)"]
        return total, month, week
    def get_script_log(self, nzo_id):
        """Return decompressed log file"""
        data = ""
        if self.execute(
            """SELECT script_log FROM history WHERE nzo_id = ?""", (nzo_id,)
        ):
            try:
                # Stored zlib-compressed by build_history_info()
                data = ubtou(zlib.decompress(self.cursor.fetchone()["script_log"]))
            except:
                pass
        return data
    def get_name(self, nzo_id):
        """Return name of the job `nzo_id`"""
        name = ""
        if self.execute("""SELECT name FROM history WHERE nzo_id = ?""", (nzo_id,)):
            try:
                return self.cursor.fetchone()["name"]
            except TypeError:
                # No records found
                pass
        return name
    def get_incomplete_path(self, nzo_id: str):
        """Return the `incomplete` path of the job `nzo_id` if
        the job failed and if the path is still there"""
        path = ""
        if self.execute(
            """SELECT path FROM history WHERE nzo_id = ? AND status = ?""",
            (nzo_id, Status.FAILED),
        ):
            try:
                path = self.cursor.fetchone()["path"]
            except TypeError:
                # No records found
                pass
        if os.path.exists(path):
            return path
        return None
    def get_other(self, nzo_id):
        """Return additional data for job `nzo_id`"""
        if self.execute("""SELECT * FROM history WHERE nzo_id = ?""", (nzo_id,)):
            try:
                item = self.cursor.fetchone()
                return (
                    item["report"],
                    item["url"],
                    item["pp"],
                    item["script"],
                    item["category"],
                )
            except TypeError:
                # No records found
                pass
        return "", "", "", "", ""
    def __enter__(self):
        """For context manager support"""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """For context manager support, ignore any exception"""
        self.close()
# Map the numeric result of opts_to_pp(repair, unpack, delete) to the
# single-letter code stored in the history "pp" column; presumably
# "" = none, R = repair, U = unpack, D = delete (matching the option names).
_PP_LOOKUP = {0: "", 1: "R", 2: "U", 3: "D"}
def build_history_info(
    nzo,
    workdir_complete="",
    postproc_time=0,
    script_output="",
    script_line="",
    series_info=False,
):
    """Collect all information for one history database row.
    Returns a tuple in the column order used by HistoryDB.add_history_db().
    """
    completed = int(time.time())
    pp = _PP_LOOKUP.get(opts_to_pp(nzo.repair, nzo.unpack, nzo.delete), "X")
    if script_output:
        # Compress the output of the script
        script_output = sqlite3.Binary(zlib.compress(utob(script_output)))
    download_time = nzo.nzo_info.get("download_time", 0)
    url_info = nzo.nzo_info.get("details", "") or nzo.nzo_info.get("more_info", "")
    # Pack the per-stage unpack results into a single string:
    # Stage Name is separated by ::: stage lines by ; and stages by \r\n
    stage_log = "\r\n".join(
        "%s:::%s" % (stage, ";".join(entries))
        for stage, entries in nzo.unpack_info.items()
    )
    # Reuse the old 'report' column to indicate a URL-fetch
    report = "future" if nzo.futuretype else ""
    # Analyze series info only when job is finished
    series = ""
    if series_info:
        show_analysis = sabnzbd.newsunpack.analyse_show(nzo.final_name)
        if show_analysis["job_type"] == "tv":
            seriesname = show_analysis["title"]
            season = show_analysis["season"]
            episode = show_analysis["episode"]
            if seriesname and season and episode:
                series = "%s/%s/%s" % (seriesname.lower(), season, episode)
    return (
        completed,
        nzo.final_name,
        nzo.filename,
        nzo.cat,
        pp,
        nzo.script,
        report,
        nzo.url,
        nzo.status,
        nzo.nzo_id,
        clip_path(workdir_complete),
        clip_path(nzo.download_path),
        script_output,
        script_line,
        download_time,
        postproc_time,
        stage_log,
        nzo.bytes_downloaded,
        nzo.fail_msg,
        url_info,
        nzo.bytes_downloaded,
        series,
        nzo.md5sum,
        nzo.correct_password,
    )
def unpack_history_info(item: Union[Dict, sqlite3.Row]):
    """Expands the single line stage_log from the DB
    into a python dictionary for use in the history display
    """
    # Convert result to dictionary
    if isinstance(item, sqlite3.Row):
        item = dict(item)
    # Stage Name is separated by ::: stage lines by ; and stages by \r\n
    raw_log = item["stage_log"]
    parsed_stages = []
    if raw_log:
        try:
            stage_chunks = raw_log.split("\r\n")
        except:
            logging.error(T("Invalid stage logging in history for %s"), item["name"])
            logging.debug("Lines: %s", raw_log)
            stage_chunks = []
        for chunk in stage_chunks:
            try:
                stage_name, logs = chunk.split(":::")
            except:
                logging.info('Missing key:::logs "%s"', chunk)
                continue
            entry = {"name": stage_name, "actions": []}
            try:
                entry["actions"] = logs.split(";")
            except:
                logging.error(
                    T("Invalid stage logging in history for %s"), item["name"]
                )
                logging.debug("Logs: %s", logs)
            parsed_stages.append(entry)
        # Sort it so it is more logical
        parsed_stages.sort(key=lambda entry: STAGES.get(entry["name"], 100))
    item["stage_log"] = parsed_stages
    # The (compressed) script log is fetched separately when needed
    if item["script_log"]:
        item["script_log"] = ""
    # The action line is only available for items in the postproc queue
    if "action_line" not in item:
        item["action_line"] = ""
    return item
def scheduled_history_purge():
    """Scheduler entry point: apply the configured history retention."""
    logging.info("Scheduled history purge")
    # HistoryDB is a context manager; the connection is closed on exit
    with HistoryDB() as history_db:
        history_db.auto_history_purge()
|
clientScripts | verify_and_restructure_transfer_bag | #!/usr/bin/env python
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>
import os
import sys
import django
from django.db import transaction
django.setup()
import bag
import fileOperations
# archivematicaCommon
from archivematicaFunctions import OPTIONAL_FILES, REQUIRED_DIRECTORIES
from databaseFunctions import insertIntoEvents
# dashboard
from main.models import File
from move_or_merge import move_or_merge
def restructureBagForComplianceFileUUIDsAssigned(
    job,
    unitPath,
    unitIdentifier,
    unitIdentifierType="transfer_id",
    unitPathReplaceWith="%transferDirectory%",
):
    """Flatten an extracted BagIt bag at ``unitPath`` into the standard
    Archivematica transfer layout.

    Steps (in order):
    1. Each required directory found under ``data/`` is moved to the top
       level (a pre-existing top-level ``metadata/`` is first merged into
       ``data/metadata``); required directories that exist nowhere are
       created.
    2. Loose top-level files: ``manifest*`` files move to ``metadata/``,
       files in OPTIONAL_FILES stay put, everything else moves to
       ``logs/BagIt``.
    3. Remaining entries in ``data/`` move to ``objects/`` (OPTIONAL_FILES
       move to the top level instead).
    4. The now-empty ``data/`` directory is removed (``os.rmdir`` raises
       if anything is left behind).

    All moves go through fileOperations.updateDirectoryLocation /
    updateFileLocation2 with unitIdentifier/unitIdentifierType and
    unitPathReplaceWith — presumably so the files' database locations are
    rewritten relative to %transferDirectory%; confirm in fileOperations.

    Raises:
        fileOperations.UpdateFileLocationFailed: propagated from the
            update helpers (callers catch it; see ``call``).
        OSError: from ``os.rmdir`` if ``data/`` is not empty at the end.
    """
    bagFileDefaultDest = os.path.join(unitPath, "logs", "BagIt")
    MY_REQUIRED_DIRECTORIES = REQUIRED_DIRECTORIES + (bagFileDefaultDest,)
    # This needs to be cast to a string since we're calling os.path.join(),
    # and any of the other arguments could contain arbitrary, non-Unicode
    # characters.
    unitPath = str(unitPath)
    unitDataPath = str(os.path.join(unitPath, "data"))
    # Step 1: promote required directories out of data/ to the top level.
    for dir in MY_REQUIRED_DIRECTORIES:
        dirPath = os.path.join(unitPath, dir)
        dirDataPath = os.path.join(unitPath, "data", dir)
        if os.path.isdir(dirDataPath):
            if dir == "metadata" and os.path.isdir(dirPath):
                # We move the existing top-level metadata folder, or merge it
                # with what is currently there, before the next set of
                # directory operations to move everything up a level below.
                job.pyprint(f"{dir}: moving/merging {dirPath} to {dirDataPath}")
                move_or_merge(dirPath, dirDataPath)
            # move to the top level
            src = dirDataPath
            dst = dirPath
            fileOperations.updateDirectoryLocation(
                src,
                dst,
                unitPath,
                unitIdentifier,
                unitIdentifierType,
                unitPathReplaceWith,
            )
            job.pyprint("moving directory ", dir)
        else:
            if not os.path.isdir(dirPath):
                job.pyprint("creating: ", dir)
                os.makedirs(dirPath)
    # Step 2: relocate loose top-level files.
    for item in os.listdir(unitPath):
        src = os.path.join(unitPath, item)
        if os.path.isfile(src):
            if item.startswith("manifest"):
                dst = os.path.join(unitPath, "metadata", item)
                fileOperations.updateFileLocation2(
                    src,
                    dst,
                    unitPath,
                    unitIdentifier,
                    unitIdentifierType,
                    unitPathReplaceWith,
                    printfn=job.pyprint,
                )
            elif item in OPTIONAL_FILES:
                job.pyprint("not moving:", item)
            else:
                dst = os.path.join(bagFileDefaultDest, item)
                fileOperations.updateFileLocation2(
                    src,
                    dst,
                    unitPath,
                    unitIdentifier,
                    unitIdentifierType,
                    unitPathReplaceWith,
                    printfn=job.pyprint,
                )
    # Step 3: move the payload remaining in data/ into objects/ (optional
    # files go to the top level instead).
    for item in os.listdir(unitDataPath):
        itemPath = os.path.join(unitDataPath, item)
        if os.path.isdir(itemPath) and item not in MY_REQUIRED_DIRECTORIES:
            job.pyprint("moving directory to objects: ", item)
            dst = os.path.join(unitPath, "objects", item)
            fileOperations.updateDirectoryLocation(
                itemPath,
                dst,
                unitPath,
                unitIdentifier,
                unitIdentifierType,
                unitPathReplaceWith,
            )
        elif os.path.isfile(itemPath) and item not in OPTIONAL_FILES:
            job.pyprint("moving file to objects: ", item)
            dst = os.path.join(unitPath, "objects", item)
            fileOperations.updateFileLocation2(
                itemPath,
                dst,
                unitPath,
                unitIdentifier,
                unitIdentifierType,
                unitPathReplaceWith,
                printfn=job.pyprint,
            )
        elif item in OPTIONAL_FILES:
            dst = os.path.join(unitPath, item)
            fileOperations.updateFileLocation2(
                itemPath,
                dst,
                unitPath,
                unitIdentifier,
                unitIdentifierType,
                unitPathReplaceWith,
                printfn=job.pyprint,
            )
    # Step 4: data/ must be empty by now.
    job.pyprint("removing empty data directory")
    os.rmdir(unitDataPath)
def call(jobs):
    """Client-script entry point: verify each transfer bag, then restructure
    it for Archivematica compliance.

    For each job, ``args[1]`` is the bag path and ``args[2]`` the transfer
    UUID.  An invalid bag sets job status 1; a failed restructure sets the
    status to the exception's code.  On success, a passed "fixity check"
    event is recorded for every non-removed file of the transfer located
    under %transferDirectory%objects/.  All DB work happens inside a single
    transaction.
    """
    with transaction.atomic():
        for job in jobs:
            with job.JobContext():
                target = job.args[1]
                transferUUID = job.args[2]
                if not bag.is_valid(target, printfn=job.pyprint):
                    job.pyprint(
                        "Failed bagit compliance. Not restructuring.", file=sys.stderr
                    )
                    job.set_status(1)
                else:
                    try:
                        restructureBagForComplianceFileUUIDsAssigned(
                            job, target, transferUUID
                        )
                    except fileOperations.UpdateFileLocationFailed as e:
                        # Skip event creation for this job; its status carries
                        # the failure code.
                        job.set_status(e.code)
                        continue
                    # Bag validation verified the payload manifests, so record
                    # a passed fixity check for each file now under objects/.
                    files = File.objects.filter(
                        removedtime__isnull=True,
                        transfer_id=transferUUID,
                        currentlocation__startswith="%transferDirectory%objects/",
                    ).values_list("uuid")
                    for (uuid,) in files:
                        insertIntoEvents(
                            fileUUID=uuid,
                            eventType="fixity check",
                            eventDetail="Bagit - verifypayloadmanifests",
                            eventOutcome="Pass",
                        )
|
uhd | qa_uhd | #!/usr/bin/env python
#
# Copyright 2005,2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
"""
gr-uhd sanity checking
"""
from gnuradio import gr, gr_unittest, uhd
class test_uhd(gr_unittest.TestCase):
    """Sanity checks for the gr-uhd Python bindings (no UHD hardware needed)."""
    def setUp(self):
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    def test_000_nop(self):
        """Just see if we can import the module...
        They may not have a UHD device connected, etc. Don't try to run anything"""
        pass
    def test_time_spec_t(self):
        """Exercise time_spec_t construction and the +/- operators."""
        seconds = 42.0
        time = uhd.time_spec_t(seconds)
        twice_time = time + time
        zero_time = time - time
        # The previous assertions compared `time.get_real_secs() * 2` with
        # `seconds * 2` and `get_real_secs() - get_real_secs()` with 0, both
        # of which hold trivially and never checked `twice_time`/`zero_time`.
        # Assert on the objects produced by the arithmetic instead.
        self.assertEqual(twice_time.get_real_secs(), seconds * 2)
        self.assertEqual(zero_time.get_real_secs(), 0.0)
    def test_stream_args_channel_foo(self):
        """
        Try to manipulate the stream args channels for proper swig'ing checks.
        """
        # FYI: stream_args_t.channels.append does not work due to copy operation of STL vectors
        sa = uhd.stream_args_t()
        sa.channels = [1, 0]
        print(sa.channels)
        self.assertEqual(len(sa.channels), 2)
        self.assertEqual(sa.channels[0], 1)
        self.assertEqual(sa.channels[1], 0)
if __name__ == "__main__":
    # Run the test case via GNU Radio's unittest wrapper.
    gr_unittest.run(test_uhd)
|
simplejson | decoder | """Implementation of JSONDecoder
"""
import re
import struct
import sys
from scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
# C-accelerated scanner when the _speedups extension is installed, else None.
c_scanstring = _import_c_scanstring()
__all__ = ["JSONDecoder"]
# Regex flags shared by every pattern compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return (nan, inf, -inf) by unpacking IEEE-754 bit patterns.

    NOTE: Python 2 only — ``str.decode("hex")`` does not exist on Python 3
    (``bytes.fromhex`` would be needed there).
    """
    # Big-endian bit patterns for a quiet NaN followed by +Infinity.
    _BYTES = "7FF80000000000007FF0000000000000".decode("hex")
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    if sys.byteorder != "big":
        # Byte-swap each 8-byte double so the native-endian unpack works.
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack("dd", _BYTES)
    return nan, inf, -inf
# Module-level float constants used by the extended (non-spec) literals.
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:
    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        # Build the formatted message once, for ValueError's args.
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        # Line/column of `end` exist only when an end offset was supplied.
        if end is None:
            self.endlineno = self.endcolno = None
        else:
            self.endlineno, self.endcolno = linecol(doc, end)
def linecol(doc, pos):
    """Return (line, column) for character offset *pos* in *doc*.

    Lines are 1-based; the column is the offset past the last newline
    (so it is 0-based only on the first line).
    """
    line = doc.count("\n", 0, pos) + 1
    col = pos if line == 1 else pos - doc.rindex("\n", 0, pos)
    return line, col
def errmsg(msg, doc, pos, end=None):
    """Format a decode-error message with line/column context.

    Note that this function is called from _speedups as well.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return "%s: line %d column %d (char %d)" % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return "%s: line %d column %d - line %d column %d (char %d - %d)" % (
        msg,
        lineno,
        colno,
        endlineno,
        endcolno,
        pos,
        end,
    )
# Extended (non-spec) literal names mapped to their float values.
_CONSTANTS = {
    "-Infinity": NegInf,
    "Infinity": PosInf,
    "NaN": NaN,
}
# Matches a run of plain characters followed by '"', '\', or a control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character JSON backslash escapes (\uXXXX is handled separately).
BACKSLASH = {
    '"': '"',
    "\\": "\\",
    "/": "/",
    "b": "\b",
    "f": "\f",
    "n": "\n",
    "r": "\r",
    "t": "\t",
}
# Encoding assumed for byte strings when the caller does not specify one.
DEFAULT_ENCODING = "utf-8"
# NOTE: Python 2 implementation — relies on the `unicode` and `unichr`
# builtins, which do not exist on Python 3.
def py_scanstring(
    s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match
):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    # Index of the opening quote, used for error reporting.
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError("Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != "\\":
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                # msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError("Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != "u":
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1 : end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xD800 <= uni <= 0xDBFF and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5 : end + 7] == "\\u":
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7 : end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine high and low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xD800) << 10) | (uni2 - 0xDC00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return "".join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
# Matches any run of JSON whitespace characters.
WHITESPACE = re.compile(r"[ \t\n\r]*", FLAGS)
WHITESPACE_STR = " \t\n\r"
# NOTE: Python 2 only — the tuple parameter `(s, end)` is a SyntaxError on
# Python 3.
def JSONObject(
    (s, end),
    encoding,
    strict,
    scan_once,
    object_hook,
    object_pairs_hook,
    memo=None,
    _w=WHITESPACE.match,
    _ws=WHITESPACE_STR,
):
    """Parse a JSON object starting just after its '{' at index ``end``.

    Returns (object, index-after-closing-'}').  ``memo`` interns repeated
    key strings so equal keys share one object.
    """
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end : end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end : end + 1]
        # Trivial empty object
        if nextchar == "}":
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes", s, end
            )
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # Intern the key via the shared memo dict.
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end : end + 1] != ":":
            end = _w(s, end).end()
            if s[end : end + 1] != ":":
                raise JSONDecodeError("Expecting ':' delimiter", s, end)
        end += 1
        # Fast-path skip of up to two whitespace chars before the value.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        # Find the ',' or '}' that follows the value.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ""
        end += 1
        if nextchar == "}":
            break
        elif nextchar != ",":
            raise JSONDecodeError("Expecting ',' delimiter", s, end - 1)
        # Locate the opening quote of the next property name.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ""
        end += 1
        if nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes", s, end - 1
            )
    # object_pairs_hook takes priority over object_hook.
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
# NOTE: Python 2 only — the tuple parameter `(s, end)` is a SyntaxError on
# Python 3.
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array starting just after its '[' at index ``end``.

    Returns (list, index-after-closing-']').
    """
    values = []
    nextchar = s[end : end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end : end + 1]
    # Look-ahead for trivial empty array
    if nextchar == "]":
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        # Find the ',' or ']' that follows the value.
        nextchar = s[end : end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end : end + 1]
        end += 1
        if nextchar == "]":
            break
        elif nextchar != ",":
            raise JSONDecodeError("Expecting ',' delimiter", s, end)
        # Fast-path skip of up to two whitespace chars before the next value.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:
    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    def __init__(
        self,
        encoding=None,
        object_hook=None,
        parse_float=None,
        parse_int=None,
        parse_constant=None,
        strict=True,
        object_pairs_hook=None,
    ):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.
        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.
        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded. By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).
        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded. By default, this is equivalent to
        ``int(num_str)``. This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).
        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
        can be used to raise an exception if invalid JSON numbers are
        encountered.
        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared key-interning cache passed to JSONObject by the scanner.
        self.memo = {}
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        # Anything but trailing whitespace after the document is an error.
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
|
filters | round_corner | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.ext.filters import _round_corner
from thumbor.filters import BaseFilter, filter_method
class Filter(BaseFilter):
    """Rounds the image's corners, filling the cut area with a solid color."""
    @filter_method(
        r"[\d]+(?:\|[\d]+)?",
        BaseFilter.PositiveNumber,
        BaseFilter.PositiveNumber,
        BaseFilter.PositiveNumber,
        BaseFilter.Boolean,
    )
    async def round_corner(self, radius, red, green, blue, transparent=False):
        """Apply round corners; *radius* is "r" or "rx|ry"."""
        width, height = self.engine.size
        # "rx|ry" gives independent horizontal/vertical radii; a single
        # value is used for both.
        parts = radius.split("|")
        horizontal_radius = int(parts[0])
        if len(parts) > 1:
            vertical_radius = int(parts[1])
        else:
            vertical_radius = horizontal_radius
        if transparent:
            self.engine.enable_alpha()
        mode, data = self.engine.image_data_as_rgb()
        result = _round_corner.apply(
            1,
            mode,
            horizontal_radius,
            vertical_radius,
            red,
            green,
            blue,
            width,
            height,
            data,
            transparent,
        )
        self.engine.set_image_data(result)
|
extractor | kickstarter | # coding: utf-8
from __future__ import unicode_literals
from ..utils import smuggle_url
from .common import InfoExtractor
class KickStarterIE(InfoExtractor):
    # Matches any Kickstarter project URL; the project id is the slug segment.
    _VALID_URL = r"https?://(?:www\.)?kickstarter\.com/projects/(?P<id>[^/]*)/.*"
    _TESTS = [
        {
            "url": "https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant/description",
            "md5": "c81addca81327ffa66c642b5d8b08cab",
            "info_dict": {
                "id": "1404461844",
                "ext": "mp4",
                "title": "Intersection: The Story of Josh Grant by Kyle Cowling",
                "description": (
                    "A unique motocross documentary that examines the "
                    "life and mind of one of sports most elite athletes: Josh Grant."
                ),
            },
        },
        {
            "note": "Embedded video (not using the native kickstarter video service)",
            "url": "https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178",
            "info_dict": {
                "id": "78704821",
                "ext": "mp4",
                "uploader_id": "pebble",
                "uploader": "Pebble Technology",
                "title": "Pebble iOS Notifications",
            },
            "add_ie": ["Vimeo"],
        },
        {
            "url": "https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html",
            "info_dict": {
                "id": "1420158244",
                "ext": "mp4",
                "title": "Power Drive 2000",
            },
        },
    ]
    def _real_extract(self, url):
        """Build the info dict for a Kickstarter project page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Page title with an optional trailing "— Kickstarter" suffix stripped.
        title = self._html_search_regex(
            r"<title>\s*(.*?)(?:\s*—\s*Kickstarter)?\s*</title>", webpage, "title"
        )
        video_url = self._search_regex(
            r'data-video-url="(.*?)"', webpage, "video URL", default=None
        )
        if video_url is None: # No native kickstarter, look for embedded videos
            # Delegate to the generic extractor for third-party embeds.
            return {
                "_type": "url_transparent",
                "ie_key": "Generic",
                "url": smuggle_url(url, {"to_generic": True}),
                "title": title,
            }
        thumbnail = self._og_search_thumbnail(webpage, default=None)
        if thumbnail is None:
            # Fall back to the poster <img> when og:image is absent.
            thumbnail = self._html_search_regex(
                r'<img[^>]+class="[^"]+\s*poster\s*[^"]+"[^>]+src="([^"]+)"',
                webpage,
                "thumbnail image",
                fatal=False,
            )
        return {
            "id": video_id,
            "url": video_url,
            "title": title,
            "description": self._og_search_description(webpage, default=None),
            "thumbnail": thumbnail,
        }
|
scripts | check_shortcut_keys | #!/usr/bin/env python3
#
# This script checks for duplicate shortcut keys in all translation files.
#
import collections
import os
import sys
from typing import Optional
# ANSI escape codes for highlighting warnings in terminal output.
COLOR_WARNING = "\033[93m"
COLOR_ENDC = "\033[0m"
# "&[a-zA-Z0-9_]" - Search char '&' and at least one word character after it.
# Raw string literal: the previous non-raw "(&[\w])" relied on the invalid
# escape "\w" passing through unchanged (DeprecationWarning since 3.6,
# SyntaxWarning in 3.12+). The value is identical.
regex_patter = r"(&[\w])"
# Directory where this python file resides
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
class ShortcutKeysChecker:
    """Parses gettext .po/.pot files and reports shortcut keys (the character
    following '&') that occur more than once within the same msgctxt section.
    """
    MSGCTXT = (
        "msgctxt" # Scope of the text . Like : msgctxt "@action:inmenu menubar:help"
    )
    MSGID = "msgid" # The id tag, also English text version
    MSGSTR = "msgstr" # The translation tag
    def has_duplicates(self, filename: str) -> bool:
        """
        Checks if the given file has duplicate shortcut keys.
        """
        with open(filename, "r", encoding="utf-8") as f:
            all_lines = f.readlines()
        all_lines = [l.strip() for l in all_lines]
        # msgctxt section -> {shortcut key -> occurrence data}.
        shortcut_dict = collections.defaultdict(dict)
        # State for the entry currently being accumulated.
        found_ctxt = False
        current_data = dict()
        current_field = None
        start_line = None
        for idx, line in enumerate(all_lines):
            if line.startswith(self.MSGCTXT):
                # A new entry begins: reset the accumulated state.
                found_ctxt = True
                current_data.clear()
                current_field = self.MSGCTXT
                current_data[current_field] = self._fetch_data(line)
                start_line = idx
                continue
            elif found_ctxt and line.startswith(self.MSGID):
                current_field = self.MSGID
                current_data[current_field] = self._fetch_data(line)
                continue
            elif found_ctxt and line.startswith(self.MSGSTR):
                current_field = self.MSGSTR
                current_data[current_field] = self._fetch_data(line)
                continue
            elif found_ctxt and line.startswith('"'):
                # Continuation line of a multi-line field; append its payload.
                data = line[1:-1] # strip the beginning and ending double-quotes
                current_data[current_field] += data
                continue
            # Any other line (e.g. blank) terminates the current entry.
            # NOTE(review): an entry that ends exactly at EOF is never
            # processed — assumes files end with a trailing blank line;
            # confirm against the .po files.
            if current_data:
                self._process_translation(shortcut_dict, current_data, start_line)
                current_data.clear()
                current_field = None
                found_ctxt = False
                start_line = None
        return self._show_all_duplicates(shortcut_dict, filename)
    def _fetch_data(self, line: str) -> str:
        # Take the quoted payload after the first space and strip the
        # surrounding double-quotes.
        return (line.split(" ", 1)[-1])[1:-1]
    def _process_translation(
        self, shortcut_dict: dict, data_dict: dict, start_line: int
    ) -> None:
        # Only check the ones with shortcuts
        # Prefer the translated text; fall back to msgid when untranslated.
        msg = data_dict[self.MSGID]
        if data_dict[self.MSGSTR]:
            msg = data_dict[self.MSGSTR]
        shortcut_key = self._get_shortcut_key(msg)
        if shortcut_key is None:
            return
        msg_section = data_dict[self.MSGCTXT]
        keys_dict = shortcut_dict[msg_section]
        if shortcut_key not in keys_dict:
            keys_dict[shortcut_key] = {
                "shortcut_key": shortcut_key,
                "section": msg_section,
                "existing_lines": dict(),
            }
        # Record this occurrence, keyed by its line number in the file.
        existing_data_dict = keys_dict[shortcut_key]["existing_lines"]
        existing_data_dict[start_line] = {
            "message": msg,
        }
    def _get_shortcut_key(self, text: str) -> Optional[str]:
        """Return the lower-cased character after a single '&', or None."""
        key = None
        # Exactly one '&' marks a shortcut; more (or none) means no shortcut.
        if text.count("&") == 1:
            idx = text.find("&") + 1
            if idx < len(text):
                character = text[idx]
                if not character.isspace():
                    key = character.lower()
        return key
    def _show_all_duplicates(self, shortcut_dict: dict, filename: str) -> bool:
        """Print every shortcut key used more than once; return True if any."""
        has_duplicates = False
        for keys_dict in shortcut_dict.values():
            for shortcut_key, data_dict in keys_dict.items():
                # A key seen on exactly one line is fine.
                if len(data_dict["existing_lines"]) == 1:
                    continue
                has_duplicates = True
                print("")
                print(
                    "The following messages have the same shortcut key '%s':"
                    % shortcut_key
                )
                print(" shortcut: '%s'" % data_dict["shortcut_key"])
                print(" section : '%s'" % data_dict["section"])
                for line, msg in data_dict["existing_lines"].items():
                    # Show the path relative to the repository root.
                    relative_filename = (filename.rsplit("..", 1)[-1])[1:]
                    print(
                        "    - [%s] L%7d : '%s'"
                        % (relative_filename, line, msg["message"])
                    )
        return has_duplicates
if __name__ == "__main__":
    checker = ShortcutKeysChecker()
    # "" stands for the untranslated template (cura.pot); every subdirectory
    # of resources/i18n holds one language's cura.po.
    all_dirnames = [""]
    for _, dirnames, _ in os.walk(os.path.join(SCRIPT_DIR, "..", "resources", "i18n")):
        all_dirnames += [dn for dn in dirnames]
        break
    found_duplicates = False
    for dirname in all_dirnames:
        file_name = "cura.pot" if not dirname else "cura.po"
        file_path = os.path.join(
            SCRIPT_DIR, "..", "resources", "i18n", dirname, file_name
        )
        # Call has_duplicates() first: the previous form
        # `found_duplicates or checker.has_duplicates(...)` short-circuited,
        # so once one file had a duplicate the remaining files were never
        # checked (or reported).
        found_duplicates = checker.has_duplicates(file_path) or found_duplicates
    sys.exit(0 if not found_duplicates else 1)
|
workflows | postgres_batch_export | import contextlib
import datetime as dt
import json
from dataclasses import dataclass
import psycopg2
from django.conf import settings
from posthog.batch_exports.service import PostgresBatchExportInputs
from posthog.temporal.workflows.base import PostHogWorkflow
from posthog.temporal.workflows.batch_exports import (
BatchExportTemporaryFile,
CreateBatchExportRunInputs,
UpdateBatchExportRunStatusInputs,
create_export_run,
get_batch_exports_logger,
get_data_interval,
get_results_iterator,
get_rows_count,
update_export_run_status,
)
from posthog.temporal.workflows.clickhouse import get_client
from psycopg2 import sql
from temporalio import activity, exceptions, workflow
from temporalio.common import RetryPolicy
@contextlib.contextmanager
def postgres_connection(inputs):
    """Manage a Postgres connection.

    Yields an open psycopg2 connection built from *inputs*.  Commits when
    the with-block exits cleanly, rolls back (and re-raises) on any
    exception, and always closes the connection.
    """
    connection = psycopg2.connect(
        user=inputs.user,
        password=inputs.password,
        database=inputs.database,
        host=inputs.host,
        port=inputs.port,
        # The 'hasSelfSignedCert' parameter in the postgres-plugin was provided mainly
        # for users of Heroku and RDS. It was used to set 'rejectUnauthorized' to false if a self-signed cert was used.
        # Mapping this to sslmode is not straight-forward, but going by Heroku's recommendation (see below) we should use 'no-verify'.
        # Reference: https://devcenter.heroku.com/articles/connecting-heroku-postgres#connecting-in-node-js
        sslmode="no-verify" if inputs.has_self_signed_cert is True else "prefer",
    )
    try:
        yield connection
    except Exception:
        # Undo any partial work before propagating the error.
        connection.rollback()
        raise
    else:
        connection.commit()
    finally:
        connection.close()
def copy_tsv_to_postgres(
    tsv_file, postgres_connection, schema: str, table_name: str, schema_columns
):
    """Execute a COPY FROM query with given connection to copy contents of tsv_file."""
    # Rewind so COPY reads the buffer from the beginning.
    tsv_file.seek(0)
    set_search_path = sql.SQL("SET search_path TO {schema}").format(
        schema=sql.Identifier(schema)
    )
    with postgres_connection.cursor() as cursor:
        cursor.execute(set_search_path)
        cursor.copy_from(tsv_file, table_name, null="", columns=schema_columns)
@dataclass
class PostgresInsertInputs:
    """Inputs for Postgres."""
    # Team whose events are exported.
    team_id: int
    # Postgres connection credentials and target location.
    user: str
    password: str
    host: str
    database: str
    table_name: str
    # Export window bounds (ISO-formatted timestamps).
    data_interval_start: str
    data_interval_end: str
    # When True, connect with sslmode="no-verify" (see postgres_connection).
    has_self_signed_cert: bool = False
    schema: str = "public"
    port: int = 5432
@activity.defn
async def insert_into_postgres_activity(inputs: PostgresInsertInputs):
    """Activity streams data from ClickHouse to Postgres.

    Creates the destination table if needed, then streams rows for the
    configured interval into a temporary TSV buffer, flushing it to
    Postgres via COPY whenever it exceeds the configured chunk size (and
    once more for the remainder).
    """
    logger = get_batch_exports_logger(inputs=inputs)
    logger.info(
        "Running Postgres export batch %s - %s",
        inputs.data_interval_start,
        inputs.data_interval_end,
    )
    async with get_client() as client:
        if not await client.is_alive():
            raise ConnectionError("Cannot establish connection to ClickHouse")
        # Cheap row count first: skip the Postgres setup for empty batches.
        count = await get_rows_count(
            client=client,
            team_id=inputs.team_id,
            interval_start=inputs.data_interval_start,
            interval_end=inputs.data_interval_end,
        )
        if count == 0:
            logger.info(
                "Nothing to export in batch %s - %s",
                inputs.data_interval_start,
                inputs.data_interval_end,
            )
            return
        logger.info("BatchExporting %s rows to Postgres", count)
        results_iterator = get_results_iterator(
            client=client,
            team_id=inputs.team_id,
            interval_start=inputs.data_interval_start,
            interval_end=inputs.data_interval_end,
        )
        # Ensure the destination table exists, in its own connection /
        # transaction so the DDL commits before streaming begins.
        with postgres_connection(inputs) as connection:
            with connection.cursor() as cursor:
                # NOTE(review): psycopg2's cursor.execute() returns None, so
                # `result` is unused here (and is shadowed by the loop below).
                result = cursor.execute(
                    sql.SQL(
                        """
                        CREATE TABLE IF NOT EXISTS {} (
                            "uuid" VARCHAR(200),
                            "event" VARCHAR(200),
                            "properties" JSONB,
                            "elements" JSONB,
                            "set" JSONB,
                            "set_once" JSONB,
                            "distinct_id" VARCHAR(200),
                            "team_id" INTEGER,
                            "ip" VARCHAR(200),
                            "site_url" VARCHAR(200),
                            "timestamp" TIMESTAMP WITH TIME ZONE
                        )
                        """
                    ).format(sql.Identifier(inputs.schema, inputs.table_name))
                )
        schema_columns = [
            "uuid",
            "event",
            "properties",
            "elements",
            "set",
            "set_once",
            "distinct_id",
            "team_id",
            "ip",
            "site_url",
            "timestamp",
        ]
        # Columns serialized as JSON strings for the JSONB columns above.
        json_columns = ("properties", "elements", "set", "set_once")
        with BatchExportTemporaryFile() as pg_file:
            with postgres_connection(inputs) as connection:
                for result in results_iterator:
                    row = {
                        key: json.dumps(result[key])
                        if key in json_columns and result[key] is not None
                        else result[key]
                        for key in schema_columns
                    }
                    pg_file.write_records_to_tsv([row], fieldnames=schema_columns)
                    # Flush in chunks to bound memory/disk usage.
                    if (
                        pg_file.tell()
                        > settings.BATCH_EXPORT_POSTGRES_UPLOAD_CHUNK_SIZE_BYTES
                    ):
                        logger.info(
                            "Copying %s records of size %s bytes to Postgres",
                            pg_file.records_since_last_reset,
                            pg_file.bytes_since_last_reset,
                        )
                        copy_tsv_to_postgres(
                            pg_file,
                            connection,
                            inputs.schema,
                            inputs.table_name,
                            schema_columns,
                        )
                        pg_file.reset()
                # Flush whatever remains after the last full chunk.
                if pg_file.tell() > 0:
                    logger.info(
                        "Copying %s records of size %s bytes to Postgres",
                        pg_file.records_since_last_reset,
                        pg_file.bytes_since_last_reset,
                    )
                    copy_tsv_to_postgres(
                        pg_file,
                        connection,
                        inputs.schema,
                        inputs.table_name,
                        schema_columns,
                    )
@workflow.defn(name="postgres-export")
class PostgresBatchExportWorkflow(PostHogWorkflow):
    """A Temporal Workflow to export ClickHouse data into Postgres.
    This Workflow is intended to be executed both manually and by a Temporal
    Schedule. When ran by a schedule, `data_interval_end` should be set to
    `None` so that we will fetch the end of the interval from the Temporal
    search attribute `TemporalScheduledStartTime`.
    """
    @staticmethod
    def parse_inputs(inputs: list[str]) -> PostgresBatchExportInputs:
        """Parse inputs from the management command CLI."""
        loaded = json.loads(inputs[0])
        return PostgresBatchExportInputs(**loaded)
    @workflow.run
    async def run(self, inputs: PostgresBatchExportInputs):
        """Workflow implementation to export data to Postgres."""
        logger = get_batch_exports_logger(inputs=inputs)
        data_interval_start, data_interval_end = get_data_interval(
            inputs.interval, inputs.data_interval_end
        )
        logger.info(
            "Starting Postgres export batch %s - %s",
            data_interval_start,
            data_interval_end,
        )
        # Record the run before doing any work so failures are visible too.
        create_export_run_inputs = CreateBatchExportRunInputs(
            team_id=inputs.team_id,
            batch_export_id=inputs.batch_export_id,
            data_interval_start=data_interval_start.isoformat(),
            data_interval_end=data_interval_end.isoformat(),
        )
        run_id = await workflow.execute_activity(
            create_export_run,
            create_export_run_inputs,
            start_to_close_timeout=dt.timedelta(minutes=5),
            retry_policy=RetryPolicy(
                initial_interval=dt.timedelta(seconds=10),
                maximum_interval=dt.timedelta(seconds=60),
                maximum_attempts=0,
                non_retryable_error_types=["NotNullViolation", "IntegrityError"],
            ),
        )
        # Assume success; the except branches below downgrade the status.
        update_inputs = UpdateBatchExportRunStatusInputs(id=run_id, status="Completed")
        insert_inputs = PostgresInsertInputs(
            team_id=inputs.team_id,
            user=inputs.user,
            password=inputs.password,
            host=inputs.host,
            port=inputs.port,
            database=inputs.database,
            schema=inputs.schema,
            table_name=inputs.table_name,
            has_self_signed_cert=inputs.has_self_signed_cert,
            data_interval_start=data_interval_start.isoformat(),
            data_interval_end=data_interval_end.isoformat(),
        )
        try:
            await workflow.execute_activity(
                insert_into_postgres_activity,
                insert_inputs,
                start_to_close_timeout=dt.timedelta(hours=1),
                retry_policy=RetryPolicy(
                    initial_interval=dt.timedelta(seconds=10),
                    maximum_interval=dt.timedelta(seconds=120),
                    maximum_attempts=10,
                    non_retryable_error_types=[
                        # Raised on errors that are related to database operation.
                        # For example: unexpected disconnect, database or other object not found.
                        "OperationalError"
                    ],
                ),
            )
        except exceptions.ActivityError as e:
            if isinstance(e.cause, exceptions.CancelledError):
                logger.error("Postgres BatchExport was cancelled.")
                update_inputs.status = "Cancelled"
            else:
                logger.exception("Postgres BatchExport failed.", exc_info=e.cause)
                update_inputs.status = "Failed"
                update_inputs.latest_error = str(e.cause)
            raise
        except Exception as e:
            # Log-message typos fixed: "Postgers" -> "Postgres",
            # "ocurred" -> "occurred".
            logger.exception(
                "Postgres BatchExport failed with an unexpected error.", exc_info=e
            )
            update_inputs.status = "Failed"
            update_inputs.latest_error = "An unexpected error has occurred"
            raise
        else:
            logger.info(
                "Successfully finished Postgres export batch %s - %s",
                data_interval_start,
                data_interval_end,
            )
        finally:
            # Always persist the final run status, even when re-raising.
            await workflow.execute_activity(
                update_export_run_status,
                update_inputs,
                start_to_close_timeout=dt.timedelta(minutes=5),
                retry_policy=RetryPolicy(
                    initial_interval=dt.timedelta(seconds=10),
                    maximum_interval=dt.timedelta(seconds=60),
                    maximum_attempts=0,
                    non_retryable_error_types=["NotNullViolation", "IntegrityError"],
                ),
            )
|
addons | WindowsPhoneNotify | # -*- coding: utf-8 -*-
import http.client
from contextlib import closing
from ..base.notifier import Notifier
class WindowsPhoneNotify(Notifier):
    """Send push (toast) notifications to Windows Phone via MPNS."""

    __name__ = "WindowsPhoneNotify"
    __type__ = "addon"
    __version__ = "0.19"
    __status__ = "testing"
    __config__ = [
        ("enabled", "bool", "Activated", False),
        ("pushid", "str", "Push ID", ""),
        ("pushurl", "str", "Push url", ""),
        ("captcha", "bool", "Notify captcha request", True),
        ("reconnection", "bool", "Notify reconnection request", False),
        ("downloadfinished", "bool", "Notify download finished", True),
        ("downloadfailed", "bool", "Notify download failed", True),
        ("alldownloadsfinished", "bool", "Notify all downloads finished", True),
        ("alldownloadsprocessed", "bool", "Notify all downloads processed", True),
        ("packagefinished", "bool", "Notify package finished", True),
        ("packagefailed", "bool", "Notify package failed", True),
        ("update", "bool", "Notify pyLoad update", False),
        ("exit", "bool", "Notify pyLoad shutdown/restart", False),
        ("sendinterval", "int", "Interval in seconds between notifications", 1),
        ("sendpermin", "int", "Max notifications per minute", 60),
        ("ignoreclient", "bool", "Send notifications if client is connected", True),
    ]
    __description__ = """Send push notifications to Windows Phone"""
    __license__ = "GPLv3"
    __authors__ = [
        ("Andy Voigt", "phone-support@hotmail.de"),
        ("Walter Purcaro", "vuolter@gmail.com"),
    ]

    def get_key(self):
        """Return the (push id, push url) pair from the addon config."""
        return self.config.get("pushid"), self.config.get("pushurl")

    def format_request(self, msg):
        """Wrap *msg* into the WPNotification toast XML payload."""
        return (
            "<?xml version='1.0' encoding='utf-8'?> <wp:Notification xmlns:wp='WPNotification'> "
            "<wp:Toast> <wp:Text1>pyLoad</wp:Text1> <wp:Text2>{}</wp:Text2> "
            "</wp:Toast> </wp:Notification>".format(msg)
        )

    def send(self, event, msg, key):
        """POST a toast notification for *event* to the MPNS endpoint.

        :param event: event name shown in the toast.
        :param msg: optional detail text appended after the event name.
        :param key: (push id, push url) pair as returned by get_key().
        """
        push_id, push_url = key
        request = self.format_request("{}: {}".format(event, msg) if msg else event)
        # BUGFIX: http.client requires a bytes body; sending a str raises
        # TypeError at socket level.  The payload declares utf-8, so encode
        # accordingly and compute Content-length from the byte count (a char
        # count is wrong for non-ASCII messages).
        payload = request.encode("utf-8")
        with closing(http.client.HTTPConnection(push_url)) as webservice:
            webservice.putrequest("POST", push_id)
            webservice.putheader("Host", push_url)
            webservice.putheader("Content-type", "text/xml")
            webservice.putheader("X-NotificationClass", "2")
            webservice.putheader("X-WindowsPhone-Target", "toast")
            webservice.putheader("Content-length", "{}".format(len(payload)))
            webservice.endheaders()
            webservice.send(payload)
|
scripts | KineticNCBeamicon2_post | # ***************************************************************************
# * Copyright (c) 2014 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************/
# ****************************************************************************
# * Modifications by Samuel Mayer (samuel.mayer@posteo.de) *
# * 2021 *
# * *
# * This postprocessor is based on the linuxcnc_post coming with FreeCAD *
# * 0.19 and modified to work with Kinetic-NC (cnc-step.com) and Beamicon2 *
# * (benezan-electronics.de) (up to 4 Axis) *
# * *
# ***************************************************************************/
import argparse
import datetime
import shlex
import FreeCAD
import Path
import Path.Post.Utils as PostUtils
import PathScripts.PathUtils as PathUtils
from FreeCAD import Units
from PathScripts import PathUtils
TOOLTIP = """
This is a postprocessor file for the Path workbench. It is used to
take a pseudo-G-code fragment outputted by a Path object, and output
real G-code suitable for the KineticNC/Beamicon2 Control Software for up to 4 Axis (3 plus rotary).
The CORNER_MAX Values are set for a mill with max travel of 1000mm in X, 600mm in Y and 300mm in Z direction.
This postprocessor, once placed in the appropriate PathScripts folder, can be used directly from inside
FreeCAD, via the GUI importer or via python scripts with:
import KineticNCBeamicon2_post
KineticNCBeamicon2_post.export(object,"/path/to/file.ncc","")
"""
# Timestamp embedded in the generated G-code header.
now = datetime.datetime.now()

# Command-line style option parser for the argstring passed to export().
# NOTE(review): prog is still "linuxcnc", inherited from linuxcnc_post this
# file is based on — presumably it should name this postprocessor; confirm
# before changing (it only affects help text).
parser = argparse.ArgumentParser(prog="linuxcnc", add_help=False)
parser.add_argument("--no-header", action="store_true", help="suppress header output")
parser.add_argument(
    "--no-comments", action="store_true", help="suppress comment output"
)
parser.add_argument(
    "--line-numbers", action="store_true", help="prefix with line numbers"
)
parser.add_argument(
    "--no-show-editor",
    action="store_true",
    help="don't pop up editor before writing output",
)
parser.add_argument(
    "--precision", default="3", help="number of digits of precision, default=3"
)
parser.add_argument(
    "--preamble",
    help='set commands to be issued before the first command, default="G17\nG90"',
)
parser.add_argument(
    "--postamble",
    help='set commands to be issued after the last command, default="M05\nG17 G90\nM2"',
)
parser.add_argument(
    "--inches", action="store_true", help="Convert output for US imperial mode (G20)"
)
parser.add_argument(
    "--modal",
    action="store_true",
    help="Output the Same G-command Name USE NonModal Mode",
)
parser.add_argument(
    "--axis-modal", action="store_true", help="Output the Same Axis Value Mode"
)
TOOLTIP_ARGS = parser.format_help()

# These globals set common customization preferences
OUTPUT_COMMENTS = True
OUTPUT_HEADER = True
OUTPUT_LINE_NUMBERS = False
SHOW_EDITOR = True
MODAL = False  # if true commands are suppressed if the same as previous line.
OUTPUT_DOUBLES = (
    True  # if false duplicate axis values are suppressed if the same as previous line.
)
COMMAND_SPACE = " "
LINENR = 100  # line number starting value

# These globals will be reflected in the Machine configuration of the project
UNITS = "G21"  # G21 for metric, G20 for us standard
UNIT_SPEED_FORMAT = "mm/min"
UNIT_FORMAT = "mm"
MACHINE_NAME = "not set"
# Machine travel limits (mm): X 1000, Y 600, Z 300 — see TOOLTIP.
CORNER_MIN = {"x": 0, "y": 0, "z": 0}
CORNER_MAX = {"x": 1000, "y": 600, "z": 300}
PRECISION = 3

# Preamble text will appear at the beginning of the GCODE output file.
PREAMBLE = """%
G17 G21 G40 G49 G80 G90
M08
"""

# Postamble text will appear following the last operation.
POSTAMBLE = """M05 M09
G17 G90 G80 G40
M30
"""

# Pre operation text will be inserted before every operation
PRE_OPERATION = """"""

# Post operation text will be inserted after every operation
POST_OPERATION = """"""

# Tool Change commands will be inserted before a tool change
TOOL_CHANGE = """M05
M09"""

# to distinguish python built-in open function from the one declared below
if open.__module__ in ["__builtin__", "io"]:
    pythonopen = open
def processArguments(argstring):
    """Parse *argstring* and update the module-level output settings.

    :param argstring: option string in the format shown by TOOLTIP_ARGS.
    :return: True on success, False if the arguments could not be parsed.
    """
    global OUTPUT_HEADER
    global OUTPUT_COMMENTS
    global OUTPUT_LINE_NUMBERS
    global SHOW_EDITOR
    global PRECISION
    global PREAMBLE
    global POSTAMBLE
    global UNITS
    global UNIT_SPEED_FORMAT
    global UNIT_FORMAT
    global MODAL
    global OUTPUT_DOUBLES

    try:
        args = parser.parse_args(shlex.split(argstring))
        if args.no_header:
            OUTPUT_HEADER = False
        if args.no_comments:
            OUTPUT_COMMENTS = False
        if args.line_numbers:
            OUTPUT_LINE_NUMBERS = True
        if args.no_show_editor:
            SHOW_EDITOR = False
        PRECISION = args.precision
        if args.preamble is not None:
            PREAMBLE = args.preamble
        if args.postamble is not None:
            POSTAMBLE = args.postamble
        if args.inches:
            UNITS = "G20"
            UNIT_SPEED_FORMAT = "in/min"
            UNIT_FORMAT = "in"
            PRECISION = 4
        if args.modal:
            MODAL = True
        if args.axis_modal:
            OUTPUT_DOUBLES = False
    # BUGFIX/cleanup: the original bare ``except:`` also swallowed
    # KeyboardInterrupt.  argparse reports a parse error by raising
    # SystemExit (which ``except Exception`` would not catch), so catch
    # exactly that plus ordinary exceptions.  Stray debug prints removed.
    except (Exception, SystemExit):
        return False
    return True
def export(objectslist, filename, argstring):
    """Post-process *objectslist* and write the resulting G-code.

    :param objectslist: Path objects (or compounds) to export.
    :param filename: output file name, or "-" to skip writing to disk.
    :param argstring: option string, see TOOLTIP_ARGS.
    :return: the final G-code string, or None on invalid arguments/objects.
    """
    if not processArguments(argstring):
        return None
    global UNITS
    global UNIT_FORMAT
    global UNIT_SPEED_FORMAT

    # refuse to run on anything that is not a Path / compound
    for obj in objectslist:
        if not hasattr(obj, "Path"):
            print(
                "the object "
                + obj.Name
                + " is not a path. Please select only path and Compounds."
            )
            return None

    print("postprocessing...")
    gcode = ""

    # write header
    if OUTPUT_HEADER:
        gcode += linenumber() + "(Exported by FreeCAD)\n"
        gcode += linenumber() + "(Post Processor: " + __name__ + ")\n"
        gcode += linenumber() + "(Output Time:" + str(now) + ")\n"

    # Write the preamble
    if OUTPUT_COMMENTS:
        gcode += linenumber() + "(begin preamble)\n"
    for line in PREAMBLE.splitlines(False):
        gcode += linenumber() + line + "\n"
    gcode += linenumber() + UNITS + "\n"

    for obj in objectslist:
        # fetch machine details from the parent Job, if any
        job = PathUtils.findParentJob(obj)
        myMachine = "not set"
        if hasattr(job, "MachineName"):
            myMachine = job.MachineName
        if hasattr(job, "MachineUnits"):
            if job.MachineUnits == "Metric":
                UNITS = "G21"
                UNIT_FORMAT = "mm"
                UNIT_SPEED_FORMAT = "mm/min"
            else:
                UNITS = "G20"
                UNIT_FORMAT = "in"
                UNIT_SPEED_FORMAT = "in/min"

        # do the pre_op
        if OUTPUT_COMMENTS:
            gcode += linenumber() + "(begin operation: %s)\n" % obj.Label
            gcode += linenumber() + "(machine: %s, %s)\n" % (
                myMachine,
                UNIT_SPEED_FORMAT,
            )
        for line in PRE_OPERATION.splitlines(True):
            gcode += linenumber() + line

        gcode += parse(obj)

        # do the post_op
        if OUTPUT_COMMENTS:
            gcode += linenumber() + "(finish operation: %s)\n" % obj.Label
        for line in POST_OPERATION.splitlines(True):
            gcode += linenumber() + line

    # do the post_amble
    if OUTPUT_COMMENTS:
        gcode += "(begin postamble)\n"
    for line in POSTAMBLE.splitlines(True):
        gcode += linenumber() + line

    if FreeCAD.GuiUp and SHOW_EDITOR:
        # let the user review/edit the generated code before saving
        dia = PostUtils.GCodeEditorDialog()
        dia.editor.setText(gcode)
        result = dia.exec_()
        if result:
            final = dia.editor.toPlainText()
        else:
            final = gcode
    else:
        final = gcode

    print("done postprocessing.")

    if filename != "-":
        # BUGFIX/cleanup: use a context manager so the file handle is closed
        # even if the write raises.
        with pythonopen(filename, "w") as gfile:
            gfile.write(final)

    return final
def linenumber():
    """Return the next 'N###' line-number prefix, or '' when disabled."""
    global LINENR
    if not OUTPUT_LINE_NUMBERS:
        return ""
    LINENR += 10
    return "N" + str(LINENR) + " "
def parse(pathobj):
    """Translate one Path object (or compound) into a G-code string.

    Recurses into compounds/projects; for a simple path, emits one output
    line per Path command, honoring MODAL, OUTPUT_DOUBLES, OUTPUT_COMMENTS
    and line numbering.

    :param pathobj: a FreeCAD Path object or compound/project group.
    :return: the generated G-code as a string.
    """
    global PRECISION
    global MODAL
    global OUTPUT_DOUBLES
    global UNIT_FORMAT
    global UNIT_SPEED_FORMAT

    out = ""
    lastcommand = None
    precision_string = "." + str(PRECISION) + "f"
    currLocation = {}  # keep track for no doubles
    # the order of parameters
    # linuxcnc doesn't want K properties on XY plane; Arcs need work.
    params = [
        "X",
        "Y",
        "Z",
        "A",
        "B",
        "C",
        "I",
        "J",
        "F",
        "S",
        "T",
        "Q",
        "R",
        "L",
        "H",
        "D",
        "P",
    ]
    firstmove = Path.Command("G0", {"X": -1, "Y": -1, "Z": -1, "F": 0.0})
    currLocation.update(firstmove.Parameters)  # set First location Parameters

    if hasattr(pathobj, "Group"):  # We have a compound or project.
        for p in pathobj.Group:
            out += parse(p)
        return out

    # parsing a simple path; groups might contain non-path things like stock.
    if not hasattr(pathobj, "Path"):
        return out

    for c in PathUtils.getPathWithPlacement(pathobj).Commands:
        outstring = []
        command = c.Name
        outstring.append(command)

        # if modal: suppress the command if it is the same as the last one
        if MODAL is True:
            if command == lastcommand:
                outstring.pop(0)

        if c.Name[0] == "(" and not OUTPUT_COMMENTS:  # command is a comment
            continue

        # Now add the remaining parameters in order
        for param in params:
            if param in c.Parameters:
                # NOTE(review): when OUTPUT_DOUBLES is off and F is
                # unchanged, F falls through to the generic branch below and
                # is formatted as a length — inherited from linuxcnc_post,
                # left untouched.
                if param == "F" and (
                    currLocation[param] != c.Parameters[param] or OUTPUT_DOUBLES
                ):
                    if c.Name not in [
                        "G0",
                        "G00",
                    ]:  # linuxcnc doesn't use rapid speeds
                        speed = Units.Quantity(
                            c.Parameters["F"], FreeCAD.Units.Velocity
                        )
                        if speed.getValueAs(UNIT_SPEED_FORMAT) > 0.0:
                            outstring.append(
                                param
                                + format(
                                    float(speed.getValueAs(UNIT_SPEED_FORMAT)),
                                    precision_string,
                                )
                            )
                    else:
                        continue
                elif param == "T":
                    outstring.append(param + str(int(c.Parameters["T"])))
                elif param == "H":
                    outstring.append(param + str(int(c.Parameters["H"])))
                elif param == "D":
                    outstring.append(param + str(int(c.Parameters["D"])))
                elif param == "S":
                    outstring.append(param + str(int(c.Parameters["S"])))
                else:
                    if (
                        (not OUTPUT_DOUBLES)
                        and (param in currLocation)
                        and (currLocation[param] == c.Parameters[param])
                    ):
                        continue
                    pos = Units.Quantity(
                        c.Parameters[param], FreeCAD.Units.Length
                    )
                    outstring.append(
                        param
                        + format(
                            float(pos.getValueAs(UNIT_FORMAT)), precision_string
                        )
                    )

        # store the latest command
        lastcommand = command
        currLocation.update(c.Parameters)

        # Check for Tool Change:
        if command == "M6":
            for line in TOOL_CHANGE.splitlines(True):
                out += linenumber() + line

        if command == "message":
            if OUTPUT_COMMENTS is False:
                # BUGFIX: the original did ``out = []`` here, clobbering all
                # G-code accumulated so far and turning ``out`` into a list
                # (breaking the later ``out.strip()``).  Suppress only this
                # message line instead.
                outstring = []
            else:
                outstring.pop(0)  # remove the command

        # prepend a line number and append a newline
        if len(outstring) >= 1:
            if OUTPUT_LINE_NUMBERS:
                outstring.insert(0, (linenumber()))
            # append the line to the final output
            for w in outstring:
                out += w + COMMAND_SPACE
            out = out.strip() + "\n"

    return out
|
torrent-checker | tracker_manager | import logging
import time
from pathlib import Path
from pony.orm import count, db_session
from tribler.core.components.metadata_store.db.store import MetadataStore
from tribler.core.utilities.tracker_utils import get_uniformed_tracker_url
MAX_TRACKER_FAILURES = 5  # if a tracker fails this amount of times in a row, its 'is_alive' will be marked as 0 (dead).
TRACKER_RETRY_INTERVAL = 60  # A "dead" tracker will be retried every 60 seconds
class TrackerManager:
    """Bookkeeping of torrent trackers and their health.

    Tracker state (URL, last check time, failure count, aliveness) lives in
    the metadata store; a user-provided blacklist file excludes trackers from
    the torrent-checking rotation.
    """

    def __init__(self, state_dir: Path = None, metadata_store: MetadataStore = None):
        """
        :param state_dir: session state directory (location of the blacklist file).
        :param metadata_store: store providing the TrackerState entity.
        """
        self._logger = logging.getLogger(self.__class__.__name__)
        self.state_dir = state_dir
        self.TrackerState = metadata_store.TrackerState
        self.blacklist = []
        self.load_blacklist()

    def load_blacklist(self):
        """
        Load the tracker blacklist from tracker_blacklist.txt in the session state directory.
        Entries are newline separated and are supposed to be sanitized.
        """
        blacklist_file = Path(self.state_dir / "tracker_blacklist.txt").absolute()
        if not blacklist_file.exists():
            self._logger.info("No tracker blacklist file found at %s.", blacklist_file)
            return
        with open(blacklist_file) as blacklist_file_handle:
            # Note that get_uniformed_tracker_url will strip the newline at the end of .readlines().
            # BUGFIX: it returns None for malformed entries — skip those
            # instead of storing None values in the blacklist.
            self.blacklist.extend(
                url
                for url in map(
                    get_uniformed_tracker_url, blacklist_file_handle.readlines()
                )
                if url
            )

    def get_tracker_info(self, tracker_url):
        """
        Gets the tracker information with the given tracker URL.
        :param tracker_url: The given tracker URL.
        :return: The tracker info dict if exists, None otherwise.
        """
        # "DHT" is a pseudo-tracker and must not be sanitized.
        sanitized_tracker_url = (
            get_uniformed_tracker_url(tracker_url)
            if tracker_url != "DHT"
            else tracker_url
        )
        with db_session:
            tracker = list(
                self.TrackerState.select(lambda g: g.url == sanitized_tracker_url)
            )
            if tracker:
                return {
                    "id": tracker[0].url,
                    "last_check": tracker[0].last_check,
                    "failures": tracker[0].failures,
                    "is_alive": tracker[0].alive,
                }
            return None

    def add_tracker(self, tracker_url):
        """
        Adds a new tracker into the tracker info dict and the database.
        :param tracker_url: The new tracker URL to be added.
        """
        sanitized_tracker_url = get_uniformed_tracker_url(tracker_url)
        if sanitized_tracker_url is None:
            self._logger.warning("skip invalid tracker: %s", repr(tracker_url))
            return

        with db_session:
            num = count(g for g in self.TrackerState if g.url == sanitized_tracker_url)
            if num > 0:
                self._logger.debug("skip existing tracker: %s", repr(tracker_url))
                return

            # insert into database
            self.TrackerState(
                url=sanitized_tracker_url,
                last_check=0,
                failures=0,
                alive=True,
                torrents={},
            )

    def remove_tracker(self, tracker_url):
        """
        Remove a given tracker from the database.
        URL is sanitized first and removed from the database. If the URL is ill formed then try removing the non-
        sanitized version.
        :param tracker_url: The URL of the tracker to be deleted.
        """
        sanitized_tracker_url = get_uniformed_tracker_url(tracker_url)
        with db_session:
            options = self.TrackerState.select(
                lambda g: g.url in [tracker_url, sanitized_tracker_url]
            )
            # iterate over a copy, since we delete while iterating
            for option in options[:]:
                option.delete()

    @db_session
    def update_tracker_info(self, tracker_url, is_successful=True):
        """
        Updates a tracker information.
        :param tracker_url: The given tracker_url.
        :param is_successful: If the check was successful.
        """
        if tracker_url == "DHT":
            return

        sanitized_tracker_url = get_uniformed_tracker_url(tracker_url)
        tracker = self.TrackerState.get(lambda g: g.url == sanitized_tracker_url)

        if not tracker:
            self._logger.error(
                "Trying to update the tracker info of an unknown tracker URL"
            )
            return

        current_time = int(time.time())
        # a successful check resets the failure streak
        failures = 0 if is_successful else tracker.failures + 1
        is_alive = failures < MAX_TRACKER_FAILURES

        # update the dict
        tracker.last_check = current_time
        tracker.failures = failures
        tracker.alive = is_alive
        # lazy %-style args: formatting happens only if the record is emitted
        self._logger.info(
            "Tracker updated: %s. Alive: %s. Failures: %s.",
            tracker.url,
            is_alive,
            failures,
        )

    @db_session
    def get_next_tracker(self):
        """
        Gets the next tracker.
        :return: The next tracker for torrent-checking.
        """
        tracker = (
            self.TrackerState.select(
                lambda g: str(g.url)
                and g.alive
                and g.last_check + TRACKER_RETRY_INTERVAL <= int(time.time())
                and str(g.url) not in self.blacklist
            )
            .order_by(self.TrackerState.last_check)
            .limit(1)
        )
        if not tracker:
            return None
        return tracker[0]
|
archiver | debug_cmd | import argparse
import functools
import json
import textwrap
from binascii import hexlify, unhexlify
from ..archive import Archive
from ..compress import CompressionSpec
from ..constants import * # NOQA
from ..helpers import (
StableDict,
archivename_validator,
bin_to_hex,
dash_open,
msgpack,
positive_int_validator,
prepare_dump_dict,
sysinfo,
)
from ..manifest import Manifest
from ..platform import get_process_id
from ..repoobj import RepoObj
from ..repository import LIST_SCAN_LIMIT, TAG_COMMIT, TAG_DELETE, TAG_PUT, Repository
from ._common import Highlander, process_epilog, with_repository
class DebugMixIn:
def do_debug_info(self, args):
"""display system information for debugging / bug reports"""
print(sysinfo())
print("Process ID:", get_process_id())
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_dump_archive_items(self, args, repository, manifest):
"""dump (decrypted, decompressed) archive items metadata (not: data)"""
repo_objs = manifest.repo_objs
archive = Archive(manifest, args.name)
for i, item_id in enumerate(archive.metadata.items):
_, data = repo_objs.parse(item_id, repository.get(item_id), ro_type=ROBJ_ARCHIVE_STREAM)
filename = "%06d_%s.items" % (i, bin_to_hex(item_id))
print("Dumping", filename)
with open(filename, "wb") as fd:
fd.write(data)
print("Done.")
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
    def do_debug_dump_archive(self, args, repository, manifest):
        """dump decoded archive metadata (not: data)"""
        repo_objs = manifest.repo_objs
        try:
            # raw manifest entry for the requested archive name
            archive_meta_orig = manifest.archives.get_raw_dict()[args.name]
        except KeyError:
            raise Archive.DoesNotExist(args.name)
        indent = 4

        def do_indent(d):
            # pretty-print one dict, indented to nest inside the outer JSON object
            return textwrap.indent(json.dumps(d, indent=indent), prefix=" " * indent)

        def output(fd):
            # this outputs megabytes of data for a modest sized archive, so some manual streaming json output
            fd.write("{\n")
            fd.write(' "_name": ' + json.dumps(args.name) + ",\n")
            fd.write(' "_manifest_entry":\n')
            fd.write(do_indent(prepare_dump_dict(archive_meta_orig)))
            fd.write(",\n")

            # decode the archive metadata object itself
            archive_id = archive_meta_orig["id"]
            _, data = repo_objs.parse(archive_id, repository.get(archive_id), ro_type=ROBJ_ARCHIVE_META)
            archive_org_dict = msgpack.unpackb(data, object_hook=StableDict)

            fd.write(' "_meta":\n')
            fd.write(do_indent(prepare_dump_dict(archive_org_dict)))
            fd.write(",\n")
            fd.write(' "_items": [\n')

            unpacker = msgpack.Unpacker(use_list=False, object_hook=StableDict)
            first = True
            # item_ptrs chunks hold the list of item metadata chunk ids
            items = []
            for chunk_id in archive_org_dict["item_ptrs"]:
                _, data = repo_objs.parse(chunk_id, repository.get(chunk_id), ro_type=ROBJ_ARCHIVE_CHUNKIDS)
                items.extend(msgpack.unpackb(data))
            # feed each item chunk to the streaming unpacker; items may span chunks
            for item_id in items:
                _, data = repo_objs.parse(item_id, repository.get(item_id), ro_type=ROBJ_ARCHIVE_STREAM)
                unpacker.feed(data)
                for item in unpacker:
                    item = prepare_dump_dict(item)
                    if first:
                        first = False
                    else:
                        fd.write(",\n")
                    fd.write(do_indent(item))
            fd.write("\n")
            fd.write(" ]\n}\n")

        with dash_open(args.path, "w") as fd:
            output(fd)
        return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_dump_manifest(self, args, repository, manifest):
"""dump decoded repository manifest"""
repo_objs = manifest.repo_objs
_, data = repo_objs.parse(
manifest.MANIFEST_ID,
repository.get(manifest.MANIFEST_ID),
ro_type=ROBJ_MANIFEST,
)
meta = prepare_dump_dict(msgpack.unpackb(data, object_hook=StableDict))
with dash_open(args.path, "w") as fd:
json.dump(meta, fd, indent=4)
return EXIT_SUCCESS
@with_repository(manifest=False)
    def do_debug_dump_repo_objs(self, args, repository):
        """dump (decrypted, decompressed) repo objects, repo index MUST be current/correct"""
        from ..crypto.key import key_factory

        def decrypt_dump(i, id, cdata, tag=None, segment=None, offset=None):
            # Decrypt/decompress one object (empty payload if cdata is None)
            # and write it to a descriptively named file in the current dir.
            if cdata is not None:
                _, data = repo_objs.parse(id, cdata, ro_type=ROBJ_DONTCARE)
            else:
                _, data = {}, b""
            tag_str = "" if tag is None else "_" + tag
            segment_str = "_" + str(segment) if segment is not None else ""
            offset_str = "_" + str(offset) if offset is not None else ""
            id_str = "_" + bin_to_hex(id) if id is not None else ""
            filename = "%08d%s%s%s%s.obj" % (
                i,
                segment_str,
                offset_str,
                tag_str,
                id_str,
            )
            print("Dumping", filename)
            with open(filename, "wb") as fd:
                fd.write(data)

        if args.ghost:
            # dump ghosty stuff from segment files: not yet committed objects, deleted / superseded objects, commit tags
            # set up the key without depending on a manifest obj
            # (scan until the first PUT object, whose cdata carries what key_factory needs)
            for id, cdata, tag, segment, offset in repository.scan_low_level():
                if tag == TAG_PUT:
                    key = key_factory(repository, cdata)
                    repo_objs = RepoObj(key)
                    break
            i = 0
            for id, cdata, tag, segment, offset in repository.scan_low_level(segment=args.segment, offset=args.offset):
                if tag == TAG_PUT:
                    decrypt_dump(i, id, cdata, tag="put", segment=segment, offset=offset)
                elif tag == TAG_DELETE:
                    # DELETE entries have no payload to decrypt
                    decrypt_dump(i, id, None, tag="del", segment=segment, offset=offset)
                elif tag == TAG_COMMIT:
                    # COMMIT entries have neither id nor payload
                    decrypt_dump(i, None, None, tag="commit", segment=segment, offset=offset)
                i += 1
        else:
            # set up the key without depending on a manifest obj
            ids = repository.list(limit=1, marker=None)
            cdata = repository.get(ids[0])
            key = key_factory(repository, cdata)
            repo_objs = RepoObj(key)
            state = None
            i = 0
            while True:
                ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)  # must use on-disk order scanning here
                if not ids:
                    break
                for id in ids:
                    cdata = repository.get(id)
                    decrypt_dump(i, id, cdata)
                    i += 1
        print("Done.")
        return EXIT_SUCCESS
@with_repository(manifest=False)
    def do_debug_search_repo_objs(self, args, repository):
        """search for byte sequences in repo objects, repo index MUST be current/correct"""
        context = 32  # bytes of context shown on each side of a match

        def print_finding(info, wanted, data, offset):
            # print one match: context before / match / context after, as hex and repr
            before = data[offset - context : offset]
            after = data[offset + len(wanted) : offset + len(wanted) + context]
            print(
                "{}: {} {} {} == {!r} {!r} {!r}".format(
                    info, before.hex(), wanted.hex(), after.hex(), before, wanted, after
                )
            )

        # the search term is given as "hex:<hexdigits>" or "str:<text>"
        wanted = args.wanted
        try:
            if wanted.startswith("hex:"):
                wanted = unhexlify(wanted[4:])
            elif wanted.startswith("str:"):
                wanted = wanted[4:].encode()
            else:
                raise ValueError("unsupported search term")
        except (ValueError, UnicodeEncodeError):
            wanted = None
        if not wanted:
            self.print_error("search term needs to be hex:123abc or str:foobar style")
            return EXIT_ERROR

        from ..crypto.key import key_factory

        # set up the key without depending on a manifest obj
        ids = repository.list(limit=1, marker=None)
        cdata = repository.get(ids[0])
        key = key_factory(repository, cdata)
        repo_objs = RepoObj(key)

        state = None
        last_data = b""
        last_id = None
        i = 0
        while True:
            ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)  # must use on-disk order scanning here
            if not ids:
                break
            for id in ids:
                cdata = repository.get(id)
                _, data = repo_objs.parse(id, cdata, ro_type=ROBJ_DONTCARE)

                # try to locate wanted sequence crossing the border of last_data and data
                # (on the first object last_data is b"", so the window is too
                # short to match and last_id is never dereferenced while None)
                boundary_data = last_data[-(len(wanted) - 1) :] + data[: len(wanted) - 1]
                if wanted in boundary_data:
                    boundary_data = last_data[-(len(wanted) - 1 + context) :] + data[: len(wanted) - 1 + context]
                    offset = boundary_data.find(wanted)
                    info = "%d %s | %s" % (i, last_id.hex(), id.hex())
                    print_finding(info, wanted, boundary_data, offset)

                # try to locate wanted sequence in data
                count = data.count(wanted)
                if count:
                    offset = data.find(wanted)  # only determine first occurrence's offset
                    info = "%d %s #%d" % (i, id.hex(), count)
                    print_finding(info, wanted, data, offset)

                last_id, last_data = id, data
                i += 1
                if i % 10000 == 0:
                    # progress indicator for large repositories
                    print("%d objects processed." % i)
        print("Done.")
        return EXIT_SUCCESS
@with_repository(manifest=False)
def do_debug_get_obj(self, args, repository):
"""get object contents from the repository and write it into file"""
hex_id = args.id
try:
id = unhexlify(hex_id)
if len(id) != 32: # 256bit
raise ValueError("id must be 256bits or 64 hex digits")
except ValueError as err:
print(f"object id {hex_id} is invalid [{str(err)}].")
return EXIT_ERROR
try:
data = repository.get(id)
except Repository.ObjectNotFound:
print("object %s not found." % hex_id)
return EXIT_ERROR
with open(args.path, "wb") as f:
f.write(data)
print("object %s fetched." % hex_id)
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_id_hash(self, args, repository, manifest):
"""compute id-hash for file contents"""
with open(args.path, "rb") as f:
data = f.read()
key = manifest.key
id = key.id_hash(data)
print(id.hex())
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_parse_obj(self, args, repository, manifest):
"""parse borg object file into meta dict and data (decrypting, decompressing)"""
# get the object from id
hex_id = args.id
try:
id = unhexlify(hex_id)
if len(id) != 32: # 256bit
raise ValueError("id must be 256bits or 64 hex digits")
except ValueError as err:
print(f"object id {hex_id} is invalid [{str(err)}].")
return EXIT_ERROR
with open(args.object_path, "rb") as f:
cdata = f.read()
repo_objs = manifest.repo_objs
meta, data = repo_objs.parse(id=id, cdata=cdata, ro_type=ROBJ_DONTCARE)
with open(args.json_path, "w") as f:
json.dump(meta, f)
with open(args.binary_path, "wb") as f:
f.write(data)
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_format_obj(self, args, repository, manifest):
"""format file and metadata into borg object file"""
# get the object from id
hex_id = args.id
try:
id = unhexlify(hex_id)
if len(id) != 32: # 256bit
raise ValueError("id must be 256bits or 64 hex digits")
except ValueError as err:
print(f"object id {hex_id} is invalid [{str(err)}].")
return EXIT_ERROR
with open(args.binary_path, "rb") as f:
data = f.read()
with open(args.json_path) as f:
meta = json.load(f)
repo_objs = manifest.repo_objs
# TODO: support misc repo object types other than ROBJ_FILE_STREAM
data_encrypted = repo_objs.format(id=id, meta=meta, data=data, ro_type=ROBJ_FILE_STREAM)
with open(args.object_path, "wb") as f:
f.write(data_encrypted)
return EXIT_SUCCESS
@with_repository(manifest=False, exclusive=True)
def do_debug_put_obj(self, args, repository):
"""put file contents into the repository"""
with open(args.path, "rb") as f:
data = f.read()
hex_id = args.id
try:
id = unhexlify(hex_id)
if len(id) != 32: # 256bit
raise ValueError("id must be 256bits or 64 hex digits")
except ValueError as err:
print(f"object id {hex_id} is invalid [{str(err)}].")
return EXIT_ERROR
repository.put(id, data)
print("object %s put." % hex_id)
repository.commit(compact=False)
return EXIT_SUCCESS
@with_repository(manifest=False, exclusive=True)
def do_debug_delete_obj(self, args, repository):
"""delete the objects with the given IDs from the repo"""
modified = False
for hex_id in args.ids:
try:
id = unhexlify(hex_id)
except ValueError:
print("object id %s is invalid." % hex_id)
else:
try:
repository.delete(id)
modified = True
print("object %s deleted." % hex_id)
except Repository.ObjectNotFound:
print("object %s not found." % hex_id)
if modified:
repository.commit(compact=False)
print("Done.")
return EXIT_SUCCESS
@with_repository(
manifest=False,
exclusive=True,
cache=True,
compatibility=Manifest.NO_OPERATION_CHECK,
)
def do_debug_refcount_obj(self, args, repository, manifest, cache):
"""display refcounts for the objects with the given IDs"""
for hex_id in args.ids:
try:
id = unhexlify(hex_id)
except ValueError:
print("object id %s is invalid." % hex_id)
else:
try:
refcount = cache.chunks[id][0]
print("object %s has %d referrers [info from chunks cache]." % (hex_id, refcount))
except KeyError:
print("object %s not found [info from chunks cache]." % hex_id)
return EXIT_SUCCESS
@with_repository(manifest=False, exclusive=True)
def do_debug_dump_hints(self, args, repository):
"""dump repository hints"""
if not repository._active_txn:
repository.prepare_txn(repository.get_transaction_id())
try:
hints = dict(
segments=repository.segments,
compact=repository.compact,
storage_quota_use=repository.storage_quota_use,
shadow_index={hexlify(k).decode(): v for k, v in repository.shadow_index.items()},
)
with dash_open(args.path, "w") as fd:
json.dump(hints, fd, indent=4)
finally:
repository.rollback()
return EXIT_SUCCESS
def do_debug_convert_profile(self, args):
"""convert Borg profile to Python profile"""
import marshal
with args.output, args.input:
marshal.dump(msgpack.unpack(args.input, use_list=False, raw=False), args.output)
return EXIT_SUCCESS
def build_parser_debug(self, subparsers, common_parser, mid_common_parser):
    """Register the ``debug`` command group and all of its subcommands.

    Each ``debug_parsers.add_parser(...)`` call below wires up one
    ``borg debug <command>`` subcommand; ``set_defaults(func=...)`` binds
    it to the matching ``do_debug_*`` method, whose docstring is reused
    as the parser description.
    """
    debug_epilog = process_epilog(
        """
        These commands are not intended for normal use and potentially very
        dangerous if used incorrectly.
        They exist to improve debugging capabilities without direct system access, e.g.
        in case you ever run into some severe malfunction. Use them only if you know
        what you are doing or if a trusted developer tells you what to do."""
    )
    subparser = subparsers.add_parser(
        "debug",
        parents=[mid_common_parser],
        add_help=False,
        description="debugging command (not intended for normal use)",
        epilog=debug_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="debugging command (not intended for normal use)",
    )
    debug_parsers = subparser.add_subparsers(title="required arguments", metavar="<command>")
    # Show the subcommand help when "borg debug" is run without a subcommand.
    subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser))
    # info
    debug_info_epilog = process_epilog(
        """
        This command displays some system information that might be useful for bug
        reports and debugging problems. If a traceback happens, this information is
        already appended at the end of the traceback.
        """
    )
    subparser = debug_parsers.add_parser(
        "info",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_info.__doc__,
        epilog=debug_info_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="show system infos for debugging / bug reports (debug)",
    )
    subparser.set_defaults(func=self.do_debug_info)
    # dump-archive-items
    debug_dump_archive_items_epilog = process_epilog(
        """
        This command dumps raw (but decrypted and decompressed) archive items (only metadata) to files.
        """
    )
    subparser = debug_parsers.add_parser(
        "dump-archive-items",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_dump_archive_items.__doc__,
        epilog=debug_dump_archive_items_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="dump archive items (metadata) (debug)",
    )
    subparser.set_defaults(func=self.do_debug_dump_archive_items)
    subparser.add_argument(
        "name",
        metavar="NAME",
        type=archivename_validator,
        help="specify the archive name",
    )
    # dump-archive
    debug_dump_archive_epilog = process_epilog(
        """
        This command dumps all metadata of an archive in a decoded form to a file.
        """
    )
    subparser = debug_parsers.add_parser(
        "dump-archive",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_dump_archive.__doc__,
        epilog=debug_dump_archive_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="dump decoded archive metadata (debug)",
    )
    subparser.set_defaults(func=self.do_debug_dump_archive)
    subparser.add_argument(
        "name",
        metavar="NAME",
        type=archivename_validator,
        help="specify the archive name",
    )
    subparser.add_argument("path", metavar="PATH", type=str, help="file to dump data into")
    # dump-manifest
    debug_dump_manifest_epilog = process_epilog(
        """
        This command dumps manifest metadata of a repository in a decoded form to a file.
        """
    )
    subparser = debug_parsers.add_parser(
        "dump-manifest",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_dump_manifest.__doc__,
        epilog=debug_dump_manifest_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="dump decoded repository metadata (debug)",
    )
    subparser.set_defaults(func=self.do_debug_dump_manifest)
    subparser.add_argument("path", metavar="PATH", type=str, help="file to dump data into")
    # dump-repo-objs
    debug_dump_repo_objs_epilog = process_epilog(
        """
        This command dumps raw (but decrypted and decompressed) repo objects to files.
        """
    )
    subparser = debug_parsers.add_parser(
        "dump-repo-objs",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_dump_repo_objs.__doc__,
        epilog=debug_dump_repo_objs_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="dump repo objects (debug)",
    )
    subparser.set_defaults(func=self.do_debug_dump_repo_objs)
    subparser.add_argument(
        "--ghost",
        dest="ghost",
        action="store_true",
        help="dump all segment file contents, including deleted/uncommitted objects and commits.",
    )
    subparser.add_argument(
        "--segment",
        metavar="SEG",
        dest="segment",
        type=positive_int_validator,
        default=None,
        action=Highlander,
        help="used together with --ghost: limit processing to given segment.",
    )
    subparser.add_argument(
        "--offset",
        metavar="OFFS",
        dest="offset",
        type=positive_int_validator,
        default=None,
        action=Highlander,
        help="used together with --ghost: limit processing to given offset.",
    )
    # search-repo-objs
    debug_search_repo_objs_epilog = process_epilog(
        """
        This command searches raw (but decrypted and decompressed) repo objects for a specific bytes sequence.
        """
    )
    subparser = debug_parsers.add_parser(
        "search-repo-objs",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_search_repo_objs.__doc__,
        epilog=debug_search_repo_objs_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="search repo objects (debug)",
    )
    subparser.set_defaults(func=self.do_debug_search_repo_objs)
    subparser.add_argument(
        "wanted",
        metavar="WANTED",
        type=str,
        action=Highlander,
        help="term to search the repo for, either 0x1234abcd hex term or a string",
    )
    # id-hash
    debug_id_hash_epilog = process_epilog(
        """
        This command computes the id-hash for some file content.
        """
    )
    subparser = debug_parsers.add_parser(
        "id-hash",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_id_hash.__doc__,
        epilog=debug_id_hash_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="compute id-hash for some file content (debug)",
    )
    subparser.set_defaults(func=self.do_debug_id_hash)
    subparser.add_argument(
        "path",
        metavar="PATH",
        type=str,
        help="content for which the id-hash shall get computed",
    )
    # parse_obj
    debug_parse_obj_epilog = process_epilog(
        """
        This command parses the object file into metadata (as json) and uncompressed data.
        """
    )
    subparser = debug_parsers.add_parser(
        "parse-obj",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_parse_obj.__doc__,
        epilog=debug_parse_obj_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="parse borg object file into meta dict and data",
    )
    subparser.set_defaults(func=self.do_debug_parse_obj)
    subparser.add_argument("id", metavar="ID", type=str, help="hex object ID to get from the repo")
    subparser.add_argument(
        "object_path",
        metavar="OBJECT_PATH",
        type=str,
        help="path of the object file to parse data from",
    )
    subparser.add_argument(
        "binary_path",
        metavar="BINARY_PATH",
        type=str,
        help="path of the file to write uncompressed data into",
    )
    subparser.add_argument(
        "json_path",
        metavar="JSON_PATH",
        type=str,
        help="path of the json file to write metadata into",
    )
    # format_obj
    debug_format_obj_epilog = process_epilog(
        """
        This command formats the file and metadata into objectfile.
        """
    )
    subparser = debug_parsers.add_parser(
        "format-obj",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_format_obj.__doc__,
        epilog=debug_format_obj_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="format file and metadata into borg objectfile",
    )
    subparser.set_defaults(func=self.do_debug_format_obj)
    subparser.add_argument("id", metavar="ID", type=str, help="hex object ID to get from the repo")
    subparser.add_argument(
        "binary_path",
        metavar="BINARY_PATH",
        type=str,
        help="path of the file to convert into objectfile",
    )
    subparser.add_argument(
        "json_path",
        metavar="JSON_PATH",
        type=str,
        help="path of the json file to read metadata from",
    )
    subparser.add_argument(
        "-C",
        "--compression",
        metavar="COMPRESSION",
        dest="compression",
        type=CompressionSpec,
        default=CompressionSpec("lz4"),
        action=Highlander,
        help="select compression algorithm, see the output of the " '"borg help compression" command for details.',
    )
    subparser.add_argument(
        "object_path",
        metavar="OBJECT_PATH",
        type=str,
        help="path of the objectfile to write compressed encrypted data into",
    )
    # get-obj
    debug_get_obj_epilog = process_epilog(
        """
        This command gets an object from the repository.
        """
    )
    subparser = debug_parsers.add_parser(
        "get-obj",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_get_obj.__doc__,
        epilog=debug_get_obj_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="get object from repository (debug)",
    )
    subparser.set_defaults(func=self.do_debug_get_obj)
    subparser.add_argument("id", metavar="ID", type=str, help="hex object ID to get from the repo")
    subparser.add_argument("path", metavar="PATH", type=str, help="file to write object data into")
    # put-obj
    debug_put_obj_epilog = process_epilog(
        """
        This command puts an object into the repository.
        """
    )
    subparser = debug_parsers.add_parser(
        "put-obj",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_put_obj.__doc__,
        epilog=debug_put_obj_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="put object to repository (debug)",
    )
    subparser.set_defaults(func=self.do_debug_put_obj)
    subparser.add_argument("id", metavar="ID", type=str, help="hex object ID to put into the repo")
    subparser.add_argument("path", metavar="PATH", type=str, help="file to read and create object from")
    # delete-obj
    debug_delete_obj_epilog = process_epilog(
        """
        This command deletes objects from the repository.
        """
    )
    subparser = debug_parsers.add_parser(
        "delete-obj",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_delete_obj.__doc__,
        epilog=debug_delete_obj_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="delete object from repository (debug)",
    )
    subparser.set_defaults(func=self.do_debug_delete_obj)
    subparser.add_argument(
        "ids",
        metavar="IDs",
        nargs="+",
        type=str,
        help="hex object ID(s) to delete from the repo",
    )
    # refcount-obj
    debug_refcount_obj_epilog = process_epilog(
        """
        This command displays the reference count for objects from the repository.
        """
    )
    subparser = debug_parsers.add_parser(
        "refcount-obj",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_refcount_obj.__doc__,
        epilog=debug_refcount_obj_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="show refcount for object from repository (debug)",
    )
    subparser.set_defaults(func=self.do_debug_refcount_obj)
    subparser.add_argument(
        "ids",
        metavar="IDs",
        nargs="+",
        type=str,
        help="hex object ID(s) to show refcounts for",
    )
    # dump-hints
    debug_dump_hints_epilog = process_epilog(
        """
        This command dumps the repository hints data.
        """
    )
    subparser = debug_parsers.add_parser(
        "dump-hints",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_dump_hints.__doc__,
        epilog=debug_dump_hints_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="dump repo hints (debug)",
    )
    subparser.set_defaults(func=self.do_debug_dump_hints)
    subparser.add_argument("path", metavar="PATH", type=str, help="file to dump data into")
    # convert-profile
    debug_convert_profile_epilog = process_epilog(
        """
        Convert a Borg profile to a Python cProfile compatible profile.
        """
    )
    subparser = debug_parsers.add_parser(
        "convert-profile",
        parents=[common_parser],
        add_help=False,
        description=self.do_debug_convert_profile.__doc__,
        epilog=debug_convert_profile_epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help="convert Borg profile to Python profile (debug)",
    )
    subparser.set_defaults(func=self.do_debug_convert_profile)
    subparser.add_argument("input", metavar="INPUT", type=argparse.FileType("rb"), help="Borg profile")
    subparser.add_argument("output", metavar="OUTPUT", type=argparse.FileType("wb"), help="Output file")
|
# -*- coding: utf-8 -*-
"""
flaskbb.fixtures.groups
~~~~~~~~~~~~~~~~~~~~~~~
The fixtures module for our groups.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from collections import OrderedDict
def _group(description, **overrides):
    """Build one group's permission dict.

    Starts from an all-False permission set and applies *overrides*;
    key order (description first, then the permission flags) is kept
    identical for every group.
    """
    permissions = {
        "description": description,
        "admin": False,
        "super_mod": False,
        "mod": False,
        "banned": False,
        "guest": False,
        "editpost": False,
        "deletepost": False,
        "deletetopic": False,
        "posttopic": False,
        "postreply": False,
        "mod_edituser": False,
        "mod_banuser": False,
        "viewhidden": False,
        "makehidden": False,
    }
    permissions.update(overrides)
    return permissions


# The default groups, in the order they should be created.
fixture = OrderedDict(
    (
        (
            "Administrator",
            _group(
                "The Administrator Group",
                admin=True,
                editpost=True,
                deletepost=True,
                deletetopic=True,
                posttopic=True,
                postreply=True,
                mod_edituser=True,
                mod_banuser=True,
                viewhidden=True,
                makehidden=True,
            ),
        ),
        (
            "Super Moderator",
            _group(
                "The Super Moderator Group",
                super_mod=True,
                editpost=True,
                deletepost=True,
                deletetopic=True,
                posttopic=True,
                postreply=True,
                mod_edituser=True,
                mod_banuser=True,
                viewhidden=True,
                makehidden=True,
            ),
        ),
        (
            "Moderator",
            _group(
                "The Moderator Group",
                mod=True,
                editpost=True,
                deletepost=True,
                deletetopic=True,
                posttopic=True,
                postreply=True,
                mod_edituser=True,
                mod_banuser=True,
                viewhidden=True,
            ),
        ),
        (
            "Member",
            _group(
                "The Member Group",
                editpost=True,
                posttopic=True,
                postreply=True,
            ),
        ),
        (
            "Banned",
            _group("The Banned Group", banned=True),
        ),
        (
            "Guest",
            _group("The Guest Group", guest=True),
        ),
    )
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.