section
stringlengths
2
30
filename
stringlengths
1
82
text
stringlengths
783
28M
Marketplace
LocalPackageList
# Copyright (c) 2022 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. from typing import TYPE_CHECKING, Any, Dict, List, Optional from PyQt6.QtCore import QObject, pyqtSlot from UM.i18n import i18nCatalog from UM.Logger import Logger from UM.TaskManagement.HttpRequestManager import HttpRequestManager from UM.Version import Version from .Constants import PACKAGE_UPDATES_URL from .PackageList import PackageList from .PackageModel import PackageModel if TYPE_CHECKING: from PyQt6.QtCore import QObject from PyQt6.QtNetwork import QNetworkReply catalog = i18nCatalog("cura") class LocalPackageList(PackageList): PACKAGE_CATEGORIES = { "installed": { "plugin": catalog.i18nc("@label", "Installed Plugins"), "material": catalog.i18nc("@label", "Installed Materials"), }, "bundled": { "plugin": catalog.i18nc("@label", "Bundled Plugins"), "material": catalog.i18nc("@label", "Bundled Materials"), }, } # The section headers to be used for the different package categories def __init__(self, parent: Optional["QObject"] = None) -> None: super().__init__(parent) self._has_footer = False self._ongoing_requests["check_updates"] = None self._package_manager.packagesWithUpdateChanged.connect( self._sortSectionsOnUpdate ) self._package_manager.packageUninstalled.connect(self._removePackageModel) def _sortSectionsOnUpdate(self) -> None: section_order = dict( zip( [ i for k, v in self.PACKAGE_CATEGORIES.items() for i in self.PACKAGE_CATEGORIES[k].values() ], ["a", "b", "c", "d"], ) ) self.sort( lambda model: ( section_order[model.sectionTitle], not model.canUpdate, model.displayName.lower(), ), key="package", ) def _removePackageModel(self, package_id: str) -> None: """ Cleanup function to remove the package model from the list. Note that this is only done if the package can't be updated, it is in the to remove list and isn't in the to be installed list """ package = self.getPackageModel(package_id) if ( package and not package.canUpdate and package_id in self._package_manager.getToRemovePackageIDs() and package_id not in self._package_manager.getPackagesToInstall() ): index = self.find("package", package_id) if index < 0: Logger.error( f"Could not find card in Listview corresponding with {package_id}" ) self.updatePackages() return self.removeItem(index) @pyqtSlot() def updatePackages(self) -> None: """Update the list with local packages, these are materials or plugin, either bundled or user installed. The list will also contain **to be removed** or **to be installed** packages since the user might still want to interact with these. """ self.setErrorMessage("") # Clear any previous errors. self.setIsLoading(True) # Obtain and sort the local packages self.setItems( [ {"package": p} for p in [ self._makePackageModel(p) for p in self._package_manager.local_packages ] ] ) self._sortSectionsOnUpdate() self.checkForUpdates(self._package_manager.local_packages) self.setIsLoading(False) self.setHasMore(False) # All packages should have been loaded at this time def _makePackageModel(self, package_info: Dict[str, Any]) -> PackageModel: """Create a PackageModel from the package_info and determine its section_title""" package_id = package_info["package_id"] bundled_or_installed = ( "bundled" if self._package_manager.isBundledPackage(package_id) else "installed" ) package_type = package_info["package_type"] section_title = self.PACKAGE_CATEGORIES[bundled_or_installed][package_type] package = PackageModel(package_info, section_title=section_title, parent=self) self._connectManageButtonSignals(package) return package def checkForUpdates(self, packages: List[Dict[str, Any]]) -> None: installed_packages = "&".join( [ f"installed_packages={package['package_id']}:{package['package_version']}" for package in packages ] ) request_url = f"{PACKAGE_UPDATES_URL}?{installed_packages}" self._ongoing_requests["check_updates"] = HttpRequestManager.getInstance().get( request_url, scope=self._scope, callback=self._parseResponse ) def _parseResponse(self, reply: "QNetworkReply") -> None: """ Parse the response from the package list API request which can update. :param reply: A reply containing information about a number of packages. """ response_data = HttpRequestManager.readJSON(reply) if response_data is None or "data" not in response_data: Logger.error( f"Could not interpret the server's response. Missing 'data' from response data. Keys in response: {response_data.keys()}" ) return if len(response_data["data"]) == 0: return packages = response_data["data"] for package in packages: self._package_manager.addAvailablePackageVersion( package["package_id"], Version(package["package_version"]) ) package_model = self.getPackageModel(package["package_id"]) if package_model: # Also make sure that the local list knows where to get an update package_model.setDownloadUrl(package["download_url"]) self._ongoing_requests["check_updates"] = None
VersionUpgrade42to43
VersionUpgrade42to43
import configparser import io from typing import Dict, List, Tuple from UM.VersionUpgrade import VersionUpgrade _renamed_profiles = { "generic_pla_0.4_coarse": "jbo_generic_pla_0.4_coarse", "generic_pla_0.4_fine": "jbo_generic_pla_fine", "generic_pla_0.4_medium": "jbo_generic_pla_medium", "generic_pla_0.4_ultrafine": "jbo_generic_pla_ultrafine", "generic_petg_0.4_coarse": "jbo_generic_petg_0.4_coarse", "generic_petg_0.4_fine": "jbo_generic_petg_fine", "generic_petg_0.4_medium": "jbo_generic_petg_medium", } # - The variant "imade3d jellybox 0.4 mm 2-fans" for machine definition "imade3d_jellybox" # is now "0.4 mm" for machine definition "imade3d jellybox_2". # - Materials "imade3d_petg_green" and "imade3d_petg_pink" are now "imade3d_petg_175". # - Materials "imade3d_pla_green" and "imade3d_pla_pink" are now "imade3d_petg_175". # # Note: Theoretically, the old material profiles with "_2-fans" at the end should be updated to: # - machine definition: imade3d_jellybox_2 # - variant: 0.4 mm (for jellybox 2) # - material: (as an example) imade3d_petg_175_imade3d_jellybox_2_0.4_mm # # But this involves changing the definition of the global stack and the extruder stacks, which can cause more trouble # than what we can fix. So, here, we update all material variants, regardless of having "_2-fans" at the end or not, to # jellybox_0.4_mm. # _renamed_material_profiles = { # PETG "imade3d_petg_green": "imade3d_petg_175", "imade3d_petg_green_imade3d_jellybox": "imade3d_petg_175_imade3d_jellybox", "imade3d_petg_green_imade3d_jellybox_0.4_mm": "imade3d_petg_175_imade3d_jellybox_0.4_mm", "imade3d_petg_green_imade3d_jellybox_0.4_mm_2-fans": "imade3d_petg_175_imade3d_jellybox_0.4_mm", "imade3d_petg_pink": "imade3d_petg_175", "imade3d_petg_pink_imade3d_jellybox": "imade3d_petg_175_imade3d_jellybox", "imade3d_petg_pink_imade3d_jellybox_0.4_mm": "imade3d_petg_175_imade3d_jellybox_0.4_mm", "imade3d_petg_pink_imade3d_jellybox_0.4_mm_2-fans": "imade3d_petg_175_imade3d_jellybox_0.4_mm", # PLA "imade3d_pla_green": "imade3d_pla_175", "imade3d_pla_green_imade3d_jellybox": "imade3d_pla_175_imade3d_jellybox", "imade3d_pla_green_imade3d_jellybox_0.4_mm": "imade3d_pla_175_imade3d_jellybox_0.4_mm", "imade3d_pla_green_imade3d_jellybox_0.4_mm_2-fans": "imade3d_pla_175_imade3d_jellybox_0.4_mm", "imade3d_pla_pink": "imade3d_pla_175", "imade3d_pla_pink_imade3d_jellybox": "imade3d_pla_175_imade3d_jellybox", "imade3d_pla_pink_imade3d_jellybox_0.4_mm": "imade3d_pla_175_imade3d_jellybox_0.4_mm", "imade3d_pla_pink_imade3d_jellybox_0.4_mm_2-fans": "imade3d_pla_175_imade3d_jellybox_0.4_mm", } _removed_settings = {"start_layers_at_same_position"} _renamed_settings = {"support_infill_angle": "support_infill_angles"} # type: Dict[str, str] class VersionUpgrade42to43(VersionUpgrade): """Upgrades configurations from the state they were in at version 4.2 to the state they should be in at version 4.3. """ def upgradePreferences(self, serialized: str, filename: str): parser = configparser.ConfigParser(interpolation=None) parser.read_string(serialized) if ( "camera_perspective_mode" in parser["general"] and parser["general"]["camera_perspective_mode"] == "orthogonal" ): parser["general"]["camera_perspective_mode"] = "orthographic" # Fix renamed settings for visibility if "visible_settings" in parser["general"]: all_setting_keys = parser["general"]["visible_settings"].strip().split(";") if all_setting_keys: for idx, key in enumerate(all_setting_keys): if key in _renamed_settings: all_setting_keys[idx] = _renamed_settings[key] parser["general"]["visible_settings"] = ";".join(all_setting_keys) parser["metadata"]["setting_version"] = "9" result = io.StringIO() parser.write(result) return [filename], [result.getvalue()] def upgradeInstanceContainer( self, serialized: str, filename: str ) -> Tuple[List[str], List[str]]: """Upgrades instance containers to have the new version number. This renames the renamed settings in the containers. """ parser = configparser.ConfigParser(interpolation=None, comment_prefixes=()) parser.read_string(serialized) # Update version number. parser["metadata"]["setting_version"] = "9" if "values" in parser: for old_name, new_name in _renamed_settings.items(): if old_name in parser["values"]: parser["values"][new_name] = parser["values"][old_name] del parser["values"][old_name] for key in _removed_settings: if key in parser["values"]: del parser["values"][key] if "support_infill_angles" in parser["values"]: old_value = float(parser["values"]["support_infill_angles"]) new_value = [int(round(old_value))] parser["values"]["support_infill_angles"] = str(new_value) result = io.StringIO() parser.write(result) return [filename], [result.getvalue()] def upgradeStack( self, serialized: str, filename: str ) -> Tuple[List[str], List[str]]: """Upgrades stacks to have the new version number.""" parser = configparser.ConfigParser(interpolation=None) parser.read_string(serialized) # Update version number. parser["metadata"]["setting_version"] = "9" # Handle changes for the imade3d jellybox. The machine was split up into parts (eg; a 2 fan version and a single # fan version. Previously it used variants for this. The only upgrade we can do here is strip that variant. # This is because we only upgrade per stack (and to fully do these changes, we'd need to switch out something # in the global container based on changes made to the extruder stack) if parser["containers"]["6"] == "imade3d_jellybox_extruder_0": quality_id = parser["containers"]["2"] if quality_id.endswith("_2-fans"): parser["containers"]["2"] = quality_id.replace("_2-fans", "") if parser["containers"]["2"] in _renamed_profiles: parser["containers"]["2"] = _renamed_profiles[parser["containers"]["2"]] material_id = parser["containers"]["3"] if material_id in _renamed_material_profiles: parser["containers"]["3"] = _renamed_material_profiles[material_id] variant_id = parser["containers"]["4"] if variant_id.endswith("_2-fans"): parser["containers"]["4"] = variant_id.replace("_2-fans", "") result = io.StringIO() parser.write(result) return [filename], [result.getvalue()]
fta
dormantevent
"""Dormant Event item definition.""" from gaphas.geometry import Rectangle from gaphor.core.modeling import DrawContext from gaphor.diagram.presentation import ( Classified, ElementPresentation, from_package_str, ) from gaphor.diagram.shapes import Box, IconBox, Text, draw_diamond from gaphor.diagram.support import represents from gaphor.diagram.text import FontStyle, FontWeight from gaphor.RAAML import raaml from gaphor.RAAML.fta.undevelopedevent import draw_undeveloped_event from gaphor.UML.recipes import stereotypes_str @represents(raaml.DormantEvent) class DormantEventItem(Classified, ElementPresentation): def __init__(self, diagram, id=None): super().__init__(diagram, id, width=70, height=35) self.watch("subject[NamedElement].name").watch( "subject[NamedElement].namespace.name" ) def update_shapes(self, event=None): self.shape = IconBox( Box( draw=draw_dormant_event, ), Text( text=lambda: stereotypes_str( self.subject, [self.diagram.gettext("Dormant Event")] ), ), Text( text=lambda: self.subject.name or "", width=lambda: self.width - 4, style={ "font-weight": FontWeight.BOLD, "font-style": FontStyle.NORMAL, }, ), Text( text=lambda: from_package_str(self), style={"font-size": "x-small"}, ), ) def draw_dormant_event(box, context: DrawContext, bounding_box: Rectangle): draw_undeveloped_event(box, context, bounding_box) x1 = bounding_box.width / 5.0 x2 = bounding_box.width * 4.0 / 5.0 y1 = bounding_box.height / 5.0 y2 = bounding_box.height * 4.0 / 5.0 draw_diamond(context, x1, x2, y1, y2)
Scripts
subclassing_objective_c
#!/usr/bin/env python # This is a doctest """ ========================================= Subclassing Objective-C classes in Python ========================================= It is possible to subclass any existing Objective-C class in python. We start by importing the interface to the Objective-C runtime, although you'd normally use wrappers for the various frameworks, and then locate the class we'd like to subclass:: >>> import objc >>> NSEnumerator = objc.lookUpClass('NSEnumerator') >>> NSEnumerator <objective-c class NSEnumerator at 0xa0a039a8> You can then define a subclass of this class using the usual syntax:: >>> class MyEnumerator (NSEnumerator): ... __slots__ = ('cnt',) ... # ... # Start of the method definitions: ... def init(self): ... self.cnt = 10 ... return self ... # ... def nextObject(self): ... if self.cnt == 0: ... return None ... self.cnt -= 1 ... return self.cnt ... # ... def __del__(self): ... global DEALLOC_COUNT ... DEALLOC_COUNT = DEALLOC_COUNT + 1 To check that our instances our deallocated we maintain a ``DEALLOC_COUNT``:: >>> DEALLOC_COUNT=0 As always, the creation of instances of Objective-C classes looks a bit odd for Python programs: >>> obj = MyEnumerator.alloc().init() >>> obj.allObjects() (9, 8, 7, 6, 5, 4, 3, 2, 1, 0) Destroy our reference to the object, to check if it will be deallocated:: >>> del obj >>> DEALLOC_COUNT 1 """ import doctest import __main__ doctest.testmod(__main__, verbose=1)
calculix
write_constraint_tie
# *************************************************************************** # * Copyright (c) 2021 Bernd Hahnebach <bernd@bimstatik.org> * # * * # * This file is part of the FreeCAD CAx development system. * # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU Lesser General Public License (LGPL) * # * as published by the Free Software Foundation; either version 2 of * # * the License, or (at your option) any later version. * # * for detail see the LICENCE text file. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this program; if not, write to the Free Software * # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * # * USA * # * * # *************************************************************************** __title__ = "FreeCAD FEM calculix constraint tie" __author__ = "Bernd Hahnebach" __url__ = "https://www.freecad.org" def get_analysis_types(): return "all" # write for all analysis types def get_sets_name(): return "constraints_tie_surface_sets" def get_constraint_title(): return "Tie Constraints" def get_before_write_meshdata_constraint(): return "" def get_after_write_meshdata_constraint(): return "" def get_before_write_constraint(): return "" def get_after_write_constraint(): return "" def write_meshdata_constraint(f, femobj, tie_obj, ccxwriter): # slave DEP f.write("*SURFACE, NAME=TIE_DEP{}\n".format(tie_obj.Name)) for i in femobj["TieSlaveFaces"]: f.write("{},S{}\n".format(i[0], i[1])) # master IND f.write("*SURFACE, NAME=TIE_IND{}\n".format(tie_obj.Name)) for i in femobj["TieMasterFaces"]: f.write("{},S{}\n".format(i[0], i[1])) def write_constraint(f, femobj, tie_obj, ccxwriter): # floats read from ccx should use {:.13G}, see comment in writer module tolerance = tie_obj.Tolerance.getValueAs("mm").Value f.write( "*TIE, POSITION TOLERANCE={:.13G}, ADJUST=NO, NAME=TIE{}\n".format( tolerance, tie_obj.Name ) ) ind_surf = "TIE_IND{}".format(tie_obj.Name) dep_surf = "TIE_DEP{}".format(tie_obj.Name) f.write("{},{}\n".format(dep_surf, ind_surf))
qltk
shortcuts
# Copyright 2016 Christoph Reiter # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. from gi.repository import Gtk from quodlibet import _, const, util SHORTCUTS = [ ( _("Main Window"), [ ("<Alt>Left", _("Seek backwards by 10 seconds")), ("<Alt>Right", _("Seek forward by 10 seconds")), ("<Primary>L", _("Focus the search entry")), ], ), ( _("Browsers"), [ ("<Primary><Shift>J", _("Reset filters and jump to the playing song")), ], ), ( _("Song List"), [ ("<Primary>I", _("Open the information window for the selected songs")), ("<Alt>Return", _("Open the tag editor for the selected songs")), ("<Primary>Return", _("Queue the selected songs")), ("<Primary>Delete", _("Delete the selected songs")), ("<Primary>F", _("Show the inline search entry")), ( "<Ctrl>", "+ " + _("Left click on a column header") + ":\n" + _("Add the column to the list of columns to sort by"), ), ], ), ( _("Tree View"), [ ( "Left <Primary>Left", _("Collapses the element or select the parent element"), ), ("Right <Primary>Right", _("Expands the element")), ], ), ( _("Text Entries"), [ ("<Primary>Z", _("Undo the last change")), ("<Primary><Shift>Z", _("Redo the last undone change")), ], ), ( _("Paned Browser"), [ ("<Primary>Home", _("Select all songs in all panes")), ], ), ] def build_shortcut_window(data): """Returns a filled Gtk.ShortcutsWindow""" assert has_shortcut_window() # Note: gtk+ is picky about the order of adding/showing things because # this is usually done through XML. e.g. adding shortcuts after a section # won't make them show up in the search etc.. w = Gtk.ShortcutsWindow() section = Gtk.ShortcutsSection() section.show() for group_title, shortcuts in data: group = Gtk.ShortcutsGroup(title=group_title) group.show() for accel, shortcut_title in shortcuts: short = Gtk.ShortcutsShortcut(title=shortcut_title, accelerator=accel) short.show() group.add(short) section.add(group) w.add(section) return w def has_shortcut_window(): """Returns if the current Gtk+ supports ShortcutsWindow. Gtk+ >= 3.20""" return hasattr(Gtk, "ShortcutsWindow") def show_shortcuts(parent): """Either opens a window showing keyboard shortcuts or a website in the default browser, depending on the Gtk+ version """ if has_shortcut_window(): window = build_shortcut_window(SHORTCUTS) window.set_transient_for(parent) window.set_position(Gtk.WindowPosition.CENTER_ON_PARENT) window.set_modal(True) window.show() # XXX: The windows does some weird size changes on start which confuses # window placement. This fixes the jumping around and wrong position # with some WMs and under Windows. window.hide() window.unrealize() window.show() else: util.website(const.SHORTCUTS_HELP)
femobjects
mesh_gmsh
# *************************************************************************** # * Copyright (c) 2016 Bernd Hahnebach <bernd@bimstatik.org> * # * * # * This file is part of the FreeCAD CAx development system. * # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU Lesser General Public License (LGPL) * # * as published by the Free Software Foundation; either version 2 of * # * the License, or (at your option) any later version. * # * for detail see the LICENCE text file. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this program; if not, write to the Free Software * # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * # * USA * # * * # *************************************************************************** __title__ = "FreeCAD FEM mesh gmsh document object" __author__ = "Bernd Hahnebach" __url__ = "https://www.freecad.org" ## @package mesh_gmsh # \ingroup FEM # \brief mesh gmsh object from . import base_fempythonobject class MeshGmsh(base_fempythonobject.BaseFemPythonObject): """ A Fem::FemMeshObject python type, add Gmsh specific properties """ Type = "Fem::FemMeshGmsh" # they will be used from the task panel too, thus they need to be outside of the __init__ known_element_dimensions = ["From Shape", "1D", "2D", "3D"] known_element_orders = ["1st", "2nd"] known_mesh_algorithm_2D = [ "Automatic", "MeshAdapt", "Delaunay", "Frontal", "BAMG", "DelQuad", "Packing Parallelograms", ] known_mesh_algorithm_3D = [ "Automatic", "Delaunay", "New Delaunay", "Frontal", "MMG3D", "R-tree", "HXT", ] known_mesh_RecombinationAlgorithms = [ "Simple", "Blossom", "Simple full-quad", "Blossom full-quad", ] known_mesh_HighOrderOptimizers = [ "None", "Optimization", "Elastic+Optimization", "Elastic", "Fast curving", ] def __init__(self, obj): super(MeshGmsh, self).__init__(obj) self.add_properties(obj) def onDocumentRestored(self, obj): # HighOrderOptimize # was once App::PropertyBool, so check this high_order_optimizer = "" if obj.HighOrderOptimize is True: high_order_optimizer = "Optimization" elif obj.HighOrderOptimize is False: high_order_optimizer = "None" obj.removeProperty("HighOrderOptimize") # add new HighOrderOptimize property self.add_properties(obj) # write the stored high_order_optimizer if high_order_optimizer: obj.HighOrderOptimize = high_order_optimizer # Algorithm3D # refresh the list of known 3D algorithms for existing meshes # since some algos are meanwhile deprecated and new algos are available obj.Algorithm3D = MeshGmsh.known_mesh_algorithm_3D def add_properties(self, obj): # this method is called from onDocumentRestored # thus only add and or set a attribute # if the attribute does not exist if not hasattr(obj, "MeshBoundaryLayerList"): obj.addProperty( "App::PropertyLinkList", "MeshBoundaryLayerList", "Base", "Mesh boundaries need inflation layers", ) obj.MeshBoundaryLayerList = [] if not hasattr(obj, "MeshRegionList"): obj.addProperty( "App::PropertyLinkList", "MeshRegionList", "Base", "Mesh regions of the mesh", ) obj.MeshRegionList = [] if not hasattr(obj, "MeshGroupList"): obj.addProperty( "App::PropertyLinkList", "MeshGroupList", "Base", "Mesh groups of the mesh", ) obj.MeshGroupList = [] if not hasattr(obj, "Part"): obj.addProperty( "App::PropertyLink", "Part", "FEM Mesh", "Geometry object, the mesh is made from. The geometry object has to have a Shape.", ) obj.Part = None if not hasattr(obj, "CharacteristicLengthMax"): obj.addProperty( "App::PropertyLength", "CharacteristicLengthMax", "FEM Gmsh Mesh Params", "Max mesh element size (0.0 = infinity)", ) obj.CharacteristicLengthMax = 0.0 # will be 1e+22 if not hasattr(obj, "CharacteristicLengthMin"): obj.addProperty( "App::PropertyLength", "CharacteristicLengthMin", "FEM Gmsh Mesh Params", "Min mesh element size", ) obj.CharacteristicLengthMin = 0.0 if not hasattr(obj, "ElementDimension"): obj.addProperty( "App::PropertyEnumeration", "ElementDimension", "FEM Gmsh Mesh Params", "Dimension of mesh elements (Auto = according ShapeType of part to mesh)", ) obj.ElementDimension = MeshGmsh.known_element_dimensions obj.ElementDimension = "From Shape" # according ShapeType of Part to mesh if not hasattr(obj, "ElementOrder"): obj.addProperty( "App::PropertyEnumeration", "ElementOrder", "FEM Gmsh Mesh Params", "Order of mesh elements", ) obj.ElementOrder = MeshGmsh.known_element_orders obj.ElementOrder = "2nd" if not hasattr(obj, "OptimizeStd"): obj.addProperty( "App::PropertyBool", "OptimizeStd", "FEM Gmsh Mesh Params", "Optimize tetrahedral elements", ) obj.OptimizeStd = True if not hasattr(obj, "OptimizeNetgen"): obj.addProperty( "App::PropertyBool", "OptimizeNetgen", "FEM Gmsh Mesh Params", "Optimize tetra elements by use of Netgen", ) obj.OptimizeNetgen = False if not hasattr(obj, "HighOrderOptimize"): obj.addProperty( "App::PropertyEnumeration", "HighOrderOptimize", "FEM Gmsh Mesh Params", "Optimization of high order meshes", ) obj.HighOrderOptimize = MeshGmsh.known_mesh_HighOrderOptimizers obj.HighOrderOptimize = "None" if not hasattr(obj, "RecombineAll"): obj.addProperty( "App::PropertyBool", "RecombineAll", "FEM Gmsh Mesh Params", "Apply recombination algorithm to all surfaces", ) obj.RecombineAll = False if not hasattr(obj, "Recombine3DAll"): obj.addProperty( "App::PropertyBool", "Recombine3DAll", "FEM Gmsh Mesh Params", "Apply recombination algorithm to all volumes", ) obj.Recombine3DAll = False if not hasattr(obj, "RecombinationAlgorithm"): obj.addProperty( "App::PropertyEnumeration", "RecombinationAlgorithm", "FEM Gmsh Mesh Params", "Recombination algorithm", ) obj.RecombinationAlgorithm = MeshGmsh.known_mesh_RecombinationAlgorithms obj.RecombinationAlgorithm = "Simple" if not hasattr(obj, "CoherenceMesh"): obj.addProperty( "App::PropertyBool", "CoherenceMesh", "FEM Gmsh Mesh Params", "Removes all duplicate mesh vertices", ) obj.CoherenceMesh = True if not hasattr(obj, "GeometryTolerance"): obj.addProperty( "App::PropertyFloat", "GeometryTolerance", "FEM Gmsh Mesh Params", "Geometrical Tolerance (0.0 = GMSH std = 1e-08)", ) obj.GeometryTolerance = 1e-06 if not hasattr(obj, "SecondOrderLinear"): obj.addProperty( "App::PropertyBool", "SecondOrderLinear", "FEM Gmsh Mesh Params", "Second order nodes are created by linear interpolation", ) obj.SecondOrderLinear = False # gives much better meshes in the regard of nonpositive jacobians # but # on curved faces the constraint nodes will no longer found # thus standard will be False # https://forum.freecad.org/viewtopic.php?t=41738 # https://forum.freecad.org/viewtopic.php?f=18&t=45260&start=20#p389494 if not hasattr(obj, "MeshSizeFromCurvature"): obj.addProperty( "App::PropertyIntegerConstraint", "MeshSizeFromCurvature", "FEM Gmsh Mesh Params", "number of elements per 2*pi radians, 0 to deactivate", ) obj.MeshSizeFromCurvature = (12, 0, 10000, 1) if not hasattr(obj, "Algorithm2D"): obj.addProperty( "App::PropertyEnumeration", "Algorithm2D", "FEM Gmsh Mesh Params", "mesh algorithm 2D", ) obj.Algorithm2D = MeshGmsh.known_mesh_algorithm_2D obj.Algorithm2D = "Automatic" if not hasattr(obj, "Algorithm3D"): obj.addProperty( "App::PropertyEnumeration", "Algorithm3D", "FEM Gmsh Mesh Params", "mesh algorithm 3D", ) obj.Algorithm3D = MeshGmsh.known_mesh_algorithm_3D obj.Algorithm3D = "Automatic" if not hasattr(obj, "GroupsOfNodes"): obj.addProperty( "App::PropertyBool", "GroupsOfNodes", "FEM Gmsh Mesh Params", "For each group create not only the elements but the nodes too.", ) obj.GroupsOfNodes = False
extractor
historicfilms
from __future__ import unicode_literals from ..utils import parse_duration from .common import InfoExtractor class HistoricFilmsIE(InfoExtractor): _VALID_URL = r"https?://(?:www\.)?historicfilms\.com/(?:tapes/|play)(?P<id>\d+)" _TEST = { "url": "http://www.historicfilms.com/tapes/4728", "md5": "d4a437aec45d8d796a38a215db064e9a", "info_dict": { "id": "4728", "ext": "mov", "title": "Historic Films: GP-7", "description": "md5:1a86a0f3ac54024e419aba97210d959a", "thumbnail": r"re:^https?://.*\.jpg$", "duration": 2096, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) tape_id = self._search_regex( [r'class="tapeId"[^>]*>([^<]+)<', r'tapeId\s*:\s*"([^"]+)"'], webpage, "tape id", ) title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = self._html_search_meta( "thumbnailUrl", webpage, "thumbnails" ) or self._og_search_thumbnail(webpage) duration = parse_duration( self._html_search_meta("duration", webpage, "duration") ) video_url = "http://www.historicfilms.com/video/%s_%s_web.mov" % ( tape_id, video_id, ) return { "id": video_id, "url": video_url, "title": title, "description": description, "thumbnail": thumbnail, "duration": duration, }
operon
commands
# Copyright 2012,2013 Christoph Reiter # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # TODO: # RenameCommand # FillTracknumberCommand import os import re import shutil import subprocess import tempfile from quodlibet import _, util from quodlibet.formats import AudioFileError, EmbeddedImage from quodlibet.pattern import Pattern from quodlibet.pattern import error as PatternError from quodlibet.util.path import mtime from quodlibet.util.tags import MACHINE_TAGS, USER_TAGS, sortkey from quodlibet.util.tagsfrompath import TagsFromPattern from senf import fsn2text from .base import Command, CommandError from .util import copy_mtime, get_editor_args, list_tags, print_table, print_terse_table @Command.register class ListCommand(Command): NAME = "list" DESCRIPTION = _("List tags") USAGE = "[-a] [-t] [-c <c1>,<c2>...] <file>" def _add_options(self, p): p.add_option("-t", "--terse", action="store_true", help=_("Print terse output")) p.add_option( "-c", "--columns", action="store", type="string", help=_("Columns to display and order in terse mode (%s)") % "desc,value,tag", ) p.add_option( "-a", "--all", action="store_true", help=_("Also list programmatic tags") ) def _execute(self, options, args): if len(args) < 1: raise CommandError(_("Not enough arguments")) elif len(args) > 1: raise CommandError(_("Too many arguments")) path = args[0] headers = [_("Description"), _("Value"), _("Tag")] nicks = ["desc", "value", "tag"] if not options.columns: order = nicks else: order = [n.strip() for n in options.columns.split(",")] song = self.load_song(path) tags = list_tags(song, machine=options.all, terse=options.terse) if options.terse: print_terse_table(tags, nicks, order) else: print_table(tags, headers, nicks, order) @Command.register class TagsCommand(Command): NAME = "tags" DESCRIPTION = _("List all common tags") USAGE = "[-t] [-c <c1>,<c2>...]" def _add_options(self, p): p.add_option("-t", "--terse", action="store_true", help=_("Print terse output")) p.add_option( "-c", "--columns", action="store", type="string", help=_("Columns to display and order in terse mode (%s)") % "tag,desc", ) p.add_option( "-a", "--all", action="store_true", help=_("Also list programmatic tags") ) def _execute(self, options, args): if len(args) != 0: raise CommandError(_("Too many arguments")) headers = [_("Tag"), _("Description")] nicks = ["tag", "desc"] if not options.columns: order = nicks else: order = [n.strip() for n in options.columns.split(",")] tag_names = list(USER_TAGS) if options.all: tag_names.extend(MACHINE_TAGS) tags = [] for key in tag_names: tags.append((key, util.tag(key))) tags.sort() if not options.terse: print_table(tags, headers, nicks, order) else: print_terse_table(tags, nicks, order) @Command.register class CopyCommand(Command): NAME = "copy" DESCRIPTION = _("Copy tags from one file to another") USAGE = "[--dry-run] [--ignore-errors] <source> <dest>" def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help=_("Show changes, don't apply them") ) p.add_option( "--ignore-errors", action="store_true", help=_("Skip tags that can't be written"), ) def _execute(self, options, args): if len(args) < 2: raise CommandError(_("Not enough arguments")) elif len(args) > 2: raise CommandError(_("Too many arguments")) if options.dry_run: self.verbose = True source_path = args[0] dest_path = args[1] source = self.load_song(source_path) dest = self.load_song(dest_path) for key in source.realkeys(): self.log("Copy %r" % key) if not options.ignore_errors and not dest.can_change(key): raise CommandError( _("Can't copy tag {tagname} to file: {filename}").format( tagname=repr(key), filename=repr(dest_path) ) ) for value in source.list(key): dest.add(key, value) if not options.dry_run: self.save_songs([dest]) @Command.register class EditCommand(Command): NAME = "edit" DESCRIPTION = _("Edit tags in a text editor") USAGE = "[--dry-run] <file>" # TODO: support editing multiple files def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help=_("Show changes, don't apply them") ) def _song_to_text(self, song): # to text lines = [] for key in sorted(song.realkeys(), key=sortkey): for value in song.list(key): lines.append("%s=%s" % (key, value)) lines += [ "", "#" * 80, "# Lines that are empty or start with '#' will be ignored", "# File: %r" % fsn2text(song("~filename")), ] return "\n".join(lines) def _text_to_song(self, text, song): assert isinstance(text, str) # parse tags = {} for line in text.splitlines(): if not line.strip() or line.startswith("#"): continue try: key, value = line.split("=", 1) except ValueError: continue tags.setdefault(key, []).append(value) # apply changes, sort to always have the same output for key in sorted(song.realkeys(), key=sortkey): new = tags.pop(key, []) old = song.list(key) for value in old: if value not in new: self.log("Remove %s=%s" % (key, value)) song.remove(key, value) for value in new: if value not in old: self.log("Add %s=%s" % (key, value)) song.add(key, value) for key, values in tags.items(): if not song.can_change(key): raise CommandError( "Can't change key '%(key-name)s'." % {"key-name": key} ) for value in values: self.log("Add %s=%s" % (key, value)) song.add(key, value) def _execute(self, options, args): if len(args) < 1: raise CommandError(_("Not enough arguments")) elif len(args) > 1: raise CommandError(_("Too many arguments")) song = self.load_song(args[0]) dump = self._song_to_text(song).encode("utf-8") # write to tmp file fd, path = tempfile.mkstemp(suffix=".txt") try: try: os.write(fd, dump) finally: os.close(fd) # XXX: copy mtime here so we can test for changes in tests by # setting a out of date mtime on the source song file copy_mtime(args[0], path) # only parse the result if the editor returns 0 and the mtime has # changed old_mtime = mtime(path) editor_args = get_editor_args() self.log("Using editor: %r" % editor_args) try: subprocess.check_call(editor_args + [path]) except subprocess.CalledProcessError as e: self.log(str(e)) raise CommandError(_("Editing aborted")) except OSError as e: self.log(str(e)) raise CommandError( _("Starting text editor '%(editor-name)s' failed.") % {"editor-name": editor_args[0]} ) was_changed = mtime(path) != old_mtime if not was_changed: raise CommandError(_("No changes detected")) with open(path, "rb") as h: data = h.read() finally: os.unlink(path) try: text = data.decode("utf-8") except ValueError as e: raise CommandError("Invalid data: %r" % e) if options.dry_run: self.verbose = True self._text_to_song(text, song) if not options.dry_run: self.save_songs([song]) @Command.register class SetCommand(Command): NAME = "set" DESCRIPTION = _("Set a tag and remove existing values") USAGE = "[--dry-run] <tag> <value> <file> [<files>]" def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help=_("Show changes, don't apply them") ) def _execute(self, options, args): if len(args) < 3: raise CommandError(_("Not enough arguments")) tag = fsn2text(args[0]) value = fsn2text(args[1]) paths = args[2:] songs = [] for path in paths: song = self.load_song(path) if not song.can_change(tag): vars = dict(tag=tag, format=type(song).format, file=song("~filename")) raise CommandError( _("Can not set %(tag)r for %(format)s file %(file)r") % vars ) self.log("Set %r to %r" % (value, tag)) if tag in song: del song[tag] song.add(tag, value) songs.append(song) if not options.dry_run: self.save_songs(songs) @Command.register class ClearCommand(Command): NAME = "clear" DESCRIPTION = _("Remove tags") USAGE = "[--dry-run] [-a | -e <pattern> | <tag>] <file> [<files>]" def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help=_("Show changes, don't apply them") ) p.add_option( "-e", "--regexp", action="store", type="string", help=_("Value is a regular expression"), ) p.add_option("-a", "--all", action="store_true", help=_("Remove all tags")) def _execute(self, options, args): if options.all and options.regexp is not None: raise CommandError(_("Can't combine '--all' with '--regexp'")) if options.regexp is not None or options.all: if len(args) < 1: raise CommandError(_("Not enough arguments")) paths = args else: if len(args) < 2: raise CommandError(_("Not enough arguments")) paths = args[1:] if options.dry_run: self.verbose = True songs = [] for path in paths: song = self.load_song(path) tags = [] realkeys = song.realkeys() if options.all: tags.extend(realkeys) elif options.regexp is not None: e = re.compile(options.regexp) tags.extend(filter(e.match, realkeys)) else: tag = args[0] if tag in realkeys: tags.append(tag) for tag in tags: self.log("Remove tag %r" % tag) if not song.can_change(tag): raise CommandError( _("Can't remove {tagname} from {filename}").format( tagname=repr(tag), filename=repr(path) ) ) del song[tag] if tags: songs.append(song) if not options.dry_run: self.save_songs(songs) @Command.register class RemoveCommand(Command): NAME = "remove" DESCRIPTION = _("Remove a tag value") USAGE = "[--dry-run] <tag> [-e <pattern> | <value>] <file> [<files>]" def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help=_("Show changes, don't apply them") ) p.add_option( "-e", "--regexp", action="store", type="string", help=_("Value is a regular expression"), ) def _execute(self, options, args): if options.regexp is None: if len(args) < 3: raise CommandError(_("Not enough arguments")) else: if len(args) < 2: raise CommandError(_("Not enough arguments")) if options.dry_run: self.verbose = True tag = args[0] if options.regexp is not None: match = re.compile(options.regexp).match paths = args[1:] else: value = args[1] paths = args[2:] match = lambda v: v == value songs = [] for path in paths: song = self.load_song(path) if tag not in song: continue for v in song.list(tag): if match(v): self.log("Remove %r from %r" % (v, tag)) song.remove(tag, v) songs.append(song) if not options.dry_run: self.save_songs(songs) @Command.register class AddCommand(Command): NAME = "add" DESCRIPTION = _("Add a tag value") USAGE = "<tag> <value> <file> [<files>]" def _execute(self, options, args): if len(args) < 3: raise CommandError(_("Not enough arguments")) tag = fsn2text(args[0]) value = fsn2text(args[1]) paths = args[2:] songs = [] for path in paths: song = self.load_song(path) if not song.can_change(tag): raise CommandError(_("Can not set %r") % tag) self.log("Add %r to %r" % (value, tag)) song.add(tag, value) songs.append(song) self.save_songs(songs) @Command.register class InfoCommand(Command): NAME = "info" DESCRIPTION = _("List file information") USAGE = "[-t] [-c <c1>,<c2>...] <file>" def _add_options(self, p): p.add_option("-t", "--terse", action="store_true", help=_("Print terse output")) p.add_option( "-c", "--columns", action="store", type="string", help=_("Columns to display and order in terse mode (%s)") % "desc,value", ) def _execute(self, options, args): if len(args) < 1: raise CommandError(_("Not enough arguments")) elif len(args) > 1: raise CommandError(_("Too many arguments")) path = args[0] song = self.load_song(path) headers = [_("Description"), _("Value")] nicks = ["desc", "value"] if not options.columns: order = nicks else: order = [n.strip() for n in options.columns.split(",")] if not options.terse: tags = [] for key in [ "~format", "~codec", "~encoding", "~length", "~bitrate", "~filesize", ]: tags.append((util.tag(key), str(song.comma(key)))) print_table(tags, headers, nicks, order) else: tags = [] for key in [ "~format", "~codec", "~encoding", "~#length", "~#bitrate", "~#filesize", ]: tags.append((key.lstrip("#~"), str(song(key)))) print_terse_table(tags, nicks, order) @Command.register class ImageSetCommand(Command): NAME = "image-set" DESCRIPTION = _( "Set the provided image as primary embedded image and " "remove all other embedded images" ) USAGE = "<image-file> <file> [<files>]" def _execute(self, options, args): if len(args) < 2: raise CommandError(_("Not enough arguments")) image_path = args[0] paths = args[1:] image = EmbeddedImage.from_path(image_path) if not image: raise CommandError(_("Failed to load image file: %r") % image_path) songs = [self.load_song(p) for p in paths] for song in songs: if not song.can_change_images: raise CommandError( _( "Image editing not supported for %(file_name)s " "(%(file_format)s)" ) % {"file_name": song("~filename"), "file_format": song("~format")} ) for song in songs: try: song.set_image(image) except AudioFileError as e: raise CommandError(e) @Command.register class ImageClearCommand(Command): NAME = "image-clear" DESCRIPTION = _("Remove all embedded images") USAGE = "<file> [<files>]" def _execute(self, options, args): if len(args) < 1: raise CommandError(_("Not enough arguments")) paths = args songs = [self.load_song(p) for p in paths] for song in songs: if not song.can_change_images: raise CommandError( _( "Image editing not supported for %(file_name)s " "(%(file_format)s)" ) % {"file_name": song("~filename"), "file_format": song("~format")} ) for song in songs: try: song.clear_images() except AudioFileError as e: raise CommandError(e) @Command.register class ImageExtractCommand(Command): NAME = "image-extract" DESCRIPTION = _("Extract embedded images to %(filepath)s") % { "filepath": "<destination>/<filename>-<index>.(jpeg|png|..)" } USAGE = "[--dry-run] [--primary] [-d <destination>] <file> [<files>]" def _add_options(self, p): p.add_option("--dry-run", action="store_true", help="don't save images") p.add_option( "--primary", action="store_true", help="only extract the primary image" ) p.add_option( "-d", "--destination", action="store", type="string", help=_( "Path to where the images will be saved to " "(defaults to the working directory)" ), ) def _execute(self, options, args): if len(args) < 1: raise CommandError(_("Not enough arguments")) # dry run implies verbose if options.dry_run: self.verbose = True paths = args for path in paths: song = self.load_song(path) # get the primary one or all of them if options.primary: image = song.get_primary_image() images = [image] if image else [] else: images = song.get_images() self.log("Images for %r: %r" % (path, images)) if not images: continue # get the basename from the song without the extension basename = os.path.basename(path) name = os.path.splitext(basename)[0] # at least two places, but same length for all images number_pattern = "%%0%dd" % (max(2, len(images) - 1)) for i, image in enumerate(images): # get a appropriate file extension or use fallback extensions = image.extensions ext = extensions[0] if extensions else ".image" if options.primary: # mysong.mp3 -> mysong.jpeg filename = "%s.%s" % (name, ext) else: # mysong.mp3 -> mysong-00.jpeg pattern = "%s-" + number_pattern + ".%s" filename = pattern % (name, i, ext) if options.destination is not None: filename = os.path.join(options.destination, filename) self.log("Saving image %r" % filename) if not options.dry_run: with open(filename, "wb") as h: shutil.copyfileobj(image.file, h) # @Command.register class RenameCommand(Command): NAME = "rename" DESCRIPTION = _("Rename files based on tags") USAGE = "[--dry-run] <pattern> <file> [<files>]" def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help="show changes, don't apply them" ) def _execute(self, options, args): if len(args) < 1: raise CommandError("Not enough arguments") @Command.register class FillCommand(Command): NAME = "fill" DESCRIPTION = _("Fill tags based on the file path") USAGE = "[--dry-run] <pattern> <file> [<files>]" def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help="show changes, don't apply them" ) def _execute(self, options, args): if len(args) < 2: raise CommandError("Not enough arguments") pattern_text = args[0] self.log("Using pattern: %r" % pattern_text) paths = args[1:] pattern = TagsFromPattern(pattern_text) songs = [] for path in paths: song = self.load_song(path) for header in pattern.headers: if not song.can_change(header): raise CommandError(_("Can not set %r") % header) songs.append(song) if options.dry_run: self.__preview(pattern, songs) else: self.__apply(pattern, songs) def __apply(self, pattern, songs): for song in songs: match = pattern.match(song) self.log("%r: %r" % (song("~basename"), match)) for header in pattern.headers: if header in match: value = match[header] song[header] = value self.save_songs(songs) def __preview(self, pattern, songs): rows = [] for song in songs: match = pattern.match(song) row = [fsn2text(song("~basename"))] for header in pattern.headers: row.append(match.get(header, "")) rows.append(row) headers = [_("File")] + pattern.headers nicks = ["file"] + pattern.headers print_table(rows, headers, nicks, nicks) # @Command.register class FillTracknumberCommand(Command): NAME = "fill-tracknumber" DESCRIPTION = _("Fill tracknumbers for all files") USAGE = "[--dry-run] [--start] [--total] <file> [<files>]" def _add_options(self, p): p.add_option( "--dry-run", action="store_true", help="show changes, don't apply them" ) p.add_option("--start", action="store_true", help="tracknumber to start with") p.add_option("--total", action="store_true", help="total number of tracks") def _execute(self, options, args): if len(args) < 1: raise CommandError("Not enough arguments") @Command.register class PrintCommand(Command): NAME = "print" DESCRIPTION = _("Print tags based on the given pattern") USAGE = "[-p <pattern>] <file> [<files>]" def _add_options(self, p): p.add_option( "-p", "--pattern", action="store", type="string", help="use a custom pattern", ) def _execute(self, options, args): if len(args) < 1: raise CommandError("Not enough arguments") pattern = options.pattern if pattern is None: pattern = "<artist~album~tracknumber~title>" self.log("Using pattern: %r" % pattern) try: pattern = Pattern(pattern) except PatternError: raise CommandError("Invalid pattern: %r" % pattern) paths = args error = False for path in paths: try: util.print_(pattern % self.load_song(path)) except CommandError: error = True if error: raise CommandError("One or more files failed to load.") @Command.register class HelpCommand(Command): NAME = "help" DESCRIPTION = _("Display help information") USAGE = "[<command>]" def _execute(self, options, args): if len(args) > 1: raise CommandError("Too many arguments") for cmd in Command.COMMANDS: if cmd.NAME == args[0]: cmd(self._main_cmd).print_help() break else: raise CommandError("Unknown command")
widgets
main
from sglib.lib.translate import _ from sgui.sgqt import * from . import _shared from .control import * from .note_selector import NoteSelectorWidget class main_widget: def __init__( self, a_size, a_rel_callback, a_val_callback, a_vol_port, a_glide_port, a_pitchbend_port, a_port_dict, a_title=_("Main"), a_uni_voices_port=None, a_uni_spread_port=None, a_preset_mgr=None, a_poly_port=None, a_min_note_port=None, a_max_note_port=None, a_pitch_port=None, a_pb_min=1, knob_kwargs={}, ): self.group_box = QGroupBox() self.group_box.setObjectName("plugin_groupbox") self.group_box.setTitle(str(a_title)) self.layout = QGridLayout(self.group_box) self.layout.setContentsMargins(3, 3, 3, 3) self.vol_knob = knob_control( a_size, _("Vol"), a_vol_port, a_rel_callback, a_val_callback, -30, 12, -6, _shared.KC_INTEGER, a_port_dict, a_preset_mgr, knob_kwargs=knob_kwargs, tooltip="Master volume for the entire plugin", ) self.vol_knob.add_to_grid_layout(self.layout, 0) if a_uni_voices_port is not None and a_uni_spread_port is not None: self.uni_voices_knob = knob_control( a_size, _("Unison"), a_uni_voices_port, a_rel_callback, a_val_callback, 1, 7, 1, _shared.KC_INTEGER, a_port_dict, a_preset_mgr, knob_kwargs=knob_kwargs, tooltip=( "Unison voices for the entire plugin. Gives a thicker " "sound" ), ) self.uni_voices_knob.add_to_grid_layout(self.layout, 1) self.uni_spread_knob = knob_control( a_size, _("Spread"), a_uni_spread_port, a_rel_callback, a_val_callback, 10, 100, 50, _shared.KC_DECIMAL, a_port_dict, a_preset_mgr, knob_kwargs=knob_kwargs, tooltip=( "Unison detune. Lower values sound thinner, larger " "values sound bigger but more discordant" ), ) self.uni_spread_knob.add_to_grid_layout(self.layout, 2) if a_pitch_port is not None: self.pitch_knob = knob_control( a_size, _("Pitch"), a_pitch_port, a_rel_callback, a_val_callback, -36, 36, 0, _shared.KC_INTEGER, a_port_dict, a_preset_mgr, knob_kwargs=knob_kwargs, tooltip=( "Instrument pitch offset in semitones. Adjust the " "pitch of all incoming MIDI notes" ), ) self.pitch_knob.add_to_grid_layout(self.layout, 4) self.glide_knob = knob_control( a_size, _("Glide"), a_glide_port, a_rel_callback, a_val_callback, 0, 200, 0, _shared.KC_TIME_DECIMAL, a_port_dict, a_preset_mgr, knob_kwargs=knob_kwargs, tooltip=( "Glide time. Values greater than 0.0 causes consecutive " "MIDI notes to take that much time to glide to the next " "pitch" ), ) self.glide_knob.add_to_grid_layout(self.layout, 5) self.pb_knob = knob_control( a_size, _("Pitchbend"), a_pitchbend_port, a_rel_callback, a_val_callback, a_pb_min, 36, 18, _shared.KC_INTEGER, a_port_dict, a_preset_mgr, knob_kwargs=knob_kwargs, tooltip=( "Pitchbend in semitones. The amount that pitch should bend " "when the pitchbend wheel is fully up or down" ), ) self.pb_knob.add_to_grid_layout(self.layout, 6) if a_poly_port is not None: self.mono_combobox = combobox_control( 90, "Poly Mode", a_poly_port, a_rel_callback, a_val_callback, ["Retrig.", "Free", "Mono", "Mono2"], a_port_dict, 0, a_preset_mgr, tooltip=( "Polyphony mode. Retrig: Oscillators retrigger their " "phase on a new note. Free: Oscillators do not retrigger" "on a new note. Mono/2: For instruments that only play " "one note at a time" ), ) self.mono_combobox.add_to_grid_layout(self.layout, 7) if a_min_note_port or a_max_note_port: assert a_min_note_port and a_max_note_port self.min_note = NoteSelectorWidget( a_min_note_port, a_rel_callback, a_val_callback, a_port_dict, 0, a_preset_mgr, ) self.max_note = NoteSelectorWidget( a_max_note_port, a_rel_callback, a_val_callback, a_port_dict, 120, a_preset_mgr, ) self.min_note.widget.setObjectName("transparent") self.max_note.widget.setObjectName("transparent") self.range_label = QLabel(_("Range")) self.range_label.setObjectName("plugin_name_label") self.layout.addWidget( self.range_label, 0, 9, alignment=QtCore.Qt.AlignmentFlag.AlignHCenter, ) self.layout.addWidget(self.min_note.widget, 1, 9) self.layout.addWidget(self.max_note.widget, 2, 9)
commands
populate_streams
""" Re-create user streams """ from bookwyrm import activitystreams, lists_stream, models from django.core.management.base import BaseCommand def populate_streams(stream=None): """build all the streams for all the users""" streams = [stream] if stream else activitystreams.streams.keys() print("Populating streams", streams) users = models.User.objects.filter( local=True, is_active=True, ).order_by("-last_active_date") print("This may take a long time! Please be patient.") for user in users: print(".", end="") lists_stream.populate_lists_task.delay(user.id) for stream_key in streams: print(".", end="") activitystreams.populate_stream_task.delay(stream_key, user.id) class Command(BaseCommand): """start all over with user streams""" help = "Populate streams for all users" def add_arguments(self, parser): parser.add_argument( "--stream", default=None, help="Specifies which time of stream to populate", ) # pylint: disable=no-self-use,unused-argument def handle(self, *args, **options): """run feed builder""" stream = options.get("stream") populate_streams(stream=stream)
dataset-tools
create_pet_tf_record
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Convert the Oxford pet dataset to TFRecord for object_detection. See: O. M. Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar Cats and Dogs IEEE Conference on Computer Vision and Pattern Recognition, 2012 http://www.robots.ox.ac.uk/~vgg/data/pets/ Example usage: python object_detection/dataset_tools/create_pet_tf_record.py \ --data_dir=/home/user/pet \ --output_dir=/home/user/pet/output """ import hashlib import io import logging import os import random import re import numpy as np import PIL.Image import tensorflow as tf from app.object_detection.utils import dataset_util, label_map_util from lxml import etree flags = tf.app.flags flags.DEFINE_string("data_dir", "", "Root directory to raw pet dataset.") flags.DEFINE_string("output_dir", "", "Path to directory to output TFRecords.") flags.DEFINE_string( "label_map_path", "data/pet_label_map.pbtxt", "Path to label map proto" ) flags.DEFINE_boolean( "faces_only", True, "If True, generates bounding boxes " "for pet faces. Otherwise generates bounding boxes (as " "well as segmentations for full pet bodies). Note that " "in the latter case, the resulting files are much larger.", ) FLAGS = flags.FLAGS def get_class_name_from_filename(file_name): """Gets the class name from a file. Args: file_name: The file name to get the class name from. ie. "american_pit_bull_terrier_105.jpg" Returns: A string of the class name. """ match = re.match(r"([A-Za-z_]+)(_[0-9]+\.jpg)", file_name, re.I) return match.groups()[0] def dict_to_tf_example( data, mask_path, label_map_dict, image_subdirectory, ignore_difficult_instances=False, faces_only=True, ): """Convert XML derived dict to tf.Example proto. Notice that this function normalizes the bounding box coordinates provided by the raw data. Args: data: dict holding PASCAL XML fields for a single image (obtained by running dataset_util.recursive_parse_xml_to_dict) mask_path: String path to PNG encoded mask. label_map_dict: A map from string label names to integers ids. image_subdirectory: String specifying subdirectory within the Pascal dataset directory holding the actual image data. ignore_difficult_instances: Whether to skip difficult instances in the dataset (default: False). faces_only: If True, generates bounding boxes for pet faces. Otherwise generates bounding boxes (as well as segmentations for full pet bodies). Returns: example: The converted tf.Example. Raises: ValueError: if the image pointed to by data['filename'] is not a valid JPEG """ img_path = os.path.join(image_subdirectory, data["filename"]) with tf.gfile.GFile(img_path, "rb") as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = PIL.Image.open(encoded_jpg_io) if image.format != "JPEG": raise ValueError("Image format not JPEG") key = hashlib.sha256(encoded_jpg).hexdigest() with tf.gfile.GFile(mask_path, "rb") as fid: encoded_mask_png = fid.read() encoded_png_io = io.BytesIO(encoded_mask_png) mask = PIL.Image.open(encoded_png_io) if mask.format != "PNG": raise ValueError("Mask format not PNG") mask_np = np.asarray(mask) nonbackground_indices_x = np.any(mask_np != 2, axis=0) nonbackground_indices_y = np.any(mask_np != 2, axis=1) nonzero_x_indices = np.where(nonbackground_indices_x) nonzero_y_indices = np.where(nonbackground_indices_y) width = int(data["size"]["width"]) height = int(data["size"]["height"]) xmins = [] ymins = [] xmaxs = [] ymaxs = [] classes = [] classes_text = [] truncated = [] poses = [] difficult_obj = [] masks = [] for obj in data["object"]: difficult = bool(int(obj["difficult"])) if ignore_difficult_instances and difficult: continue difficult_obj.append(int(difficult)) if faces_only: xmin = float(obj["bndbox"]["xmin"]) xmax = float(obj["bndbox"]["xmax"]) ymin = float(obj["bndbox"]["ymin"]) ymax = float(obj["bndbox"]["ymax"]) else: xmin = float(np.min(nonzero_x_indices)) xmax = float(np.max(nonzero_x_indices)) ymin = float(np.min(nonzero_y_indices)) ymax = float(np.max(nonzero_y_indices)) xmins.append(xmin / width) ymins.append(ymin / height) xmaxs.append(xmax / width) ymaxs.append(ymax / height) class_name = get_class_name_from_filename(data["filename"]) classes_text.append(class_name.encode("utf8")) classes.append(label_map_dict[class_name]) truncated.append(int(obj["truncated"])) poses.append(obj["pose"].encode("utf8")) if not faces_only: mask_remapped = mask_np != 2 masks.append(mask_remapped) mask_stack = np.stack(masks).astype(np.float32) masks_flattened = np.reshape(mask_stack, [-1]) feature_dict = { "image/height": dataset_util.int64_feature(height), "image/width": dataset_util.int64_feature(width), "image/filename": dataset_util.bytes_feature(data["filename"].encode("utf8")), "image/source_id": dataset_util.bytes_feature(data["filename"].encode("utf8")), "image/key/sha256": dataset_util.bytes_feature(key.encode("utf8")), "image/encoded": dataset_util.bytes_feature(encoded_jpg), "image/format": dataset_util.bytes_feature("jpeg".encode("utf8")), "image/object/bbox/xmin": dataset_util.float_list_feature(xmins), "image/object/bbox/xmax": dataset_util.float_list_feature(xmaxs), "image/object/bbox/ymin": dataset_util.float_list_feature(ymins), "image/object/bbox/ymax": dataset_util.float_list_feature(ymaxs), "image/object/class/text": dataset_util.bytes_list_feature(classes_text), "image/object/class/label": dataset_util.int64_list_feature(classes), "image/object/difficult": dataset_util.int64_list_feature(difficult_obj), "image/object/truncated": dataset_util.int64_list_feature(truncated), "image/object/view": dataset_util.bytes_list_feature(poses), } if not faces_only: feature_dict["image/object/mask"] = dataset_util.float_list_feature( masks_flattened.tolist() ) example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) return example def create_tf_record( output_filename, label_map_dict, annotations_dir, image_dir, examples, faces_only=True, ): """Creates a TFRecord file from examples. Args: output_filename: Path to where output file is saved. label_map_dict: The label map dictionary. annotations_dir: Directory where annotation files are stored. image_dir: Directory where image files are stored. examples: Examples to parse and save to tf record. faces_only: If True, generates bounding boxes for pet faces. Otherwise generates bounding boxes (as well as segmentations for full pet bodies). """ writer = tf.python_io.TFRecordWriter(output_filename) for idx, example in enumerate(examples): if idx % 100 == 0: logging.info("On image %d of %d", idx, len(examples)) xml_path = os.path.join(annotations_dir, "xmls", example + ".xml") mask_path = os.path.join(annotations_dir, "trimaps", example + ".png") if not os.path.exists(xml_path): logging.warning("Could not find %s, ignoring example.", xml_path) continue with tf.gfile.GFile(xml_path, "r") as fid: xml_str = fid.read() xml = etree.fromstring(xml_str) data = dataset_util.recursive_parse_xml_to_dict(xml)["annotation"] try: tf_example = dict_to_tf_example( data, mask_path, label_map_dict, image_dir, faces_only=faces_only ) writer.write(tf_example.SerializeToString()) except ValueError: logging.warning("Invalid example: %s, ignoring.", xml_path) writer.close() # TODO(derekjchow): Add test for pet/PASCAL main files. def main(_): data_dir = FLAGS.data_dir label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path) logging.info("Reading from Pet dataset.") image_dir = os.path.join(data_dir, "images") annotations_dir = os.path.join(data_dir, "annotations") examples_path = os.path.join(annotations_dir, "trainval.txt") examples_list = dataset_util.read_examples_list(examples_path) # Test images are not included in the downloaded data set, so we shall perform # our own split. random.seed(42) random.shuffle(examples_list) num_examples = len(examples_list) num_train = int(0.7 * num_examples) train_examples = examples_list[:num_train] val_examples = examples_list[num_train:] logging.info( "%d training and %d validation examples.", len(train_examples), len(val_examples), ) train_output_path = os.path.join(FLAGS.output_dir, "pet_train.record") val_output_path = os.path.join(FLAGS.output_dir, "pet_val.record") if FLAGS.faces_only: train_output_path = os.path.join( FLAGS.output_dir, "pet_train_with_masks.record" ) val_output_path = os.path.join(FLAGS.output_dir, "pet_val_with_masks.record") create_tf_record( train_output_path, label_map_dict, annotations_dir, image_dir, train_examples, faces_only=FLAGS.faces_only, ) create_tf_record( val_output_path, label_map_dict, annotations_dir, image_dir, val_examples, faces_only=FLAGS.faces_only, ) if __name__ == "__main__": tf.app.run()
cli
client
__license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html" __copyright__ = "Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License" import json import click import octoprint_client from octoprint import FatalStartupError, init_settings from octoprint.cli import bulk_options, get_ctx_obj_option from octoprint.util import yaml click.disable_unicode_literals_warning = True class JsonStringParamType(click.ParamType): name = "json" def convert(self, value, param, ctx): try: return json.loads(value) except Exception: self.fail("%s is not a valid json string" % value, param, ctx) def create_client( settings=None, apikey=None, host=None, port=None, httpuser=None, httppass=None, https=False, prefix=None, ): assert host is not None or settings is not None assert port is not None or settings is not None assert apikey is not None or settings is not None if not host: host = settings.get(["server", "host"]) host = host if host != "0.0.0.0" else "127.0.0.1" if not port: port = settings.getInt(["server", "port"]) if not apikey: apikey = settings.get(["api", "key"]) baseurl = octoprint_client.build_base_url( https=https, httpuser=httpuser, httppass=httppass, host=host, port=port, prefix=prefix, ) return octoprint_client.Client(baseurl, apikey) client_options = bulk_options( [ click.option("--apikey", "-a", type=click.STRING), click.option("--host", "-h", type=click.STRING), click.option("--port", "-p", type=click.INT), click.option("--httpuser", type=click.STRING), click.option("--httppass", type=click.STRING), click.option("--https", is_flag=True), click.option("--prefix", type=click.STRING), ] ) """Common options to configure an API client.""" @click.group(context_settings={"ignore_unknown_options": True}) @client_options @click.pass_context def cli(ctx, apikey, host, port, httpuser, httppass, https, prefix): """Basic API client.""" try: settings = None if not host or not port or not apikey: settings = init_settings( get_ctx_obj_option(ctx, "basedir", None), get_ctx_obj_option(ctx, "configfile", None), ) ctx.obj.client = create_client( settings=settings, apikey=apikey, host=host, port=port, httpuser=httpuser, httppass=httppass, https=https, prefix=prefix, ) except FatalStartupError as e: click.echo(str(e), err=True) click.echo("There was a fatal error initializing the client.", err=True) ctx.exit(-1) def log_response(response, status_code=True, body=True, headers=False): if status_code: click.echo(f"Status Code: {response.status_code}") if headers: for header, value in response.headers.items(): click.echo(f"{header}: {value}") click.echo() if body: click.echo(response.text) @cli.command("get") @click.argument("path") @click.option("--timeout", type=float, default=None) @click.pass_context def get(ctx, path, timeout): """Performs a GET request against the specified server path.""" r = ctx.obj.client.get(path, timeout=timeout) log_response(r) @cli.command("post_json") @click.argument("path") @click.argument("data", type=JsonStringParamType()) @click.option("--timeout", type=float, default=None) @click.pass_context def post_json(ctx, path, data, timeout): """POSTs JSON data to the specified server path.""" r = ctx.obj.client.post_json(path, data, timeout=timeout) log_response(r) @cli.command("patch_json") @click.argument("path") @click.argument("data", type=JsonStringParamType()) @click.option("--timeout", type=float, default=None, help="Request timeout in seconds") @click.pass_context def patch_json(ctx, path, data, timeout): """PATCHes JSON data to the specified server path.""" r = ctx.obj.client.patch(path, data, encoding="json", timeout=timeout) log_response(r) @cli.command("post_from_file") @click.argument("path") @click.argument( "file_path", type=click.Path(exists=True, dir_okay=False, resolve_path=True) ) @click.option("--json", is_flag=True) @click.option("--yaml", is_flag=True) @click.option("--timeout", type=float, default=None, help="Request timeout in seconds") @click.pass_context def post_from_file(ctx, path, file_path, json_flag, yaml_flag, timeout): """POSTs JSON data to the specified server path, taking the data from the specified file.""" if json_flag or yaml_flag: if json_flag: with open(file_path) as fp: data = json.load(fp) else: data = yaml.load_from_file(path=file_path) r = ctx.obj.client.post_json(path, data, timeout=timeout) else: with open(file_path, "rb") as fp: data = fp.read() r = ctx.obj.client.post(path, data, timeout=timeout) log_response(r) @cli.command("command") @click.argument("path") @click.argument("command") @click.option( "--str", "-s", "str_params", multiple=True, nargs=2, type=click.Tuple([str, str]), ) @click.option( "--int", "-i", "int_params", multiple=True, nargs=2, type=click.Tuple([str, int]) ) @click.option( "--float", "-f", "float_params", multiple=True, nargs=2, type=click.Tuple([str, float]), ) @click.option( "--bool", "-b", "bool_params", multiple=True, nargs=2, type=click.Tuple([str, bool]), ) @click.option("--timeout", type=float, default=None, help="Request timeout in seconds") @click.pass_context def command( ctx, path, command, str_params, int_params, float_params, bool_params, timeout ): """Sends a JSON command to the specified server path.""" data = {} params = str_params + int_params + float_params + bool_params for param in params: data[param[0]] = param[1] r = ctx.obj.client.post_command(path, command, additional=data, timeout=timeout) log_response(r, body=False) @cli.command("upload") @click.argument("path") @click.argument( "file_path", type=click.Path(exists=True, dir_okay=False, resolve_path=True) ) @click.option( "--parameter", "-P", "params", multiple=True, nargs=2, type=click.Tuple([str, str]), ) @click.option("--file-name", type=click.STRING) @click.option("--content-type", type=click.STRING) @click.option("--timeout", type=float, default=None, help="Request timeout in seconds") @click.pass_context def upload(ctx, path, file_path, params, file_name, content_type, timeout): """Uploads the specified file to the specified server path.""" data = {} for param in params: data[param[0]] = param[1] r = ctx.obj.client.upload( path, file_path, additional=data, file_name=file_name, content_type=content_type, timeout=timeout, ) log_response(r) @cli.command("delete") @click.argument("path") @click.option("--timeout", type=float, default=None, help="Request timeout in seconds") @click.pass_context def delete(ctx, path, timeout): """Sends a DELETE request to the specified server path.""" r = ctx.obj.client.delete(path, timeout=timeout) log_response(r) @cli.command("listen") @click.pass_context def listen(ctx): def on_connect(ws): click.echo("--- Connected!") def on_close(ws): click.echo("--- Connection closed!") def on_error(ws, error): click.echo(f"!!! Error: {error}") def on_sent(ws, data): click.echo(f">>> {json.dumps(data)}") def on_heartbeat(ws): click.echo("<3") def on_message(ws, message_type, message_payload): click.echo(f"<<< {message_type}, Payload: {json.dumps(message_payload)}") socket = ctx.obj.client.create_socket( on_connect=on_connect, on_close=on_close, on_error=on_error, on_sent=on_sent, on_heartbeat=on_heartbeat, on_message=on_message, ) click.echo("--- Waiting for client to exit") try: socket.wait() finally: click.echo("--- Goodbye...")
vocoder
qa_gsm_full_rate
#!/usr/bin/env python # # Copyright 2004,2007,2010,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # from gnuradio import blocks, gr, gr_unittest, vocoder class test_gsm_vocoder(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block() def tearDown(self): self.tb = None def test001_module_load(self): data = 20 * (100, 200, 300, 400, 500, 600, 700, 800) expected_data = [ 0, 0, 360, 304, 256, 720, 600, 504, 200, 144, 128, 464, 376, 384, 680, 576, 440, 264, 176, 176, 640, 520, 480, 464, 384, 288, 432, 296, 328, 760, 624, 504, 176, 96, 96, 416, 312, 360, 808, 672, 216, 104, 136, 504, 376, 448, 720, 608, 296, 304, 176, 336, 576, 456, 560, 512, 320, 216, 344, 264, 456, 672, 576, 488, 192, 80, 152, 424, 368, 552, 688, 560, 280, 200, 104, 256, 520, 464, 608, 488, 184, 104, 16, 472, 456, 360, 696, 568, 208, 136, 88, 376, 480, 456, 616, 472, 232, 224, 264, 320, 512, 624, 632, 520, 176, 80, 192, 304, 400, 592, 664, 552, 248, 152, 144, 336, 440, 520, 616, 664, 304, 176, 80, 536, 448, 376, 680, 600, 240, 168, 112, 408, 488, 472, 608, 480, 240, 232, 208, 288, 480, 600, 616, 520, 176, 88, 184, 296, 392, 584, 656, 552, 248, 160, 144, 336, 432, 512, 608, 664, ] src = blocks.vector_source_s(data) enc = vocoder.gsm_fr_encode_sp() dec = vocoder.gsm_fr_decode_ps() snk = blocks.vector_sink_s() self.tb.connect(src, enc, dec, snk) self.tb.run() actual_result = snk.data() self.assertEqual(expected_data, actual_result) if __name__ == "__main__": gr_unittest.run(test_gsm_vocoder)
util
textencoding
# -*- coding: utf-8 -*- # # Picard, the next-generation MusicBrainz tagger # # Copyright (C) 2004 Robert Kaye # Copyright (C) 2006 Lukáš Lalinský # Copyright (C) 2014 Sophist-UK # Copyright (C) 2014, 2018, 2020-2021 Laurent Monin # Copyright (C) 2017 Sambhav Kothari # Copyright (C) 2018-2019, 2021 Philipp Wolfer # Copyright (C) 2020 Gabriel Ferreira # Copyright (C) 2020 Undearius # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # This modules provides functionality for simplifying unicode strings. # The unicode character set (of over 1m codepoints and 24,000 characters) includes: # Normal ascii (latin) non-accented characters # Combined latin characters e.g. ae in normal usage # Compatibility combined latin characters (retained for compatibility with other character sets) # These can look very similar to normal characters and can be confusing for searches, sort orders etc. # Non-latin (e.g. japanese, greek, hebrew etc.) characters # Both latin and non-latin characters can be accented. Accents can be either: # Provided by separate nonspacing_mark characters which are visually overlaid (visually 1 character is actually 2); or # Integrated accented characters (i.e. non-accented characters combined with a nonspace_mark into a single character) # Again these can be confusing for searches, sort orders etc. # Punctuation can also be confusing in unicode e.g. several types of single or double quote mark. # For latin script: # Combined characters, accents and punctuation can be visually similar but look different to search engines, # sort orders etc. and the number of ways to use similar looking characters can (does) result in inconsistent # usage inside Music metadata. # # Simplifying # the unicode character sets by many-to-one mappings can improve consistency and reduce confusion, # however sometimes the choice of specific characters can be a deliberate part of an album, song title or artist name # (and should not therefore be changed without careful thought) and occasionally the choice of characters can be # malicious (i.e. to defeat firewalls or spam filters or to appear to be something else). # # Finally, given the size of the unicode character set, fonts are unlikely to display all characters, # making simplification a necessity. # # Simplification may also be needed to make tags conform to ISO-8859-1 (extended ascii) or to make tags or filenames # into ascii, perhaps because the file system or player cannot support unicode. # # Non-latin scripts may also need to be converted to latin scripts through: # Translation (e.g. hebrew word for mother is translated to "mother"); or # Transliteration (e.g. the SOUND of the hebrew letter or word is spelt out in latin) # These are non-trivial, and the software to do these is far from comprehensive. # This module provides utility functions to enable simplification of latin and punctuation unicode: # 1. simplify compatibility characters; # 2. split combined characters; # 3. remove accents (entirely or if not in ISO-8859-1 as applicable); # 4. replace remaining non-ascii or non-ISO-8859-1 characters with a default character # This module also provides an extension infrastructure to allow translation and / or transliteration plugins to be added. import codecs import unicodedata from functools import partial from picard.util import sanitize_filename # LATIN SIMPLIFICATION # The translation tables for punctuation and latin combined-characters are taken from # http://unicode.org/repos/cldr/trunk/common/transforms/Latin-ASCII.xml # Various bugs and mistakes in this have been ironed out during testing. _additional_compatibility = { "\u0276": "Œ", # LATIN LETTER SMALL CAPITAL OE "\u1D00": "A", # LATIN LETTER SMALL CAPITAL A "\u1D01": "Æ", # LATIN LETTER SMALL CAPITAL AE "\u1D04": "C", # LATIN LETTER SMALL CAPITAL C "\u1D05": "D", # LATIN LETTER SMALL CAPITAL D "\u1D07": "E", # LATIN LETTER SMALL CAPITAL E "\u1D0A": "J", # LATIN LETTER SMALL CAPITAL J "\u1D0B": "K", # LATIN LETTER SMALL CAPITAL K "\u1D0D": "M", # LATIN LETTER SMALL CAPITAL M "\u1D0F": "O", # LATIN LETTER SMALL CAPITAL O "\u1D18": "P", # LATIN LETTER SMALL CAPITAL P "\u1D1B": "T", # LATIN LETTER SMALL CAPITAL T "\u1D1C": "U", # LATIN LETTER SMALL CAPITAL U "\u1D20": "V", # LATIN LETTER SMALL CAPITAL V "\u1D21": "W", # LATIN LETTER SMALL CAPITAL W "\u1D22": "Z", # LATIN LETTER SMALL CAPITAL Z "\u3007": "0", # IDEOGRAPHIC NUMBER ZERO "\u00A0": " ", # NO-BREAK SPACE "\u3000": " ", # IDEOGRAPHIC SPACE (from ‹character-fallback›) "\u2033": "”", # DOUBLE PRIME "\uff0f": "/", # FULLWIDTH SOLIDUS } def unicode_simplify_compatibility(string, pathsave=False, win_compat=False): interim = "".join( _replace_char(_additional_compatibility, ch, pathsave, win_compat) for ch in string ) return unicodedata.normalize("NFKC", interim) _simplify_punctuation = { "\u013F": "L", # LATIN CAPITAL LETTER L WITH MIDDLE DOT (compat) "\u0140": "l", # LATIN SMALL LETTER L WITH MIDDLE DOT (compat) "\u2018": "'", # LEFT SINGLE QUOTATION MARK (from ‹character-fallback›) "\u2019": "'", # RIGHT SINGLE QUOTATION MARK (from ‹character-fallback›) "\u201A": "'", # SINGLE LOW-9 QUOTATION MARK (from ‹character-fallback›) "\u201B": "'", # SINGLE HIGH-REVERSED-9 QUOTATION MARK (from ‹character-fallback›) "\u201C": '"', # LEFT DOUBLE QUOTATION MARK (from ‹character-fallback›) "\u201D": '"', # RIGHT DOUBLE QUOTATION MARK (from ‹character-fallback›) "\u201E": '"', # DOUBLE LOW-9 QUOTATION MARK (from ‹character-fallback›) "\u201F": '"', # DOUBLE HIGH-REVERSED-9 QUOTATION MARK (from ‹character-fallback›) "\u2032": "'", # PRIME "\u2033": '"', # DOUBLE PRIME "\u301D": '"', # REVERSED DOUBLE PRIME QUOTATION MARK "\u301E": '"', # DOUBLE PRIME QUOTATION MARK "\u00AB": "<<", # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK (from ‹character-fallback›) "\u00BB": ">>", # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK (from ‹character-fallback›) "\u2039": "<", # SINGLE LEFT-POINTING ANGLE QUOTATION MARK "\u203A": ">", # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK "\u00AD": "", # SOFT HYPHEN (from ‹character-fallback›) "\u2010": "-", # HYPHEN (from ‹character-fallback›) "\u2011": "-", # NON-BREAKING HYPHEN (from ‹character-fallback›) "\u2012": "-", # FIGURE DASH (from ‹character-fallback›) "\u2013": "-", # EN DASH (from ‹character-fallback›) "\u2014": "-", # EM DASH (from ‹character-fallback›) "\u2015": "-", # HORIZONTAL BAR (from ‹character-fallback›) "\uFE31": "|", # PRESENTATION FORM FOR VERTICAL EM DASH (compat) "\uFE32": "|", # PRESENTATION FORM FOR VERTICAL EN DASH (compat) "\uFE58": "-", # SMALL EM DASH (compat) "\u2016": "||", # DOUBLE VERTICAL LINE "\u2044": "/", # FRACTION SLASH (from ‹character-fallback›) "\u2045": "[", # LEFT SQUARE BRACKET WITH QUILL "\u2046": "]", # RIGHT SQUARE BRACKET WITH QUILL "\u204E": "*", # LOW ASTERISK "\u3008": "<", # LEFT ANGLE BRACKET "\u3009": ">", # RIGHT ANGLE BRACKET "\u300A": "<<", # LEFT DOUBLE ANGLE BRACKET "\u300B": ">>", # RIGHT DOUBLE ANGLE BRACKET "\u3014": "[", # LEFT TORTOISE SHELL BRACKET "\u3015": "]", # RIGHT TORTOISE SHELL BRACKET "\u3018": "[", # LEFT WHITE TORTOISE SHELL BRACKET "\u3019": "]", # RIGHT WHITE TORTOISE SHELL BRACKET "\u301A": "[", # LEFT WHITE SQUARE BRACKET "\u301B": "]", # RIGHT WHITE SQUARE BRACKET "\uFE11": ",", # PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC COMMA (compat) "\uFE12": ".", # PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC FULL STOP (compat) "\uFE39": "[", # PRESENTATION FORM FOR VERTICAL LEFT TORTOISE SHELL BRACKET (compat) "\uFE3A": "]", # PRESENTATION FORM FOR VERTICAL RIGHT TORTOISE SHELL BRACKET (compat) "\uFE3D": "<<", # PRESENTATION FORM FOR VERTICAL LEFT DOUBLE ANGLE BRACKET (compat) "\uFE3E": ">>", # PRESENTATION FORM FOR VERTICAL RIGHT DOUBLE ANGLE BRACKET (compat) "\uFE3F": "<", # PRESENTATION FORM FOR VERTICAL LEFT ANGLE BRACKET (compat) "\uFE40": ">", # PRESENTATION FORM FOR VERTICAL RIGHT ANGLE BRACKET (compat) "\uFE51": ",", # SMALL IDEOGRAPHIC COMMA (compat) "\uFE5D": "[", # SMALL LEFT TORTOISE SHELL BRACKET (compat) "\uFE5E": "]", # SMALL RIGHT TORTOISE SHELL BRACKET (compat) "\uFF5F": "((", # FULLWIDTH LEFT WHITE PARENTHESIS (compat)(from ‹character-fallback›) "\uFF60": "))", # FULLWIDTH RIGHT WHITE PARENTHESIS (compat)(from ‹character-fallback›) "\uFF61": ".", # HALFWIDTH IDEOGRAPHIC FULL STOP (compat) "\uFF64": ",", # HALFWIDTH IDEOGRAPHIC COMMA (compat) "\u2212": "-", # MINUS SIGN (from ‹character-fallback›) "\u2215": "/", # DIVISION SLASH (from ‹character-fallback›) "\u2216": "\\", # SET MINUS (from ‹character-fallback›) "\u2223": "|", # DIVIDES (from ‹character-fallback›) "\u2225": "||", # PARALLEL TO (from ‹character-fallback›) "\u226A": "<<", # MUCH LESS-THAN "\u226B": ">>", # MUCH GREATER-THAN "\u2985": "((", # LEFT WHITE PARENTHESIS "\u2986": "))", # RIGHT WHITE PARENTHESIS "\u2022": "-", # BULLET "\u200B": "", # Zero Width Space } def unicode_simplify_punctuation(string, pathsave=False, win_compat=False): return "".join( _replace_char(_simplify_punctuation, ch, pathsave, win_compat) for ch in string ) _simplify_combinations = { "\u00C6": "AE", # LATIN CAPITAL LETTER AE (from ‹character-fallback›) "\u00D0": "D", # LATIN CAPITAL LETTER ETH "\u00D8": "OE", # LATIN CAPITAL LETTER O WITH STROKE (see https://en.wikipedia.org/wiki/%C3%98) "\u00DE": "TH", # LATIN CAPITAL LETTER THORN "\u00DF": "ss", # LATIN SMALL LETTER SHARP S (from ‹character-fallback›) "\u00E6": "ae", # LATIN SMALL LETTER AE (from ‹character-fallback›) "\u00F0": "d", # LATIN SMALL LETTER ETH "\u00F8": "oe", # LATIN SMALL LETTER O WITH STROKE (see https://en.wikipedia.org/wiki/%C3%98) "\u00FE": "th", # LATIN SMALL LETTER THORN "\u0110": "D", # LATIN CAPITAL LETTER D WITH STROKE "\u0111": "d", # LATIN SMALL LETTER D WITH STROKE "\u0126": "H", # LATIN CAPITAL LETTER H WITH STROKE "\u0127": "h", # LATIN CAPITAL LETTER H WITH STROKE "\u0131": "i", # LATIN SMALL LETTER DOTLESS I "\u0138": "q", # LATIN SMALL LETTER KRA (collates with q in DUCET) "\u0141": "L", # LATIN CAPITAL LETTER L WITH STROKE "\u0142": "l", # LATIN SMALL LETTER L WITH STROKE "\u0149": "'n", # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE (from ‹character-fallback›) "\u014A": "N", # LATIN CAPITAL LETTER ENG "\u014B": "n", # LATIN SMALL LETTER ENG "\u0152": "OE", # LATIN CAPITAL LIGATURE OE (from ‹character-fallback›) "\u0153": "oe", # LATIN SMALL LIGATURE OE (from ‹character-fallback›) "\u0166": "T", # LATIN CAPITAL LETTER T WITH STROKE "\u0167": "t", # LATIN SMALL LETTER T WITH STROKE "\u0180": "b", # LATIN SMALL LETTER B WITH STROKE "\u0181": "B", # LATIN CAPITAL LETTER B WITH HOOK "\u0182": "B", # LATIN CAPITAL LETTER B WITH TOPBAR "\u0183": "b", # LATIN SMALL LETTER B WITH TOPBAR "\u0187": "C", # LATIN CAPITAL LETTER C WITH HOOK "\u0188": "c", # LATIN SMALL LETTER C WITH HOOK "\u0189": "D", # LATIN CAPITAL LETTER AFRICAN D "\u018A": "D", # LATIN CAPITAL LETTER D WITH HOOK "\u018B": "D", # LATIN CAPITAL LETTER D WITH TOPBAR "\u018C": "d", # LATIN SMALL LETTER D WITH TOPBAR "\u0190": "E", # LATIN CAPITAL LETTER OPEN E "\u0191": "F", # LATIN CAPITAL LETTER F WITH HOOK "\u0192": "f", # LATIN SMALL LETTER F WITH HOOK "\u0193": "G", # LATIN CAPITAL LETTER G WITH HOOK "\u0195": "hv", # LATIN SMALL LETTER HV "\u0196": "I", # LATIN CAPITAL LETTER IOTA "\u0197": "I", # LATIN CAPITAL LETTER I WITH STROKE "\u0198": "K", # LATIN CAPITAL LETTER K WITH HOOK "\u0199": "k", # LATIN SMALL LETTER K WITH HOOK "\u019A": "l", # LATIN SMALL LETTER L WITH BAR "\u019D": "N", # LATIN CAPITAL LETTER N WITH LEFT HOOK "\u019E": "n", # LATIN SMALL LETTER N WITH LONG RIGHT LEG "\u01A2": "GH", # LATIN CAPITAL LETTER GHA (see http://unicode.org/notes/tn27/) "\u01A3": "gh", # LATIN SMALL LETTER GHA (see http://unicode.org/notes/tn27/) "\u01A4": "P", # LATIN CAPITAL LETTER P WITH HOOK "\u01A5": "p", # LATIN SMALL LETTER P WITH HOOK "\u01AB": "t", # LATIN SMALL LETTER T WITH PALATAL HOOK "\u01AC": "T", # LATIN CAPITAL LETTER T WITH HOOK "\u01AD": "t", # LATIN SMALL LETTER T WITH HOOK "\u01AE": "T", # LATIN CAPITAL LETTER T WITH RETROFLEX HOOK "\u01B2": "V", # LATIN CAPITAL LETTER V WITH HOOK "\u01B3": "Y", # LATIN CAPITAL LETTER Y WITH HOOK "\u01B4": "y", # LATIN SMALL LETTER Y WITH HOOK "\u01B5": "Z", # LATIN CAPITAL LETTER Z WITH STROKE "\u01B6": "z", # LATIN SMALL LETTER Z WITH STROKE "\u01C4": "DZ", # LATIN CAPITAL LETTER DZ WITH CARON (compat) "\u01C5": "Dz", # LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON (compat) "\u01C6": "dz", # LATIN SMALL LETTER DZ WITH CARON (compat) "\u01E4": "G", # LATIN CAPITAL LETTER G WITH STROKE "\u01E5": "g", # LATIN SMALL LETTER G WITH STROKE "\u0221": "d", # LATIN SMALL LETTER D WITH CURL "\u0224": "Z", # LATIN CAPITAL LETTER Z WITH HOOK "\u0225": "z", # LATIN SMALL LETTER Z WITH HOOK "\u0234": "l", # LATIN SMALL LETTER L WITH CURL "\u0235": "n", # LATIN SMALL LETTER N WITH CURL "\u0236": "t", # LATIN SMALL LETTER T WITH CURL "\u0237": "j", # LATIN SMALL LETTER DOTLESS J "\u0238": "db", # LATIN SMALL LETTER DB DIGRAPH "\u0239": "qp", # LATIN SMALL LETTER QP DIGRAPH "\u023A": "A", # LATIN CAPITAL LETTER A WITH STROKE "\u023B": "C", # LATIN CAPITAL LETTER C WITH STROKE "\u023C": "c", # LATIN SMALL LETTER C WITH STROKE "\u023D": "L", # LATIN CAPITAL LETTER L WITH BAR "\u023E": "T", # LATIN CAPITAL LETTER T WITH DIAGONAL STROKE "\u023F": "s", # LATIN SMALL LETTER S WITH SWASH TAIL "\u0240": "z", # LATIN SMALL LETTER Z WITH SWASH TAIL "\u0243": "B", # LATIN CAPITAL LETTER B WITH STROKE "\u0244": "U", # LATIN CAPITAL LETTER U BAR "\u0246": "E", # LATIN CAPITAL LETTER E WITH STROKE "\u0247": "e", # LATIN SMALL LETTER E WITH STROKE "\u0248": "J", # LATIN CAPITAL LETTER J WITH STROKE "\u0249": "j", # LATIN SMALL LETTER J WITH STROKE "\u024C": "R", # LATIN CAPITAL LETTER R WITH STROKE "\u024D": "r", # LATIN SMALL LETTER R WITH STROKE "\u024E": "Y", # LATIN CAPITAL LETTER Y WITH STROKE "\u024F": "y", # LATIN SMALL LETTER Y WITH STROKE "\u0253": "b", # LATIN SMALL LETTER B WITH HOOK "\u0255": "c", # LATIN SMALL LETTER C WITH CURL "\u0256": "d", # LATIN SMALL LETTER D WITH TAIL "\u0257": "d", # LATIN SMALL LETTER D WITH HOOK "\u025B": "e", # LATIN SMALL LETTER OPEN E "\u025F": "j", # LATIN SMALL LETTER DOTLESS J WITH STROKE "\u0260": "g", # LATIN SMALL LETTER G WITH HOOK "\u0261": "g", # LATIN SMALL LETTER SCRIPT G "\u0262": "G", # LATIN LETTER SMALL CAPITAL G "\u0266": "h", # LATIN SMALL LETTER H WITH HOOK "\u0267": "h", # LATIN SMALL LETTER HENG WITH HOOK "\u0268": "i", # LATIN SMALL LETTER I WITH STROKE "\u026A": "I", # LATIN LETTER SMALL CAPITAL I "\u026B": "l", # LATIN SMALL LETTER L WITH MIDDLE TILDE "\u026C": "l", # LATIN SMALL LETTER L WITH BELT "\u026D": "l", # LATIN SMALL LETTER L WITH RETROFLEX HOOK "\u0271": "m", # LATIN SMALL LETTER M WITH HOOK "\u0272": "n", # LATIN SMALL LETTER N WITH LEFT HOOK "\u0273": "n", # LATIN SMALL LETTER N WITH RETROFLEX HOOK "\u0274": "N", # LATIN LETTER SMALL CAPITAL N "\u0276": "OE", # LATIN LETTER SMALL CAPITAL OE "\u027C": "r", # LATIN SMALL LETTER R WITH LONG LEG "\u027D": "r", # LATIN SMALL LETTER R WITH TAIL "\u027E": "r", # LATIN SMALL LETTER R WITH FISHHOOK "\u0280": "R", # LATIN LETTER SMALL CAPITAL R "\u0282": "s", # LATIN SMALL LETTER S WITH HOOK "\u0288": "t", # LATIN SMALL LETTER T WITH RETROFLEX HOOK "\u0289": "u", # LATIN SMALL LETTER U BAR "\u028B": "v", # LATIN SMALL LETTER V WITH HOOK "\u028F": "Y", # LATIN LETTER SMALL CAPITAL Y "\u0290": "z", # LATIN SMALL LETTER Z WITH RETROFLEX HOOK "\u0291": "z", # LATIN SMALL LETTER Z WITH CURL "\u0299": "B", # LATIN LETTER SMALL CAPITAL B "\u029B": "G", # LATIN LETTER SMALL CAPITAL G WITH HOOK "\u029C": "H", # LATIN LETTER SMALL CAPITAL H "\u029D": "j", # LATIN SMALL LETTER J WITH CROSSED-TAIL "\u029F": "L", # LATIN LETTER SMALL CAPITAL L "\u02A0": "q", # LATIN SMALL LETTER Q WITH HOOK "\u02A3": "dz", # LATIN SMALL LETTER DZ DIGRAPH "\u02A5": "dz", # LATIN SMALL LETTER DZ DIGRAPH WITH CURL "\u02A6": "ts", # LATIN SMALL LETTER TS DIGRAPH "\u02AA": "ls", # LATIN SMALL LETTER LS DIGRAPH "\u02AB": "lz", # LATIN SMALL LETTER LZ DIGRAPH "\u1D01": "AE", # LATIN LETTER SMALL CAPITAL AE "\u1D03": "B", # LATIN LETTER SMALL CAPITAL BARRED B "\u1D06": "D", # LATIN LETTER SMALL CAPITAL ETH "\u1D0C": "L", # LATIN LETTER SMALL CAPITAL L WITH STROKE "\u1D6B": "ue", # LATIN SMALL LETTER UE "\u1D6C": "b", # LATIN SMALL LETTER B WITH MIDDLE TILDE "\u1D6D": "d", # LATIN SMALL LETTER D WITH MIDDLE TILDE "\u1D6E": "f", # LATIN SMALL LETTER F WITH MIDDLE TILDE "\u1D6F": "m", # LATIN SMALL LETTER M WITH MIDDLE TILDE "\u1D70": "n", # LATIN SMALL LETTER N WITH MIDDLE TILDE "\u1D71": "p", # LATIN SMALL LETTER P WITH MIDDLE TILDE "\u1D72": "r", # LATIN SMALL LETTER R WITH MIDDLE TILDE "\u1D73": "r", # LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE "\u1D74": "s", # LATIN SMALL LETTER S WITH MIDDLE TILDE "\u1D75": "t", # LATIN SMALL LETTER T WITH MIDDLE TILDE "\u1D76": "z", # LATIN SMALL LETTER Z WITH MIDDLE TILDE "\u1D7A": "th", # LATIN SMALL LETTER TH WITH STRIKETHROUGH "\u1D7B": "I", # LATIN SMALL CAPITAL LETTER I WITH STROKE "\u1D7D": "p", # LATIN SMALL LETTER P WITH STROKE "\u1D7E": "U", # LATIN SMALL CAPITAL LETTER U WITH STROKE "\u1D80": "b", # LATIN SMALL LETTER B WITH PALATAL HOOK "\u1D81": "d", # LATIN SMALL LETTER D WITH PALATAL HOOK "\u1D82": "f", # LATIN SMALL LETTER F WITH PALATAL HOOK "\u1D83": "g", # LATIN SMALL LETTER G WITH PALATAL HOOK "\u1D84": "k", # LATIN SMALL LETTER K WITH PALATAL HOOK "\u1D85": "l", # LATIN SMALL LETTER L WITH PALATAL HOOK "\u1D86": "m", # LATIN SMALL LETTER M WITH PALATAL HOOK "\u1D87": "n", # LATIN SMALL LETTER N WITH PALATAL HOOK "\u1D88": "p", # LATIN SMALL LETTER P WITH PALATAL HOOK "\u1D89": "r", # LATIN SMALL LETTER R WITH PALATAL HOOK "\u1D8A": "s", # LATIN SMALL LETTER S WITH PALATAL HOOK "\u1D8C": "v", # LATIN SMALL LETTER V WITH PALATAL HOOK "\u1D8D": "x", # LATIN SMALL LETTER X WITH PALATAL HOOK "\u1D8E": "z", # LATIN SMALL LETTER Z WITH PALATAL HOOK "\u1D8F": "a", # LATIN SMALL LETTER A WITH RETROFLEX HOOK "\u1D91": "d", # LATIN SMALL LETTER D WITH HOOK AND TAIL "\u1D92": "e", # LATIN SMALL LETTER E WITH RETROFLEX HOOK "\u1D93": "e", # LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK "\u1D96": "i", # LATIN SMALL LETTER I WITH RETROFLEX HOOK "\u1D99": "u", # LATIN SMALL LETTER U WITH RETROFLEX HOOK "\u1E9A": "a", # LATIN SMALL LETTER A WITH RIGHT HALF RING "\u1E9C": "s", # LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE "\u1E9D": "s", # LATIN SMALL LETTER LONG S WITH HIGH STROKE "\u1E9E": "SS", # LATIN CAPITAL LETTER SHARP S "\u1EFA": "LL", # LATIN CAPITAL LETTER MIDDLE-WELSH LL "\u1EFB": "ll", # LATIN SMALL LETTER MIDDLE-WELSH LL "\u1EFC": "V", # LATIN CAPITAL LETTER MIDDLE-WELSH V "\u1EFD": "v", # LATIN SMALL LETTER MIDDLE-WELSH V "\u1EFE": "Y", # LATIN CAPITAL LETTER Y WITH LOOP "\u1EFF": "y", # LATIN SMALL LETTER Y WITH LOOP "\u00A9": "(C)", # COPYRIGHT SIGN (from ‹character-fallback›) "\u00AE": "(R)", # REGISTERED SIGN (from ‹character-fallback›) "\u20A0": "CE", # EURO-CURRENCY SIGN (from ‹character-fallback›) "\u20A2": "Cr", # CRUZEIRO SIGN (from ‹character-fallback›) "\u20A3": "Fr.", # FRENCH FRANC SIGN (from ‹character-fallback›) "\u20A4": "L.", # LIRA SIGN (from ‹character-fallback›) "\u20A7": "Pts", # PESETA SIGN (from ‹character-fallback›) "\u20BA": "TL", # TURKISH LIRA SIGN (from ‹character-fallback›) "\u20B9": "Rs", # INDIAN RUPEE SIGN (from ‹character-fallback›) "\u211E": "Rx", # PRESCRIPTION TAKE (from ‹character-fallback›) "\u33A7": "m/s", # SQUARE M OVER S (compat) (from ‹character-fallback›) "\u33AE": "rad/s", # SQUARE RAD OVER S (compat) (from ‹character-fallback›) "\u33C6": "C/kg", # SQUARE C OVER KG (compat) (from ‹character-fallback›) "\u33DE": "V/m", # SQUARE V OVER M (compat) (from ‹character-fallback›) "\u33DF": "A/m", # SQUARE A OVER M (compat) (from ‹character-fallback›) "\u00BC": " 1/4", # VULGAR FRACTION ONE QUARTER (from ‹character-fallback›) "\u00BD": " 1/2", # VULGAR FRACTION ONE HALF (from ‹character-fallback›) "\u00BE": " 3/4", # VULGAR FRACTION THREE QUARTERS (from ‹character-fallback›) "\u2153": " 1/3", # VULGAR FRACTION ONE THIRD (from ‹character-fallback›) "\u2154": " 2/3", # VULGAR FRACTION TWO THIRDS (from ‹character-fallback›) "\u2155": " 1/5", # VULGAR FRACTION ONE FIFTH (from ‹character-fallback›) "\u2156": " 2/5", # VULGAR FRACTION TWO FIFTHS (from ‹character-fallback›) "\u2157": " 3/5", # VULGAR FRACTION THREE FIFTHS (from ‹character-fallback›) "\u2158": " 4/5", # VULGAR FRACTION FOUR FIFTHS (from ‹character-fallback›) "\u2159": " 1/6", # VULGAR FRACTION ONE SIXTH (from ‹character-fallback›) "\u215A": " 5/6", # VULGAR FRACTION FIVE SIXTHS (from ‹character-fallback›) "\u215B": " 1/8", # VULGAR FRACTION ONE EIGHTH (from ‹character-fallback›) "\u215C": " 3/8", # VULGAR FRACTION THREE EIGHTHS (from ‹character-fallback›) "\u215D": " 5/8", # VULGAR FRACTION FIVE EIGHTHS (from ‹character-fallback›) "\u215E": " 7/8", # VULGAR FRACTION SEVEN EIGHTHS (from ‹character-fallback›) "\u215F": " 1/", # FRACTION NUMERATOR ONE (from ‹character-fallback›) "\u3001": ",", # IDEOGRAPHIC COMMA "\u3002": ".", # IDEOGRAPHIC FULL STOP "\u00D7": "x", # MULTIPLICATION SIGN "\u00F7": "/", # DIVISION SIGN "\u00B7": ".", # MIDDLE DOT "\u1E9F": "dd", # LATIN SMALL LETTER DELTA "\u0184": "H", # LATIN CAPITAL LETTER TONE SIX "\u0185": "h", # LATIN SMALL LETTER TONE SIX "\u01BE": "ts", # LATIN LETTER TS LIGATION (see http://unicode.org/notes/tn27/) } def _replace_unicode_simplify_combinations(char, pathsave, win_compat): result = _simplify_combinations.get(char) if result is None: return char elif not pathsave: return result else: return sanitize_filename(result, win_compat=win_compat) def unicode_simplify_combinations(string, pathsave=False, win_compat=False): return "".join( _replace_unicode_simplify_combinations(c, pathsave, win_compat) for c in string ) def unicode_simplify_accents(string): result = "".join( c for c in unicodedata.normalize("NFKD", string) if not unicodedata.combining(c) ) return result def asciipunct(string): interim = unicode_simplify_compatibility(string) return unicode_simplify_punctuation(interim) def unaccent(string): """Remove accents ``string``.""" return unicode_simplify_accents(string) def replace_non_ascii(string, repl="_", pathsave=False, win_compat=False): """Replace non-ASCII characters from ``string`` by ``repl``.""" interim = unicode_simplify_combinations(string, pathsave, win_compat) interim = unicode_simplify_punctuation(interim, pathsave, win_compat) interim = unicode_simplify_compatibility(interim, pathsave, win_compat) interim = unicode_simplify_accents(interim) def error_repl(e, repl="_"): return (repl, e.start + 1) codecs.register_error("repl", partial(error_repl, repl=repl)) # Decoding and encoding to allow replacements return interim.encode("ascii", "repl").decode("ascii") def _replace_char(map, ch, pathsave=False, win_compat=False): try: result = map[ch] if ch != result and pathsave: result = sanitize_filename(result, win_compat=win_compat) return result except KeyError: return ch
dialogs
addtopersonalchanneldialog
import json from PyQt5 import QtWidgets, uic from PyQt5.QtCore import pyqtSignal from tribler.core.components.metadata_store.db.serialization import ( CHANNEL_TORRENT, COLLECTION_NODE, ) from tribler.gui.dialogs.dialogcontainer import DialogContainer from tribler.gui.dialogs.new_channel_dialog import NewChannelDialog from tribler.gui.network.request_manager import request_manager from tribler.gui.utilities import connect, get_ui_file_path class ChannelQTreeWidgetItem(QtWidgets.QTreeWidgetItem): def __init__(self, *args, **kwargs): self.id_ = kwargs.pop("id_") if "id_" in kwargs else 0 QtWidgets.QTreeWidgetItem.__init__(self, *args, **kwargs) class AddToChannelDialog(DialogContainer): create_torrent_notification = pyqtSignal(dict) def __init__(self, parent): DialogContainer.__init__(self, parent) uic.loadUi(get_ui_file_path("addtochanneldialog.ui"), self.dialog_widget) connect(self.dialog_widget.btn_cancel.clicked, self.close_dialog) connect(self.dialog_widget.btn_confirm.clicked, self.on_confirm_clicked) connect( self.dialog_widget.btn_new_channel.clicked, self.on_create_new_channel_clicked, ) connect( self.dialog_widget.btn_new_folder.clicked, self.on_create_new_folder_clicked ) self.confirm_clicked_callback = None self.root_requests_list = [] self.channels_tree = {} self.id2wt_mapping = {0: self.dialog_widget.channels_tree_wt} connect(self.dialog_widget.channels_tree_wt.itemExpanded, self.on_item_expanded) self.dialog_widget.channels_tree_wt.setHeaderLabels(["Name"]) self.on_main_window_resize() def on_new_channel_response(self, response): if not response or not response.get("results", None): return self.window().channels_menu_list.reload_if_necessary(response["results"]) self.load_channel(response["results"][0]["origin_id"]) def on_create_new_channel_clicked(self, checked): def create_channel_callback(channel_name=None): request_manager.post( "channels/mychannel/0/channels", self.on_new_channel_response, data=json.dumps({"name": channel_name}) if channel_name else None, ) NewChannelDialog(self, create_channel_callback) def on_create_new_folder_clicked(self, checked): selected = self.dialog_widget.channels_tree_wt.selectedItems() if not selected: return channel_id = selected[0].id_ postfix = "channels" if not channel_id else "collections" endpoint = f"channels/mychannel/{channel_id}/{postfix}" def create_channel_callback(channel_name=None): request_manager.post( endpoint, self.on_new_channel_response, data=json.dumps({"name": channel_name}) if channel_name else None, ) NewChannelDialog(self, create_channel_callback) def clear_channels_tree(self): # ACHTUNG! All running requests must always be cancelled first to prevent race condition! for rq in self.root_requests_list: rq.cancel() self.dialog_widget.channels_tree_wt.clear() self.id2wt_mapping = {0: self.dialog_widget.channels_tree_wt} self.load_channel(0) def show_dialog(self, on_confirm, confirm_button_text="CONFIRM_BUTTON"): self.dialog_widget.btn_confirm.setText(confirm_button_text) self.show() self.confirm_clicked_callback = on_confirm def on_item_expanded(self, item): # Load the grand-children for channel_id in self.channels_tree.get(item.id_, None): # "None" means that the node was previously loaded and has no children # Empty set means it is still not known if it has children or not # Non-empty set means it was already loaded before subchannels_set = self.channels_tree.get(channel_id, set()) if subchannels_set is None or subchannels_set: continue self.load_channel(channel_id) def load_channel(self, channel_id): request = request_manager.get( f"channels/mychannel/{channel_id}", on_success=lambda result: self.on_channel_contents(result, channel_id), url_params={ "metadata_type": [CHANNEL_TORRENT, COLLECTION_NODE], "first": 1, "last": 1000, "exclude_deleted": True, }, ) if request: self.root_requests_list.append(request) def get_selected_channel_id(self): selected = self.dialog_widget.channels_tree_wt.selectedItems() return None if not selected else selected[0].id_ def on_confirm_clicked(self, checked): channel_id = self.get_selected_channel_id() if channel_id is None: return if self.confirm_clicked_callback: self.confirm_clicked_callback(channel_id) self.close_dialog() def on_channel_contents(self, response, channel_id): if not response: return # No results means this node is a leaf self.channels_tree[channel_id] = set() if response.get("results") else None for subchannel in response.get("results", []): subchannel_id = subchannel["id"] if subchannel_id in self.id2wt_mapping: continue wt = ChannelQTreeWidgetItem( self.id2wt_mapping[channel_id], [subchannel["name"]], id_=subchannel_id ) self.id2wt_mapping[subchannel_id] = wt # Add the received node to the tree self.channels_tree[channel_id].add(subchannel_id) # For top-level channels, we want to immediately load their children so "expand" arrows are shown if channel_id == 0: self.load_channel(subchannel_id) def close_dialog(self, checked=False): # Instead of deleting the dialog, hide it. We do this for two reasons: # a. we do not want to lose the channels tree structure loaded from the core. # b. we want the tree state (open subtrees, selection) to stay the same, as the user is # likely to put stuff into the same channel they did before. self.hide()
mobi
mobiml
''' Transform XHTML/OPS-ish content into Mobipocket HTML 3.2. ''' from __future__ import with_statement __license__ = 'GPL v3' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.cam>' import copy import re from calibre.ebooks.oeb.base import (XHTML, XHTML_NS, barename, namespace, urlnormalize) from calibre.ebooks.oeb.stylizer import Stylizer from calibre.ebooks.oeb.transforms.flatcss import KeyMapper from calibre.utils.img import identify_data from lxml import etree MBP_NS = 'http://mobipocket.com/ns/mbp' def MBP(name): return '{%s}%s' % (MBP_NS, name) MOBI_NSMAP = {None: XHTML_NS, 'mbp': MBP_NS} INLINE_TAGS = {'span', 'a', 'code', 'u', 's', 'big', 'strike', 'tt', 'font', 'q', 'i', 'b', 'em', 'strong', 'sup', 'sub'} HEADER_TAGS = set(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']) # GR: Added 'caption' to both sets NESTABLE_TAGS = set(['ol', 'ul', 'li', 'table', 'tr', 'td', 'th', 'caption']) TABLE_TAGS = set(['table', 'tr', 'td', 'th', 'caption']) SPECIAL_TAGS = set(['hr', 'br']) CONTENT_TAGS = set(['img', 'hr', 'br']) NOT_VTAGS = HEADER_TAGS | NESTABLE_TAGS | TABLE_TAGS | SPECIAL_TAGS | \ CONTENT_TAGS LEAF_TAGS = set(['base', 'basefont', 'frame', 'link', 'meta', 'area', 'br', 'col', 'hr', 'img', 'input', 'param']) PAGE_BREAKS = set(['always', 'left', 'right']) COLLAPSE = re.compile(r'[ \t\r\n\v]+') def asfloat(value): if not isinstance(value, (int, long, float)): return 0.0 return float(value) def isspace(text): if not text: return True if u'\xa0' in text: return False return text.isspace() class BlockState(object): def __init__(self, body): self.body = body self.nested = [] self.para = None self.inline = None self.anchor = None self.vpadding = 0. self.vmargin = 0. self.pbreak = False self.istate = None self.content = False class FormatState(object): def __init__(self): self.rendered = False self.left = 0. self.halign = 'auto' self.indent = 0. self.fsize = 3 self.ids = set() self.italic = False self.bold = False self.strikethrough = False self.underline = False self.preserve = False self.family = 'serif' self.bgcolor = 'transparent' self.fgcolor = 'black' self.href = None self.list_num = 0 self.attrib = {} def __eq__(self, other): return self.fsize == other.fsize \ and self.italic == other.italic \ and self.bold == other.bold \ and self.href == other.href \ and self.preserve == other.preserve \ and self.family == other.family \ and self.bgcolor == other.bgcolor \ and self.fgcolor == other.fgcolor \ and self.strikethrough == other.strikethrough \ and self.underline == other.underline def __ne__(self, other): return not self.__eq__(other) class MobiMLizer(object): def __init__(self, ignore_tables=False): self.ignore_tables = ignore_tables def __call__(self, oeb, context): oeb.logger.info('Converting XHTML to Mobipocket markup...') self.oeb = oeb self.log = self.oeb.logger self.opts = context self.profile = profile = context.dest self.fnums = fnums = dict((v, k) for k, v in profile.fnums.items()) self.fmap = KeyMapper(profile.fbase, profile.fbase, fnums.keys()) self.mobimlize_spine() def mobimlize_spine(self): 'Iterate over the spine and convert it to MOBIML' for item in self.oeb.spine: stylizer = Stylizer(item.data, item.href, self.oeb, self.opts, self.profile) body = item.data.find(XHTML('body')) nroot = etree.Element(XHTML('html'), nsmap=MOBI_NSMAP) nbody = etree.SubElement(nroot, XHTML('body')) self.current_spine_item = item self.mobimlize_elem(body, stylizer, BlockState(nbody), [FormatState()]) item.data = nroot #print etree.tostring(nroot) def mobimlize_font(self, ptsize): return self.fnums[self.fmap[ptsize]] def mobimlize_measure(self, ptsize): if isinstance(ptsize, basestring): return ptsize embase = self.profile.fbase if round(ptsize) < embase: return "%dpt" % int(round(ptsize)) return "%dem" % int(round(ptsize / embase)) def preize_text(self, text): text = unicode(text).replace(u' ', u'\xa0') text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') lines = text.split('\n') result = lines[:1] for line in lines[1:]: result.append(etree.Element(XHTML('br'))) if line: result.append(line) return result def mobimlize_content(self, tag, text, bstate, istates): 'Convert text content' if text or tag != 'br': bstate.content = True istate = istates[-1] para = bstate.para if tag in SPECIAL_TAGS and not text: para = para if para is not None else bstate.body elif para is None or tag in ('td', 'th'): body = bstate.body if bstate.pbreak: etree.SubElement(body, MBP('pagebreak')) bstate.pbreak = False bstate.istate = None bstate.anchor = None parent = bstate.nested[-1] if bstate.nested else bstate.body indent = istate.indent left = istate.left if isinstance(indent, basestring): indent = 0 if indent < 0 and abs(indent) < left: left += indent indent = 0 elif indent != 0 and abs(indent) < self.profile.fbase: indent = (indent / abs(indent)) * self.profile.fbase if tag in NESTABLE_TAGS and not istate.rendered: para = wrapper = etree.SubElement( parent, XHTML(tag), attrib=istate.attrib) bstate.nested.append(para) if tag == 'li' and len(istates) > 1: istates[-2].list_num += 1 para.attrib['value'] = str(istates[-2].list_num) elif tag in NESTABLE_TAGS and istate.rendered: para = wrapper = bstate.nested[-1] elif not self.opts.mobi_ignore_margins and left > 0 and indent >= 0: ems = self.profile.mobi_ems_per_blockquote para = wrapper = etree.SubElement(parent, XHTML('blockquote')) para = wrapper emleft = int(round(left / self.profile.fbase)) - ems emleft = min((emleft, 10)) while emleft > ems/2.0: para = etree.SubElement(para, XHTML('blockquote')) emleft -= ems else: para = wrapper = etree.SubElement(parent, XHTML('p')) bstate.inline = bstate.para = para vspace = bstate.vpadding + bstate.vmargin bstate.vpadding = bstate.vmargin = 0 if tag not in TABLE_TAGS: if tag in ('ul', 'ol') and vspace > 0: wrapper.addprevious(etree.Element(XHTML('div'), height=self.mobimlize_measure(vspace))) else: wrapper.attrib['height'] = self.mobimlize_measure(vspace) para.attrib['width'] = self.mobimlize_measure(indent) elif tag == 'table' and vspace > 0: vspace = int(round(vspace / self.profile.fbase)) while vspace > 0: wrapper.addprevious(etree.Element(XHTML('br'))) vspace -= 1 if istate.halign != 'auto' and isinstance(istate.halign, (str, unicode)): para.attrib['align'] = istate.halign istate.rendered = True pstate = bstate.istate if tag in CONTENT_TAGS: bstate.inline = para pstate = bstate.istate = None try: etree.SubElement(para, XHTML(tag), attrib=istate.attrib) except: print 'Invalid subelement:', para, tag, istate.attrib raise elif tag in TABLE_TAGS: para.attrib['valign'] = 'top' if istate.ids: for id_ in istate.ids: anchor = etree.Element(XHTML('a'), attrib={'id': id_}) if tag == 'li': try: last = bstate.body[-1][-1] except: break last.insert(0, anchor) anchor.tail = last.text last.text = None else: last = bstate.body[-1] # We use append instead of addprevious so that inline # anchors in large blocks point to the correct place. See # https://bugs.launchpad.net/calibre/+bug/899831 # This could potentially break if inserting an anchor at # this point in the markup is illegal, but I cannot think # of such a case offhand. if barename(last.tag) in LEAF_TAGS: last.addprevious(anchor) else: last.append(anchor) istate.ids.clear() if not text: return if not pstate or istate != pstate: inline = para fsize = istate.fsize href = istate.href if not href: bstate.anchor = None elif pstate and pstate.href == href: inline = bstate.anchor else: inline = etree.SubElement(inline, XHTML('a'), href=href) bstate.anchor = inline if fsize != 3: inline = etree.SubElement(inline, XHTML('font'), size=str(fsize)) if istate.family == 'monospace': inline = etree.SubElement(inline, XHTML('tt')) if istate.italic: inline = etree.SubElement(inline, XHTML('i')) if istate.bold: inline = etree.SubElement(inline, XHTML('b')) if istate.bgcolor is not None and istate.bgcolor != 'transparent' : inline = etree.SubElement(inline, XHTML('span'), bgcolor=istate.bgcolor) if istate.fgcolor != 'black': inline = etree.SubElement(inline, XHTML('font'), color=unicode(istate.fgcolor)) if istate.strikethrough: inline = etree.SubElement(inline, XHTML('s')) if istate.underline: inline = etree.SubElement(inline, XHTML('u')) bstate.inline = inline bstate.istate = istate inline = bstate.inline content = self.preize_text(text) if istate.preserve else [text] for item in content: if isinstance(item, basestring): if len(inline) == 0: inline.text = (inline.text or '') + item else: last = inline[-1] last.tail = (last.tail or '') + item else: inline.append(item) def mobimlize_elem(self, elem, stylizer, bstate, istates, ignore_valign=False): if not isinstance(elem.tag, basestring) \ or namespace(elem.tag) != XHTML_NS: return style = stylizer.style(elem) # <mbp:frame-set/> does not exist lalalala if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \ or style['visibility'] == 'hidden': id_ = elem.get('id', None) if id_: # Keep anchors so people can use display:none # to generate hidden TOCs tail = elem.tail elem.clear() elem.text = None elem.set('id', id_) elem.tail = tail elem.tag = XHTML('a') else: return tag = barename(elem.tag) istate = copy.copy(istates[-1]) istate.rendered = False istate.list_num = 0 if tag == 'ol' and 'start' in elem.attrib: try: istate.list_num = int(elem.attrib['start'])-1 except: pass istates.append(istate) left = 0 display = style['display'] if display == 'table-cell': display = 'inline' elif display.startswith('table'): display = 'block' isblock = (not display.startswith('inline') and style['display'] != 'none') isblock = isblock and style['float'] == 'none' isblock = isblock and tag != 'br' if isblock: bstate.para = None istate.halign = style['text-align'] istate.indent = style['text-indent'] if style['margin-left'] == 'auto' \ and style['margin-right'] == 'auto': istate.halign = 'center' margin = asfloat(style['margin-left']) padding = asfloat(style['padding-left']) if tag != 'body': left = margin + padding istate.left += left vmargin = asfloat(style['margin-top']) bstate.vmargin = max((bstate.vmargin, vmargin)) vpadding = asfloat(style['padding-top']) if vpadding > 0: bstate.vpadding += bstate.vmargin bstate.vmargin = 0 bstate.vpadding += vpadding elif not istate.href: margin = asfloat(style['margin-left']) padding = asfloat(style['padding-left']) lspace = margin + padding if lspace > 0: spaces = int(round((lspace * 3) / style['font-size'])) elem.text = (u'\xa0' * spaces) + (elem.text or '') margin = asfloat(style['margin-right']) padding = asfloat(style['padding-right']) rspace = margin + padding if rspace > 0: spaces = int(round((rspace * 3) / style['font-size'])) if len(elem) == 0: elem.text = (elem.text or '') + (u'\xa0' * spaces) else: last = elem[-1] last.text = (last.text or '') + (u'\xa0' * spaces) if bstate.content and style['page-break-before'] in PAGE_BREAKS: bstate.pbreak = True istate.fsize = self.mobimlize_font(style['font-size']) istate.italic = True if style['font-style'] == 'italic' else False weight = style['font-weight'] istate.bold = weight in ('bold', 'bolder') or asfloat(weight) > 400 istate.preserve = (style['white-space'] in ('pre', 'pre-wrap')) istate.bgcolor = style['background-color'] istate.fgcolor = style['color'] istate.strikethrough = style.effective_text_decoration == 'line-through' istate.underline = style.effective_text_decoration == 'underline' ff = style['font-family'].lower() if style['font-family'] else '' if 'monospace' in ff or 'courier' in ff or ff.endswith(' mono'): istate.family = 'monospace' elif ('sans-serif' in ff or 'sansserif' in ff or 'verdana' in ff or 'arial' in ff or 'helvetica' in ff): istate.family = 'sans-serif' else: istate.family = 'serif' if 'id' in elem.attrib: istate.ids.add(elem.attrib['id']) if 'name' in elem.attrib: istate.ids.add(elem.attrib['name']) if tag == 'a' and 'href' in elem.attrib: istate.href = elem.attrib['href'] istate.attrib.clear() if tag == 'img' and 'src' in elem.attrib: istate.attrib['src'] = elem.attrib['src'] istate.attrib['align'] = 'baseline' cssdict = style.cssdict() valign = cssdict.get('vertical-align', None) if valign in ('top', 'bottom', 'middle'): istate.attrib['align'] = valign for prop in ('width', 'height'): if cssdict.get(prop, None) != 'auto': value = style[prop] if value == getattr(self.profile, prop): result = '100%' else: # Amazon's renderer does not support # img sizes in units other than px # See #7520 for test case try: pixs = int(round(float(value) / (72./self.profile.dpi))) except: continue result = str(pixs) istate.attrib[prop] = result if 'width' not in istate.attrib or 'height' not in istate.attrib: href = self.current_spine_item.abshref(elem.attrib['src']) try: item = self.oeb.manifest.hrefs[urlnormalize(href)] except: self.oeb.logger.warn('Failed to find image:%s' % str(href)) else: try: width, height = identify_data(item.data)[:2] except: self.oeb.logger.warn('Invalid image:%s' % str(href)) else: if 'width' not in istate.attrib and 'height' not in \ istate.attrib: istate.attrib['width'] = str(width) istate.attrib['height'] = str(height) else: ar = float(width)/float(height) if 'width' not in istate.attrib: try: width = int(istate.attrib['height'])*ar except: pass istate.attrib['width'] = str(int(width)) else: try: height = int(istate.attrib['width'])/ar except: pass istate.attrib['height'] = str(int(height)) item.unload_data_from_memory() elif tag == 'hr' and asfloat(style['width']) > 0: prop = style['width'] / self.profile.width istate.attrib['width'] = "%d%%" % int(round(prop * 100)) elif display == 'table': tag = 'table' elif display == 'table-row': tag = 'tr' elif display == 'table-cell': tag = 'td' if tag in TABLE_TAGS and self.ignore_tables: tag = 'span' if tag == 'td' else 'div' if tag in ('table', 'td', 'tr'): col = style.backgroundColor if col: elem.set('bgcolor', col) css = style.cssdict() if 'border' in css or 'border-width' in css: elem.set('border', '1') if tag in TABLE_TAGS: for attr in ('rowspan', 'colspan', 'width', 'border', 'scope', 'bgcolor'): if attr in elem.attrib: istate.attrib[attr] = elem.attrib[attr] if tag == 'q': t = elem.text if not t: t = '' elem.text = u'\u201c' + t t = elem.tail if not t: t = '' elem.tail = u'\u201d' + t text = None if elem.text: if istate.preserve: text = elem.text elif (len(elem) > 0 and isspace(elem.text) and hasattr(elem[0].tag, 'rpartition') and elem[0].tag.rpartition('}')[-1] not in INLINE_TAGS): text = None else: text = COLLAPSE.sub(' ', elem.text) valign = style['vertical-align'] not_baseline = valign in ('super', 'sub', 'text-top', 'text-bottom', 'top', 'bottom') or ( isinstance(valign, (float, int)) and abs(valign) != 0) issup = valign in ('super', 'text-top', 'top') or ( isinstance(valign, (float, int)) and valign > 0) vtag = 'sup' if issup else 'sub' if not_baseline and not ignore_valign and tag not in NOT_VTAGS and not isblock: nroot = etree.Element(XHTML('html'), nsmap=MOBI_NSMAP) vbstate = BlockState(etree.SubElement(nroot, XHTML('body'))) vbstate.para = etree.SubElement(vbstate.body, XHTML('p')) self.mobimlize_elem(elem, stylizer, vbstate, istates, ignore_valign=True) if len(istates) > 0: istates.pop() if len(istates) == 0: istates.append(FormatState()) at_start = bstate.para is None if at_start: self.mobimlize_content('span', '', bstate, istates) parent = bstate.para if bstate.inline is None else bstate.inline if parent is not None: vtag = etree.SubElement(parent, XHTML(vtag)) vtag = etree.SubElement(vtag, XHTML('small')) # Add anchors for child in vbstate.body: if child is not vbstate.para: vtag.append(child) else: break if vbstate.para is not None: for child in vbstate.para: vtag.append(child) return if tag == 'blockquote': old_mim = self.opts.mobi_ignore_margins self.opts.mobi_ignore_margins = False if (text or tag in CONTENT_TAGS or tag in NESTABLE_TAGS or ( # We have an id but no text and no children, the id should still # be added. istate.ids and tag in ('a', 'span', 'i', 'b', 'u') and len(elem)==0)): self.mobimlize_content(tag, text, bstate, istates) for child in elem: self.mobimlize_elem(child, stylizer, bstate, istates) tail = None if child.tail: if istate.preserve: tail = child.tail elif bstate.para is None and isspace(child.tail): tail = None else: tail = COLLAPSE.sub(' ', child.tail) if tail: self.mobimlize_content(tag, tail, bstate, istates) if tag == 'blockquote': self.opts.mobi_ignore_margins = old_mim if bstate.content and style['page-break-after'] in PAGE_BREAKS: bstate.pbreak = True if isblock: para = bstate.para if para is not None and para.text == u'\xa0' and len(para) < 1: if style.height > 2: para.getparent().replace(para, etree.Element(XHTML('br'))) else: # This is too small to be rendered effectively, drop it para.getparent().remove(para) bstate.para = None bstate.istate = None vmargin = asfloat(style['margin-bottom']) bstate.vmargin = max((bstate.vmargin, vmargin)) vpadding = asfloat(style['padding-bottom']) if vpadding > 0: bstate.vpadding += bstate.vmargin bstate.vmargin = 0 bstate.vpadding += vpadding if bstate.nested and bstate.nested[-1].tag == elem.tag: bstate.nested.pop() istates.pop()
models
faster_rcnn_inception_resnet_v2_feature_extractor
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inception Resnet v2 Faster R-CNN implementation. See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) as well as "Speed/accuracy trade-offs for modern convolutional object detectors" by Huang et al. (https://arxiv.org/abs/1611.10012) """ import tensorflow as tf from app.object_detection.meta_architectures import faster_rcnn_meta_arch from nets import inception_resnet_v2 slim = tf.contrib.slim class FasterRCNNInceptionResnetV2FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor ): """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" def __init__( self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, ): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError("`first_stage_features_stride` must be 8 or 16.") super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay, ) def preprocess(self, resized_inputs): """Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. """ if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError( "`preprocessed_inputs` must be 4 dimensional, got a " "tensor of shape %s" % preprocessed_inputs.get_shape() ) with slim.arg_scope( inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay ) ): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with tf.variable_scope( "InceptionResnetV2", reuse=self._reuse_weights ) as scope: rpn_feature_map, _ = inception_resnet_v2.inception_resnet_v2_base( preprocessed_inputs, final_endpoint="PreAuxLogits", scope=scope, output_stride=self._first_stage_features_stride, align_feature_maps=True, ) return rpn_feature_map def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the Inception ResNet v2 network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope("InceptionResnetV2", reuse=self._reuse_weights): with slim.arg_scope( inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay ) ): # Forces is_training to False to disable batch norm update. with slim.arg_scope( [slim.batch_norm], is_training=self._train_batch_norm ): with slim.arg_scope( [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding="SAME", ): with tf.variable_scope("Mixed_7a"): with tf.variable_scope("Branch_0"): tower_conv = slim.conv2d( proposal_feature_maps, 256, 1, scope="Conv2d_0a_1x1" ) tower_conv_1 = slim.conv2d( tower_conv, 384, 3, stride=2, padding="VALID", scope="Conv2d_1a_3x3", ) with tf.variable_scope("Branch_1"): tower_conv1 = slim.conv2d( proposal_feature_maps, 256, 1, scope="Conv2d_0a_1x1" ) tower_conv1_1 = slim.conv2d( tower_conv1, 288, 3, stride=2, padding="VALID", scope="Conv2d_1a_3x3", ) with tf.variable_scope("Branch_2"): tower_conv2 = slim.conv2d( proposal_feature_maps, 256, 1, scope="Conv2d_0a_1x1" ) tower_conv2_1 = slim.conv2d( tower_conv2, 288, 3, scope="Conv2d_0b_3x3" ) tower_conv2_2 = slim.conv2d( tower_conv2_1, 320, 3, stride=2, padding="VALID", scope="Conv2d_1a_3x3", ) with tf.variable_scope("Branch_3"): tower_pool = slim.max_pool2d( proposal_feature_maps, 3, stride=2, padding="VALID", scope="MaxPool_1a_3x3", ) net = tf.concat( [ tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool, ], 3, ) net = slim.repeat( net, 9, inception_resnet_v2.block8, scale=0.20 ) net = inception_resnet_v2.block8(net, activation_fn=None) proposal_classifier_features = slim.conv2d( net, 1536, 1, scope="Conv2d_7b_1x1" ) return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope ): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for InceptionResnetV2 checkpoints. TODO: revisit whether it's possible to force the `Repeat` namescope as created in `_extract_box_classifier_features` to start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can be used. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in tf.global_variables(): if variable.op.name.startswith(first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + "/", "" ) variables_to_restore[var_name] = variable if variable.op.name.startswith(second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + "/InceptionResnetV2/Repeat", "InceptionResnetV2/Repeat_2", ) var_name = var_name.replace( second_stage_feature_extractor_scope + "/", "" ) variables_to_restore[var_name] = variable return variables_to_restore
modeling
stylesheet
from __future__ import annotations import textwrap from gaphor.core.modeling.element import Element from gaphor.core.modeling.event import AttributeUpdated from gaphor.core.modeling.properties import attribute from gaphor.core.styling import CompiledStyleSheet, Style, StyleNode SYSTEM_STYLE_SHEET = textwrap.dedent( """\ /* --start-system-style-sheet-- */ * { --opaque-background-color: white; background-color: transparent; color: black; font-size: 14; line-width: 2; padding: 0; } *:drop { color: #1a5fb4; line-width: 3; } *:disabled { opacity: 0.5; } @media light-mode { * { --opaque-background-color: #fafafa; } } @media dark-mode { * { --opaque-background-color: #242424; color: white; } *:drop { color: #62a0ea; } } dependency, interfacerealization { dash-style: 7 5; } dependency[on_folded_interface = true], interfacerealization[on_folded_interface = true] { dash-style: 0; } controlflow { dash-style: 9 3; } proxyport, activityparameternode, executionspecification { background-color: var(--opaque-background-color); } /* --end-system-style-sheet-- */ """ ) DEFAULT_STYLE_SHEET = textwrap.dedent( """\ diagram { /* line-style: sloppy 0.3; */ } """ ) class StyleSheet(Element): _compiled_style_sheet: CompiledStyleSheet def __init__(self, id=None, model=None): super().__init__(id, model) self._system_font_family = "sans" self.compile_style_sheet() styleSheet: attribute[str] = attribute("styleSheet", str, DEFAULT_STYLE_SHEET) naturalLanguage: attribute[str] = attribute("naturalLanguage", str) @property def system_font_family(self) -> str: return self._system_font_family @system_font_family.setter def system_font_family(self, font_family: str): self._system_font_family = font_family self.compile_style_sheet() def compile_style_sheet(self) -> None: self._compiled_style_sheet = CompiledStyleSheet( SYSTEM_STYLE_SHEET, f"* {{ font-family: {self._system_font_family} }}", self.styleSheet, ) def match(self, node: StyleNode) -> Style: return self._compiled_style_sheet.match(node) def postload(self): super().postload() self.compile_style_sheet() def handle(self, event): # Ensure compiled style sheet is always up-to-date: if ( isinstance(event, AttributeUpdated) and event.property is StyleSheet.styleSheet ): self.compile_style_sheet() super().handle(event)
events
synchronize_to_device
# Copyright 2018 Jan Korte # 2020 Daniel Petrescu # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. import os import shutil import unicodedata from pathlib import Path from gi.repository import Gtk, Pango from quodlibet import _, app, config, get_user_dir, ngettext, qltk, util from quodlibet.pattern import FileFromPattern from quodlibet.plugins import PM, PluginConfigMixin from quodlibet.plugins.events import EventPlugin from quodlibet.qltk import Icons from quodlibet.qltk.cbes import ComboBoxEntrySave from quodlibet.qltk.ccb import ConfigCheckButton from quodlibet.qltk.views import HintedTreeView from quodlibet.query import Query from quodlibet.util import print_d, print_e, print_exc from quodlibet.util.enum import enum from quodlibet.util.path import strip_win32_incompat_from_path from senf import fsn2text PLUGIN_CONFIG_SECTION = "synchronize_to_device" class Entry: """ An entry in the tree of previewed export paths. """ @enum class Tags(str): """ Various tags that will be used in the output. """ EMPTY = "" PENDING_COPY = _("Pending copy") PENDING_DELETE = _("Pending delete") DELETE = _("delete") SKIP = _("Skip") SKIP_DUPLICATE = _("DUPLICATE") IN_PROGRESS_SYNC = _("Synchronizing") IN_PROGRESS_DELETE = _("Deleting") RESULT_SUCCESS = _("Success") RESULT_FAILURE = _("FAILURE") RESULT_SKIP_EXISTING = _("Skipped existing file") def __init__(self, song, export_path=None): self._song = song self.export_path = export_path or "" self.tag = self.Tags.EMPTY self._filename = None @property def filename(self): if self._song is not None: return fsn2text(self._song("~filename")) else: return self._filename @filename.setter def filename(self, name): if self._song is None: self._filename = name else: raise ValueError(_("Cannot set the filename of a song.")) class SyncToDevice(EventPlugin, PluginConfigMixin): PLUGIN_ICON = Icons.NETWORK_TRANSMIT PLUGIN_ID = PLUGIN_CONFIG_SECTION PLUGIN_NAME = _("Synchronize to Device") PLUGIN_DESC = _( "Synchronizes all songs from the selected saved searches " "with the specified folder." ) CONFIG_SECTION = PLUGIN_CONFIG_SECTION CONFIG_QUERY_PREFIX = "query_" CONFIG_PATH_KEY = "{}_{}".format(PLUGIN_CONFIG_SECTION, "path") CONFIG_PATTERN_KEY = "{}_{}".format(PLUGIN_CONFIG_SECTION, "pattern") path_query = os.path.join(get_user_dir(), "lists", "queries.saved") path_pattern = os.path.join(get_user_dir(), "lists", "renamepatterns") spacing_main = 20 spacing_large = 6 spacing_small = 3 summary_sep = " " * 2 summary_sep_list = "," + summary_sep default_export_pattern = os.path.join("<artist>", "<album>", "<title>") model_cols = { "entry": (0, object), "tag": (1, str), "filename": (2, str), "export": (3, str), } def PluginPreferences(self, parent): # Check if the queries file exists if not os.path.exists(self.path_query): return self._no_queries_frame() # Read saved searches from file self.queries = {} with open(self.path_query, "r", encoding="utf-8") as query_file: for query_string in query_file: name = next(query_file).strip() self.queries[name] = Query(query_string.strip()) if not self.queries: # query_file is empty return self._no_queries_frame() main_vbox = Gtk.VBox(spacing=self.spacing_main) self.main_vbox = main_vbox # Saved search selection frame saved_search_vbox = Gtk.VBox(spacing=self.spacing_large) self.saved_search_vbox = saved_search_vbox for query_name, query in self.queries.items(): query_config = self.CONFIG_QUERY_PREFIX + query_name check_button = ConfigCheckButton( query_name, PM.CONFIG_SECTION, self._config_key(query_config) ) check_button.set_active(self.config_get_bool(query_config)) saved_search_vbox.pack_start(check_button, False, False, 0) saved_search_scroll = self._expandable_scroll(min_h=0, max_h=300) saved_search_scroll.add(saved_search_vbox) frame = qltk.Frame( label=_("Synchronize the following saved searches:"), child=saved_search_scroll, ) main_vbox.pack_start(frame, False, False, 0) # Destination path entry field destination_entry = Gtk.Entry( placeholder_text=_("The absolute path to your export location"), text=config.get(PM.CONFIG_SECTION, self.CONFIG_PATH_KEY, ""), ) destination_entry.connect("changed", self._destination_path_changed) self.destination_entry = destination_entry # Destination path selection button destination_button = qltk.Button(label="", icon_name=Icons.FOLDER_OPEN) destination_button.connect("clicked", self._select_destination_path) # Destination path hbox destination_path_hbox = Gtk.HBox(spacing=self.spacing_small) destination_path_hbox.pack_start(destination_entry, True, True, 0) destination_path_hbox.pack_start(destination_button, False, False, 0) # Destination path information destination_warn_label = self._label_with_icon( _( "All pre-existing files in the destination folder that aren't in " "the saved searches will be deleted." ), Icons.DIALOG_WARNING, ) destination_info_label = self._label_with_icon( _( "For devices mounted with MTP, export to a local destination " "folder, then transfer it to your device with rsync. " "Or, when syncing many files to an Android Device, use adb-sync, " "which is much faster." ), Icons.DIALOG_INFORMATION, ) # Destination path frame destination_vbox = Gtk.VBox(spacing=self.spacing_large) destination_vbox.pack_start(destination_path_hbox, False, False, 0) destination_vbox.pack_start(destination_warn_label, False, False, 0) destination_vbox.pack_start(destination_info_label, False, False, 0) frame = qltk.Frame(label=_("Destination path:"), child=destination_vbox) main_vbox.pack_start(frame, False, False, 0) # Export pattern frame export_pattern_combo = ComboBoxEntrySave( self.path_pattern, [self.default_export_pattern], title=_("Path Patterns"), edit_title=_("Edit saved patterns…"), ) export_pattern_combo.enable_clear_button() export_pattern_combo.show_all() export_pattern_entry = export_pattern_combo.get_child() export_pattern_entry.set_placeholder_text( _("The structure of the exported filenames, based on their tags") ) export_pattern_entry.set_text( config.get( PM.CONFIG_SECTION, self.CONFIG_PATTERN_KEY, self.default_export_pattern ) ) export_pattern_entry.connect("changed", self._export_pattern_changed) self.export_pattern_entry = export_pattern_entry frame = qltk.Frame(label=_("Export pattern:"), child=export_pattern_combo) main_vbox.pack_start(frame, False, False, 0) # Start preview button preview_start_button = qltk.Button( label=_("Preview"), icon_name=Icons.VIEW_REFRESH ) preview_start_button.set_visible(True) preview_start_button.connect("clicked", self._start_preview) self.preview_start_button = preview_start_button # Stop preview button preview_stop_button = qltk.Button( label=_("Stop preview"), icon_name=Icons.PROCESS_STOP ) preview_stop_button.set_visible(False) preview_stop_button.set_no_show_all(True) preview_stop_button.connect("clicked", self._stop_preview) self.preview_stop_button = preview_stop_button # Details view column_types = [column[1] for column in self.model_cols.values()] self.model = Gtk.ListStore(*column_types) self.details_tree = details_tree = HintedTreeView(model=self.model) details_scroll = self._expandable_scroll() details_scroll.set_shadow_type(Gtk.ShadowType.IN) details_scroll.add(details_tree) self.renders = {} # Preview column: status render = Gtk.CellRendererText() column = self._tree_view_column( render, self._cdf_status, title=_("Status"), expand=False, sort=self._model_col_id("tag"), ) details_tree.append_column(column) # Preview column: file render = Gtk.CellRendererText() column = self._tree_view_column( render, self._cdf_source_path, title=_("Source File"), sort=self._model_col_id("filename"), ) details_tree.append_column(column) # Preview column: export path render = Gtk.CellRendererText() render.set_property("editable", True) render.connect("edited", self._row_edited) column = self._tree_view_column( render, self._cdf_export_path, title=_("Export Path"), sort=self._model_col_id("export"), ) details_tree.append_column(column) # Status labels self.status_operation = Gtk.Label( xalign=0.0, yalign=0.5, wrap=True, visible=False, no_show_all=True ) self.status_progress = Gtk.Label( xalign=0.0, yalign=0.5, wrap=True, visible=False, no_show_all=True ) self.status_duplicates = self._label_with_icon( _( "Duplicate export paths detected! The export paths above can be " "edited before starting the synchronization." ), Icons.DIALOG_WARNING, visible=False, ) self.status_deletions = self._label_with_icon( _( "Existing files in the destination path will be deleted (except " "files named 'cover.jpg')!" ), Icons.DIALOG_WARNING, visible=False, ) # Section for previewing exported files preview_vbox = Gtk.VBox(spacing=self.spacing_large) preview_vbox.pack_start(preview_start_button, False, False, 0) preview_vbox.pack_start(preview_stop_button, False, False, 0) preview_vbox.pack_start(details_scroll, True, True, 0) preview_vbox.pack_start(self.status_operation, False, False, 0) preview_vbox.pack_start(self.status_progress, False, False, 0) preview_vbox.pack_start(self.status_duplicates, False, False, 0) preview_vbox.pack_start(self.status_deletions, False, False, 0) main_vbox.pack_start(preview_vbox, True, True, 0) # Start sync button sync_start_button = qltk.Button( label=_("Start synchronization"), icon_name=Icons.DOCUMENT_SAVE ) sync_start_button.set_sensitive(False) sync_start_button.set_visible(True) sync_start_button.connect("clicked", self._start_sync) self.sync_start_button = sync_start_button # Stop sync button sync_stop_button = qltk.Button( label=_("Stop synchronization"), icon_name=Icons.PROCESS_STOP ) sync_stop_button.set_visible(False) sync_stop_button.set_no_show_all(True) sync_stop_button.connect("clicked", self._stop_sync) self.sync_stop_button = sync_stop_button # Section for the sync buttons sync_vbox = Gtk.VBox(spacing=self.spacing_large) sync_vbox.pack_start(sync_start_button, False, False, 0) sync_vbox.pack_start(sync_stop_button, False, False, 0) main_vbox.pack_start(sync_vbox, False, False, 0) return main_vbox @staticmethod def _no_queries_frame(): """ Create a frame to use when there are no saved searches. :return: A new Frame. """ return qltk.Frame(_("No saved searches yet, create some and come back!")) def _expandable_scroll(self, min_h=50, max_h=-1, expand=True): """ Create a ScrolledWindow that expands as content is added. :param min_h: The minimum height of the window, in pixels. :param max_h: The maximum height of the window, in pixels. It will grow up to this height before it starts scrolling the content. :param expand: Whether the window should expand. :return: A new ScrolledWindow. """ return Gtk.ScrolledWindow( min_content_height=min_h, max_content_height=max_h, propagate_natural_height=expand, ) def _label_with_icon(self, text, icon_name, visible=True): """ Create a new label with an icon to the left of the text. :param text: The new text to set for the label. :param icon_name: An icon name or None. :return: A HBox containing an icon followed by a label. """ image = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.BUTTON) label = Gtk.Label(label=text, xalign=0.0, yalign=0.5, wrap=True) hbox = Gtk.HBox(spacing=self.spacing_large) if not visible: hbox.set_visible(False) hbox.set_no_show_all(True) hbox.pack_start(image, False, False, 0) hbox.pack_start(label, True, True, 0) return hbox def _tree_view_column( self, render, cdf, title=None, sort=None, expand=True, resize=True, reorder=True ): """ Create a new TreeViewColumn with the given properties. :param render: The A Gtk.CellRenderer of this cell. :param cdf: The Gtk.TreeCellDataFunc to use for updating content. :param title: The column's title. :param sort: The model column to use when sorting this column. :param expand: Whether the column width should automatically expand. :param resize: Whether the column can be resized. :param reorder: Whether the column can be reordered. :return: The new TreeViewColumn. """ tvc = Gtk.TreeViewColumn() tvc.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE) tvc.set_expand(expand) tvc.set_resizable(resize) tvc.set_reorderable(reorder) if title: tvc.set_title(title) if resize: render.set_property("ellipsize", Pango.EllipsizeMode.END) if sort: tvc.set_sort_column_id(sort) tvc.set_cell_data_func(render, cdf) tvc.pack_start(render, True) self.renders[tvc] = render return tvc def _destination_path_changed(self, entry): """ Save the destination path to the global config when the path changes. :param entry: The destination path entry field. """ config.set(PM.CONFIG_SECTION, self.CONFIG_PATH_KEY, entry.get_text()) def _select_destination_path(self, button): """ Show a folder selection dialog to select the destination path from the file system. :param button: The destination path selection button. """ dialog = Gtk.FileChooserDialog( title=_("Choose destination path"), action=Gtk.FileChooserAction.SELECT_FOLDER, select_multiple=False, create_folders=True, local_only=False, show_hidden=True, ) dialog.add_buttons( _("_Cancel"), Gtk.ResponseType.CANCEL, _("_Save"), Gtk.ResponseType.OK ) dialog.set_default_response(Gtk.ResponseType.OK) # If there is an existing path in the entry field, # make that path the default destination_entry_text = self.destination_entry.get_text() if destination_entry_text != "": dialog.set_current_folder(destination_entry_text) # Show the dialog and get the selected path response = dialog.run() response_path = dialog.get_filename() # Close the dialog and save the selected path dialog.destroy() if response == Gtk.ResponseType.OK and response_path != destination_entry_text: self.destination_entry.set_text(response_path) def _export_pattern_changed(self, entry): """ Save the export pattern to the global config when the pattern changes. :param entry: The export pattern entry field. """ config.set(PM.CONFIG_SECTION, self.CONFIG_PATTERN_KEY, entry.get_text()) def _cdf_status(self, column, cell, model, iter_, data): """ Handle entering data into the "Status" column of the sync previews. """ cell.set_property("text", model[iter_][self._model_col_id("tag")]) def _cdf_source_path(self, column, cell, model, iter_, data): """ Handle entering data into the "File" column of the sync previews. """ cell.set_property("text", model[iter_][self._model_col_id("filename")]) def _cdf_export_path(self, column, cell, model, iter_, data): """ Handle entering data into the "Export" column of the sync previews. """ cell.set_property("text", model[iter_][self._model_col_id("export")]) def _row_edited(self, renderer, path, entered_path): """ Handle a manual edit of a previewed export path. :param renderer: The object which received the signal. :param path: The path identifying the edited cell. :param entered_path: The new path entered by the user. """ def _update_warnings(): """ Toggle the visibility of the status warning labels based on the song counts. """ if self.c_song_dupes == 0: self.status_duplicates.set_visible(False) else: self.status_duplicates.set_visible(True) if self.c_songs_delete == 0: self.status_deletions.set_visible(False) else: self.status_deletions.set_visible(True) def _make_duplicate(entry, old_unique): """Mark the given entry as a duplicate.""" print_d(entry.filename) entry.tag = Entry.Tags.SKIP_DUPLICATE self.c_song_dupes += 1 if old_unique: self.c_songs_copy -= 1 _update_warnings() def _make_unique(entry, old_duplicate): """Mark the given entry as a unique file.""" print_d(entry.filename) entry.tag = Entry.Tags.PENDING_COPY self.c_songs_copy += 1 if old_duplicate: self.c_song_dupes -= 1 _update_warnings() def _make_skip(entry, counter): """Skip the given entry during synchronization.""" print_d(entry.filename) entry.tag = Entry.Tags.SKIP entry.export_path = "" return counter - 1 def _update_other_song(model, path, iter_, *data): """ Update a previewed path based on the current change. This is a callback function passed to Gtk.TreeModel.foreach() to iterate over the rows in a tree model. :return: True to stop iterating, False to continue. """ model_entry = model[path][self._model_col_id("entry")] if ( model_entry is entry or model_entry.tag == Entry.Tags.DELETE or model_entry.export_path == "" ): pass elif ( model_entry.export_path == entered_path and model_entry.tag == Entry.Tags.PENDING_COPY ): _make_duplicate(model_entry, True) self._update_model_value(iter_, "tag", model_entry.tag) elif ( model_entry.tag == Entry.Tags.SKIP_DUPLICATE and model_entry.export_path != entered_path and self._get_paths()[model_entry.export_path] == 1 ): _make_unique(model_entry, True) self._update_model_value(iter_, "tag", model_entry.tag) return False path = Gtk.TreePath.new_from_string(path) entry = self.model[path][self._model_col_id("entry")] if entry.export_path != entered_path: old_path, new_path = {}, {} old_path["duplicate"] = entry.tag == Entry.Tags.SKIP_DUPLICATE old_path["delete"] = entry.tag == Entry.Tags.PENDING_DELETE old_path["empty"] = not entry.export_path and not old_path["delete"] old_path["unique"] = not ( old_path["duplicate"] or old_path["delete"] or old_path["empty"] ) old_path_inv = {} for key, value in old_path.items(): old_path_inv.setdefault(value, []).append(key) previewed_paths = self._get_paths().keys() new_path["duplicate"] = entered_path in previewed_paths new_path["delete"] = entered_path.lower() == Entry.Tags.DELETE new_path["empty"] = not entered_path and not new_path["delete"] new_path["unique"] = not ( new_path["duplicate"] or new_path["delete"] or new_path["empty"] ) new_path_inv = {} for key, value in new_path.items(): new_path_inv.setdefault(value, []).append(key) print_d( _( "Export path changed from [{old_path}] to [{new_path}] " "for file [{filename}]" ).format( filename=entry.filename, old_path=" ".join(old_path_inv[True]), new_path=" ".join(new_path_inv[True]), ) ) # If the old path was empty... if old_path["empty"] and new_path["empty"]: pass elif old_path["empty"] and new_path["delete"]: try: Path(entry.filename).relative_to(self.expanded_destination) entry.tag = Entry.Tags.PENDING_DELETE self.c_songs_delete += 1 _update_warnings() except ValueError: pass elif old_path["empty"] and new_path["duplicate"]: _make_duplicate(entry, False) entry.export_path = entered_path elif old_path["empty"] and new_path["unique"]: _make_unique(entry, False) entry.export_path = entered_path # If the old path was a deletion... elif old_path["delete"] and new_path["empty"]: pass elif old_path["delete"] and new_path["delete"]: self.c_songs_delete = _make_skip(entry, self.c_songs_delete) _update_warnings() elif old_path["delete"] and new_path["duplicate"]: pass elif old_path["delete"] and new_path["unique"]: pass # If the old path was a duplicate... elif old_path["duplicate"] and new_path["empty"]: self.c_song_dupes = _make_skip(entry, self.c_song_dupes) self.model.foreach(_update_other_song) _update_warnings() elif old_path["duplicate"] and new_path["delete"]: self.c_song_dupes = _make_skip(entry, self.c_song_dupes) self.model.foreach(_update_other_song) _update_warnings() elif old_path["duplicate"] and new_path["duplicate"]: entry.export_path = entered_path elif old_path["duplicate"] and new_path["unique"]: _make_unique(entry, True) entry.export_path = entered_path self.model.foreach(_update_other_song) # If the old path was unique... elif old_path["unique"] and new_path["empty"]: self.c_songs_copy = _make_skip(entry, self.c_songs_copy) self.model.foreach(_update_other_song) _update_warnings() elif old_path["unique"] and new_path["delete"]: self.c_songs_copy = _make_skip(entry, self.c_songs_copy) self.model.foreach(_update_other_song) _update_warnings() elif old_path["unique"] and new_path["duplicate"]: _make_duplicate(entry, True) entry.export_path = entered_path elif old_path["unique"] and new_path["unique"]: entry.export_path = entered_path self.model.foreach(_update_other_song) # Update the model and the summary field self.model.set_row(self.model.get_iter(path), self._make_model_row(entry)) self._update_preview_summary() def _update_model_value(self, iter_, column, value): """ Set the data in a since cell of the ListStore model. :param iter_: A Gtk.TreeIter for the row being modified. :param column: The name of the column to modify. :param value: The new value for the cell. """ self.model.set_value(iter_, self._model_col_id(column), value) def _model_col_id(self, name): """ Get the column ID from the given name. :param name: The column name to search for. :raises: KeyError if a column with the given name does not exist. """ return self.model_cols[name][0] @staticmethod def _make_model_row(entry): """ Create a new row to insert into the ListStore model. :param entry: The Entry to insert. """ return [entry, entry.tag, entry.filename, entry.export_path] @staticmethod def _run_pending_events(): """ Prevent the application from becoming unresponsive. """ while Gtk.events_pending(): Gtk.main_iteration() def _start_preview(self, button): """ Start the generation of export paths for all songs. :param button: The start preview button. """ print_d(_("Starting synchronization preview")) self.running = True # Summary labels self.status_operation.set_label(_("Synchronization preview in progress.")) self.status_operation.set_visible(True) self.status_progress.set_visible(False) self.status_duplicates.set_visible(False) self.status_deletions.set_visible(False) # Change button visibility self.preview_start_button.set_visible(False) self.preview_stop_button.set_visible(True) self.c_songs_copy = self.c_song_dupes = self.c_songs_delete = 0 if self._run_preview() is None: return self._stop_preview() self.sync_start_button.set_sensitive(True) print_d(_("Finished synchronization preview")) def _stop_preview(self, button=None): """ Stop the generation of export paths for all songs. :param button: The stop preview button. """ if button: print_d(_("Stopping synchronization preview")) self.status_operation.set_label(_("Synchronization preview was stopped.")) else: self.status_operation.set_label(_("Synchronization preview has finished.")) self.status_operation.set_visible(True) self.running = False # Change button visibility self.preview_start_button.set_visible(True) self.preview_stop_button.set_visible(False) self._update_preview_summary() def _run_preview(self): """ Show the export paths for all songs to be synchronized. :return: Whether the generation of preview paths was successful. """ destination_path, pattern = self._get_valid_inputs() if None in {destination_path, pattern}: return False self.expanded_destination = os.path.expanduser(destination_path) # Get a list containing all songs to export songs = self._get_songs_from_queries() if not songs: return False self.model.clear() export_paths = [] for song in songs: if not self.running: print_d(_("Stopped synchronization preview")) return None self._run_pending_events() if not self.destination_entry.get_text(): print_d(_("A different plugin was selected - stop preview")) return False export_path = self._get_export_path(song, destination_path, pattern) if not export_path: return False entry = Entry(song, export_path) expanded_path = os.path.expanduser(export_path) if expanded_path in export_paths: entry.tag = Entry.Tags.SKIP_DUPLICATE self.c_song_dupes += 1 else: entry.tag = Entry.Tags.PENDING_COPY self.c_songs_copy += 1 export_paths.append(expanded_path) self.model.append(row=self._make_model_row(entry)) # List files to delete for root, __, files in os.walk(self.expanded_destination): for name in files: file_path = os.path.join(root, name) if file_path not in export_paths and "cover.jpg" not in file_path: entry = Entry(None) entry.filename = file_path entry.tag = Entry.Tags.PENDING_DELETE self.model.append(row=self._make_model_row(entry)) self.c_songs_delete += 1 return True def _update_preview_summary(self): """ Update the preview summary text field. """ prefix = _("Synchronization will:") + self.summary_sep preview_progress = [] if self.c_songs_copy > 0: counter = self.c_songs_copy preview_progress.append( ngettext( "attempt to write {count} file", "attempt to write {count} files", counter, ).format(count=counter) ) if self.c_song_dupes > 0: counter = self.c_song_dupes preview_progress.append( ngettext( "skip {count} duplicate file", "skip {count} duplicate files", counter, ).format(count=counter) ) for child in self.status_duplicates.get_children(): child.set_visible(True) self.status_duplicates.set_visible(True) if self.c_songs_delete > 0: counter = self.c_songs_delete preview_progress.append( ngettext("delete {count} file", "delete {count} files", counter).format( count=counter ) ) for child in self.status_deletions.get_children(): child.set_visible(True) self.status_deletions.set_visible(True) preview_progress_text = self.summary_sep_list.join(preview_progress) if preview_progress_text: preview_progress_text = prefix + preview_progress_text self.status_progress.set_label(preview_progress_text) self.status_progress.set_visible(True) print_d(preview_progress_text) def _get_paths(self): """ Build a list of all current export paths for the songs to be synchronized. """ paths = {} for row in self.model: entry = row[self._model_col_id("entry")] if entry.tag != Entry.Tags.PENDING_DELETE and entry.export_path: if entry.export_path not in paths.keys(): paths[entry.export_path] = 1 else: paths[entry.export_path] += 1 return paths def _show_sync_error(self, title, message): """ Show an error message whenever a synchronization error occurs. :param title: The title of the message popup. :param message: The error message. """ qltk.ErrorMessage(self.main_vbox, title, message).run() print_e(title) def _get_valid_inputs(self): """ Ensure that all user inputs have been given. Shows a popup error message if values are not as expected. :return: The entered destination path and an fsnative pattern, or None if an error occurred. """ # Get text from the destination path entry destination_path = self.destination_entry.get_text() if not destination_path: self._show_sync_error( _("No destination path provided"), _("Please specify the directory where songs " "should be exported."), ) return None, None # Get text from the export pattern entry export_pattern = self.export_pattern_entry.get_text() if not export_pattern: self._show_sync_error( _("No export pattern provided"), _( "Please specify an export pattern for the " "names of the exported songs." ), ) return None, None # Combine destination path and export pattern to form the full pattern full_export_path = os.path.join(destination_path, export_pattern) try: pattern = FileFromPattern(full_export_path) except ValueError: self._show_sync_error( _("Export path is not absolute"), _( 'The pattern\n\n{}\n\ncontains "/" but does not start ' "from root. Please provide an absolute destination path by " "making sure it starts with / or ~/." ).format(util.bold(full_export_path)), ) return None, None return destination_path, pattern def _get_songs_from_queries(self): """ Build a list of songs to be synchronized, filtered using the selected saved searches. :return: A list of the selected songs. """ enabled_queries = [] for query_name, query in self.queries.items(): query_config = self.CONFIG_QUERY_PREFIX + query_name if self.config_get_bool(query_config): enabled_queries.append(query) if not enabled_queries: self._show_sync_error( _("No saved searches selected"), _("Please select at least one saved search."), ) return [] selected_songs = [] for song in app.library.itervalues(): if any(query.search(song) for query in enabled_queries): selected_songs.append(song) if not selected_songs: self._show_sync_error( _("No songs in the selected saved searches"), _("All selected saved searches are empty."), ) return [] print_d(_("Found {} songs to synchronize").format(len(selected_songs))) return selected_songs def _get_export_path(self, song, destination_path, export_pattern): """ Use the given pattern of song tags to build the destination path for a song. :param song: The song for which to build the export path. :param destination_path: The user-entered destination path. :param export_pattern: An fsnative file path pattern. :return: A safe full destination path for the song. """ new_name = Path(export_pattern.format(song)) try: relative_name = new_name.relative_to(self.expanded_destination) except ValueError as ex: self._show_sync_error( _("Mismatch between destination path and export " "pattern"), _( "The export pattern starts with a path that " "differs from the destination path. Please " "correct the pattern.\n\nError:\n{}" ).format(ex), ) return None return os.path.join(destination_path, self._make_safe_name(relative_name)) def _make_safe_name(self, input_path): """ Make a file path safe by replacing unsafe characters. :param input_path: A relative Path. :return: The given path, with any unsafe characters replaced. Returned as a string. """ # Remove diacritics (accents) safe_filename = unicodedata.normalize("NFKD", str(input_path)) safe_filename = "".join( [c for c in safe_filename if not unicodedata.combining(c)] ) if os.name != "nt": # Ensure that Win32-incompatible chars are always removed. # On Windows, this is called during `FileFromPattern`. safe_filename = strip_win32_incompat_from_path(safe_filename) return safe_filename def _start_sync(self, button): """ Start the song synchronization. :param button: The start sync button. """ # Check sort column sort_columns = [ c.get_title() for c in self.details_tree.get_columns() if c.get_sort_indicator() ] if "Status" in sort_columns: self._show_sync_error( _("Unable to sync"), _("Cannot start synchronization while " "sorting by <b>Status</b>."), ) return print_d(_("Starting song synchronization")) self.running = True # Summary labels self.status_operation.set_label(_("Synchronization in progress.")) self.status_duplicates.set_visible(False) self.status_deletions.set_visible(False) # Change button visibility self.sync_start_button.set_visible(False) self.sync_stop_button.set_visible(True) if not self._run_sync(): return self._stop_sync() print_d(_("Finished song synchronization")) def _stop_sync(self, button=None): """ Stop the song synchronization. :param button: The stop sync button. """ if button: print_d(_("Stopping song synchronization")) self.status_operation.set_label(_("Synchronization was stopped.")) else: self.status_operation.set_label(_("Synchronization has finished.")) self.running = False # Change button visibility self.sync_start_button.set_visible(True) self.sync_stop_button.set_visible(False) def _run_sync(self): """ Synchronize the songs from the selected saved searches with the specified folder. :return: Whether the synchronization was successful. """ self.c_files_copy = ( self.c_files_skip ) = ( self.c_files_skip_previous ) = self.c_files_dupes = self.c_files_delete = self.c_files_failed = 0 self.model.foreach(self._sync_entry) if not self.running: return False self._remove_empty_dirs() return True def _sync_entry(self, model, path, iter_, *data): """ Synchronize a single song. This is a callback function passed to Gtk.TreeModel.foreach() to iterate over the rows in a tree model. :return: True to stop iterating, False to continue. """ entry = model[path][self._model_col_id("entry")] if not self.running: print_d(_("Stopped song synchronization")) return True self._run_pending_events() if not self.destination_entry.get_text(): print_d(_("A different plugin was selected - stop synchronization")) return True print_d( _('{tag} - "{filename}"').format(tag=entry.tag, filename=entry.filename) ) if not entry.export_path and not entry.tag: return False if entry.tag == Entry.Tags.PENDING_COPY: # Export, skipping existing files expanded_path = os.path.expanduser(entry.export_path) if os.path.exists(expanded_path): entry.tag = Entry.Tags.RESULT_SKIP_EXISTING self._update_model_value(iter_, "tag", entry.tag) self.c_files_skip += 1 else: entry.tag = Entry.Tags.IN_PROGRESS_SYNC self._update_model_value(iter_, "tag", entry.tag) song_folders = os.path.dirname(expanded_path) os.makedirs(song_folders, exist_ok=True) try: shutil.copyfile(entry.filename, expanded_path) except Exception as ex: entry.tag = Entry.Tags.RESULT_FAILURE + ": " + str(ex) self._update_model_value(iter_, "tag", entry.tag) print_exc() self.c_files_failed += 1 else: entry.tag = Entry.Tags.RESULT_SUCCESS self._update_model_value(iter_, "tag", entry.tag) self.c_files_copy += 1 elif entry.tag == Entry.Tags.SKIP_DUPLICATE: self.c_files_dupes += 1 elif entry.tag == Entry.Tags.PENDING_DELETE: # Delete file try: entry.tag = Entry.Tags.IN_PROGRESS_DELETE self._update_model_value(iter_, "tag", entry.tag) os.remove(entry.filename) except Exception as ex: entry.tag = Entry.Tags.RESULT_FAILURE + ": " + str(ex) self._update_model_value(iter_, "tag", entry.tag) print_exc() self.c_files_failed += 1 else: entry.tag = Entry.Tags.RESULT_SUCCESS self._update_model_value(iter_, "tag", entry.tag) self.c_files_delete += 1 else: self.c_files_skip_previous += 1 self._update_sync_summary() return False def _remove_empty_dirs(self): """ Delete all empty sub-directories from the given path. """ for root, dirs, files in os.walk(self.expanded_destination, topdown=False): for dirname in dirs: dir_path = os.path.realpath(os.path.join(root, dirname)) last_file_is_cover = files and files[0] == "cover.jpg" if not files or last_file_is_cover: entry = Entry(None) entry.filename = dir_path entry.tag = Entry.Tags.IN_PROGRESS_DELETE iter_ = self.model.append(row=self._make_model_row(entry)) print_d(_('Removing "{}"').format(entry.filename)) self.c_songs_delete += 1 try: if last_file_is_cover: os.remove(os.path.join(dir_path, files[0])) os.rmdir(dir_path) except Exception as ex: entry.tag = Entry.Tags.RESULT_FAILURE + ": " + str(ex) self._update_model_value(iter_, "tag", entry.tag) print_exc() self.c_files_failed += 1 else: entry.tag = Entry.Tags.RESULT_SUCCESS self._update_model_value(iter_, "tag", entry.tag) self.c_files_delete += 1 self._update_sync_summary() def _update_sync_summary(self): """ Update the synchronization summary text field. """ sync_summary_prefix = _("Synchronization has:") + self.summary_sep sync_summary = [] if self.c_files_copy > 0 or self.c_files_skip > 0: text = [] counter = self.c_files_copy text.append( ngettext( "written {count}/{total} file", "written {count}/{total} files", counter, ).format(count=counter, total=self.c_songs_copy) ) if self.c_files_skip > 0: counter = self.c_files_skip text.append( ngettext( "(skipped {count} existing file)", "(skipped {count} existing files)", counter, ).format(count=counter) ) sync_summary.append(self.summary_sep.join(text)) if self.c_files_dupes > 0: counter = self.c_files_dupes sync_summary.append( ngettext( "skipped {count}/{total} duplicate file", "skipped {count}/{total} duplicate files", counter, ).format(count=counter, total=self.c_song_dupes) ) if self.c_files_delete > 0: counter = self.c_files_delete sync_summary.append( ngettext( "deleted {count}/{total} file", "deleted {count}/{total} files", counter, ).format(count=counter, total=self.c_songs_delete) ) if self.c_files_failed > 0: counter = self.c_files_failed sync_summary.append( ngettext( "failed to sync {count} file", "failed to sync {count} files", counter, ).format(count=counter) ) if self.c_files_skip_previous > 0: counter = self.c_files_skip_previous sync_summary.append( ngettext( "skipped {count} file synchronized previously", "skipped {count} files synchronized previously", counter, ).format(count=counter) ) sync_summary_text = self.summary_sep_list.join(sync_summary) sync_summary_text = sync_summary_prefix + sync_summary_text self.status_progress.set_label(sync_summary_text) print_d(sync_summary_text)
metrics-exporter
metrics_collectors
import re import typing from apps.alerts.constants import AlertGroupState from apps.metrics_exporter.constants import ( ALERT_GROUPS_RESPONSE_TIME, ALERT_GROUPS_TOTAL, USER_WAS_NOTIFIED_OF_ALERT_GROUPS, AlertGroupsResponseTimeMetricsDict, AlertGroupsTotalMetricsDict, RecalculateOrgMetricsDict, UserWasNotifiedOfAlertGroupsMetricsDict, ) from apps.metrics_exporter.helpers import ( get_metric_alert_groups_response_time_key, get_metric_alert_groups_total_key, get_metric_calculation_started_key, get_metric_user_was_notified_of_alert_groups_key, get_metrics_cache_timer_key, get_organization_ids, ) from apps.metrics_exporter.tasks import ( start_calculate_and_cache_metrics, start_recalculation_for_new_metric, ) from django.core.cache import cache from prometheus_client import CollectorRegistry from prometheus_client.metrics_core import ( CounterMetricFamily, GaugeMetricFamily, HistogramMetricFamily, ) application_metrics_registry = CollectorRegistry() RE_ALERT_GROUPS_TOTAL = re.compile(r"{}_(\d+)".format(ALERT_GROUPS_TOTAL)) RE_ALERT_GROUPS_RESPONSE_TIME = re.compile( r"{}_(\d+)".format(ALERT_GROUPS_RESPONSE_TIME) ) RE_USER_WAS_NOTIFIED_OF_ALERT_GROUPS = re.compile( r"{}_(\d+)".format(USER_WAS_NOTIFIED_OF_ALERT_GROUPS) ) # https://github.com/prometheus/client_python#custom-collectors class ApplicationMetricsCollector: def __init__(self): self._buckets = (60, 300, 600, 3600, "+Inf") self._stack_labels = [ "org_id", "slug", "id", ] self._integration_labels = [ "integration", "team", ] + self._stack_labels self._integration_labels_with_state = self._integration_labels + ["state"] self._user_labels = ["username"] + self._stack_labels def collect(self): org_ids = set(get_organization_ids()) # alert groups total metric: gauge alert_groups_total, missing_org_ids_1 = self._get_alert_groups_total_metric( org_ids ) # alert groups response time metrics: histogram ( alert_groups_response_time_seconds, missing_org_ids_2, ) = self._get_response_time_metric(org_ids) # user was notified of alert groups metrics: counter ( user_was_notified, missing_org_ids_3, ) = self._get_user_was_notified_of_alert_groups_metric(org_ids) # This part is used for releasing new metrics to avoid recalculation for every metric. # Uncomment with metric name when needed. # # update new metric gradually # missing_org_ids_3 = self._update_new_metric(USER_WAS_NOTIFIED_OF_ALERT_GROUPS, org_ids, missing_org_ids_3) # check for orgs missing any of the metrics or needing a refresh, start recalculation task for missing org ids missing_org_ids = missing_org_ids_1 | missing_org_ids_2 | missing_org_ids_3 self.recalculate_cache_for_missing_org_ids(org_ids, missing_org_ids) yield alert_groups_total yield alert_groups_response_time_seconds yield user_was_notified def _get_alert_groups_total_metric(self, org_ids): alert_groups_total = GaugeMetricFamily( ALERT_GROUPS_TOTAL, "All alert groups", labels=self._integration_labels_with_state, ) processed_org_ids = set() alert_groups_total_keys = [ get_metric_alert_groups_total_key(org_id) for org_id in org_ids ] org_ag_states: typing.Dict[ str, typing.Dict[int, AlertGroupsTotalMetricsDict] ] = cache.get_many(alert_groups_total_keys) for org_key, ag_states in org_ag_states.items(): for integration, integration_data in ag_states.items(): # Labels values should have the same order as _integration_labels_with_state labels_values = [ integration_data["integration_name"], # integration integration_data["team_name"], # team integration_data["org_id"], # grafana org_id integration_data["slug"], # grafana instance slug integration_data["id"], # grafana instance id ] labels_values = list(map(str, labels_values)) for state in AlertGroupState: alert_groups_total.add_metric( labels_values + [state.value], integration_data[state.value] ) org_id_from_key = RE_ALERT_GROUPS_TOTAL.match(org_key).groups()[0] processed_org_ids.add(int(org_id_from_key)) missing_org_ids = org_ids - processed_org_ids return alert_groups_total, missing_org_ids def _get_response_time_metric(self, org_ids): alert_groups_response_time_seconds = HistogramMetricFamily( ALERT_GROUPS_RESPONSE_TIME, "Users response time to alert groups in 7 days (seconds)", labels=self._integration_labels, ) processed_org_ids = set() alert_groups_response_time_keys = [ get_metric_alert_groups_response_time_key(org_id) for org_id in org_ids ] org_ag_response_times: typing.Dict[ str, typing.Dict[int, AlertGroupsResponseTimeMetricsDict] ] = cache.get_many(alert_groups_response_time_keys) for org_key, ag_response_time in org_ag_response_times.items(): for integration, integration_data in ag_response_time.items(): # Labels values should have the same order as _integration_labels labels_values = [ integration_data["integration_name"], # integration integration_data["team_name"], # team integration_data["org_id"], # grafana org_id integration_data["slug"], # grafana instance slug integration_data["id"], # grafana instance id ] labels_values = list(map(str, labels_values)) response_time_values = integration_data["response_time"] if not response_time_values: continue buckets, sum_value = self.get_buckets_with_sum(response_time_values) buckets = sorted(list(buckets.items()), key=lambda x: float(x[0])) alert_groups_response_time_seconds.add_metric( labels_values, buckets=buckets, sum_value=sum_value ) org_id_from_key = RE_ALERT_GROUPS_RESPONSE_TIME.match(org_key).groups()[0] processed_org_ids.add(int(org_id_from_key)) missing_org_ids = org_ids - processed_org_ids return alert_groups_response_time_seconds, missing_org_ids def _get_user_was_notified_of_alert_groups_metric(self, org_ids): user_was_notified = CounterMetricFamily( USER_WAS_NOTIFIED_OF_ALERT_GROUPS, "Number of alert groups user was notified of", labels=self._user_labels, ) processed_org_ids = set() user_was_notified_keys = [ get_metric_user_was_notified_of_alert_groups_key(org_id) for org_id in org_ids ] org_users: typing.Dict[ str, typing.Dict[int, UserWasNotifiedOfAlertGroupsMetricsDict] ] = cache.get_many(user_was_notified_keys) for org_key, users in org_users.items(): for user, user_data in users.items(): # Labels values should have the same order as _user_labels labels_values = [ user_data["user_username"], # username user_data["org_id"], # grafana org_id user_data["slug"], # grafana instance slug user_data["id"], # grafana instance id ] labels_values = list(map(str, labels_values)) user_was_notified.add_metric(labels_values, user_data["counter"]) org_id_from_key = RE_USER_WAS_NOTIFIED_OF_ALERT_GROUPS.match( org_key ).groups()[0] processed_org_ids.add(int(org_id_from_key)) missing_org_ids = org_ids - processed_org_ids return user_was_notified, missing_org_ids def _update_new_metric(self, metric_name, org_ids, missing_org_ids): """ This method is used for new metrics to calculate metrics gradually and avoid force recalculation for all orgs """ calculation_started_key = get_metric_calculation_started_key(metric_name) is_calculation_started = cache.get(calculation_started_key) if len(missing_org_ids) == len(org_ids) or is_calculation_started: missing_org_ids = set() if not is_calculation_started: start_recalculation_for_new_metric.apply_async((metric_name,)) return missing_org_ids def recalculate_cache_for_missing_org_ids(self, org_ids, missing_org_ids): cache_timer_for_org_keys = [ get_metrics_cache_timer_key(org_id) for org_id in org_ids ] cache_timers_for_org = cache.get_many(cache_timer_for_org_keys) recalculate_orgs: typing.List[RecalculateOrgMetricsDict] = [] for org_id in org_ids: force_task = org_id in missing_org_ids if force_task or not cache_timers_for_org.get( get_metrics_cache_timer_key(org_id) ): recalculate_orgs.append( {"organization_id": org_id, "force": force_task} ) if recalculate_orgs: start_calculate_and_cache_metrics.apply_async((recalculate_orgs,)) def get_buckets_with_sum(self, values): """Put values in correct buckets and count values sum""" buckets_values = {str(key): 0 for key in self._buckets} sum_value = 0 for value in values: for bucket in self._buckets: if value <= float(bucket): buckets_values[str(bucket)] += 1.0 sum_value += value return buckets_values, sum_value application_metrics_registry.register(ApplicationMetricsCollector())
chardet
langcyrillicmodel
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # KOI8-R language model # Character Mapping Table: KOI8R_char_to_order_map = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, # 80 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, # 90 223, 224, 225, 68, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, # a0 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, # b0 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 ) win1251_char_to_order_map = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 68, 247, 248, 249, 250, 251, 252, 253, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, ) latin5_char_to_order_map = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 255, ) macCyrillic_char_to_order_map = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 68, 16, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 255, ) IBM855_char_to_order_map = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 68, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 27, 59, 54, 70, 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46, 218, 219, 220, 221, 222, 223, 224, 26, 55, 4, 42, 225, 226, 227, 228, 23, 60, 229, 230, 231, 232, 233, 234, 235, 11, 36, 236, 237, 238, 239, 240, 241, 242, 243, 8, 49, 12, 38, 5, 31, 1, 34, 15, 244, 245, 246, 247, 35, 16, 248, 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61, 249, 250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50, 251, 252, 255, ) IBM866_char_to_order_map = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 255, ) # Model Table: # total sequences: 100% # first 512 sequences: 97.6601% # first 1024 sequences: 2.3389% # rest sequences: 0.1237% # negative sequences: 0.0009% RussianLangModel = ( 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 1, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 2, 2, 2, 2, 0, 0, 2, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 1, 3, 3, 1, 3, 3, 3, 3, 2, 2, 3, 0, 2, 2, 2, 3, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 1, 2, 2, 0, 1, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 0, 2, 2, 3, 3, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 2, 3, 2, 3, 3, 3, 3, 2, 2, 3, 0, 3, 2, 2, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 2, 0, 3, 3, 3, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3, 2, 2, 0, 1, 3, 2, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 3, 0, 1, 1, 1, 1, 2, 1, 1, 0, 2, 2, 2, 1, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 2, 2, 2, 2, 1, 3, 2, 3, 2, 3, 2, 1, 2, 2, 0, 1, 1, 2, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2, 2, 2, 0, 2, 2, 2, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 0, 0, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3, 0, 2, 1, 0, 3, 2, 3, 2, 3, 0, 0, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 2, 3, 3, 3, 3, 2, 3, 3, 3, 3, 1, 2, 2, 0, 0, 2, 3, 2, 2, 2, 3, 2, 3, 2, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 0, 2, 3, 2, 3, 0, 1, 2, 3, 3, 2, 0, 2, 3, 0, 0, 2, 3, 2, 2, 0, 1, 3, 1, 3, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 3, 0, 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 2, 0, 0, 2, 2, 3, 3, 3, 2, 3, 3, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 3, 3, 0, 0, 1, 1, 1, 1, 1, 2, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 3, 3, 2, 3, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 1, 0, 2, 2, 2, 2, 1, 3, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 3, 3, 3, 3, 1, 2, 2, 1, 3, 1, 0, 3, 0, 0, 3, 0, 0, 0, 1, 1, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 2, 1, 1, 3, 3, 3, 2, 2, 1, 2, 2, 3, 1, 1, 2, 0, 0, 2, 2, 1, 3, 0, 0, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 3, 3, 3, 1, 2, 2, 2, 1, 2, 1, 3, 3, 1, 1, 2, 1, 2, 1, 2, 2, 0, 2, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 3, 2, 1, 3, 2, 2, 3, 2, 0, 3, 2, 0, 3, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 1, 2, 1, 2, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 2, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 1, 2, 1, 2, 3, 3, 2, 2, 1, 2, 2, 3, 0, 2, 1, 0, 0, 2, 2, 3, 2, 1, 2, 2, 2, 2, 2, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 1, 1, 0, 1, 1, 2, 2, 1, 1, 3, 0, 0, 1, 3, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 2, 0, 0, 0, 2, 1, 0, 1, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 2, 3, 2, 2, 2, 1, 2, 2, 2, 1, 2, 1, 0, 0, 1, 1, 1, 0, 2, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 3, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 2, 0, 0, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 2, 2, 3, 2, 2, 2, 3, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 3, 3, 3, 2, 2, 2, 2, 3, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 3, 1, 2, 1, 2, 0, 0, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 2, 0, 0, 1, 0, 3, 2, 2, 2, 2, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 1, 2, 2, 1, 1, 2, 2, 0, 1, 1, 0, 2, 1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 0, 1, 2, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 3, 2, 2, 2, 1, 1, 1, 2, 3, 0, 0, 0, 0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 2, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 2, 3, 2, 3, 2, 1, 2, 2, 2, 2, 1, 0, 0, 0, 2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 2, 1, 1, 1, 2, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 1, 0, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 1, 1, 2, 2, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 2, 3, 2, 3, 3, 2, 0, 1, 1, 1, 0, 0, 1, 0, 2, 0, 1, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 2, 3, 3, 3, 3, 1, 2, 2, 2, 2, 0, 1, 1, 0, 2, 1, 1, 1, 2, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 2, 0, 0, 1, 1, 2, 2, 1, 0, 0, 2, 0, 1, 1, 3, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 2, 1, 1, 1, 2, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 3, 2, 3, 2, 1, 0, 0, 2, 2, 2, 0, 1, 0, 2, 0, 1, 1, 1, 0, 1, 0, 0, 0, 3, 0, 1, 1, 0, 0, 2, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 2, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 3, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 0, 0, 0, 1, 2, 1, 0, 1, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 3, 0, 0, 0, 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 3, 3, 2, 2, 0, 0, 0, 2, 2, 0, 0, 0, 1, 2, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 3, 2, 3, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 2, 0, 1, 2, 1, 0, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3, 2, 2, 2, 1, 0, 0, 2, 2, 1, 0, 1, 2, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 2, 3, 1, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 1, 0, 1, 0, 2, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 2, 0, 0, 1, 0, 3, 2, 1, 2, 1, 2, 2, 0, 1, 0, 0, 0, 2, 1, 0, 0, 2, 1, 1, 1, 1, 0, 2, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 2, 0, 0, 2, 0, 1, 0, 1, 1, 1, 2, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 2, 1, 2, 2, 2, 0, 3, 0, 1, 1, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 2, 2, 3, 2, 2, 0, 0, 1, 1, 2, 0, 1, 2, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 2, 2, 1, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2, 0, 1, 0, 2, 2, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 0, 0, 1, 1, 2, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 1, 1, 2, 0, 2, 1, 1, 1, 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 2, 0, 1, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ) Koi8rModel = { "char_to_order_map": KOI8R_char_to_order_map, "precedence_matrix": RussianLangModel, "typical_positive_ratio": 0.976601, "keep_english_letter": False, "charset_name": "KOI8-R", "language": "Russian", } Win1251CyrillicModel = { "char_to_order_map": win1251_char_to_order_map, "precedence_matrix": RussianLangModel, "typical_positive_ratio": 0.976601, "keep_english_letter": False, "charset_name": "windows-1251", "language": "Russian", } Latin5CyrillicModel = { "char_to_order_map": latin5_char_to_order_map, "precedence_matrix": RussianLangModel, "typical_positive_ratio": 0.976601, "keep_english_letter": False, "charset_name": "ISO-8859-5", "language": "Russian", } MacCyrillicModel = { "char_to_order_map": macCyrillic_char_to_order_map, "precedence_matrix": RussianLangModel, "typical_positive_ratio": 0.976601, "keep_english_letter": False, "charset_name": "MacCyrillic", "language": "Russian", } Ibm866Model = { "char_to_order_map": IBM866_char_to_order_map, "precedence_matrix": RussianLangModel, "typical_positive_ratio": 0.976601, "keep_english_letter": False, "charset_name": "IBM866", "language": "Russian", } Ibm855Model = { "char_to_order_map": IBM855_char_to_order_map, "precedence_matrix": RussianLangModel, "typical_positive_ratio": 0.976601, "keep_english_letter": False, "charset_name": "IBM855", "language": "Russian", }
ModelChecker
ModelChecker
# Copyright (c) 2018 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. import os from PyQt6.QtCore import QObject, QTimer, pyqtProperty, pyqtSignal, pyqtSlot from UM.Application import Application from UM.Extension import Extension from UM.i18n import i18nCatalog from UM.Logger import Logger from UM.Message import Message from UM.PluginRegistry import PluginRegistry from UM.Scene.Camera import Camera from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator catalog = i18nCatalog("cura") class ModelChecker(QObject, Extension): onChanged = pyqtSignal() """Signal that gets emitted when anything changed that we need to check.""" def __init__(self): super().__init__() self._button_view = None self._caution_message = Message( "", # Message text gets set when the message gets shown, to display the models in question. lifetime=0, title=catalog.i18nc("@info:title", "3D Model Assistant"), message_type=Message.MessageType.WARNING, ) self._change_timer = QTimer() self._change_timer.setInterval(200) self._change_timer.setSingleShot(True) self._change_timer.timeout.connect(self.onChanged) Application.getInstance().initializationFinished.connect( self._pluginsInitialized ) Application.getInstance().getController().getScene().sceneChanged.connect( self._onChanged ) Application.getInstance().globalContainerStackChanged.connect(self._onChanged) def _onChanged(self, *args, **kwargs): # Ignore camera updates. if len(args) == 0: self._change_timer.start() return if not isinstance(args[0], Camera): self._change_timer.start() def _pluginsInitialized(self): """Called when plug-ins are initialized. This makes sure that we listen to changes of the material and that the button is created that indicates warnings with the current set-up. """ Application.getInstance().getMachineManager().rootMaterialChanged.connect( self.onChanged ) self._createView() def checkObjectsForShrinkage(self): shrinkage_threshold = 100.5 # From what shrinkage percentage a warning will be issued about the model size. warning_size_xy = 150 # The horizontal size of a model that would be too large when dealing with shrinking materials. warning_size_z = 100 # The vertical size of a model that would be too large when dealing with shrinking materials. # This function can be triggered in the middle of a machine change, so do not proceed if the machine change # has not done yet. global_container_stack = Application.getInstance().getGlobalContainerStack() if global_container_stack is None: return False material_shrinkage = self._getMaterialShrinkage() warning_nodes = [] # Check node material shrinkage and bounding box size for node in self.sliceableNodes(): node_extruder_position = node.callDecoration("getActiveExtruderPosition") if node_extruder_position is None: continue # This function can be triggered in the middle of a machine change, so do not proceed if the machine change # has not done yet. try: global_container_stack.extruderList[int(node_extruder_position)] except IndexError: Application.getInstance().callLater(lambda: self.onChanged.emit()) return False if material_shrinkage > shrinkage_threshold: bbox = node.getBoundingBox() if bbox is not None and ( bbox.width >= warning_size_xy or bbox.depth >= warning_size_xy or bbox.height >= warning_size_z ): warning_nodes.append(node) self._caution_message.setText( catalog.i18nc( "@info:status", "<p>One or more 3D models may not print optimally due to the model size and material configuration:</p>\n" "<p>{model_names}</p>\n" "<p>Find out how to ensure the best possible print quality and reliability.</p>\n" '<p><a href="https://ultimaker.com/3D-model-assistant">View print quality guide</a></p>', ).format(model_names=", ".join([n.getName() for n in warning_nodes])) ) return len(warning_nodes) > 0 def sliceableNodes(self): # Add all sliceable scene nodes to check scene = Application.getInstance().getController().getScene() for node in DepthFirstIterator(scene.getRoot()): if node.callDecoration("isSliceable"): yield node def _createView(self): """Creates the view used by show popup. The view is saved because of the fairly aggressive garbage collection. """ Logger.log("d", "Creating model checker view.") # Create the plugin dialog component path = os.path.join( PluginRegistry.getInstance().getPluginPath("ModelChecker"), "ModelChecker.qml", ) self._button_view = Application.getInstance().createQmlComponent( path, {"manager": self} ) # The qml is only the button Application.getInstance().addAdditionalComponent( "jobSpecsButton", self._button_view ) Logger.log("d", "Model checker view created.") @pyqtProperty(bool, notify=onChanged) def hasWarnings(self): danger_shrinkage = self.checkObjectsForShrinkage() return any( (danger_shrinkage,) ) # If any of the checks fail, show the warning button. @pyqtSlot() def showWarnings(self): self._caution_message.show() def _getMaterialShrinkage(self) -> float: global_container_stack = Application.getInstance().getGlobalContainerStack() if global_container_stack is None: return 100 return global_container_stack.getProperty( "material_shrinkage_percentage", "value" )
windows
animation
""" @file @brief This file loads the Animation dialog (i.e about Openshot Project) @author Jonathan Thomas <jonathan@openshot.org> @author Olivier Girard <olivier@openshot.org> @section LICENSE Copyright (c) 2008-2018 OpenShot Studios, LLC (http://www.openshotstudios.com). This file is part of OpenShot Video Editor (http://www.openshot.org), an open-source project dedicated to delivering high quality video editing and animation solutions to the world. OpenShot Video Editor is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OpenShot Video Editor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>. """ import os from classes import info, ui_util from classes.app import get_app from classes.metrics import track_metric_screen from PyQt5.QtWidgets import QDialog class Animation(QDialog): """Animation Dialog""" ui_path = os.path.join(info.PATH, "windows", "ui", "animation.ui") def __init__(self): # Create dialog class QDialog.__init__(self) # Load UI from designer ui_util.load_ui(self, self.ui_path) # Init Ui ui_util.init_ui(self) # get translations self.app = get_app() _ = self.app._tr # Track metrics track_metric_screen("animation-screen")
context
combine
# -*- coding: utf-8 -*- # # Copyright (C) 2013 by Ihor E. Novikov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from sk1.resources import pdids from .base import ActionCtxPlugin class CombinePlugin(ActionCtxPlugin): name = "CombinePlugin" ids = [pdids.ID_COMBINE, pdids.ID_BREAK_APART] class GroupPlugin(ActionCtxPlugin): name = "GroupPlugin" ids = [pdids.ID_GROUP, pdids.ID_UNGROUP, pdids.ID_UNGROUPALL] class ToCurvePlugin(ActionCtxPlugin): name = "ToCurvePlugin" ids = [ pdids.ID_TO_CURVES, ]
TableModelWithSearch
FilteringArrayController
# # FilteringArrayController.py # TableModelWithSearch # # Created by Bill Bumgarner on Sun Apr 04 2004. # Copyright (c) 2004 __MyCompanyName__. All rights reserved. # import re from Cocoa import * kLiteralSearch = u'Literal Search' kRegularExpressionSearch = u'Regular Expression Search' def regexForSearchString(searchString, searchType): if not searchString: return None searchString = searchString.strip() if searchType == kLiteralSearch: searchString = re.escape(searchString.strip()) + ur'(?i)' return re.compile(searchString) def dictValueFilter(dicts, regex): for dct in dicts: for value in dct.itervalues(): print value if regex.search(value): yield dct break class FilteringArrayController (NSArrayController): searchString = None lastRegex = None searchType = kLiteralSearch def arrangeObjects_(self, objects): supermethod = super(FilteringArrayController, self).arrangeObjects_ try: regex = regexForSearchString(self.searchString, self.searchType) except: regex = self.lastRegex self.lastRegex = regex if regex is None: return supermethod(objects) return supermethod(list(dictValueFilter(objects, regex))) @objc.IBAction def performSearch_(self, sender): self.searchString = sender.stringValue() self.rearrangeObjects() @objc.IBAction def changeSearchType_(self, searchType): self.lastRegex = None self.searchString = None self.searchType = searchType self.rearrangeObjects()
utils
invariants
"""Functions to register auxiliary functions on a class' methods to check for invariants. This is intended to be used in a test, whereby your test will setup a class to automatically run invariant verification functions before and after each function call, to ensure some extra sanity checks that wouldn't be used in non-tests. Example: Instrument the Inventory class with the check_inventory_invariants() function. def setUp(module): instrument_invariants(Inventory, check_inventory_invariants, check_inventory_invariants) def tearDown(module): uninstrument_invariants(Inventory) """ __copyright__ = "Copyright (C) 2015-2017 Martin Blais" __license__ = "GNU GPLv2" import types def invariant_check(method, prefun, postfun): """Decorate a method with the pre/post invariant checkers. Args: method: An unbound method to instrument. prefun: A function that checks invariants pre-call. postfun: A function that checks invariants post-call. Returns: An unbound method, decorated. """ reentrant = [] def new_method(self, *args, **kw): reentrant.append(None) if len(reentrant) == 1: prefun(self) result = method(self, *args, **kw) if len(reentrant) == 1: postfun(self) reentrant.pop() return result return new_method def instrument_invariants(klass, prefun, postfun): """Instrument the class 'klass' with pre/post invariant checker functions. Args: klass: A class object, whose methods to be instrumented. prefun: A function that checks invariants pre-call. postfun: A function that checks invariants pre-call. """ instrumented = {} for attrname, object_ in klass.__dict__.items(): if attrname.startswith("_"): continue if not isinstance(object_, types.FunctionType): continue instrumented[attrname] = object_ setattr(klass, attrname, invariant_check(object_, prefun, postfun)) klass.__instrumented = instrumented def uninstrument_invariants(klass): """Undo the instrumentation for invariants. Args: klass: A class object, whose methods to be uninstrumented. """ instrumented = getattr(klass, "__instrumented", None) if instrumented: for attrname, object_ in instrumented.items(): setattr(klass, attrname, object_) del klass.__instrumented
ops
pad
"""Automatic padding of gaps between entries. """ __copyright__ = "Copyright (C) 2013-2016 Martin Blais" __license__ = "GNU GPLv2" import collections from beancount.core import ( account, amount, data, flags, inventory, position, realization, ) from beancount.ops import balance from beancount.utils import misc_utils __plugins__ = ("pad",) PadError = collections.namedtuple("PadError", "source message entry") def pad(entries, options_map): """Insert transaction entries for to fulfill a subsequent balance check. Synthesize and insert Transaction entries right after Pad entries in order to fulfill checks in the padded accounts. Returns a new list of entries. Note that this doesn't pad across parent-child relationships, it is a very simple kind of pad. (I have found this to be sufficient in practice, and simpler to implement and understand.) Furthermore, this pads for a single currency only, that is, balance checks are specified only for one currency at a time, and pads will only be inserted for those currencies. Args: entries: A list of directives. options_map: A parser options dict. Returns: A new list of directives, with Pad entries inserted, and a list of new errors produced. """ pad_errors = [] # Find all the pad entries and group them by account. pads = list(misc_utils.filter_type(entries, data.Pad)) pad_dict = misc_utils.groupby(lambda x: x.account, pads) # Partially realize the postings, so we can iterate them by account. by_account = realization.postings_by_account(entries) # A dict of pad -> list of entries to be inserted. new_entries = {id(pad): [] for pad in pads} # Process each account that has a padding group. for account_, pad_list in sorted(pad_dict.items()): # Last encountered / currency active pad entry. active_pad = None # Gather all the postings for the account and its children. postings = [] is_child = account.parent_matcher(account_) for item_account, item_postings in by_account.items(): if is_child(item_account): postings.extend(item_postings) postings.sort(key=data.posting_sortkey) # A set of currencies already padded so far in this account. padded_lots = set() pad_balance = inventory.Inventory() for entry in postings: assert not isinstance(entry, data.Posting) if isinstance(entry, data.TxnPosting): # This is a transaction; update the running balance for this # account. pad_balance.add_position(entry.posting) elif isinstance(entry, data.Pad): if entry.account == account_: # Mark this newly encountered pad as active and allow all lots # to be padded heretofore. active_pad = entry padded_lots = set() elif isinstance(entry, data.Balance): check_amount = entry.amount # Compare the current balance amount to the expected one from # the check entry. IMPORTANT: You need to understand that this # does not check a single position, but rather checks that the # total amount for a particular currency (which itself is # distinct from the cost). balance_amount = pad_balance.get_currency_units(check_amount.currency) diff_amount = amount.sub(balance_amount, check_amount) # Use the specified tolerance or automatically infer it. tolerance = balance.get_balance_tolerance(entry, options_map) if abs(diff_amount.number) > tolerance: # The check fails; we need to pad. # Pad only if pad entry is active and we haven't already # padded that lot since it was last encountered. if active_pad and (check_amount.currency not in padded_lots): # Note: we decide that it's an error to try to pad # positions at cost; we check here that all the existing # positions with that currency have no cost. positions = [ pos for pos in pad_balance.get_positions() if pos.units.currency == check_amount.currency ] for position_ in positions: if position_.cost is not None: pad_errors.append( PadError( entry.meta, ( "Attempt to pad an entry with cost for " "balance: {}".format(pad_balance) ), active_pad, ) ) # Thus our padding lot is without cost by default. diff_position = position.Position.from_amounts( amount.Amount( check_amount.number - balance_amount.number, check_amount.currency, ) ) # Synthesize a new transaction entry for the difference. narration = ( "(Padding inserted for Balance of {} for " "difference {})" ).format(check_amount, diff_position) new_entry = data.Transaction( active_pad.meta.copy(), active_pad.date, flags.FLAG_PADDING, None, narration, data.EMPTY_SET, data.EMPTY_SET, [], ) new_entry.postings.append( data.Posting( active_pad.account, diff_position.units, diff_position.cost, None, None, None, ) ) neg_diff_position = -diff_position new_entry.postings.append( data.Posting( active_pad.source_account, neg_diff_position.units, neg_diff_position.cost, None, None, None, ) ) # Save it for later insertion after the active pad. new_entries[id(active_pad)].append(new_entry) # Fixup the running balance. pos, _ = pad_balance.add_position(diff_position) if pos is not None and pos.is_negative_at_cost(): raise ValueError( "Position held at cost goes negative: {}".format(pos) ) # Mark this lot as padded. Further checks should not pad this lot. padded_lots.add(check_amount.currency) # Insert the newly created entries right after the pad entries that created them. padded_entries = [] for entry in entries: padded_entries.append(entry) if isinstance(entry, data.Pad): entry_list = new_entries[id(entry)] if entry_list: padded_entries.extend(entry_list) else: # Generate errors on unused pad entries. pad_errors.append(PadError(entry.meta, "Unused Pad entry", entry)) return padded_entries, pad_errors
lexers
http
import re import pygments from httpie.output.lexers.common import precise RE_STATUS_LINE = re.compile(r"(\d{3})( +)?(.+)?") STATUS_TYPES = { "1": pygments.token.Number.HTTP.INFO, "2": pygments.token.Number.HTTP.OK, "3": pygments.token.Number.HTTP.REDIRECT, "4": pygments.token.Number.HTTP.CLIENT_ERR, "5": pygments.token.Number.HTTP.SERVER_ERR, } RESPONSE_TYPES = { "GET": pygments.token.Name.Function.HTTP.GET, "HEAD": pygments.token.Name.Function.HTTP.HEAD, "POST": pygments.token.Name.Function.HTTP.POST, "PUT": pygments.token.Name.Function.HTTP.PUT, "PATCH": pygments.token.Name.Function.HTTP.PATCH, "DELETE": pygments.token.Name.Function.HTTP.DELETE, } def http_response_type(lexer, match, ctx): status_match = RE_STATUS_LINE.match(match.group()) if status_match is None: return None status_code, text, reason = status_match.groups() status_type = precise( lexer, STATUS_TYPES.get(status_code[0]), pygments.token.Number ) groups = pygments.lexer.bygroups(status_type, pygments.token.Text, status_type) yield from groups(lexer, status_match, ctx) def request_method(lexer, match, ctx): response_type = precise( lexer, RESPONSE_TYPES.get(match.group()), pygments.token.Name.Function ) yield match.start(), response_type, match.group() class SimplifiedHTTPLexer(pygments.lexer.RegexLexer): """Simplified HTTP lexer for Pygments. It only operates on headers and provides a stronger contrast between their names and values than the original one bundled with Pygments (:class:`pygments.lexers.text import HttpLexer`), especially when Solarized color scheme is used. """ name = "HTTP" aliases = ["http"] filenames = ["*.http"] tokens = { "root": [ # Request-Line ( r"([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)", pygments.lexer.bygroups( request_method, pygments.token.Text, pygments.token.Name.Namespace, pygments.token.Text, pygments.token.Keyword.Reserved, pygments.token.Operator, pygments.token.Number, ), ), # Response Status-Line ( r"(HTTP)(/)(\d+\.\d+)( +)(.+)", pygments.lexer.bygroups( pygments.token.Keyword.Reserved, # 'HTTP' pygments.token.Operator, # '/' pygments.token.Number, # Version pygments.token.Text, http_response_type, # Status code and Reason ), ), # Header ( r"(.*?)( *)(:)( *)(.+)", pygments.lexer.bygroups( pygments.token.Name.Attribute, # Name pygments.token.Text, pygments.token.Operator, # Colon pygments.token.Text, pygments.token.String, # Value ), ), ] }
commands
sync_replicated_schema
import logging import re from collections import defaultdict from typing import Dict, Set import structlog from django.conf import settings from django.core.management.base import BaseCommand from posthog.clickhouse.schema import CREATE_TABLE_QUERIES, get_table_name from posthog.client import sync_execute from posthog.cloud_utils import is_cloud logger = structlog.get_logger(__name__) logger.setLevel(logging.INFO) TableName = str Query = str HostName = str class Command(BaseCommand): help = "Synchronize schema across clickhouse cluster, creating missing tables on new nodes" def add_arguments(self, parser): parser.add_argument( "--dry-run", action="store_true", help="Exits with a non-zero status if schema changes would be required.", ) def handle(self, *args, **options): if is_cloud(): logger.info("✅ Skipping sync_replicated_schema because is_cloud=true") return _, create_table_queries, out_of_sync_hosts = self.analyze_cluster_tables() if len(out_of_sync_hosts) > 0: logger.info( "Schema out of sync on some clickhouse nodes!", out_of_sync_hosts=out_of_sync_hosts, ) if options.get("dry_run"): exit(1) else: self.create_missing_tables(out_of_sync_hosts, create_table_queries) logger.info("✅ All ClickHouse nodes schema in sync") def analyze_cluster_tables(self): table_names = list(map(get_table_name, CREATE_TABLE_QUERIES)) rows = sync_execute( """ SELECT hostName() as host, name, create_table_query FROM clusterAllReplicas(%(cluster)s, system, tables) WHERE database = %(database)s AND name IN %(table_names)s """, { "cluster": settings.CLICKHOUSE_CLUSTER, "database": settings.CLICKHOUSE_DATABASE, "table_names": table_names, }, ) host_tables: Dict[HostName, Set[TableName]] = defaultdict(set) create_table_queries: Dict[TableName, Query] = {} for host, table_name, create_table_query in rows: host_tables[host].add(table_name) create_table_queries[table_name] = create_table_query return ( host_tables, create_table_queries, self.get_out_of_sync_hosts(host_tables), ) def get_out_of_sync_hosts( self, host_tables: Dict[HostName, Set[TableName]] ) -> Dict[HostName, Set[TableName]]: table_names = list(map(get_table_name, CREATE_TABLE_QUERIES)) out_of_sync = {} for host, tables in host_tables.items(): missing_tables = set(table_names) - tables if len(missing_tables) > 0: out_of_sync[host] = missing_tables return out_of_sync def create_missing_tables( self, out_of_sync_hosts: Dict[HostName, Set[TableName]], create_table_queries: Dict[TableName, Query], ): missing_tables = set( table for tables in out_of_sync_hosts.values() for table in tables ) logger.info("Creating missing tables", missing_tables=missing_tables) for table in missing_tables: query = create_table_queries[table] sync_execute(self.run_on_cluster(query)) def run_on_cluster(self, create_table_query: Query) -> Query: return re.sub( r"^CREATE TABLE (\S+)", f"CREATE TABLE IF NOT EXISTS \\1 ON CLUSTER '{settings.CLICKHOUSE_CLUSTER}'", create_table_query, 1, )
v22-webassets
plugin
# encoding: utf-8 import ckan.plugins as plugins import ckan.plugins.toolkit as toolkit from ckan.common import CKANConfig def most_popular_groups(): """Return a sorted list of the groups with the most datasets.""" # Get a list of all the site's groups from CKAN, sorted by number of # datasets. groups = toolkit.get_action("group_list")( {}, {"sort": "package_count desc", "all_fields": True} ) # Truncate the list to the 10 most popular groups only. groups = groups[:10] return groups class ExampleThemePlugin(plugins.SingletonPlugin): """An example theme plugin.""" plugins.implements(plugins.IConfigurer) # Declare that this plugin will implement ITemplateHelpers. plugins.implements(plugins.ITemplateHelpers) def update_config(self, config: CKANConfig): # Add this plugin's templates dir to CKAN's extra_template_paths, so # that CKAN will use this plugin's custom templates. toolkit.add_template_directory(config, "templates") # Add this plugin's public dir to CKAN's extra_public_paths, so # that CKAN will use this plugin's custom static files. toolkit.add_public_directory(config, "public") # Register this plugin's assets directory with CKAN. # Here, 'assets' is the path to the webassets directory # (relative to this plugin.py file), and 'example_theme' is the name # that we'll use to refer to this assets directory from CKAN # templates. toolkit.add_resource("assets", "example_theme") def get_helpers(self): """Register the most_popular_groups() function above as a template helper function. """ # Template helper function names should begin with the name of the # extension they belong to, to avoid clashing with functions from # other extensions. return {"example_theme_most_popular_groups": most_popular_groups}
metadata
mod
# Copyright (C) 2008-2010 Adam Olsen # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # # The developers of the Exaile media player hereby grant permission # for non-GPL compatible GStreamer and Exaile plugins to be used and # distributed together with GStreamer and Exaile. This permission is # above and beyond the permissions granted by the GPL license by which # Exaile is covered. If you modify this code, you may extend this # exception to your version of the code, but you are not obligated to # do so. If you do not wish to do so, delete this exception statement # from your version. import logging import os from xl.metadata._base import BaseFormat logger = logging.getLogger(__name__) try: import ctypes modplug = ctypes.cdll.LoadLibrary("libmodplug.so.1") modplug.ModPlug_Load.restype = ctypes.c_void_p modplug.ModPlug_Load.argtypes = (ctypes.c_void_p, ctypes.c_int) modplug.ModPlug_GetName.restype = ctypes.c_char_p modplug.ModPlug_GetName.argtypes = (ctypes.c_void_p,) modplug.ModPlug_GetLength.restype = ctypes.c_int modplug.ModPlug_GetLength.argtypes = (ctypes.c_void_p,) except (ImportError, OSError): logger.debug("No support for Mod metadata because libmodplug could not be found.") modplug = None class ModFormat(BaseFormat): writable = False def load(self): if modplug: data = open(self.loc, "rb").read() f = modplug.ModPlug_Load(data, len(data)) if f: name = modplug.ModPlug_GetName(f) or os.path.split(self.loc)[-1] length = modplug.ModPlug_GetLength(f) / 1000.0 or -1 self.mutagen = {"title": name, "__length": length} else: self.mutagen = {} def get_length(self): try: return self.mutagen["__length"] except KeyError: return -1 def get_bitrate(self): return -1 # vim: et sts=4 sw=4
gtk3
filtertreeview
# # Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com> # 2008 Andrew Resch <andrewresch@gmail.com> # 2014 Calum Lind <calumlind@gmail.com> # # This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with # the additional special exception to link portions of this program with the OpenSSL library. # See LICENSE for more details. # import logging import os import warnings import deluge.component as component from deluge.common import TORRENT_STATE, decode_bytes, resource_filename from deluge.configmanager import ConfigManager from deluge.ui.client import client from gi.repository import Gtk from gi.repository.GdkPixbuf import Pixbuf from gi.repository.Pango import EllipsizeMode from .common import get_pixbuf log = logging.getLogger(__name__) STATE_PIX = { "All": "all", "Downloading": "downloading", "Seeding": "seeding", "Paused": "inactive", "Checking": "checking", "Queued": "queued", "Error": "alert", "Active": "active", "Allocating": "checking", "Moving": "checking", } TRACKER_PIX = {"All": "tracker_all", "Error": "tracker_warning"} FILTER_COLUMN = 5 class FilterTreeView(component.Component): def __init__(self): component.Component.__init__(self, "FilterTreeView", interval=2) self.config = ConfigManager("gtk3ui.conf") self.tracker_icons = component.get("TrackerIcons") self.sidebar = component.get("SideBar") self.treeview = Gtk.TreeView() self.sidebar.add_tab(self.treeview, "filters", "Filters") # set filter to all when hidden: self.sidebar.notebook.connect("hide", self._on_hide) # Create the treestore # cat, value, label, count, pixmap, visible self.treestore = Gtk.TreeStore(str, str, str, int, Pixbuf, bool) # Create the column and cells column = Gtk.TreeViewColumn("Filters") column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE) # icon cell self.cell_pix = Gtk.CellRendererPixbuf() column.pack_start(self.cell_pix, expand=False) column.add_attribute(self.cell_pix, "pixbuf", 4) # label cell cell_label = Gtk.CellRendererText() cell_label.set_property("ellipsize", EllipsizeMode.END) column.pack_start(cell_label, expand=True) column.set_cell_data_func(cell_label, self.render_cell_data, None) # count cell self.cell_count = Gtk.CellRendererText() self.cell_count.set_property("xalign", 1.0) self.cell_count.set_padding(3, 0) column.pack_start(self.cell_count, expand=False) self.treeview.append_column(column) # Style self.treeview.set_show_expanders(True) self.treeview.set_headers_visible(False) self.treeview.set_level_indentation(-21) # Force theme to use expander-size so we don't cut out entries due to indentation hack. provider = Gtk.CssProvider() provider.load_from_data(b"* {-GtkTreeView-expander-size: 9;}") context = self.treeview.get_style_context() context.add_provider(provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) self.treeview.set_model(self.treestore) self.treeview.get_selection().connect("changed", self.on_selection_changed) self.create_model_filter() self.treeview.connect("button-press-event", self.on_button_press_event) # filtertree menu builder = Gtk.Builder() builder.add_from_file( resource_filename(__package__, os.path.join("glade", "filtertree_menu.ui")) ) self.menu = builder.get_object("filtertree_menu") builder.connect_signals(self) self.default_menu_items = self.menu.get_children() # add Cat nodes: self.cat_nodes = {} self.filters = {} def start(self): self.cat_nodes = {} self.filters = {} # initial order of state filter: self.cat_nodes["state"] = self.treestore.append( None, ["cat", "state", _("States"), 0, None, False] ) for state in ["All", "Active"] + TORRENT_STATE: self.update_row("state", state, 0, _(state)) self.cat_nodes["tracker_host"] = self.treestore.append( None, ["cat", "tracker_host", _("Trackers"), 0, None, False] ) self.update_row("tracker_host", "All", 0, _("All")) self.update_row("tracker_host", "Error", 0, _("Error")) self.update_row("tracker_host", "", 0, _("None")) self.cat_nodes["owner"] = self.treestore.append( None, ["cat", "owner", _("Owner"), 0, None, False] ) self.update_row("owner", "localclient", 0, _("Admin")) self.update_row("owner", "", 0, _("None")) # We set to this expand the rows on start-up self.expand_rows = True self.selected_path = None def stop(self): self.treestore.clear() def create_model_filter(self): self.model_filter = self.treestore.filter_new() self.model_filter.set_visible_column(FILTER_COLUMN) self.treeview.set_model(self.model_filter) def cb_update_filter_tree(self, filter_items): # create missing cat_nodes for cat in filter_items: if cat not in self.cat_nodes: label = _(cat) if cat == "label": label = _("Labels") self.cat_nodes[cat] = self.treestore.append( None, ["cat", cat, label, 0, None, False] ) # update rows visible_filters = [] for cat, filters in filter_items.items(): for value, count in filters: self.update_row(cat, value, count) visible_filters.append((cat, value)) # hide root-categories not returned by core-part of the plugin. for cat in self.cat_nodes: self.treestore.set_value( self.cat_nodes[cat], FILTER_COLUMN, True if cat in filter_items else False, ) # hide items not returned by core-plugin. for f in self.filters: if f not in visible_filters: self.treestore.set_value(self.filters[f], FILTER_COLUMN, False) if self.expand_rows: self.treeview.expand_all() self.expand_rows = False if not self.selected_path: self.select_default_filter() def update_row(self, cat, value, count, label=None): def on_get_icon(icon): if icon: self.set_row_image(cat, value, icon.get_filename()) if (cat, value) in self.filters: row = self.filters[(cat, value)] self.treestore.set_value(row, 3, count) else: pix = self.get_pixmap(cat, value) if value == "": if cat == "label": label = _("No Label") elif cat == "owner": label = _("No Owner") elif not label and value: label = _(value) row = self.treestore.append( self.cat_nodes[cat], [cat, value, label, count, pix, True] ) self.filters[(cat, value)] = row if cat == "tracker_host" and value not in ("All", "Error") and value: d = self.tracker_icons.fetch(value) d.addCallback(on_get_icon) self.treestore.set_value(row, FILTER_COLUMN, True) return row def render_cell_data(self, column, cell, model, row, data): cat = model.get_value(row, 0) label = decode_bytes(model.get_value(row, 2)) count = model.get_value(row, 3) # Suppress Warning: g_object_set_qdata: assertion `G_IS_OBJECT (object)' failed original_filters = warnings.filters[:] warnings.simplefilter("ignore") try: pix = model.get_value(row, 4) finally: warnings.filters = original_filters self.cell_pix.set_property("visible", True if pix else False) if cat == "cat": self.cell_count.set_property("visible", False) cell.set_padding(10, 2) label = "<b>%s</b>" % label else: count_txt = "<small>%s</small>" % count self.cell_count.set_property("markup", count_txt) self.cell_count.set_property("visible", True) cell.set_padding(2, 1) cell.set_property("markup", label) def get_pixmap(self, cat, value): pix = None if cat == "state": pix = STATE_PIX.get(value, None) elif cat == "tracker_host": pix = TRACKER_PIX.get(value, None) if pix: return get_pixbuf("%s16.png" % pix) def set_row_image(self, cat, value, filename): pix = get_pixbuf(filename, size=16) row = self.filters[(cat, value)] self.treestore.set_value(row, 4, pix) return False def on_selection_changed(self, selection): try: (model, row) = self.treeview.get_selection().get_selected() if not row: log.debug("nothing selected") return cat = model.get_value(row, 0) value = model.get_value(row, 1) filter_dict = {cat: [value]} if value == "All" or cat == "cat": filter_dict = {} component.get("TorrentView").set_filter(filter_dict) self.selected_path = model.get_path(row) except Exception as ex: log.debug(ex) # paths is likely None .. so lets return None return None def update(self): try: hide_cat = [] if not self.config["sidebar_show_trackers"]: hide_cat.append("tracker_host") if not self.config["sidebar_show_owners"]: hide_cat.append("owner") client.core.get_filter_tree( self.config["sidebar_show_zero"], hide_cat ).addCallback(self.cb_update_filter_tree) except Exception as ex: log.debug(ex) # Callbacks # def on_button_press_event(self, widget, event): """This is a callback for showing the right-click context menu.""" x, y = event.get_coords() path = self.treeview.get_path_at_pos(int(x), int(y)) if not path: return path = path[0] cat = self.model_filter[path][0] if event.button == 1: # Prevent selecting a category label if cat == "cat": if self.treeview.row_expanded(path): self.treeview.collapse_row(path) else: self.treeview.expand_row(path, False) if not self.selected_path: self.select_default_filter() else: self.treeview.get_selection().select_path(self.selected_path) return True elif event.button == 3: # assign current cat, value to self: x, y = event.get_coords() path = self.treeview.get_path_at_pos(int(x), int(y)) if not path: return row = self.model_filter.get_iter(path[0]) self.cat = self.model_filter.get_value(row, 0) self.value = self.model_filter.get_value(row, 1) self.count = self.model_filter.get_value(row, 3) # Show the pop-up menu self.set_menu_sensitivity() self.menu.hide() self.menu.popup(None, None, None, None, event.button, event.time) self.menu.show() if cat == "cat": # Do not select the row return True def set_menu_sensitivity(self): # select-all/pause/resume sensitive = self.cat != "cat" and self.count != 0 for item in self.default_menu_items: item.set_sensitive(sensitive) def select_all(self): """For use in popup menu.""" component.get("TorrentView").treeview.get_selection().select_all() def on_select_all(self, event): self.select_all() def on_pause_all(self, event): self.select_all() func = getattr(component.get("MenuBar"), "on_menuitem_%s_activate" % "pause") func(event) def on_resume_all(self, event): self.select_all() func = getattr(component.get("MenuBar"), "on_menuitem_%s_activate" % "resume") func(event) def _on_hide(self, *args): self.select_default_filter() def select_default_filter(self): row = self.filters[("state", "All")] path = self.treestore.get_path(row) self.treeview.get_selection().select_path(path)
windows
release
# Run this script from Windows, not MSYS2 import json import os import shutil import subprocess CWD = os.path.abspath( os.path.join( os.path.dirname(__file__), "..", ), ) os.chdir(CWD) with open("meta.json") as f: meta = json.load(f) MAJOR_VERSION = meta["version"]["major"] MINOR_VERSION = meta["version"]["minor"] if os.path.isdir("dist/stargate"): shutil.rmtree("dist/stargate") print("Running Pyinstaller") subprocess.check_call(["pyinstaller", "pyinstaller-windows-onedir.spec"]) TEMPLATE = r""" !define PRODUCT_NAME "stargate" !define PRODUCT_VERSION "{MAJOR_VERSION_NUM}.0" !define PRODUCT_PUBLISHER "stargatedaw" ;Require admin rights on NT6+ (When UAC is turned on) RequestExecutionLevel admin SetCompressor /SOLID lzma Name "Stargate DAW {MINOR_VERSION}" OutFile "dist\StargateDAW-{MINOR_VERSION}-win64-installer.exe" InstallDir "$PROGRAMFILES64\stargatedaw@github\Stargate" ;-------------------------------- ;Interface Settings !define MUI_ABORTWARNING !define MUI_LICENSEPAGE_CHECKBOX !define MUI_FINISHPAGE_RUN "$INSTDIR\program\{MAJOR_VERSION}.exe" !define MUI_STARTMENUPAGE_DEFAULTFOLDER "Stargate DAW" !include MUI2.nsh !include WinVer.nsh !include x64.nsh ;-------------------------------- ;Modern UI Configuration ;Installer pages !insertmacro MUI_PAGE_WELCOME !insertmacro MUI_PAGE_LICENSE "windows\gpl-3.0.txt" !insertmacro MUI_PAGE_COMPONENTS !insertmacro MUI_PAGE_DIRECTORY !insertmacro MUI_PAGE_INSTFILES !insertmacro MUI_PAGE_FINISH ;Uninstaller pages !insertmacro MUI_UNPAGE_WELCOME !insertmacro MUI_UNPAGE_CONFIRM ;!insertmacro MUI_UNPAGE_LICENSE textfile ;!insertmacro MUI_UNPAGE_COMPONENTS !insertmacro MUI_UNPAGE_DIRECTORY !insertmacro MUI_UNPAGE_INSTFILES !insertmacro MUI_UNPAGE_FINISH ;-------------------------------- ;Languages !insertmacro MUI_LANGUAGE "English" Function .onInit ${{IfNot}} ${{AtLeastWin10}} MessageBox MB_OK "Windows 10 or later required, 64 bit only" Quit ${{EndIf}} ${{IfNot}} ${{RunningX64}} MessageBox MB_OK "64 bit Windows 10 or later required, 32 bit is not supported" Quit ${{EndIf}} FunctionEnd ;Section ; UserInfo::getAccountType ; Pop $0 ; ; # compare the result with the string "Admin" to see if the user is admin. ; # If match, jump 3 lines down. ; StrCmp $0 "Admin" +3 ; ; # if there is not a match, print message and return ; MessageBox MB_OK "not admin: $0" ; Return ;SectionEnd Section "Base Install" SEC01 SectionIn RO SetOutPath $INSTDIR writeUninstaller "$INSTDIR\uninstall.exe" ; Clean up the old legacy file structure ; TODO: Remove this in mid 2023 RMDir /r "$PROGRAMFILES64\stargateaudio@github\Stargate\program" Delete "$PROGRAMFILES64\stargateaudio@github\Stargate\uninstall.exe" ; Only if empty RMDir "$PROGRAMFILES64\stargateaudio@github\Stargate" RMDir "$PROGRAMFILES64\stargateaudio@github" ; Delete the old program RMDir /r $INSTDIR\program ; Install the program CreateDirectory $INSTDIR\program SetOutPath $INSTDIR\program File /r "dist\stargate\" File "files\share\pixmaps\{MAJOR_VERSION}.ico" ; Add to the "Add or remove programs" dialog WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\StargateDAW" \ "DisplayName" "Stargate DAW" WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\StargateDAW" \ "DisplayIcon" "$\"$INSTDIR\program\files\share\pixmaps\stargate.ico$\"" WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\StargateDAW" \ "UninstallString" "$\"$INSTDIR\uninstall.exe$\"" SectionEnd Section "Start Menu Shortcut" SEC02 createShortCut \ "$SMPROGRAMS\Stargate DAW.lnk" \ "$INSTDIR\program\{MAJOR_VERSION}.exe" \ "" \ "$INSTDIR\program\{MAJOR_VERSION}.ico" SectionEnd Section /o "Portable Flash Drive Install" SEC03 SetOutPath $INSTDIR ; Create the shortcut to the executable File windows\LaunchStargate.cmd SetOutPath $INSTDIR\program ; The exe looks for this empty file to choose the Stargate home folder FileOpen $9 ..\_stargate_home w FileWrite $9 "This file tells Stargate it is a portable install." FileClose $9 SectionEnd LangString DESC_SEC03 ${{LANG_ENGLISH}} "Store settings and projects in the same folder as the executable. Only use this if you are installing to a flash drive, and you must change the install folder to the flash drive in the next step." !insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN !insertmacro MUI_DESCRIPTION_TEXT ${{SEC03}} $(DESC_SEC03) !insertmacro MUI_FUNCTION_DESCRIPTION_END Section "uninstall" ; We do not delete settings, projects or any other files the user may have ; stored next to the application, only the application itself RMDir /r $INSTDIR\program Delete "$SMPROGRAMS\Stargate DAW.lnk" DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\StargateDAW" SectionEnd """ NSIS = r"C:\Program Files (x86)\NSIS\Bin\makensis.exe" template = TEMPLATE.format( MINOR_VERSION=MINOR_VERSION, MAJOR_VERSION=MAJOR_VERSION, MAJOR_VERSION_NUM=MAJOR_VERSION[-1], ) template_name = "{0}.nsi".format(MAJOR_VERSION) with open(template_name, "w") as f: f.write(template) print("Running NSIS") subprocess.check_call([NSIS, template_name])
ConfidentialStamper
ConfidentialStamper
""" Add a watermark to all pages in a PDF document """ import math import os import sys from Foundation import * from Quartz import * def usage(name): print >> sys.stderr, "Usage %s [inputfile]" % (name,) class MyPDFData(object): pdfDoc = None mediaRect = None # This is a simple function to create a CFURLRef from # a path to a file. The path can be relative to the # current directory or an absolute path. def createURL(path): return CFURLCreateFromFileSystemRepresentation(None, path, len(path), False) # For the supplied URL and media box, create a PDF context # that creates a PDF file at that URL and uses supplied rect # as its document media box. def myCreatePDFContext(url, mediaBox): dict = {} dict[kCGPDFContextCreator] = "PDF Stamper Application" pdfContext = CGPDFContextCreateWithURL(url, mediaBox, dict) return pdfContext # For a URL corresponding to an existing PDF document on disk, # create a CGPDFDocumentRef and obtain the media box of the first # page. def myCreatePDFSourceDocument(url): myPDFData = MyPDFData() myPDFData.pdfDoc = CGPDFDocumentCreateWithURL(url) if myPDFData.pdfDoc is not None: # NOTE: the original code uses CGPDFDocumentGetMediaBox, but that # API is deprecated and doesn't work in Leopard. page = CGPDFDocumentGetPage(myPDFData.pdfDoc, 1) myPDFData.mediaRect = CGPDFPageGetBoxRect(page, kCGPDFMediaBox) # Make the media rect origin at 0,0. myPDFData.mediaRect.origin.x = myPDFData.mediaRect.origin.y = 0.0 return myPDFData # Draw the source PDF document into the context and then draw the stamp PDF document # on top of it. When drawing the stamp on top, place it along the diagonal from the lower # left corner to the upper right corner and center its media rect to the center of that # diagonal. def StampWithPDFDocument(context, sourcePDFDoc, stampFileDoc, stampMediaRect): numPages = CGPDFDocumentGetNumberOfPages(sourcePDFDoc) # Loop over document pages and stamp each one appropriately. for i in range(1, numPages + 1): # Use the page rectangle of each page from the source to compute # the destination media box for each page and the location of # the stamp. # NOTE: the original code uses CGPDFDocumentGetMediaBox, but that # API is deprecated and doesn't work in Leopard. page = CGPDFDocumentGetPage(sourcePDFDoc, i) pageRect = CGPDFPageGetBoxRect(page, kCGPDFMediaBox) CGContextBeginPage(context, pageRect) CGContextSaveGState(context) # Clip to the media box of the page. CGContextClipToRect(context, pageRect) # First draw the content of the source document. CGContextDrawPDFDocument(context, pageRect, sourcePDFDoc, i) # Translate to center of destination rect, that is the center of # the media box of content to draw on top of. CGContextTranslateCTM( context, pageRect.size.width / 2, pageRect.size.height / 2 ) # Compute angle of the diagonal across the destination page. angle = math.atan(pageRect.size.height / pageRect.size.width) # Rotate by an amount so that drawn content goes along a diagonal # axis across the page. CGContextRotateCTM(context, angle) # Move the origin so that the media box of the PDF to stamp # is centered around center point of destination. CGContextTranslateCTM( context, -stampMediaRect.size.width / 2, -stampMediaRect.size.height / 2 ) # Now draw the document to stamp with on top of original content. CGContextDrawPDFDocument(context, stampMediaRect, stampFileDoc, 1) CGContextRestoreGState(context) CGContextEndPage(context) # From an input PDF document and a PDF document whose contents you # want to draw on top of the other, create a new PDF document # containing all the pages of the input document with the first page # of the "stamping" overlayed. def createStampedFileWithFile(inURL, stampURL, outURL): sourceFileData = myCreatePDFSourceDocument(inURL) if sourceFileData.pdfDoc is None: print >> sys.stderr, "Can't create PDFDocumentRef for source input file!" return stampFileData = myCreatePDFSourceDocument(stampURL) if stampFileData.pdfDoc is None: CGPDFDocumentRelease(sourceFileData.pdfDoc) print >> sys.stderr, "Can't create PDFDocumentRef for file to stamp with!" return pdfContext = myCreatePDFContext(outURL, sourceFileData.mediaRect) if pdfContext is None: print >> sys.stderr, "Can't create PDFContext for output file!" return StampWithPDFDocument( pdfContext, sourceFileData.pdfDoc, stampFileData.pdfDoc, stampFileData.mediaRect ) def main(args=None): if args is None: args = sys.argv suffix = ".watermarked.pdf" stampFileName = os.path.join(os.path.dirname(__file__), "confidential.pdf") if len(args) != 2: usage(args[0]) return 1 inputFileName = args[1] outputFileName = os.path.splitext(inputFileName)[0] + suffix inURL = createURL(inputFileName) if inURL is None: print >> sys.stderr, "Couldn't create URL for input file!" return 1 outURL = createURL(outputFileName) if outURL is None: print >> sys.stderr, "Couldn't create URL for output file!" return 1 stampURL = createURL(stampFileName) if stampURL is None: print >> sys.stderr, "Couldn't create URL for stamping file!" return 1 createStampedFileWithFile(inURL, stampURL, outURL) return 0 if __name__ == "__main__": sys.exit(main())
engines
mixcloud
# SPDX-License-Identifier: AGPL-3.0-or-later """ Mixcloud (Music) """ from json import loads from urllib.parse import urlencode from dateutil import parser # about about = { "website": "https://www.mixcloud.com/", "wikidata_id": "Q6883832", "official_api_documentation": "http://www.mixcloud.com/developers/", "use_official_api": True, "require_api_key": False, "results": "JSON", } # engine dependent config categories = ["music"] paging = True # search-url url = "https://api.mixcloud.com/" search_url = url + "search/?{query}&type=cloudcast&limit=10&offset={offset}" embedded_url = ( '<iframe scrolling="no" frameborder="0" allowTransparency="true" ' + 'data-src="https://www.mixcloud.com/widget/iframe/?feed={url}" width="300" height="300"></iframe>' ) # do search-request def request(query, params): offset = (params["pageno"] - 1) * 10 params["url"] = search_url.format(query=urlencode({"q": query}), offset=offset) return params # get response from search-request def response(resp): results = [] search_res = loads(resp.text) # parse results for result in search_res.get("data", []): title = result["name"] url = result["url"] content = result["user"]["name"] embedded = embedded_url.format(url=url) publishedDate = parser.parse(result["created_time"]) # append result results.append( { "url": url, "title": title, "embedded": embedded, "publishedDate": publishedDate, "content": content, } ) # return results return results
ui
ui_options_maintenance
# -*- coding: utf-8 -*- # Automatically generated - don't edit. # Use `python setup.py build_ui` to update it. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MaintenanceOptionsPage(object): def setupUi(self, MaintenanceOptionsPage): MaintenanceOptionsPage.setObjectName("MaintenanceOptionsPage") MaintenanceOptionsPage.resize(334, 397) self.vboxlayout = QtWidgets.QVBoxLayout(MaintenanceOptionsPage) self.vboxlayout.setObjectName("vboxlayout") self.label = QtWidgets.QLabel(MaintenanceOptionsPage) self.label.setObjectName("label") self.vboxlayout.addWidget(self.label) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setContentsMargins(-1, -1, -1, 0) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.config_file = QtWidgets.QLineEdit(MaintenanceOptionsPage) self.config_file.setReadOnly(True) self.config_file.setObjectName("config_file") self.horizontalLayout_3.addWidget(self.config_file) self.open_folder_button = QtWidgets.QToolButton(MaintenanceOptionsPage) self.open_folder_button.setObjectName("open_folder_button") self.horizontalLayout_3.addWidget(self.open_folder_button) self.vboxlayout.addLayout(self.horizontalLayout_3) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, -1, -1, 0) self.horizontalLayout.setObjectName("horizontalLayout") spacerItem = QtWidgets.QSpacerItem( 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum ) self.horizontalLayout.addItem(spacerItem) self.load_backup_button = QtWidgets.QToolButton(MaintenanceOptionsPage) self.load_backup_button.setObjectName("load_backup_button") self.horizontalLayout.addWidget(self.load_backup_button) self.save_backup_button = QtWidgets.QToolButton(MaintenanceOptionsPage) self.save_backup_button.setObjectName("save_backup_button") self.horizontalLayout.addWidget(self.save_backup_button) self.vboxlayout.addLayout(self.horizontalLayout) self.option_counts = QtWidgets.QLabel(MaintenanceOptionsPage) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.option_counts.sizePolicy().hasHeightForWidth() ) self.option_counts.setSizePolicy(sizePolicy) self.option_counts.setText("") self.option_counts.setObjectName("option_counts") self.vboxlayout.addWidget(self.option_counts) self.enable_cleanup = QtWidgets.QCheckBox(MaintenanceOptionsPage) self.enable_cleanup.setObjectName("enable_cleanup") self.vboxlayout.addWidget(self.enable_cleanup) self.description = QtWidgets.QLabel(MaintenanceOptionsPage) self.description.setText("") self.description.setAlignment( QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop ) self.description.setWordWrap(True) self.description.setIndent(0) self.description.setObjectName("description") self.vboxlayout.addWidget(self.description) spacerItem1 = QtWidgets.QSpacerItem( 20, 8, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed ) self.vboxlayout.addItem(spacerItem1) self.line = QtWidgets.QFrame(MaintenanceOptionsPage) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.vboxlayout.addWidget(self.line) self.select_all = QtWidgets.QCheckBox(MaintenanceOptionsPage) self.select_all.setObjectName("select_all") self.vboxlayout.addWidget(self.select_all) self.tableWidget = QtWidgets.QTableWidget(MaintenanceOptionsPage) self.tableWidget.setSizeAdjustPolicy( QtWidgets.QAbstractScrollArea.AdjustToContents ) self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) self.tableWidget.setColumnCount(2) self.tableWidget.setObjectName("tableWidget") self.tableWidget.setRowCount(0) self.tableWidget.horizontalHeader().setStretchLastSection(True) self.tableWidget.verticalHeader().setVisible(False) self.vboxlayout.addWidget(self.tableWidget) self.retranslateUi(MaintenanceOptionsPage) QtCore.QMetaObject.connectSlotsByName(MaintenanceOptionsPage) def retranslateUi(self, MaintenanceOptionsPage): _translate = QtCore.QCoreApplication.translate self.label.setText(_("Configuration File:")) self.open_folder_button.setText(_("Open folder")) self.load_backup_button.setText(_("Load Backup")) self.save_backup_button.setText(_("Save Backup")) self.enable_cleanup.setText(_("Remove selected options")) self.select_all.setText(_("Select all"))
mylar
cv
# This file is part of Mylar. # # Mylar is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Mylar is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. # # You should have received a copy of the GNU General Public License # along with Mylar. If not, see <http://www.gnu.org/licenses/>. import os import platform import re import string import sys import time from xml.parsers.expat import ExpatError import httplib import lib.feedparser import logger import mylar import requests import urllib2 from bs4 import BeautifulSoup as Soup def patch_http_response_read(func): def inner(*args): try: return func(*args) except httplib.IncompleteRead, e: return e.partial return inner httplib.HTTPResponse.read = patch_http_response_read(httplib.HTTPResponse.read) if platform.python_version() == "2.7.6": httplib.HTTPConnection._http_vsn = 10 httplib.HTTPConnection._http_vsn_str = "HTTP/1.0" def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist=None): # import easy to use xml parser called minidom: from xml.dom.minidom import parseString if mylar.CONFIG.COMICVINE_API == "None" or mylar.CONFIG.COMICVINE_API is None: logger.warn( "You have not specified your own ComicVine API key - it's a requirement. Get your own @ http://api.comicvine.com." ) return else: comicapi = mylar.CONFIG.COMICVINE_API if type == "comic": if not comicid.startswith("4050-"): comicid = "4050-" + comicid PULLURL = ( mylar.CVURL + "volume/" + str(comicid) + "/?api_key=" + str(comicapi) + "&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases" ) elif type == "issue": if mylar.CONFIG.CV_ONLY: cv_type = "issues" if arclist is None: searchset = ( "filter=volume:" + str(comicid) + "&field_list=cover_date,description,id,image,issue_number,name,date_last_updated,store_date" ) else: searchset = ( "filter=id:" + (arclist) + "&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume" ) else: cv_type = "volume/" + str(comicid) searchset = "name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,store_date" PULLURL = ( mylar.CVURL + str(cv_type) + "/?api_key=" + str(comicapi) + "&format=xml&" + str(searchset) + "&offset=" + str(offset) ) elif any([type == "image", type == "firstissue"]): # this is used ONLY for CV_ONLY PULLURL = ( mylar.CVURL + "issues/?api_key=" + str(comicapi) + "&format=xml&filter=id:" + str(issueid) + "&field_list=cover_date,image" ) elif type == "storyarc": PULLURL = ( mylar.CVURL + "story_arcs/?api_key=" + str(comicapi) + "&format=xml&filter=name:" + str(issueid) + "&field_list=cover_date" ) elif type == "comicyears": PULLURL = ( mylar.CVURL + "volumes/?api_key=" + str(comicapi) + "&format=xml&filter=id:" + str(comicidlist) + "&field_list=name,id,start_year,publisher,description,deck,aliases&offset=" + str(offset) ) elif type == "import": PULLURL = ( mylar.CVURL + "issues/?api_key=" + str(comicapi) + "&format=xml&filter=id:" + (comicidlist) + "&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume" + "&offset=" + str(offset) ) elif type == "update_dates": PULLURL = ( mylar.CVURL + "issues/?api_key=" + str(comicapi) + "&format=xml&filter=id:" + (comicidlist) + "&field_list=date_last_updated, id, issue_number, store_date, cover_date, name, volume " + "&offset=" + str(offset) ) # logger.info('CV.PULLURL: ' + PULLURL) # new CV API restriction - one api request / second. if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2: time.sleep(2) else: time.sleep(mylar.CONFIG.CVAPI_RATE) # download the file: # set payload to None for now... payload = None try: r = requests.get( PULLURL, params=payload, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS, ) except Exception, e: logger.warn("Error fetching data from ComicVine: %s" % (e)) return # logger.fdebug('cv status code : ' + str(r.status_code)) try: dom = parseString(r.content) except ExpatError: if "<title>Abnormal Traffic Detected" in r.content: logger.error( "ComicVine has banned this server's IP address because it exceeded the API rate limit." ) else: logger.warn( "[WARNING] ComicVine is not responding correctly at the moment. This is usually due to some problems on their end. If you re-try things again in a few moments, things might work" ) return except Exception as e: logger.warn("[ERROR] Error returned from CV: %s" % e) return else: return dom def getComic( comicid, type, issueid=None, arc=None, arcid=None, arclist=None, comicidlist=None ): if type == "issue": offset = 1 issue = {} ndic = [] issuechoice = [] comicResults = [] firstdate = "2099-00-00" # let's find out how many results we get from the query... if comicid is None: # if comicid is None, it's coming from the story arc search results. id = arcid # since the arclist holds the issueids, and the pertinent reading order - we need to strip out the reading order so this works. aclist = "" if arclist.startswith("M"): islist = arclist[1:] else: for ac in arclist.split("|"): aclist += ac[: ac.find(",")] + "|" if aclist.endswith("|"): aclist = aclist[:-1] islist = aclist else: id = comicid islist = None searched = pulldetails(id, "issue", None, 0, islist) if searched is None: return False totalResults = searched.getElementsByTagName("number_of_total_results")[ 0 ].firstChild.wholeText logger.fdebug("there are " + str(totalResults) + " search results...") if not totalResults: return False countResults = 0 while countResults < int(totalResults): logger.fdebug( "querying range from " + str(countResults) + " to " + str(countResults + 100) ) if countResults > 0: # new api - have to change to page # instead of offset count offsetcount = countResults searched = pulldetails(id, "issue", None, offsetcount, islist) issuechoice, tmpdate = GetIssuesInfo(id, searched, arcid) if tmpdate < firstdate: firstdate = tmpdate ndic = ndic + issuechoice # search results are limited to 100 and by pagination now...let's account for this. countResults = countResults + 100 issue["issuechoice"] = ndic issue["firstdate"] = firstdate return issue elif type == "comic": dom = pulldetails(comicid, "comic", None, 1) return GetComicInfo(comicid, dom) elif any([type == "image", type == "firstissue"]): dom = pulldetails(comicid, type, issueid, 1) return Getissue(issueid, dom, type) elif type == "storyarc": dom = pulldetails(arc, "storyarc", None, 1) return GetComicInfo(issueid, dom) elif type == "comicyears": # used by the story arc searcher when adding a given arc to poll each ComicID in order to populate the Series Year & volume (hopefully). # this grabs each issue based on issueid, and then subsets the comicid for each to be used later. # set the offset to 0, since we're doing a filter. dom = pulldetails(arcid, "comicyears", offset=0, comicidlist=comicidlist) return GetSeriesYears(dom) elif type == "import": # used by the importer when doing a scan with metatagging enabled. If metatagging comes back true, then there's an IssueID present # within the tagging (with CT). This compiles all of the IssueID's during a scan (in 100's), and returns the corresponding CV data # related to the given IssueID's - namely ComicID, Name, Volume (more at some point, but those are the important ones). offset = 1 id_count = 0 import_list = [] logger.fdebug("comicidlist:" + str(comicidlist)) while id_count < len(comicidlist): # break it up by 100 per api hit # do the first 100 regardless in_cnt = 0 if id_count + 100 <= len(comicidlist): endcnt = id_count + 100 else: endcnt = len(comicidlist) for i in range(id_count, endcnt): if in_cnt == 0: tmpidlist = str(comicidlist[i]) else: tmpidlist += "|" + str(comicidlist[i]) in_cnt += 1 logger.fdebug("tmpidlist: " + str(tmpidlist)) searched = pulldetails(None, "import", offset=0, comicidlist=tmpidlist) if searched is None: break else: tGIL = GetImportList(searched) import_list += tGIL id_count += 100 return import_list elif type == "update_dates": dom = pulldetails(None, "update_dates", offset=1, comicidlist=comicidlist) return UpdateDates(dom) def GetComicInfo(comicid, dom, safechk=None): if safechk is None: # safetycheck when checking comicvine. If it times out, increment the chk on retry attempts up until 5 tries then abort. safechk = 1 elif safechk > 4: logger.error( "Unable to add / refresh the series due to inablity to retrieve data from ComicVine. You might want to try abit later and/or make sure ComicVine is up." ) return # comicvine isn't as up-to-date with issue counts.. # so this can get really buggered, really fast. tracks = dom.getElementsByTagName("issue") try: cntit = dom.getElementsByTagName("count_of_issues")[0].firstChild.wholeText except: cntit = len(tracks) trackcnt = len(tracks) logger.fdebug("number of issues I counted: " + str(trackcnt)) logger.fdebug("number of issues CV says it has: " + str(cntit)) # if the two don't match, use trackcnt as count_of_issues might be not upto-date for some reason if int(trackcnt) != int(cntit): cntit = trackcnt vari = "yes" else: vari = "no" logger.fdebug("vari is set to: " + str(vari)) # if str(trackcnt) != str(int(cntit)+2): # cntit = int(cntit) + 1 comic = {} comicchoice = [] cntit = int(cntit) # retrieve the first xml tag (<tag>data</tag>) # that the parser finds with name tagName: # to return the parent name of the <name> node : dom.getElementsByTagName('name')[0].parentNode.nodeName # where [0] denotes the number of the name field(s) # where nodeName denotes the parentNode : ComicName = results, publisher = publisher, issues = issue try: names = len(dom.getElementsByTagName("name")) n = 0 comic[ "ComicPublisher" ] = "Unknown" # set this to a default value here so that it will carry through properly while n < names: if dom.getElementsByTagName("name")[n].parentNode.nodeName == "results": try: comic["ComicName"] = dom.getElementsByTagName("name")[ n ].firstChild.wholeText comic["ComicName"] = comic["ComicName"].rstrip() except: logger.error( "There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible AND that you have provided your OWN ComicVine API key." ) return elif dom.getElementsByTagName("name")[n].parentNode.nodeName == "publisher": try: comic["ComicPublisher"] = dom.getElementsByTagName("name")[ n ].firstChild.wholeText except: comic["ComicPublisher"] = "Unknown" n += 1 except: logger.warn( "Something went wrong retrieving from ComicVine. Ensure your API is up-to-date and that comicvine is accessible" ) return try: comic["ComicYear"] = dom.getElementsByTagName("start_year")[ 0 ].firstChild.wholeText except: comic["ComicYear"] = "0000" # safety check, cause you known, dufus'... if any([comic["ComicYear"][-1:] == "-", comic["ComicYear"][-1:] == "?"]): comic["ComicYear"] = comic["ComicYear"][:-1] try: comic["ComicURL"] = dom.getElementsByTagName("site_detail_url")[ trackcnt ].firstChild.wholeText except: # this should never be an exception. If it is, it's probably due to CV timing out - so let's sleep for abit then retry. logger.warn( "Unable to retrieve URL for volume. This is usually due to a timeout to CV, or going over the API. Retrying again in 10s." ) time.sleep(10) safechk += 1 GetComicInfo(comicid, dom, safechk) desdeck = 0 # the description field actually holds the Volume# - so let's grab it desc_soup = None try: descchunk = dom.getElementsByTagName("description")[0].firstChild.wholeText desc_soup = Soup(descchunk, "html.parser") desclinks = desc_soup.findAll("a") comic_desc = drophtml(descchunk) desdeck += 1 except: comic_desc = "None" # sometimes the deck has volume labels try: deckchunk = dom.getElementsByTagName("deck")[0].firstChild.wholeText comic_deck = deckchunk desdeck += 1 except: comic_deck = "None" # comic['ComicDescription'] = comic_desc try: comic["Aliases"] = dom.getElementsByTagName("aliases")[0].firstChild.wholeText comic["Aliases"] = re.sub("\n", "##", comic["Aliases"]).strip() if comic["Aliases"][-2:] == "##": comic["Aliases"] = comic["Aliases"][:-2] # logger.fdebug('Aliases: ' + str(aliases)) except: comic["Aliases"] = "None" comic["ComicVersion"] = "None" # noversion' # figure out if it's a print / digital edition. comic["Type"] = "None" if comic_deck != "None": if any( [ "print" in comic_deck.lower(), "digital" in comic_deck.lower(), "paperback" in comic_deck.lower(), "one shot" in re.sub("-", "", comic_deck.lower()).strip(), "hardcover" in comic_deck.lower(), ] ): if all( ["print" in comic_deck.lower(), "reprint" not in comic_deck.lower()] ): comic["Type"] = "Print" elif "digital" in comic_deck.lower(): comic["Type"] = "Digital" elif "paperback" in comic_deck.lower(): comic["Type"] = "TPB" elif "hardcover" in comic_deck.lower(): comic["Type"] = "HC" elif "oneshot" in re.sub("-", "", comic_deck.lower()).strip(): comic["Type"] = "One-Shot" else: comic["Type"] = "Print" if comic_desc != "None" and comic["Type"] == "None": if "print" in comic_desc[:60].lower() and all( [ "for the printed edition" not in comic_desc.lower(), "print edition can be found" not in comic_desc.lower(), "reprints" not in comic_desc.lower(), ] ): comic["Type"] = "Print" elif ( "digital" in comic_desc[:60].lower() and "digital edition can be found" not in comic_desc.lower() ): comic["Type"] = "Digital" elif ( all( [ "paperback" in comic_desc[:60].lower(), "paperback can be found" not in comic_desc.lower(), ] ) or "collects" in comic_desc[:60].lower() ): comic["Type"] = "TPB" elif ( "hardcover" in comic_desc[:60].lower() and "hardcover can be found" not in comic_desc.lower() ): comic["Type"] = "HC" elif any( [ "one-shot" in comic_desc[:60].lower(), "one shot" in comic_desc[:60].lower(), ] ) and any( [ "can be found" not in comic_desc.lower(), "following the" not in comic_desc.lower(), "after the" not in comic_desc.lower(), ] ): i = 0 comic["Type"] = "One-Shot" avoidwords = ["preceding", "after the", "following the"] while i < 2: if i == 0: cbd = "one-shot" elif i == 1: cbd = "one shot" tmp1 = comic_desc[:60].lower().find(cbd) if tmp1 != -1: for x in avoidwords: tmp2 = comic_desc[:tmp1].lower().find(x) if tmp2 != -1: logger.fdebug( "FAKE NEWS: caught incorrect reference to one-shot. Forcing to Print" ) comic["Type"] = "Print" i = 3 break i += 1 else: comic["Type"] = "Print" if all( [ comic_desc != "None", "trade paperback" in comic_desc[:30].lower(), "collecting" in comic_desc[:40].lower(), ] ): # ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145. first_collect = comic_desc.lower().find("collecting") # logger.info('first_collect: %s' % first_collect) # logger.info('comic_desc: %s' % comic_desc) # logger.info('desclinks: %s' % desclinks) issue_list = [] micdrop = [] if desc_soup is not None: # if it's point form bullets, ignore it cause it's not the current volume stuff. test_it = desc_soup.find("ul") if test_it: for x in test_it.findAll("li"): if any( [ "Next" in x.findNext(text=True), "Previous" in x.findNext(text=True), ] ): mic_check = x.find("a") micdrop.append(mic_check["data-ref-id"]) for fc in desclinks: try: fc_id = fc["data-ref-id"] except: continue if fc_id in micdrop: continue fc_name = fc.findNext(text=True) if fc_id.startswith("4000"): fc_cid = None fc_isid = fc_id iss_start = fc_name.find("#") issuerun = fc_name[iss_start:].strip() fc_name = fc_name[:iss_start].strip() elif fc_id.startswith("4050"): fc_cid = fc_id fc_isid = None issuerun = fc.next_sibling if issuerun is not None: lines = re.sub("[^0-9]", " ", issuerun).strip().split(" ") if len(lines) > 0: for x in sorted(lines, reverse=True): srchline = issuerun.rfind(x) if srchline != -1: try: if ( issuerun[srchline + len(x)] == "," or issuerun[srchline + len(x)] == "." or issuerun[srchline + len(x)] == " " ): issuerun = issuerun[: srchline + len(x)] break except Exception as e: # logger.warn('[ERROR] %s' % e) continue else: iss_start = fc_name.find("#") issuerun = fc_name[iss_start:].strip() fc_name = fc_name[:iss_start].strip() if issuerun.strip().endswith(".") or issuerun.strip().endswith(","): # logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1])) issuerun = issuerun.strip()[:-1] if issuerun.endswith(" and "): issuerun = issuerun[:-4].strip() elif issuerun.endswith(" and"): issuerun = issuerun[:-3].strip() else: continue # except: # pass issue_list.append( { "series": fc_name, "comicid": fc_cid, "issueid": fc_isid, "issues": issuerun, } ) # first_collect = cis logger.info("Collected issues in volume: %s" % issue_list) if len(issue_list) == 0: comic["Issue_List"] = "None" else: comic["Issue_List"] = issue_list else: comic["Issue_List"] = "None" while desdeck > 0: if desdeck == 1: if comic_desc == "None": comicDes = comic_deck[:30] else: # extract the first 60 characters comicDes = comic_desc[:60].replace("New 52", "") elif desdeck == 2: # extract the characters from the deck comicDes = comic_deck[:30].replace("New 52", "") else: break i = 0 while i < 2: if "volume" in comicDes.lower(): # found volume - let's grab it. v_find = comicDes.lower().find("volume") # arbitrarily grab the next 10 chars (6 for volume + 1 for space + 3 for the actual vol #) # increased to 10 to allow for text numbering (+5 max) # sometimes it's volume 5 and ocassionally it's fifth volume. if comicDes[v_find + 7 : comicDes.find(" ", v_find + 7)].isdigit(): comic["ComicVersion"] = re.sub( "[^0-9]", "", comicDes[v_find + 7 : comicDes.find(" ", v_find + 7)], ).strip() break elif i == 0: vfind = comicDes[v_find : v_find + 15] # if it's volume 5 format basenums = { "zero": "0", "one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10", "i": "1", "ii": "2", "iii": "3", "iv": "4", "v": "5", } logger.fdebug("volume X format - " + str(i) + ": " + vfind) else: vfind = comicDes[:v_find] # if it's fifth volume format basenums = { "zero": "0", "first": "1", "second": "2", "third": "3", "fourth": "4", "fifth": "5", "sixth": "6", "seventh": "7", "eighth": "8", "nineth": "9", "tenth": "10", "i": "1", "ii": "2", "iii": "3", "iv": "4", "v": "5", } logger.fdebug("X volume format - " + str(i) + ": " + vfind) volconv = "" for nums in basenums: if nums in vfind.lower(): sconv = basenums[nums] vfind = re.sub(nums, sconv, vfind.lower()) break # logger.info('volconv: ' + str(volconv)) # now we attempt to find the character position after the word 'volume' if i == 0: volthis = vfind.lower().find("volume") volthis = ( volthis + 6 ) # add on the actual word to the position so that we can grab the subsequent digit vfind = vfind[ volthis : volthis + 4 ] # grab the next 4 characters ;) elif i == 1: volthis = vfind.lower().find("volume") vfind = vfind[ volthis - 4 : volthis ] # grab the next 4 characters ;) if "(" in vfind: # bracket detected in versioning' vfindit = re.findall("[^()]+", vfind) vfind = vfindit[0] vf = re.findall("[^<>]+", vfind) try: ledigit = re.sub("[^0-9]", "", vf[0]) if ledigit != "": comic["ComicVersion"] = ledigit logger.fdebug( "Volume information found! Adding to series record : volume " + comic["ComicVersion"] ) break except: pass i += 1 else: i += 1 if comic["ComicVersion"] == "None": logger.fdebug("comic[ComicVersion]:" + str(comic["ComicVersion"])) desdeck -= 1 else: break if vari == "yes": comic["ComicIssues"] = str(cntit) else: comic["ComicIssues"] = dom.getElementsByTagName("count_of_issues")[ 0 ].firstChild.wholeText comic["ComicImage"] = dom.getElementsByTagName("super_url")[0].firstChild.wholeText comic["ComicImageALT"] = dom.getElementsByTagName("small_url")[ 0 ].firstChild.wholeText comic["FirstIssueID"] = dom.getElementsByTagName("id")[0].firstChild.wholeText # logger.info('comic: %s' % comic) return comic def GetIssuesInfo(comicid, dom, arcid=None): subtracks = dom.getElementsByTagName("issue") if not mylar.CONFIG.CV_ONLY: cntiss = dom.getElementsByTagName("count_of_issues")[0].firstChild.wholeText logger.fdebug("issues I've counted: " + str(len(subtracks))) logger.fdebug("issues CV says it has: " + str(int(cntiss))) if int(len(subtracks)) != int(cntiss): logger.fdebug( "CV's count is wrong, I counted different...going with my count for physicals" + str(len(subtracks)) ) cntiss = len( subtracks ) # assume count of issues is wrong, go with ACTUAL physical api count cntiss = int(cntiss) n = cntiss - 1 else: n = int(len(subtracks)) tempissue = {} issuech = [] firstdate = "2099-00-00" for subtrack in subtracks: if not mylar.CONFIG.CV_ONLY: if (dom.getElementsByTagName("name")[n].firstChild) is not None: issue["Issue_Name"] = dom.getElementsByTagName("name")[ n ].firstChild.wholeText else: issue["Issue_Name"] = "None" issue["Issue_ID"] = dom.getElementsByTagName("id")[n].firstChild.wholeText issue["Issue_Number"] = dom.getElementsByTagName("issue_number")[ n ].firstChild.wholeText issuech.append( { "Issue_ID": issue["Issue_ID"], "Issue_Number": issue["Issue_Number"], "Issue_Name": issue["Issue_Name"], } ) else: try: totnames = len(subtrack.getElementsByTagName("name")) tot = 0 while tot < totnames: if ( subtrack.getElementsByTagName("name")[tot].parentNode.nodeName == "volume" ): tempissue["ComicName"] = subtrack.getElementsByTagName("name")[ tot ].firstChild.wholeText elif ( subtrack.getElementsByTagName("name")[tot].parentNode.nodeName == "issue" ): try: tempissue["Issue_Name"] = subtrack.getElementsByTagName( "name" )[tot].firstChild.wholeText except: tempissue["Issue_Name"] = None tot += 1 except: tempissue["ComicName"] = "None" try: totids = len(subtrack.getElementsByTagName("id")) idt = 0 while idt < totids: if ( subtrack.getElementsByTagName("id")[idt].parentNode.nodeName == "volume" ): tempissue["Comic_ID"] = subtrack.getElementsByTagName("id")[ idt ].firstChild.wholeText elif ( subtrack.getElementsByTagName("id")[idt].parentNode.nodeName == "issue" ): tempissue["Issue_ID"] = subtrack.getElementsByTagName("id")[ idt ].firstChild.wholeText idt += 1 except: tempissue["Issue_Name"] = "None" try: tempissue["CoverDate"] = subtrack.getElementsByTagName("cover_date")[ 0 ].firstChild.wholeText except: tempissue["CoverDate"] = "0000-00-00" try: tempissue["StoreDate"] = subtrack.getElementsByTagName("store_date")[ 0 ].firstChild.wholeText except: tempissue["StoreDate"] = "0000-00-00" try: digital_desc = subtrack.getElementsByTagName("description")[ 0 ].firstChild.wholeText except: tempissue["DigitalDate"] = "0000-00-00" else: tempissue["DigitalDate"] = "0000-00-00" if all( [ "digital" in digital_desc.lower()[-90:], "print" in digital_desc.lower()[-90:], ] ): # get the digital date of issue here... mff = mylar.filechecker.FileChecker() vlddate = mff.checkthedate(digital_desc[-90:], fulldate=True) # logger.fdebug('vlddate: %s' % vlddate) if vlddate: tempissue["DigitalDate"] = vlddate try: tempissue["Issue_Number"] = subtrack.getElementsByTagName( "issue_number" )[0].firstChild.wholeText except: logger.fdebug( "No Issue Number available - Trade Paperbacks, Graphic Novels and Compendiums are not supported as of yet." ) try: tempissue["ComicImage"] = subtrack.getElementsByTagName("small_url")[ 0 ].firstChild.wholeText except: tempissue["ComicImage"] = "None" try: tempissue["ComicImageALT"] = subtrack.getElementsByTagName( "medium_url" )[0].firstChild.wholeText except: tempissue["ComicImageALT"] = "None" if arcid is None: issuech.append( { "Comic_ID": comicid, "Issue_ID": tempissue["Issue_ID"], "Issue_Number": tempissue["Issue_Number"], "Issue_Date": tempissue["CoverDate"], "Store_Date": tempissue["StoreDate"], "Digital_Date": tempissue["DigitalDate"], "Issue_Name": tempissue["Issue_Name"], "Image": tempissue["ComicImage"], "ImageALT": tempissue["ComicImageALT"], } ) else: issuech.append( { "ArcID": arcid, "ComicName": tempissue["ComicName"], "ComicID": tempissue["Comic_ID"], "IssueID": tempissue["Issue_ID"], "Issue_Number": tempissue["Issue_Number"], "Issue_Date": tempissue["CoverDate"], "Store_Date": tempissue["StoreDate"], "Digital_Date": tempissue["DigitalDate"], "Issue_Name": tempissue["Issue_Name"], } ) if ( tempissue["CoverDate"] < firstdate and tempissue["CoverDate"] != "0000-00-00" ): firstdate = tempissue["CoverDate"] n -= 1 # logger.fdebug('issue_info: %s' % issuech) # issue['firstdate'] = firstdate return issuech, firstdate def Getissue(issueid, dom, type): # if the Series Year doesn't exist, get the first issue and take the date from that if type == "firstissue": try: first_year = dom.getElementsByTagName("cover_date")[0].firstChild.wholeText except: first_year = "0000" return first_year the_year = first_year[:4] the_month = first_year[5:7] the_date = the_year + "-" + the_month return the_year else: try: image = dom.getElementsByTagName("super_url")[0].firstChild.wholeText except: image = None try: image_alt = dom.getElementsByTagName("small_url")[0].firstChild.wholeText except: image_alt = None return {"image": image, "image_alt": image_alt} def GetSeriesYears(dom): # used by the 'add a story arc' option to individually populate the Series Year for each series within the given arc. # series year is required for alot of functionality. series = dom.getElementsByTagName("volume") tempseries = {} serieslist = [] for dm in series: try: totids = len(dm.getElementsByTagName("id")) idc = 0 while idc < totids: if dm.getElementsByTagName("id")[idc].parentNode.nodeName == "volume": tempseries["ComicID"] = dm.getElementsByTagName("id")[ idc ].firstChild.wholeText idc += 1 except: logger.warn( "There was a problem retrieving a comicid for a series within the arc. This will have to manually corrected most likely." ) tempseries["ComicID"] = "None" tempseries["Series"] = "None" tempseries["Publisher"] = "None" try: totnames = len(dm.getElementsByTagName("name")) namesc = 0 while namesc < totnames: if ( dm.getElementsByTagName("name")[namesc].parentNode.nodeName == "volume" ): tempseries["Series"] = dm.getElementsByTagName("name")[ namesc ].firstChild.wholeText elif ( dm.getElementsByTagName("name")[namesc].parentNode.nodeName == "publisher" ): tempseries["Publisher"] = dm.getElementsByTagName("name")[ namesc ].firstChild.wholeText namesc += 1 except: logger.warn( "There was a problem retrieving a Series Name or Publisher for a series within the arc. This will have to manually corrected." ) try: tempseries["SeriesYear"] = dm.getElementsByTagName("start_year")[ 0 ].firstChild.wholeText except: logger.warn( "There was a problem retrieving the start year for a particular series within the story arc." ) tempseries["SeriesYear"] = "0000" # cause you know, dufus'... if tempseries["SeriesYear"][-1:] == "-": tempseries["SeriesYear"] = tempseries["SeriesYear"][:-1] desdeck = 0 # the description field actually holds the Volume# - so let's grab it desc_soup = None try: descchunk = dm.getElementsByTagName("description")[0].firstChild.wholeText desc_soup = Soup(descchunk, "html.parser") desclinks = desc_soup.findAll("a") comic_desc = drophtml(descchunk) desdeck += 1 except: comic_desc = "None" # sometimes the deck has volume labels try: deckchunk = dm.getElementsByTagName("deck")[0].firstChild.wholeText comic_deck = deckchunk desdeck += 1 except: comic_deck = "None" # comic['ComicDescription'] = comic_desc try: tempseries["Aliases"] = dm.getElementsByTagName("aliases")[ 0 ].firstChild.wholeText tempseries["Aliases"] = re.sub("\n", "##", tempseries["Aliases"]).strip() if tempseries["Aliases"][-2:] == "##": tempseries["Aliases"] = tempseries["Aliases"][:-2] # logger.fdebug('Aliases: ' + str(aliases)) except: tempseries["Aliases"] = "None" tempseries["Volume"] = "None" # noversion' # figure out if it's a print / digital edition. tempseries["Type"] = "None" if comic_deck != "None": if any( [ "print" in comic_deck.lower(), "digital" in comic_deck.lower(), "paperback" in comic_deck.lower(), "one shot" in re.sub("-", "", comic_deck.lower()).strip(), "hardcover" in comic_deck.lower(), ] ): if "print" in comic_deck.lower(): tempseries["Type"] = "Print" elif "digital" in comic_deck.lower(): tempseries["Type"] = "Digital" elif "paperback" in comic_deck.lower(): tempseries["Type"] = "TPB" elif "hardcover" in comic_deck.lower(): tempseries["Type"] = "HC" elif "oneshot" in re.sub("-", "", comic_deck.lower()).strip(): tempseries["Type"] = "One-Shot" if comic_desc != "None" and tempseries["Type"] == "None": if ( "print" in comic_desc[:60].lower() and "print edition can be found" not in comic_desc.lower() ): tempseries["Type"] = "Print" elif ( "digital" in comic_desc[:60].lower() and "digital edition can be found" not in comic_desc.lower() ): tempseries["Type"] = "Digital" elif ( all( [ "paperback" in comic_desc[:60].lower(), "paperback can be found" not in comic_desc.lower(), ] ) or "collects" in comic_desc[:60].lower() ): tempseries["Type"] = "TPB" elif ( "hardcover" in comic_desc[:60].lower() and "hardcover can be found" not in comic_desc.lower() ): tempseries["Type"] = "HC" elif any( [ "one-shot" in comic_desc[:60].lower(), "one shot" in comic_desc[:60].lower(), ] ) and any( [ "can be found" not in comic_desc.lower(), "following the" not in comic_desc.lower(), ] ): i = 0 tempseries["Type"] = "One-Shot" avoidwords = ["preceding", "after the special", "following the"] while i < 2: if i == 0: cbd = "one-shot" elif i == 1: cbd = "one shot" tmp1 = comic_desc[:60].lower().find(cbd) if tmp1 != -1: for x in avoidwords: tmp2 = comic_desc[:tmp1].lower().find(x) if tmp2 != -1: logger.fdebug( "FAKE NEWS: caught incorrect reference to one-shot. Forcing to Print" ) tempseries["Type"] = "Print" i = 3 break i += 1 else: tempseries["Type"] = "Print" if all( [ comic_desc != "None", "trade paperback" in comic_desc[:30].lower(), "collecting" in comic_desc[:40].lower(), ] ): # ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145. first_collect = comic_desc.lower().find("collecting") # logger.info('first_collect: %s' % first_collect) # logger.info('comic_desc: %s' % comic_desc) # logger.info('desclinks: %s' % desclinks) issue_list = [] micdrop = [] if desc_soup is not None: # if it's point form bullets, ignore it cause it's not the current volume stuff. test_it = desc_soup.find("ul") if test_it: for x in test_it.findAll("li"): if any( [ "Next" in x.findNext(text=True), "Previous" in x.findNext(text=True), ] ): mic_check = x.find("a") micdrop.append(mic_check["data-ref-id"]) for fc in desclinks: # logger.info('fc: %s' % fc) fc_id = fc["data-ref-id"] # logger.info('fc_id: %s' % fc_id) if fc_id in micdrop: continue fc_name = fc.findNext(text=True) if fc_id.startswith("4000"): fc_cid = None fc_isid = fc_id iss_start = fc_name.find("#") issuerun = fc_name[iss_start:].strip() fc_name = fc_name[:iss_start].strip() elif fc_id.startswith("4050"): fc_cid = fc_id fc_isid = None issuerun = fc.next_sibling if issuerun is not None: lines = re.sub("[^0-9]", " ", issuerun).strip().split(" ") if len(lines) > 0: for x in sorted(lines, reverse=True): srchline = issuerun.rfind(x) if srchline != -1: try: if ( issuerun[srchline + len(x)] == "," or issuerun[srchline + len(x)] == "." or issuerun[srchline + len(x)] == " " ): issuerun = issuerun[: srchline + len(x)] break except Exception as e: logger.warn("[ERROR] %s" % e) continue else: iss_start = fc_name.find("#") issuerun = fc_name[iss_start:].strip() fc_name = fc_name[:iss_start].strip() if issuerun.endswith(".") or issuerun.endswith(","): # logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1])) issuerun = issuerun[:-1] if issuerun.endswith(" and "): issuerun = issuerun[:-4].strip() elif issuerun.endswith(" and"): issuerun = issuerun[:-3].strip() else: continue # except: # pass issue_list.append( { "series": fc_name, "comicid": fc_cid, "issueid": fc_isid, "issues": issuerun, } ) # first_collect = cis logger.info("Collected issues in volume: %s" % issue_list) tempseries["Issue_List"] = issue_list else: tempseries["Issue_List"] = "None" while desdeck > 0: if desdeck == 1: if comic_desc == "None": comicDes = comic_deck[:30] else: # extract the first 60 characters comicDes = comic_desc[:60].replace("New 52", "") elif desdeck == 2: # extract the characters from the deck comicDes = comic_deck[:30].replace("New 52", "") else: break i = 0 while i < 2: if "volume" in comicDes.lower(): # found volume - let's grab it. v_find = comicDes.lower().find("volume") # arbitrarily grab the next 10 chars (6 for volume + 1 for space + 3 for the actual vol #) # increased to 10 to allow for text numbering (+5 max) # sometimes it's volume 5 and ocassionally it's fifth volume. if i == 0: vfind = comicDes[ v_find : v_find + 15 ] # if it's volume 5 format basenums = { "zero": "0", "one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10", "i": "1", "ii": "2", "iii": "3", "iv": "4", "v": "5", } logger.fdebug("volume X format - %s: %s" % (i, vfind)) else: vfind = comicDes[:v_find] # if it's fifth volume format basenums = { "zero": "0", "first": "1", "second": "2", "third": "3", "fourth": "4", "fifth": "5", "sixth": "6", "seventh": "7", "eighth": "8", "nineth": "9", "tenth": "10", "i": "1", "ii": "2", "iii": "3", "iv": "4", "v": "5", } logger.fdebug("X volume format - %s: %s" % (i, vfind)) volconv = "" for nums in basenums: if nums in vfind.lower(): sconv = basenums[nums] vfind = re.sub(nums, sconv, vfind.lower()) break # logger.info('volconv: ' + str(volconv)) # now we attempt to find the character position after the word 'volume' if i == 0: volthis = vfind.lower().find("volume") volthis = ( volthis + 6 ) # add on the actual word to the position so that we can grab the subsequent digit vfind = vfind[ volthis : volthis + 4 ] # grab the next 4 characters ;) elif i == 1: volthis = vfind.lower().find("volume") vfind = vfind[ volthis - 4 : volthis ] # grab the next 4 characters ;) if "(" in vfind: # bracket detected in versioning' vfindit = re.findall("[^()]+", vfind) vfind = vfindit[0] vf = re.findall("[^<>]+", vfind) try: ledigit = re.sub("[^0-9]", "", vf[0]) if ledigit != "": tempseries["Volume"] = ledigit logger.fdebug( "Volume information found! Adding to series record : volume %s" % tempseries["Volume"] ) break except: pass i += 1 else: i += 1 if tempseries["Volume"] == "None": logger.fdebug("tempseries[Volume]: %s" % tempseries["Volume"]) desdeck -= 1 else: break serieslist.append( { "ComicID": tempseries["ComicID"], "ComicName": tempseries["Series"], "SeriesYear": tempseries["SeriesYear"], "Publisher": tempseries["Publisher"], "Volume": tempseries["Volume"], "Aliases": tempseries["Aliases"], "Type": tempseries["Type"], } ) return serieslist def UpdateDates(dom): issues = dom.getElementsByTagName("issue") tempissue = {} issuelist = [] for dm in issues: tempissue["ComicID"] = "None" tempissue["IssueID"] = "None" try: totids = len(dm.getElementsByTagName("id")) idc = 0 while idc < totids: if dm.getElementsByTagName("id")[idc].parentNode.nodeName == "volume": tempissue["ComicID"] = dm.getElementsByTagName("id")[ idc ].firstChild.wholeText if dm.getElementsByTagName("id")[idc].parentNode.nodeName == "issue": tempissue["IssueID"] = dm.getElementsByTagName("id")[ idc ].firstChild.wholeText idc += 1 except: logger.warn( "There was a problem retrieving a comicid/issueid for the given issue. This will have to manually corrected most likely." ) tempissue["SeriesTitle"] = "None" tempissue["IssueTitle"] = "None" try: totnames = len(dm.getElementsByTagName("name")) namesc = 0 while namesc < totnames: if ( dm.getElementsByTagName("name")[namesc].parentNode.nodeName == "issue" ): tempissue["IssueTitle"] = dm.getElementsByTagName("name")[ namesc ].firstChild.wholeText elif ( dm.getElementsByTagName("name")[namesc].parentNode.nodeName == "volume" ): tempissue["SeriesTitle"] = dm.getElementsByTagName("name")[ namesc ].firstChild.wholeText namesc += 1 except: logger.warn( "There was a problem retrieving the Series Title / Issue Title for a series within the arc. This will have to manually corrected." ) try: tempissue["CoverDate"] = dm.getElementsByTagName("cover_date")[ 0 ].firstChild.wholeText except: tempissue["CoverDate"] = "0000-00-00" try: tempissue["StoreDate"] = dm.getElementsByTagName("store_date")[ 0 ].firstChild.wholeText except: tempissue["StoreDate"] = "0000-00-00" try: tempissue["IssueNumber"] = dm.getElementsByTagName("issue_number")[ 0 ].firstChild.wholeText except: logger.fdebug( "No Issue Number available - Trade Paperbacks, Graphic Novels and Compendiums are not supported as of yet." ) tempissue["IssueNumber"] = "None" try: tempissue["date_last_updated"] = dm.getElementsByTagName( "date_last_updated" )[0].firstChild.wholeText except: tempissue["date_last_updated"] = "0000-00-00" issuelist.append( { "ComicID": tempissue["ComicID"], "IssueID": tempissue["IssueID"], "SeriesTitle": tempissue["SeriesTitle"], "IssueTitle": tempissue["IssueTitle"], "CoverDate": tempissue["CoverDate"], "StoreDate": tempissue["StoreDate"], "IssueNumber": tempissue["IssueNumber"], "Date_Last_Updated": tempissue["date_last_updated"], } ) return issuelist def GetImportList(results): importlist = results.getElementsByTagName("issue") serieslist = [] importids = {} tempseries = {} for implist in importlist: try: totids = len(implist.getElementsByTagName("id")) idt = 0 while idt < totids: if ( implist.getElementsByTagName("id")[idt].parentNode.nodeName == "volume" ): tempseries["ComicID"] = implist.getElementsByTagName("id")[ idt ].firstChild.wholeText elif ( implist.getElementsByTagName("id")[idt].parentNode.nodeName == "issue" ): tempseries["IssueID"] = implist.getElementsByTagName("id")[ idt ].firstChild.wholeText idt += 1 except: tempseries["ComicID"] = None try: totnames = len(implist.getElementsByTagName("name")) tot = 0 while tot < totnames: if ( implist.getElementsByTagName("name")[tot].parentNode.nodeName == "volume" ): tempseries["ComicName"] = implist.getElementsByTagName("name")[ tot ].firstChild.wholeText elif ( implist.getElementsByTagName("name")[tot].parentNode.nodeName == "issue" ): try: tempseries["Issue_Name"] = implist.getElementsByTagName("name")[ tot ].firstChild.wholeText except: tempseries["Issue_Name"] = None tot += 1 except: tempseries["ComicName"] = "None" try: tempseries["Issue_Number"] = implist.getElementsByTagName("issue_number")[ 0 ].firstChild.wholeText except: logger.fdebug( "No Issue Number available - Trade Paperbacks, Graphic Novels and Compendiums are not supported as of yet." ) logger.info("tempseries:" + str(tempseries)) serieslist.append( { "ComicID": tempseries["ComicID"], "IssueID": tempseries["IssueID"], "ComicName": tempseries["ComicName"], "Issue_Name": tempseries["Issue_Name"], "Issue_Number": tempseries["Issue_Number"], } ) return serieslist def drophtml(html): soup = Soup(html, "html.parser") text_parts = soup.findAll(text=True) # print ''.join(text_parts) return "".join(text_parts)
models
webhook
import json import logging import typing from json import JSONDecodeError import requests from apps.webhooks.utils import ( OUTGOING_WEBHOOK_TIMEOUT, InvalidWebhookData, InvalidWebhookHeaders, InvalidWebhookTrigger, InvalidWebhookUrl, apply_jinja_template_for_json, parse_url, ) from celery.utils.log import get_task_logger from common.jinja_templater import apply_jinja_template from common.jinja_templater.apply_jinja_template import ( JinjaTemplateError, JinjaTemplateWarning, ) from common.public_primary_keys import ( generate_public_primary_key, increase_public_primary_key_length, ) from django.conf import settings from django.core.validators import MinLengthValidator from django.db import models from django.db.models import F from django.utils import timezone from mirage import fields as mirage_fields from requests.auth import HTTPBasicAuth if typing.TYPE_CHECKING: from apps.alerts.models import EscalationPolicy from django.db.models.manager import RelatedManager WEBHOOK_FIELD_PLACEHOLDER = "****************" PUBLIC_WEBHOOK_HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "OPTIONS"] logger = get_task_logger(__name__) logger.setLevel(logging.DEBUG) def generate_public_primary_key_for_webhook(): prefix = "WH" new_public_primary_key = generate_public_primary_key(prefix) failure_counter = 0 while Webhook.objects.filter(public_primary_key=new_public_primary_key).exists(): new_public_primary_key = increase_public_primary_key_length( failure_counter=failure_counter, prefix=prefix, model_name="Webhook" ) failure_counter += 1 return new_public_primary_key class WebhookQueryset(models.QuerySet): def delete(self): self.update( deleted_at=timezone.now(), name=F("name") + "_deleted_" + F("public_primary_key"), ) class WebhookManager(models.Manager): def get_queryset(self): return WebhookQueryset(self.model, using=self._db).filter(deleted_at=None) def hard_delete(self): return self.get_queryset().hard_delete() class Webhook(models.Model): escalation_policies: "RelatedManager['EscalationPolicy']" objects = WebhookManager() objects_with_deleted = models.Manager() ( TRIGGER_ESCALATION_STEP, TRIGGER_ALERT_GROUP_CREATED, TRIGGER_ACKNOWLEDGE, TRIGGER_RESOLVE, TRIGGER_SILENCE, TRIGGER_UNSILENCE, TRIGGER_UNRESOLVE, TRIGGER_UNACKNOWLEDGE, ) = range(8) # Must be the same order as previous TRIGGER_TYPES = ( (TRIGGER_ESCALATION_STEP, "Escalation step"), (TRIGGER_ALERT_GROUP_CREATED, "Alert Group Created"), (TRIGGER_ACKNOWLEDGE, "Acknowledged"), (TRIGGER_RESOLVE, "Resolved"), (TRIGGER_SILENCE, "Silenced"), (TRIGGER_UNSILENCE, "Unsilenced"), (TRIGGER_UNRESOLVE, "Unresolved"), (TRIGGER_UNACKNOWLEDGE, "Unacknowledged"), ) ALL_TRIGGER_TYPES = [i[0] for i in TRIGGER_TYPES] PUBLIC_TRIGGER_TYPES_MAP = { TRIGGER_ESCALATION_STEP: "escalation", TRIGGER_ALERT_GROUP_CREATED: "alert group created", TRIGGER_ACKNOWLEDGE: "acknowledge", TRIGGER_RESOLVE: "resolve", TRIGGER_SILENCE: "silence", TRIGGER_UNSILENCE: "unsilence", TRIGGER_UNRESOLVE: "unresolve", TRIGGER_UNACKNOWLEDGE: "unacknowledge", } PUBLIC_ALL_TRIGGER_TYPES = [i for i in PUBLIC_TRIGGER_TYPES_MAP.values()] public_primary_key = models.CharField( max_length=20, validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)], unique=True, default=generate_public_primary_key_for_webhook, ) organization = models.ForeignKey( "user_management.Organization", null=True, on_delete=models.CASCADE, related_name="webhooks", default=None, ) team = models.ForeignKey( "user_management.Team", null=True, on_delete=models.CASCADE, related_name="webhooks", default=None, ) user = models.ForeignKey( "user_management.User", null=True, on_delete=models.CASCADE, related_name="webhooks", default=None, ) created_at = models.DateTimeField(auto_now_add=True) deleted_at = models.DateTimeField(blank=True, null=True) name = models.CharField(max_length=100, null=True, default=None) username = models.CharField(max_length=100, null=True, default=None) password = mirage_fields.EncryptedCharField( max_length=1000, null=True, default=None ) authorization_header = mirage_fields.EncryptedCharField( max_length=2000, null=True, default=None ) trigger_template = models.TextField(null=True, default=None) headers = models.TextField(null=True, default=None) url = models.TextField(null=True, default=None) data = models.TextField(null=True, default=None) forward_all = models.BooleanField(default=True) http_method = models.CharField(max_length=32, default="POST", null=True) trigger_type = models.IntegerField( choices=TRIGGER_TYPES, default=TRIGGER_ESCALATION_STEP, null=True ) is_webhook_enabled = models.BooleanField(null=True, default=True) integration_filter = models.JSONField(default=None, null=True, blank=True) is_legacy = models.BooleanField(null=True, default=False) preset = models.CharField(max_length=100, null=True, blank=True, default=None) class Meta: unique_together = ("name", "organization") def __str__(self): return str(self.name) def delete(self): # TODO: delete related escalation policies on delete, once implemented # self.escalation_policies.all().delete() self.deleted_at = timezone.now() # 100 - 22 = 78. 100 is max len of name field, and 22 is len of suffix _deleted_<public_primary_key> # So for case when user created an entry with maximum length name it is needed to trim it to 78 chars # to be able to add suffix. self.name = f"{self.name[:78]}_deleted_{self.public_primary_key}" self.save() def hard_delete(self): super().delete() def build_request_kwargs(self, event_data, raise_data_errors=False): request_kwargs = {} if self.username and self.password: request_kwargs["auth"] = HTTPBasicAuth(self.username, self.password) request_kwargs["headers"] = {} if self.headers: try: rendered_headers = apply_jinja_template_for_json( self.headers, event_data, ) request_kwargs["headers"] = json.loads(rendered_headers) except (JinjaTemplateError, JinjaTemplateWarning) as e: raise InvalidWebhookHeaders(e.fallback_message) except JSONDecodeError: raise InvalidWebhookHeaders("Template did not result in json/dict") if self.authorization_header: request_kwargs["headers"]["Authorization"] = self.authorization_header if self.http_method in ["POST", "PUT"]: if self.forward_all: request_kwargs["json"] = event_data if self.is_legacy: request_kwargs["json"] = event_data["alert_payload"] elif self.data: context_data = event_data if self.is_legacy: context_data = { "alert_payload": event_data.get("alert_payload", {}), "alert_group_id": event_data.get("alert_group_id"), } try: rendered_data = apply_jinja_template_for_json( self.data, context_data, ) try: request_kwargs["json"] = json.loads(rendered_data) except (JSONDecodeError, TypeError): request_kwargs["data"] = rendered_data except (JinjaTemplateError, JinjaTemplateWarning) as e: if raise_data_errors: raise InvalidWebhookData(e.fallback_message) else: request_kwargs["json"] = {"error": e.fallback_message} return request_kwargs def build_url(self, event_data): try: url = apply_jinja_template( self.url, **event_data, ) except (JinjaTemplateError, JinjaTemplateWarning) as e: raise InvalidWebhookUrl(e.fallback_message) # raise if URL is not valid parse_url(url) return url def check_integration_filter(self, alert_group): if not self.integration_filter: return True return alert_group.channel.public_primary_key in self.integration_filter def check_trigger(self, event_data): if not self.trigger_template: return True, "" try: result = apply_jinja_template(self.trigger_template, **event_data) return result.lower() in ["true", "1"], result except (JinjaTemplateError, JinjaTemplateWarning) as e: raise InvalidWebhookTrigger(e.fallback_message) def make_request(self, url, request_kwargs): if self.http_method == "GET": r = requests.get(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs) elif self.http_method == "POST": r = requests.post(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs) elif self.http_method == "PUT": r = requests.put(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs) elif self.http_method == "DELETE": r = requests.delete(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs) elif self.http_method == "OPTIONS": r = requests.options( url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs ) else: raise Exception(f"Unsupported http method: {self.http_method}") return r # Insight logs @property def insight_logs_type_verbal(self): return "webhook" @property def insight_logs_verbal(self): return self.name def _insight_log_team(self): result = {"team": "General"} if self.team: result["team"] = self.team.name result["team_id"] = self.team.public_primary_key return result @property def insight_logs_serialized(self): result = { "name": self.name, "trigger_type": self.trigger_type, "url": self.url, "data": self.data, "forward_all": self.forward_all, } result.update(self._insight_log_team()) return result @property def insight_logs_metadata(self): result = {} result.update(self._insight_log_team()) return result class WebhookResponse(models.Model): alert_group = models.ForeignKey( "alerts.AlertGroup", on_delete=models.CASCADE, null=True, related_name="webhook_responses", ) webhook = models.ForeignKey( "webhooks.Webhook", on_delete=models.SET_NULL, null=True, related_name="responses", ) trigger_type = models.IntegerField(choices=Webhook.TRIGGER_TYPES) timestamp = models.DateTimeField(default=timezone.now) request_trigger = models.TextField(null=True, default=None) request_headers = models.TextField(null=True, default=None) request_data = models.TextField(null=True, default=None) url = models.TextField(null=True, default=None) status_code = models.IntegerField(default=None, null=True) content = models.TextField(null=True, default=None) event_data = models.TextField(null=True, default=None) def json(self): if self.content: return json.loads(self.content)
core
interpolate
"""Code used to automatically complete postings without positions. """ __copyright__ = "Copyright (C) 2014-2017 Martin Blais" __license__ = "GNU GPLv2" import collections import copy from decimal import Decimal from beancount.core import convert, getters, inventory from beancount.core.amount import Amount from beancount.core.data import Posting, Transaction from beancount.core.inventory import Inventory from beancount.core.number import MISSING, ONE, ZERO, D from beancount.core.position import Cost, CostSpec from beancount.utils import defdict # An upper bound on the tolerance value, this is the maximum the tolerance # should ever be. MAXIMUM_TOLERANCE = D("0.5") # The maximum number of user-specified coefficient digits we should allow for a # tolerance setting. MAX_TOLERANCE_DIGITS = 5 def is_tolerance_user_specified(tolerance): """Return true if the given tolerance number was user-specified. This would allow the user to provide a tolerance like # 0.1234 but not 0.123456. This is used to detect whether a tolerance value # is input by the user and not inferred automatically. Args: tolerance: An instance of Decimal. Returns: A boolean. """ return len(tolerance.as_tuple().digits) < MAX_TOLERANCE_DIGITS # An error from balancing the postings. BalanceError = collections.namedtuple("BalanceError", "source message entry") def has_nontrivial_balance(posting): """Return True if a Posting has a balance amount that would have to be calculated. Args: posting: A Posting instance. Returns: A boolean. """ return posting.cost or posting.price def compute_residual(postings): """Compute the residual of a set of complete postings, and the per-currency precision. This is used to cross-check a balanced transaction. The precision is the maximum fraction that is being used for each currency (a dict). We use the currency of the weight amount in order to infer the quantization precision for each currency. Integer amounts aren't contributing to the determination of precision. Args: postings: A list of Posting instances. Returns: An instance of Inventory, with the residual of the given list of postings. """ inventory = Inventory() for posting in postings: # Skip auto-postings inserted to absorb the residual (rounding error). if posting.meta and posting.meta.get(AUTOMATIC_RESIDUAL, False): continue # Add to total residual balance. inventory.add_amount(convert.get_weight(posting)) return inventory def infer_tolerances(postings, options_map, use_cost=None): """Infer tolerances from a list of postings. The tolerance is the maximum fraction that is being used for each currency (a dict). We use the currency of the weight amount in order to infer the quantization precision for each currency. Integer amounts aren't contributing to the determination of precision. The 'use_cost' option allows one to experiment with letting postings at cost and at price influence the maximum value of the tolerance. It's tricky to use and alters the definition of the tolerance in a non-trivial way, if you use it. The tolerance is expanded by the sum of the cost times a fraction 'M' of the smallest digits in the number of units for all postings held at cost. For example, in this transaction: 2006-01-17 * "Plan Contribution" Assets:Investments:VWELX 18.572 VWELX {30.96 USD} Assets:Investments:VWELX 18.572 VWELX {30.96 USD} Assets:Investments:Cash -1150.00 USD The tolerance for units of USD will calculated as the MAXIMUM of: 0.01 * M = 0.005 (from the 1150.00 USD leg) The sum of 0.001 * M x 30.96 = 0.01548 + 0.001 * M x 30.96 = 0.01548 = 0.03096 So the tolerance for USD in this case is max(0.005, 0.03096) = 0.03096. Prices contribute similarly to the maximum tolerance allowed. Note that 'M' above is the inferred_tolerance_multiplier and its default value is 0.5. Args: postings: A list of Posting instances. options_map: A dict of options. use_cost: A boolean, true if we should be using a combination of the smallest digit of the number times the cost or price in order to infer the tolerance. If the value is left unspecified (as 'None'), the default value can be overridden by setting an option. Returns: A dict of currency to the tolerated difference amount to be used for it, e.g. 0.005. """ if use_cost is None: use_cost = options_map["infer_tolerance_from_cost"] inferred_tolerance_multiplier = options_map["inferred_tolerance_multiplier"] default_tolerances = options_map["inferred_tolerance_default"] tolerances = default_tolerances.copy() cost_tolerances = collections.defaultdict(D) for posting in postings: # Skip the precision on automatically inferred postings. if posting.meta and AUTOMATIC_META in posting.meta: continue units = posting.units if not (isinstance(units, Amount) and isinstance(units.number, Decimal)): continue # Compute bounds on the number. currency = units.currency expo = units.number.as_tuple().exponent if expo < 0: # Note: the exponent is a negative value. tolerance = ONE.scaleb(expo) * inferred_tolerance_multiplier tolerances[currency] = max(tolerance, tolerances.get(currency, -1024)) if not use_cost: continue # Compute bounds on the smallest digit of the number implied as cost. cost = posting.cost if cost is not None: cost_currency = cost.currency if isinstance(cost, Cost): cost_tolerance = min(tolerance * cost.number, MAXIMUM_TOLERANCE) else: assert isinstance(cost, CostSpec) cost_tolerance = MAXIMUM_TOLERANCE for cost_number in cost.number_total, cost.number_per: if cost_number is None or cost_number is MISSING: continue cost_tolerance = min(tolerance * cost_number, cost_tolerance) cost_tolerances[cost_currency] += cost_tolerance # Compute bounds on the smallest digit of the number implied as cost. price = posting.price if isinstance(price, Amount) and isinstance(price.number, Decimal): price_currency = price.currency price_tolerance = min(tolerance * price.number, MAXIMUM_TOLERANCE) cost_tolerances[price_currency] += price_tolerance for currency, tolerance in cost_tolerances.items(): tolerances[currency] = max(tolerance, tolerances.get(currency, -1024)) default = tolerances.pop("*", ZERO) return defdict.ImmutableDictWithDefault(tolerances, default=default) # Meta-data field appended to automatically inserted postings. # (Note: A better name might have been '__interpolated__'.) AUTOMATIC_META = "__automatic__" # Meta-data field appended to postings inserted to absorb rounding error. AUTOMATIC_RESIDUAL = "__residual__" # Meta-data field added for the tolerances inferred for this entry. AUTOMATIC_TOLERANCES = "__tolerances__" def get_residual_postings(residual, account_rounding): """Create postings to book the given residuals. Args: residual: An Inventory, the residual positions. account_rounding: A string, the name of the rounding account that absorbs residuals / rounding errors. Returns: A list of new postings to be inserted to reduce the given residual. """ meta = {AUTOMATIC_META: True, AUTOMATIC_RESIDUAL: True} return [ Posting(account_rounding, -position.units, position.cost, None, None, meta.copy()) for position in residual.get_positions() ] def fill_residual_posting(entry, account_rounding): """If necessary, insert a posting to absorb the residual. This makes the transaction balance exactly. Note: This was developed in order to tweak transactions before exporting them to Ledger. A better method would be to enable the feature that automatically inserts these rounding postings on all transactions, and so maybe this method can be deprecated if we do so. Args: entry: An instance of a Transaction. account_rounding: A string, the name of the rounding account that absorbs residuals / rounding errors. Returns: A possibly new, modified entry with a new posting. If a residual was not needed - the transaction already balanced perfectly - no new leg is inserted. """ residual = compute_residual(entry.postings) if not residual.is_empty(): new_postings = list(entry.postings) new_postings.extend(get_residual_postings(residual, account_rounding)) entry = entry._replace(postings=new_postings) return entry def compute_entries_balance(entries, prefix=None, date=None): """Compute the balance of all postings of a list of entries. Sum up all the positions in all the postings of all the transactions in the list of entries and return an inventory of it. Args: entries: A list of directives. prefix: If specified, a prefix string to restrict by account name. Only postings with an account that starts with this prefix will be summed up. date: A datetime.date instance at which to stop adding up the balance. The date is exclusive. Returns: An instance of Inventory. """ total_balance = Inventory() for entry in entries: if not (date is None or entry.date < date): break if isinstance(entry, Transaction): for posting in entry.postings: if prefix is None or posting.account.startswith(prefix): total_balance.add_position(posting) return total_balance def compute_entry_context(entries, context_entry, additional_accounts=None): """Compute the balances of all accounts referenced by entry up to entry. This provides the inventory of the accounts to which the entry is to be applied, before and after. Args: entries: A list of directives. context_entry: The entry for which we want to obtain the before and after context. additional_accounts: Additional list of accounts to include in calculating the balance. This is used when invoked for debugging, in case the booked & interpolated transaction doesn't have all the accounts we need because it had an error (the booking code will remove invalid postings). Returns: Two dicts of account-name to Inventory instance, one which represents the context before the entry is applied, and one that represents the context after it has been applied. """ assert context_entry is not None, "context_entry is missing." # Get the set of accounts for which to compute the context. context_accounts = getters.get_entry_accounts(context_entry) if additional_accounts: context_accounts.update(additional_accounts) # Iterate over the entries until we find the target one and accumulate the # balance. context_before = collections.defaultdict(inventory.Inventory) for entry in entries: if entry is context_entry: break if isinstance(entry, Transaction): for posting in entry.postings: if not any(posting.account == account for account in context_accounts): continue balance = context_before[posting.account] balance.add_position(posting) # Compute the after context for the entry. context_after = copy.deepcopy(context_before) if isinstance(context_entry, Transaction): for posting in entry.postings: balance = context_after[posting.account] balance.add_position(posting) return context_before, context_after def quantize_with_tolerance(tolerances, currency, number): """Quantize the units using the tolerance dict. Args: tolerances: A dict of currency to tolerance Decimalvalues. number: A number to quantize. currency: A string currency. Returns: A Decimal, the number possibly quantized. """ # Applying rounding to the default tolerance, if there is one. tolerance = tolerances.get(currency) if tolerance: quantum = (tolerance * 2).normalize() # If the tolerance is a neat number provided by the user, # quantize the inferred numbers. See doc on quantize(): # # Unlike other operations, if the length of the coefficient # after the quantize operation would be greater than # precision, then an InvalidOperation is signaled. This # guarantees that, unless there is an error condition, the # quantized exponent is always equal to that of the # right-hand operand. if is_tolerance_user_specified(quantum): number = number.quantize(quantum) return number
builders
anchor_generator_builder
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build an object detection anchor generator from config.""" from app.object_detection.anchor_generators import ( grid_anchor_generator, multiple_grid_anchor_generator, ) from app.object_detection.protos import anchor_generator_pb2 def build(anchor_generator_config): """Builds an anchor generator based on the config. Args: anchor_generator_config: An anchor_generator.proto object containing the config for the desired anchor generator. Returns: Anchor generator based on the config. Raises: ValueError: On empty anchor generator proto. """ if not isinstance(anchor_generator_config, anchor_generator_pb2.AnchorGenerator): raise ValueError( "anchor_generator_config not of type " "anchor_generator_pb2.AnchorGenerator" ) if ( anchor_generator_config.WhichOneof("anchor_generator_oneof") == "grid_anchor_generator" ): grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator return grid_anchor_generator.GridAnchorGenerator( scales=[float(scale) for scale in grid_anchor_generator_config.scales], aspect_ratios=[ float(aspect_ratio) for aspect_ratio in grid_anchor_generator_config.aspect_ratios ], base_anchor_size=[ grid_anchor_generator_config.height, grid_anchor_generator_config.width, ], anchor_stride=[ grid_anchor_generator_config.height_stride, grid_anchor_generator_config.width_stride, ], anchor_offset=[ grid_anchor_generator_config.height_offset, grid_anchor_generator_config.width_offset, ], ) elif ( anchor_generator_config.WhichOneof("anchor_generator_oneof") == "ssd_anchor_generator" ): ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator anchor_strides = None if ssd_anchor_generator_config.height_stride: anchor_strides = zip( ssd_anchor_generator_config.height_stride, ssd_anchor_generator_config.width_stride, ) anchor_offsets = None if ssd_anchor_generator_config.height_offset: anchor_offsets = zip( ssd_anchor_generator_config.height_offset, ssd_anchor_generator_config.width_offset, ) return multiple_grid_anchor_generator.create_ssd_anchors( num_layers=ssd_anchor_generator_config.num_layers, min_scale=ssd_anchor_generator_config.min_scale, max_scale=ssd_anchor_generator_config.max_scale, scales=[float(scale) for scale in ssd_anchor_generator_config.scales], aspect_ratios=ssd_anchor_generator_config.aspect_ratios, interpolated_scale_aspect_ratio=( ssd_anchor_generator_config.interpolated_scale_aspect_ratio ), base_anchor_size=[ ssd_anchor_generator_config.base_anchor_height, ssd_anchor_generator_config.base_anchor_width, ], anchor_strides=anchor_strides, anchor_offsets=anchor_offsets, reduce_boxes_in_lowest_layer=( ssd_anchor_generator_config.reduce_boxes_in_lowest_layer ), ) else: raise ValueError("Empty anchor generator.")
cli
disable
# # Copyright 2018 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # """ Disable blocks module """ import click from ..core import ModToolDisable, get_block_candidates from ..tools import SequenceCompleter from .base import block_name, cli_input, common_params, run @click.command("disable", short_help=ModToolDisable.description) @common_params @block_name def cli(**kwargs): """Disable a block (comments out CMake entries for files)""" kwargs["cli"] = True self = ModToolDisable(**kwargs) click.secho("GNU Radio module name identified: " + self.info["modname"], fg="green") get_pattern(self) run(self) def get_pattern(self): """Get the regex pattern for block(s) to be disabled""" if self.info["pattern"] is None: block_candidates = get_block_candidates() with SequenceCompleter(block_candidates): self.info["pattern"] = cli_input( "Which blocks do you want to disable? (Regex): " ) if not self.info["pattern"] or self.info["pattern"].isspace(): self.info["pattern"] = "."
beetsplug
fetchart
# This file is part of beets. # Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Fetches album art. """ import os import re from collections import OrderedDict from contextlib import closing from tempfile import NamedTemporaryFile import confuse import requests from beets import config, importer, plugins, ui, util from beets.util import bytestring_path, py3_path, sorted_walk, syspath from beets.util.artresizer import ArtResizer from mediafile import image_mime_type try: from bs4 import BeautifulSoup HAS_BEAUTIFUL_SOUP = True except ImportError: HAS_BEAUTIFUL_SOUP = False CONTENT_TYPES = {"image/jpeg": [b"jpg", b"jpeg"], "image/png": [b"png"]} IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts] class Candidate: """Holds information about a matching artwork, deals with validation of dimension restrictions and resizing. """ CANDIDATE_BAD = 0 CANDIDATE_EXACT = 1 CANDIDATE_DOWNSCALE = 2 CANDIDATE_DOWNSIZE = 3 CANDIDATE_DEINTERLACE = 4 CANDIDATE_REFORMAT = 5 MATCH_EXACT = 0 MATCH_FALLBACK = 1 def __init__(self, log, path=None, url=None, source="", match=None, size=None): self._log = log self.path = path self.url = url self.source = source self.check = None self.match = match self.size = size def _validate(self, plugin): """Determine whether the candidate artwork is valid based on its dimensions (width and ratio). Return `CANDIDATE_BAD` if the file is unusable. Return `CANDIDATE_EXACT` if the file is usable as-is. Return `CANDIDATE_DOWNSCALE` if the file must be rescaled. Return `CANDIDATE_DOWNSIZE` if the file must be resized, and possibly also rescaled. Return `CANDIDATE_DEINTERLACE` if the file must be deinterlaced. Return `CANDIDATE_REFORMAT` if the file has to be converted. """ if not self.path: return self.CANDIDATE_BAD if not ( plugin.enforce_ratio or plugin.minwidth or plugin.maxwidth or plugin.max_filesize or plugin.deinterlace or plugin.cover_format ): return self.CANDIDATE_EXACT # get_size returns None if no local imaging backend is available if not self.size: self.size = ArtResizer.shared.get_size(self.path) self._log.debug("image size: {}", self.size) if not self.size: self._log.warning( "Could not get size of image (please see " "documentation for dependencies). " "The configuration options `minwidth`, " "`enforce_ratio` and `max_filesize` " "may be violated." ) return self.CANDIDATE_EXACT short_edge = min(self.size) long_edge = max(self.size) # Check minimum dimension. if plugin.minwidth and self.size[0] < plugin.minwidth: self._log.debug("image too small ({} < {})", self.size[0], plugin.minwidth) return self.CANDIDATE_BAD # Check aspect ratio. edge_diff = long_edge - short_edge if plugin.enforce_ratio: if plugin.margin_px: if edge_diff > plugin.margin_px: self._log.debug( "image is not close enough to being " "square, ({} - {} > {})", long_edge, short_edge, plugin.margin_px, ) return self.CANDIDATE_BAD elif plugin.margin_percent: margin_px = plugin.margin_percent * long_edge if edge_diff > margin_px: self._log.debug( "image is not close enough to being " "square, ({} - {} > {})", long_edge, short_edge, margin_px, ) return self.CANDIDATE_BAD elif edge_diff: # also reached for margin_px == 0 and margin_percent == 0.0 self._log.debug( "image is not square ({} != {})", self.size[0], self.size[1] ) return self.CANDIDATE_BAD # Check maximum dimension. downscale = False if plugin.maxwidth and self.size[0] > plugin.maxwidth: self._log.debug( "image needs rescaling ({} > {})", self.size[0], plugin.maxwidth ) downscale = True # Check filesize. downsize = False if plugin.max_filesize: filesize = os.stat(syspath(self.path)).st_size if filesize > plugin.max_filesize: self._log.debug( "image needs resizing ({}B > {}B)", filesize, plugin.max_filesize ) downsize = True # Check image format reformat = False if plugin.cover_format: fmt = ArtResizer.shared.get_format(self.path) reformat = fmt != plugin.cover_format if reformat: self._log.debug( "image needs reformatting: {} -> {}", fmt, plugin.cover_format ) if downscale: return self.CANDIDATE_DOWNSCALE elif downsize: return self.CANDIDATE_DOWNSIZE elif plugin.deinterlace: return self.CANDIDATE_DEINTERLACE elif reformat: return self.CANDIDATE_REFORMAT else: return self.CANDIDATE_EXACT def validate(self, plugin): self.check = self._validate(plugin) return self.check def resize(self, plugin): if self.check == self.CANDIDATE_DOWNSCALE: self.path = ArtResizer.shared.resize( plugin.maxwidth, self.path, quality=plugin.quality, max_filesize=plugin.max_filesize, ) elif self.check == self.CANDIDATE_DOWNSIZE: # dimensions are correct, so maxwidth is set to maximum dimension self.path = ArtResizer.shared.resize( max(self.size), self.path, quality=plugin.quality, max_filesize=plugin.max_filesize, ) elif self.check == self.CANDIDATE_DEINTERLACE: self.path = ArtResizer.shared.deinterlace(self.path) elif self.check == self.CANDIDATE_REFORMAT: self.path = ArtResizer.shared.reformat( self.path, plugin.cover_format, deinterlaced=plugin.deinterlace, ) def _logged_get(log, *args, **kwargs): """Like `requests.get`, but logs the effective URL to the specified `log` at the `DEBUG` level. Use the optional `message` parameter to specify what to log before the URL. By default, the string is "getting URL". Also sets the User-Agent header to indicate beets. """ # Use some arguments with the `send` call but most with the # `Request` construction. This is a cheap, magic-filled way to # emulate `requests.get` or, more pertinently, # `requests.Session.request`. req_kwargs = kwargs send_kwargs = {} for arg in ("stream", "verify", "proxies", "cert", "timeout"): if arg in kwargs: send_kwargs[arg] = req_kwargs.pop(arg) # Our special logging message parameter. if "message" in kwargs: message = kwargs.pop("message") else: message = "getting URL" req = requests.Request("GET", *args, **req_kwargs) with requests.Session() as s: s.headers = {"User-Agent": "beets"} prepped = s.prepare_request(req) settings = s.merge_environment_settings(prepped.url, {}, None, None, None) send_kwargs.update(settings) log.debug("{}: {}", message, prepped.url) return s.send(prepped, **send_kwargs) class RequestMixin: """Adds a Requests wrapper to the class that uses the logger, which must be named `self._log`. """ def request(self, *args, **kwargs): """Like `requests.get`, but uses the logger `self._log`. See also `_logged_get`. """ return _logged_get(self._log, *args, **kwargs) # ART SOURCES ################################################################ class ArtSource(RequestMixin): VALID_MATCHING_CRITERIA = ["default"] def __init__(self, log, config, match_by=None): self._log = log self._config = config self.match_by = match_by or self.VALID_MATCHING_CRITERIA @staticmethod def add_default_config(config): pass @classmethod def available(cls, log, config): """Return whether or not all dependencies are met and the art source is in fact usable. """ return True def get(self, album, plugin, paths): raise NotImplementedError() def _candidate(self, **kwargs): return Candidate(source=self, log=self._log, **kwargs) def fetch_image(self, candidate, plugin): raise NotImplementedError() def cleanup(self, candidate): pass class LocalArtSource(ArtSource): IS_LOCAL = True LOC_STR = "local" def fetch_image(self, candidate, plugin): pass class RemoteArtSource(ArtSource): IS_LOCAL = False LOC_STR = "remote" def fetch_image(self, candidate, plugin): """Downloads an image from a URL and checks whether it seems to actually be an image. If so, returns a path to the downloaded image. Otherwise, returns None. """ if plugin.maxwidth: candidate.url = ArtResizer.shared.proxy_url(plugin.maxwidth, candidate.url) try: with closing( self.request(candidate.url, stream=True, message="downloading image") ) as resp: ct = resp.headers.get("Content-Type", None) # Download the image to a temporary file. As some servers # (notably fanart.tv) have proven to return wrong Content-Types # when images were uploaded with a bad file extension, do not # rely on it. Instead validate the type using the file magic # and only then determine the extension. data = resp.iter_content(chunk_size=1024) header = b"" for chunk in data: header += chunk if len(header) >= 32: # The imghdr module will only read 32 bytes, and our # own additions in mediafile even less. break else: # server didn't return enough data, i.e. corrupt image return real_ct = image_mime_type(header) if real_ct is None: # detection by file magic failed, fall back to the # server-supplied Content-Type # Is our type detection failsafe enough to drop this? real_ct = ct if real_ct not in CONTENT_TYPES: self._log.debug( "not a supported image: {}", real_ct or "unknown content type" ) return ext = b"." + CONTENT_TYPES[real_ct][0] if real_ct != ct: self._log.warning( "Server specified {}, but returned a " "{} image. Correcting the extension " "to {}", ct, real_ct, ext, ) suffix = py3_path(ext) with NamedTemporaryFile(suffix=suffix, delete=False) as fh: # write the first already loaded part of the image fh.write(header) # download the remaining part of the image for chunk in data: fh.write(chunk) self._log.debug( "downloaded art to: {0}", util.displayable_path(fh.name) ) candidate.path = util.bytestring_path(fh.name) return except (OSError, requests.RequestException, TypeError) as exc: # Handling TypeError works around a urllib3 bug: # https://github.com/shazow/urllib3/issues/556 self._log.debug("error fetching art: {}", exc) return def cleanup(self, candidate): if candidate.path: try: util.remove(path=candidate.path) except util.FilesystemError as exc: self._log.debug("error cleaning up tmp art: {}", exc) class CoverArtArchive(RemoteArtSource): NAME = "Cover Art Archive" VALID_MATCHING_CRITERIA = ["release", "releasegroup"] VALID_THUMBNAIL_SIZES = [250, 500, 1200] URL = "https://coverartarchive.org/release/{mbid}" GROUP_URL = "https://coverartarchive.org/release-group/{mbid}" def get(self, album, plugin, paths): """Return the Cover Art Archive and Cover Art Archive release group URLs using album MusicBrainz release ID and release group ID. """ def get_image_urls(url, preferred_width=None): try: response = self.request(url) except requests.RequestException: self._log.debug("{}: error receiving response".format(self.NAME)) return try: data = response.json() except ValueError: self._log.debug( "{}: error loading response: {}".format(self.NAME, response.text) ) return for item in data.get("images", []): try: if "Front" not in item["types"]: continue if preferred_width: yield item["thumbnails"][preferred_width] else: yield item["image"] except KeyError: pass release_url = self.URL.format(mbid=album.mb_albumid) release_group_url = self.GROUP_URL.format(mbid=album.mb_releasegroupid) # Cover Art Archive API offers pre-resized thumbnails at several sizes. # If the maxwidth config matches one of the already available sizes # fetch it directly instead of fetching the full sized image and # resizing it. preferred_width = None if plugin.maxwidth in self.VALID_THUMBNAIL_SIZES: preferred_width = str(plugin.maxwidth) if "release" in self.match_by and album.mb_albumid: for url in get_image_urls(release_url, preferred_width): yield self._candidate(url=url, match=Candidate.MATCH_EXACT) if "releasegroup" in self.match_by and album.mb_releasegroupid: for url in get_image_urls(release_group_url): yield self._candidate(url=url, match=Candidate.MATCH_FALLBACK) class Amazon(RemoteArtSource): NAME = "Amazon" URL = "https://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg" INDICES = (1, 2) def get(self, album, plugin, paths): """Generate URLs using Amazon ID (ASIN) string.""" if album.asin: for index in self.INDICES: yield self._candidate( url=self.URL % (album.asin, index), match=Candidate.MATCH_EXACT ) class AlbumArtOrg(RemoteArtSource): NAME = "AlbumArt.org scraper" URL = "https://www.albumart.org/index_detail.php" PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"' def get(self, album, plugin, paths): """Return art URL from AlbumArt.org using album ASIN.""" if not album.asin: return # Get the page from albumart.org. try: resp = self.request(self.URL, params={"asin": album.asin}) self._log.debug("scraped art URL: {0}", resp.url) except requests.RequestException: self._log.debug("error scraping art page") return # Search the page for the image URL. m = re.search(self.PAT, resp.text) if m: image_url = m.group(1) yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) else: self._log.debug("no image found on page") class GoogleImages(RemoteArtSource): NAME = "Google Images" URL = "https://www.googleapis.com/customsearch/v1" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.key = (self._config["google_key"].get(),) self.cx = (self._config["google_engine"].get(),) @staticmethod def add_default_config(config): config.add( { "google_key": None, "google_engine": "001442825323518660753:hrh5ch1gjzm", } ) config["google_key"].redact = True @classmethod def available(cls, log, config): has_key = bool(config["google_key"].get()) if not has_key: log.debug("google: Disabling art source due to missing key") return has_key def get(self, album, plugin, paths): """Return art URL from google custom search engine given an album title and interpreter. """ if not (album.albumartist and album.album): return search_string = (album.albumartist + "," + album.album).encode("utf-8") try: response = self.request( self.URL, params={ "key": self.key, "cx": self.cx, "q": search_string, "searchType": "image", }, ) except requests.RequestException: self._log.debug("google: error receiving response") return # Get results using JSON. try: data = response.json() except ValueError: self._log.debug("google: error loading response: {}".format(response.text)) return if "error" in data: reason = data["error"]["errors"][0]["reason"] self._log.debug("google fetchart error: {0}", reason) return if "items" in data.keys(): for item in data["items"]: yield self._candidate(url=item["link"], match=Candidate.MATCH_EXACT) class FanartTV(RemoteArtSource): """Art from fanart.tv requested using their API""" NAME = "fanart.tv" API_URL = "https://webservice.fanart.tv/v3/" API_ALBUMS = API_URL + "music/albums/" PROJECT_KEY = "61a7d0ab4e67162b7a0c7c35915cd48e" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.client_key = self._config["fanarttv_key"].get() @staticmethod def add_default_config(config): config.add( { "fanarttv_key": None, } ) config["fanarttv_key"].redact = True def get(self, album, plugin, paths): if not album.mb_releasegroupid: return try: response = self.request( self.API_ALBUMS + album.mb_releasegroupid, headers={"api-key": self.PROJECT_KEY, "client-key": self.client_key}, ) except requests.RequestException: self._log.debug("fanart.tv: error receiving response") return try: data = response.json() except ValueError: self._log.debug("fanart.tv: error loading response: {}", response.text) return if "status" in data and data["status"] == "error": if "not found" in data["error message"].lower(): self._log.debug("fanart.tv: no image found") elif "api key" in data["error message"].lower(): self._log.warning( "fanart.tv: Invalid API key given, please " "enter a valid one in your config file." ) else: self._log.debug( "fanart.tv: error on request: {}", data["error message"] ) return matches = [] # can there be more than one releasegroupid per response? for mbid, art in data.get("albums", {}).items(): # there might be more art referenced, e.g. cdart, and an albumcover # might not be present, even if the request was successful if album.mb_releasegroupid == mbid and "albumcover" in art: matches.extend(art["albumcover"]) # can this actually occur? else: self._log.debug( "fanart.tv: unexpected mb_releasegroupid in " "response!" ) matches.sort(key=lambda x: int(x["likes"]), reverse=True) for item in matches: # fanart.tv has a strict size requirement for album art to be # uploaded yield self._candidate( url=item["url"], match=Candidate.MATCH_EXACT, size=(1000, 1000) ) class ITunesStore(RemoteArtSource): NAME = "iTunes Store" API_URL = "https://itunes.apple.com/search" def get(self, album, plugin, paths): """Return art URL from iTunes Store given an album title.""" if not (album.albumartist and album.album): return payload = { "term": album.albumartist + " " + album.album, "entity": "album", "media": "music", "limit": 200, } try: r = self.request(self.API_URL, params=payload) r.raise_for_status() except requests.RequestException as e: self._log.debug("iTunes search failed: {0}", e) return try: candidates = r.json()["results"] except ValueError as e: self._log.debug("Could not decode json response: {0}", e) return except KeyError as e: self._log.debug( "{} not found in json. Fields are {} ", e, list(r.json().keys()) ) return if not candidates: self._log.debug("iTunes search for {!r} got no results", payload["term"]) return if self._config["high_resolution"]: image_suffix = "100000x100000-999" else: image_suffix = "1200x1200bb" for c in candidates: try: if ( c["artistName"] == album.albumartist and c["collectionName"] == album.album ): art_url = c["artworkUrl100"] art_url = art_url.replace("100x100bb", image_suffix) yield self._candidate(url=art_url, match=Candidate.MATCH_EXACT) except KeyError as e: self._log.debug( "Malformed itunes candidate: {} not found in {}", # NOQA E501 e, list(c.keys()), ) try: fallback_art_url = candidates[0]["artworkUrl100"] fallback_art_url = fallback_art_url.replace("100x100bb", image_suffix) yield self._candidate(url=fallback_art_url, match=Candidate.MATCH_FALLBACK) except KeyError as e: self._log.debug( "Malformed itunes candidate: {} not found in {}", e, list(c.keys()) ) class Wikipedia(RemoteArtSource): NAME = "Wikipedia (queried through DBpedia)" DBPEDIA_URL = "https://dbpedia.org/sparql" WIKIPEDIA_URL = "https://en.wikipedia.org/w/api.php" SPARQL_QUERY = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX dbpprop: <http://dbpedia.org/property/> PREFIX owl: <http://dbpedia.org/ontology/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX foaf: <http://xmlns.com/foaf/0.1/> SELECT DISTINCT ?pageId ?coverFilename WHERE {{ ?subject owl:wikiPageID ?pageId . ?subject dbpprop:name ?name . ?subject rdfs:label ?label . {{ ?subject dbpprop:artist ?artist }} UNION {{ ?subject owl:artist ?artist }} {{ ?artist foaf:name "{artist}"@en }} UNION {{ ?artist dbpprop:name "{artist}"@en }} ?subject rdf:type <http://dbpedia.org/ontology/Album> . ?subject dbpprop:cover ?coverFilename . FILTER ( regex(?name, "{album}", "i") ) }} Limit 1""" def get(self, album, plugin, paths): if not (album.albumartist and album.album): return # Find the name of the cover art filename on DBpedia cover_filename, page_id = None, None try: dbpedia_response = self.request( self.DBPEDIA_URL, params={ "format": "application/sparql-results+json", "timeout": 2500, "query": self.SPARQL_QUERY.format( artist=album.albumartist.title(), album=album.album ), }, headers={"content-type": "application/json"}, ) except requests.RequestException: self._log.debug("dbpedia: error receiving response") return try: data = dbpedia_response.json() results = data["results"]["bindings"] if results: cover_filename = "File:" + results[0]["coverFilename"]["value"] page_id = results[0]["pageId"]["value"] else: self._log.debug("wikipedia: album not found on dbpedia") except (ValueError, KeyError, IndexError): self._log.debug( "wikipedia: error scraping dbpedia response: {}", dbpedia_response.text ) # Ensure we have a filename before attempting to query wikipedia if not (cover_filename and page_id): return # DBPedia sometimes provides an incomplete cover_filename, indicated # by the filename having a space before the extension, e.g., 'foo .bar' # An additional Wikipedia call can help to find the real filename. # This may be removed once the DBPedia issue is resolved, see: # https://github.com/dbpedia/extraction-framework/issues/396 if " ." in cover_filename and "." not in cover_filename.split(" .")[-1]: self._log.debug("wikipedia: dbpedia provided incomplete cover_filename") lpart, rpart = cover_filename.rsplit(" .", 1) # Query all the images in the page try: wikipedia_response = self.request( self.WIKIPEDIA_URL, params={ "format": "json", "action": "query", "continue": "", "prop": "images", "pageids": page_id, }, headers={"content-type": "application/json"}, ) except requests.RequestException: self._log.debug("wikipedia: error receiving response") return # Try to see if one of the images on the pages matches our # incomplete cover_filename try: data = wikipedia_response.json() results = data["query"]["pages"][page_id]["images"] for result in results: if re.match( re.escape(lpart) + r".*?\." + re.escape(rpart), result["title"] ): cover_filename = result["title"] break except (ValueError, KeyError): self._log.debug("wikipedia: failed to retrieve a cover_filename") return # Find the absolute url of the cover art on Wikipedia try: wikipedia_response = self.request( self.WIKIPEDIA_URL, params={ "format": "json", "action": "query", "continue": "", "prop": "imageinfo", "iiprop": "url", "titles": cover_filename.encode("utf-8"), }, headers={"content-type": "application/json"}, ) except requests.RequestException: self._log.debug("wikipedia: error receiving response") return try: data = wikipedia_response.json() results = data["query"]["pages"] for _, result in results.items(): image_url = result["imageinfo"][0]["url"] yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) except (ValueError, KeyError, IndexError): self._log.debug("wikipedia: error scraping imageinfo") return class FileSystem(LocalArtSource): NAME = "Filesystem" @staticmethod def filename_priority(filename, cover_names): """Sort order for image names. Return indexes of cover names found in the image filename. This means that images with lower-numbered and more keywords will have higher priority. """ return [idx for (idx, x) in enumerate(cover_names) if x in filename] def get(self, album, plugin, paths): """Look for album art files in the specified directories.""" if not paths: return cover_names = list(map(util.bytestring_path, plugin.cover_names)) cover_names_str = b"|".join(cover_names) cover_pat = rb"".join([rb"(\b|_)(", cover_names_str, rb")(\b|_)"]) for path in paths: if not os.path.isdir(syspath(path)): continue # Find all files that look like images in the directory. images = [] ignore = config["ignore"].as_str_seq() ignore_hidden = config["ignore_hidden"].get(bool) for _, _, files in sorted_walk( path, ignore=ignore, ignore_hidden=ignore_hidden ): for fn in files: fn = bytestring_path(fn) for ext in IMAGE_EXTENSIONS: if fn.lower().endswith(b"." + ext) and os.path.isfile( syspath(os.path.join(path, fn)) ): images.append(fn) # Look for "preferred" filenames. images = sorted( images, key=lambda x: self.filename_priority(x, cover_names) ) remaining = [] for fn in images: if re.search(cover_pat, os.path.splitext(fn)[0], re.I): self._log.debug( "using well-named art file {0}", util.displayable_path(fn) ) yield self._candidate( path=os.path.join(path, fn), match=Candidate.MATCH_EXACT ) else: remaining.append(fn) # Fall back to any image in the folder. if remaining and not plugin.cautious: self._log.debug( "using fallback art file {0}", util.displayable_path(remaining[0]) ) yield self._candidate( path=os.path.join(path, remaining[0]), match=Candidate.MATCH_FALLBACK, ) class LastFM(RemoteArtSource): NAME = "Last.fm" # Sizes in priority order. SIZES = OrderedDict( [ ("mega", (300, 300)), ("extralarge", (300, 300)), ("large", (174, 174)), ("medium", (64, 64)), ("small", (34, 34)), ] ) API_URL = "https://ws.audioscrobbler.com/2.0" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.key = (self._config["lastfm_key"].get(),) @staticmethod def add_default_config(config): config.add( { "lastfm_key": None, } ) config["lastfm_key"].redact = True @classmethod def available(cls, log, config): has_key = bool(config["lastfm_key"].get()) if not has_key: log.debug("lastfm: Disabling art source due to missing key") return has_key def get(self, album, plugin, paths): if not album.mb_albumid: return try: response = self.request( self.API_URL, params={ "method": "album.getinfo", "api_key": self.key, "mbid": album.mb_albumid, "format": "json", }, ) except requests.RequestException: self._log.debug("lastfm: error receiving response") return try: data = response.json() if "error" in data: if data["error"] == 6: self._log.debug("lastfm: no results for {}", album.mb_albumid) else: self._log.error( "lastfm: failed to get album info: {} ({})", data["message"], data["error"], ) else: images = { image["size"]: image["#text"] for image in data["album"]["image"] } # Provide candidates in order of size. for size in self.SIZES.keys(): if size in images: yield self._candidate(url=images[size], size=self.SIZES[size]) except ValueError: self._log.debug("lastfm: error loading response: {}".format(response.text)) return class Spotify(RemoteArtSource): NAME = "Spotify" SPOTIFY_ALBUM_URL = "https://open.spotify.com/album/" @classmethod def available(cls, log, config): if not HAS_BEAUTIFUL_SOUP: log.debug( "To use Spotify as an album art source, " "you must install the beautifulsoup4 module. See " "the documentation for further details." ) return HAS_BEAUTIFUL_SOUP def get(self, album, plugin, paths): try: url = self.SPOTIFY_ALBUM_URL + album.items().get().spotify_album_id except AttributeError: self._log.debug("Fetchart: no Spotify album ID found") return try: response = requests.get(url) response.raise_for_status() except requests.RequestException as e: self._log.debug("Error: " + str(e)) return try: html = response.text soup = BeautifulSoup(html, "html.parser") image_url = soup.find("meta", attrs={"property": "og:image"})["content"] yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) except ValueError: self._log.debug("Spotify: error loading response: {}".format(response.text)) return class CoverArtUrl(RemoteArtSource): # This source is intended to be used with a plugin that sets the # cover_art_url field on albums or tracks. Users can also manually update # the cover_art_url field using the "set" command. This source will then # use that URL to fetch the image. NAME = "Cover Art URL" def get(self, album, plugin, paths): image_url = None try: # look for cover_art_url on album or first track if album.cover_art_url: image_url = album.cover_art_url else: image_url = album.items().get().cover_art_url self._log.debug(f"Cover art URL {image_url} found for {album}") except (AttributeError, TypeError): self._log.debug(f"Cover art URL not found for {album}") return if image_url: yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) else: self._log.debug(f"Cover art URL not found for {album}") return # Try each source in turn. # Note that SOURCES_ALL is redundant (and presently unused). However, we keep # it around nn order not break plugins that "register" (a.k.a. monkey-patch) # their own fetchart sources. SOURCES_ALL = [ "filesystem", "coverart", "itunes", "amazon", "albumart", "wikipedia", "google", "fanarttv", "lastfm", "spotify", ] ART_SOURCES = { "filesystem": FileSystem, "coverart": CoverArtArchive, "itunes": ITunesStore, "albumart": AlbumArtOrg, "amazon": Amazon, "wikipedia": Wikipedia, "google": GoogleImages, "fanarttv": FanartTV, "lastfm": LastFM, "spotify": Spotify, "cover_art_url": CoverArtUrl, } SOURCE_NAMES = {v: k for k, v in ART_SOURCES.items()} # PLUGIN LOGIC ############################################################### class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): PAT_PX = r"(0|[1-9][0-9]*)px" PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%" def __init__(self): super().__init__() # Holds candidates corresponding to downloaded images between # fetching them and placing them in the filesystem. self.art_candidates = {} self.config.add( { "auto": True, "minwidth": 0, "maxwidth": 0, "quality": 0, "max_filesize": 0, "enforce_ratio": False, "cautious": False, "cover_names": ["cover", "front", "art", "album", "folder"], "sources": [ "filesystem", "coverart", "itunes", "amazon", "albumart", "cover_art_url", ], "store_source": False, "high_resolution": False, "deinterlace": False, "cover_format": None, } ) for source in ART_SOURCES.values(): source.add_default_config(self.config) self.minwidth = self.config["minwidth"].get(int) self.maxwidth = self.config["maxwidth"].get(int) self.max_filesize = self.config["max_filesize"].get(int) self.quality = self.config["quality"].get(int) # allow both pixel and percentage-based margin specifications self.enforce_ratio = self.config["enforce_ratio"].get( confuse.OneOf( [ bool, confuse.String(pattern=self.PAT_PX), confuse.String(pattern=self.PAT_PERCENT), ] ) ) self.margin_px = None self.margin_percent = None self.deinterlace = self.config["deinterlace"].get(bool) if type(self.enforce_ratio) is str: if self.enforce_ratio[-1] == "%": self.margin_percent = float(self.enforce_ratio[:-1]) / 100 elif self.enforce_ratio[-2:] == "px": self.margin_px = int(self.enforce_ratio[:-2]) else: # shouldn't happen raise confuse.ConfigValueError() self.enforce_ratio = True cover_names = self.config["cover_names"].as_str_seq() self.cover_names = list(map(util.bytestring_path, cover_names)) self.cautious = self.config["cautious"].get(bool) self.store_source = self.config["store_source"].get(bool) self.src_removed = config["import"]["delete"].get(bool) or config["import"][ "move" ].get(bool) self.cover_format = self.config["cover_format"].get(confuse.Optional(str)) if self.config["auto"]: # Enable two import hooks when fetching is enabled. self.import_stages = [self.fetch_art] self.register_listener("import_task_files", self.assign_art) available_sources = [ (s_name, c) for (s_name, s_cls) in ART_SOURCES.items() if s_cls.available(self._log, self.config) for c in s_cls.VALID_MATCHING_CRITERIA ] sources = plugins.sanitize_pairs( self.config["sources"].as_pairs(default_value="*"), available_sources ) if "remote_priority" in self.config: self._log.warning( "The `fetch_art.remote_priority` configuration option has " "been deprecated. Instead, place `filesystem` at the end of " "your `sources` list." ) if self.config["remote_priority"].get(bool): fs = [] others = [] for s, c in sources: if s == "filesystem": fs.append((s, c)) else: others.append((s, c)) sources = others + fs self.sources = [ ART_SOURCES[s](self._log, self.config, match_by=[c]) for s, c in sources ] # Asynchronous; after music is added to the library. def fetch_art(self, session, task): """Find art for the album being imported.""" if task.is_album: # Only fetch art for full albums. if task.album.artpath and os.path.isfile(syspath(task.album.artpath)): # Album already has art (probably a re-import); skip it. return if task.choice_flag == importer.action.ASIS: # For as-is imports, don't search Web sources for art. local = True elif task.choice_flag in (importer.action.APPLY, importer.action.RETAG): # Search everywhere for art. local = False else: # For any other choices (e.g., TRACKS), do nothing. return candidate = self.art_for_album(task.album, task.paths, local) if candidate: self.art_candidates[task] = candidate def _set_art(self, album, candidate, delete=False): album.set_art(candidate.path, delete) if self.store_source: # store the source of the chosen artwork in a flexible field self._log.debug("Storing art_source for {0.albumartist} - {0.album}", album) album.art_source = SOURCE_NAMES[type(candidate.source)] album.store() # Synchronous; after music files are put in place. def assign_art(self, session, task): """Place the discovered art in the filesystem.""" if task in self.art_candidates: candidate = self.art_candidates.pop(task) self._set_art(task.album, candidate, not self.src_removed) if self.src_removed: task.prune(candidate.path) # Manual album art fetching. def commands(self): cmd = ui.Subcommand("fetchart", help="download album art") cmd.parser.add_option( "-f", "--force", dest="force", action="store_true", default=False, help="re-download art when already present", ) cmd.parser.add_option( "-q", "--quiet", dest="quiet", action="store_true", default=False, help="quiet mode: do not output albums that already have artwork", ) def func(lib, opts, args): self.batch_fetch_art( lib, lib.albums(ui.decargs(args)), opts.force, opts.quiet ) cmd.func = func return [cmd] # Utilities converted from functions to methods on logging overhaul def art_for_album(self, album, paths, local_only=False): """Given an Album object, returns a path to downloaded art for the album (or None if no art is found). If `maxwidth`, then images are resized to this maximum pixel size. If `quality` then resized images are saved at the specified quality level. If `local_only`, then only local image files from the filesystem are returned; no network requests are made. """ out = None for source in self.sources: if source.IS_LOCAL or not local_only: self._log.debug( "trying source {0} for album {1.albumartist} - {1.album}", SOURCE_NAMES[type(source)], album, ) # URLs might be invalid at this point, or the image may not # fulfill the requirements for candidate in source.get(album, self, paths): source.fetch_image(candidate, self) if candidate.validate(self): out = candidate self._log.debug( "using {0.LOC_STR} image {1}".format( source, util.displayable_path(out.path) ) ) break # Remove temporary files for invalid candidates. source.cleanup(candidate) if out: break if out: out.resize(self) return out def batch_fetch_art(self, lib, albums, force, quiet): """Fetch album art for each of the albums. This implements the manual fetchart CLI command. """ for album in albums: if album.artpath and not force and os.path.isfile(syspath(album.artpath)): if not quiet: message = ui.colorize("text_highlight_minor", "has album art") self._log.info("{0}: {1}", album, message) else: # In ordinary invocations, look for images on the # filesystem. When forcing, however, always go to the Web # sources. local_paths = None if force else [album.path] candidate = self.art_for_album(album, local_paths) if candidate: self._set_art(album, candidate) message = ui.colorize("text_success", "found album art") else: message = ui.colorize("text_error", "no art found") self._log.info("{0}: {1}", album, message)
beets
library
# This file is part of beets. # Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """The core data store and collection logic for beets. """ import os import re import shlex import string import sys import time import unicodedata import beets from beets import dbcore, logging, plugins, util from beets.dbcore import types from beets.util import ( MoveOperation, bytestring_path, lazy_property, normpath, samefile, syspath, ) from beets.util.functemplate import Template, template from mediafile import MediaFile, UnreadableFileError # To use the SQLite "blob" type, it doesn't suffice to provide a byte # string; SQLite treats that as encoded text. Wrapping it in a # `memoryview` tells it that we actually mean non-text data. BLOB_TYPE = memoryview log = logging.getLogger("beets") # Library-specific query types. class SingletonQuery(dbcore.FieldQuery): """This query is responsible for the 'singleton' lookup. It is based on the FieldQuery and constructs a SQL clause 'album_id is NULL' which yields the same result as the previous filter in Python but is more performant since it's done in SQL. Using util.str2bool ensures that lookups like singleton:true, singleton:1 and singleton:false, singleton:0 are handled consistently. """ def __new__(cls, field, value, *args, **kwargs): query = dbcore.query.NoneQuery("album_id") if util.str2bool(value): return query return dbcore.query.NotQuery(query) class PathQuery(dbcore.FieldQuery): """A query that matches all items under a given path. Matching can either be case-insensitive or case-sensitive. By default, the behavior depends on the OS: case-insensitive on Windows and case-sensitive otherwise. """ # For tests force_implicit_query_detection = False def __init__(self, field, pattern, fast=True, case_sensitive=None): """Create a path query. `pattern` must be a path, either to a file or a directory. `case_sensitive` can be a bool or `None`, indicating that the behavior should depend on the filesystem. """ super().__init__(field, pattern, fast) path = util.normpath(pattern) # By default, the case sensitivity depends on the filesystem # that the query path is located on. if case_sensitive is None: case_sensitive = util.case_sensitive(path) self.case_sensitive = case_sensitive # Use a normalized-case pattern for case-insensitive matches. if not case_sensitive: # We need to lowercase the entire path, not just the pattern. # In particular, on Windows, the drive letter is otherwise not # lowercased. # This also ensures that the `match()` method below and the SQL # from `col_clause()` do the same thing. path = path.lower() # Match the path as a single file. self.file_path = path # As a directory (prefix). self.dir_path = os.path.join(path, b"") @classmethod def is_path_query(cls, query_part): """Try to guess whether a unicode query part is a path query. Condition: separator precedes colon and the file exists. """ colon = query_part.find(":") if colon != -1: query_part = query_part[:colon] # Test both `sep` and `altsep` (i.e., both slash and backslash on # Windows). if not (os.sep in query_part or (os.altsep and os.altsep in query_part)): return False if cls.force_implicit_query_detection: return True return os.path.exists(syspath(normpath(query_part))) def match(self, item): path = item.path if self.case_sensitive else item.path.lower() return (path == self.file_path) or path.startswith(self.dir_path) def col_clause(self): file_blob = BLOB_TYPE(self.file_path) dir_blob = BLOB_TYPE(self.dir_path) if self.case_sensitive: query_part = "({0} = ?) || (substr({0}, 1, ?) = ?)" else: query_part = "(BYTELOWER({0}) = BYTELOWER(?)) || \ (substr(BYTELOWER({0}), 1, ?) = BYTELOWER(?))" return query_part.format(self.field), (file_blob, len(dir_blob), dir_blob) # Library-specific field types. class DateType(types.Float): # TODO representation should be `datetime` object # TODO distinguish between date and time types query = dbcore.query.DateQuery def format(self, value): return time.strftime( beets.config["time_format"].as_str(), time.localtime(value or 0) ) def parse(self, string): try: # Try a formatted date string. return time.mktime( time.strptime(string, beets.config["time_format"].as_str()) ) except ValueError: # Fall back to a plain timestamp number. try: return float(string) except ValueError: return self.null class PathType(types.Type): """A dbcore type for filesystem paths. These are represented as `bytes` objects, in keeping with the Unix filesystem abstraction. """ sql = "BLOB" query = PathQuery model_type = bytes def __init__(self, nullable=False): """Create a path type object. `nullable` controls whether the type may be missing, i.e., None. """ self.nullable = nullable @property def null(self): if self.nullable: return None else: return b"" def format(self, value): return util.displayable_path(value) def parse(self, string): return normpath(bytestring_path(string)) def normalize(self, value): if isinstance(value, str): # Paths stored internally as encoded bytes. return bytestring_path(value) elif isinstance(value, BLOB_TYPE): # We unwrap buffers to bytes. return bytes(value) else: return value def from_sql(self, sql_value): return self.normalize(sql_value) def to_sql(self, value): if isinstance(value, bytes): value = BLOB_TYPE(value) return value class MusicalKey(types.String): """String representing the musical key of a song. The standard format is C, Cm, C#, C#m, etc. """ ENHARMONIC = { r"db": "c#", r"eb": "d#", r"gb": "f#", r"ab": "g#", r"bb": "a#", } null = None def parse(self, key): key = key.lower() for flat, sharp in self.ENHARMONIC.items(): key = re.sub(flat, sharp, key) key = re.sub(r"[\W\s]+minor", "m", key) key = re.sub(r"[\W\s]+major", "", key) return key.capitalize() def normalize(self, key): if key is None: return None else: return self.parse(key) class DurationType(types.Float): """Human-friendly (M:SS) representation of a time interval.""" query = dbcore.query.DurationQuery def format(self, value): if not beets.config["format_raw_length"].get(bool): return beets.ui.human_seconds_short(value or 0.0) else: return value def parse(self, string): try: # Try to format back hh:ss to seconds. return util.raw_seconds_short(string) except ValueError: # Fall back to a plain float. try: return float(string) except ValueError: return self.null # Library-specific sort types. class SmartArtistSort(dbcore.query.Sort): """Sort by artist (either album artist or track artist), prioritizing the sort field over the raw field. """ def __init__(self, model_cls, ascending=True, case_insensitive=True): self.album = model_cls is Album self.ascending = ascending self.case_insensitive = case_insensitive def order_clause(self): order = "ASC" if self.ascending else "DESC" field = "albumartist" if self.album else "artist" collate = "COLLATE NOCASE" if self.case_insensitive else "" return ( "(CASE {0}_sort WHEN NULL THEN {0} " 'WHEN "" THEN {0} ' "ELSE {0}_sort END) {1} {2}" ).format(field, collate, order) def sort(self, objs): if self.album: def field(a): return a.albumartist_sort or a.albumartist else: def field(i): return i.artist_sort or i.artist if self.case_insensitive: def key(x): return field(x).lower() else: key = field return sorted(objs, key=key, reverse=not self.ascending) # Special path format key. PF_KEY_DEFAULT = "default" # Exceptions. class FileOperationError(Exception): """Indicate an error when interacting with a file on disk. Possibilities include an unsupported media type, a permissions error, and an unhandled Mutagen exception. """ def __init__(self, path, reason): """Create an exception describing an operation on the file at `path` with the underlying (chained) exception `reason`. """ super().__init__(path, reason) self.path = path self.reason = reason def __str__(self): """Get a string representing the error. Describe both the underlying reason and the file path in question. """ return f"{util.displayable_path(self.path)}: {self.reason}" class ReadError(FileOperationError): """An error while reading a file (i.e. in `Item.read`).""" def __str__(self): return "error reading " + str(super()) class WriteError(FileOperationError): """An error while writing a file (i.e. in `Item.write`).""" def __str__(self): return "error writing " + str(super()) # Item and Album model classes. class LibModel(dbcore.Model): """Shared concrete functionality for Items and Albums.""" # Config key that specifies how an instance should be formatted. _format_config_key = None def _template_funcs(self): funcs = DefaultTemplateFunctions(self, self._db).functions() funcs.update(plugins.template_funcs()) return funcs def store(self, fields=None): super().store(fields) plugins.send("database_change", lib=self._db, model=self) def remove(self): super().remove() plugins.send("database_change", lib=self._db, model=self) def add(self, lib=None): super().add(lib) plugins.send("database_change", lib=self._db, model=self) def __format__(self, spec): if not spec: spec = beets.config[self._format_config_key].as_str() assert isinstance(spec, str) return self.evaluate_template(spec) def __str__(self): return format(self) def __bytes__(self): return self.__str__().encode("utf-8") class FormattedItemMapping(dbcore.db.FormattedMapping): """Add lookup for album-level fields. Album-level fields take precedence if `for_path` is true. """ ALL_KEYS = "*" def __init__(self, item, included_keys=ALL_KEYS, for_path=False): # We treat album and item keys specially here, # so exclude transitive album keys from the model's keys. super().__init__(item, included_keys=[], for_path=for_path) self.included_keys = included_keys if included_keys == self.ALL_KEYS: # Performance note: this triggers a database query. self.model_keys = item.keys(computed=True, with_album=False) else: self.model_keys = included_keys self.item = item @lazy_property def all_keys(self): return set(self.model_keys).union(self.album_keys) @lazy_property def album_keys(self): album_keys = [] if self.album: if self.included_keys == self.ALL_KEYS: # Performance note: this triggers a database query. for key in self.album.keys(computed=True): if key in Album.item_keys or key not in self.item._fields.keys(): album_keys.append(key) else: album_keys = self.included_keys return album_keys @property def album(self): return self.item._cached_album def _get(self, key): """Get the value for a key, either from the album or the item. Raise a KeyError for invalid keys. """ if self.for_path and key in self.album_keys: return self._get_formatted(self.album, key) elif key in self.model_keys: return self._get_formatted(self.model, key) elif key in self.album_keys: return self._get_formatted(self.album, key) else: raise KeyError(key) def __getitem__(self, key): """Get the value for a key. `artist` and `albumartist` are fallback values for each other when not set. """ value = self._get(key) # `artist` and `albumartist` fields fall back to one another. # This is helpful in path formats when the album artist is unset # on as-is imports. try: if key == "artist" and not value: return self._get("albumartist") elif key == "albumartist" and not value: return self._get("artist") except KeyError: pass return value def __iter__(self): return iter(self.all_keys) def __len__(self): return len(self.all_keys) class Item(LibModel): """Represent a song or track.""" _table = "items" _flex_table = "item_attributes" _fields = { "id": types.PRIMARY_ID, "path": PathType(), "album_id": types.FOREIGN_ID, "title": types.STRING, "artist": types.STRING, "artists": types.MULTI_VALUE_DSV, "artists_ids": types.MULTI_VALUE_DSV, "artist_sort": types.STRING, "artists_sort": types.MULTI_VALUE_DSV, "artist_credit": types.STRING, "artists_credit": types.MULTI_VALUE_DSV, "remixer": types.STRING, "album": types.STRING, "albumartist": types.STRING, "albumartists": types.MULTI_VALUE_DSV, "albumartist_sort": types.STRING, "albumartists_sort": types.MULTI_VALUE_DSV, "albumartist_credit": types.STRING, "albumartists_credit": types.MULTI_VALUE_DSV, "genre": types.STRING, "style": types.STRING, "discogs_albumid": types.INTEGER, "discogs_artistid": types.INTEGER, "discogs_labelid": types.INTEGER, "lyricist": types.STRING, "composer": types.STRING, "composer_sort": types.STRING, "work": types.STRING, "mb_workid": types.STRING, "work_disambig": types.STRING, "arranger": types.STRING, "grouping": types.STRING, "year": types.PaddedInt(4), "month": types.PaddedInt(2), "day": types.PaddedInt(2), "track": types.PaddedInt(2), "tracktotal": types.PaddedInt(2), "disc": types.PaddedInt(2), "disctotal": types.PaddedInt(2), "lyrics": types.STRING, "comments": types.STRING, "bpm": types.INTEGER, "comp": types.BOOLEAN, "mb_trackid": types.STRING, "mb_albumid": types.STRING, "mb_artistid": types.STRING, "mb_artistids": types.MULTI_VALUE_DSV, "mb_albumartistid": types.STRING, "mb_albumartistids": types.MULTI_VALUE_DSV, "mb_releasetrackid": types.STRING, "trackdisambig": types.STRING, "albumtype": types.STRING, "albumtypes": types.SEMICOLON_SPACE_DSV, "label": types.STRING, "acoustid_fingerprint": types.STRING, "acoustid_id": types.STRING, "mb_releasegroupid": types.STRING, "release_group_title": types.STRING, "asin": types.STRING, "isrc": types.STRING, "catalognum": types.STRING, "script": types.STRING, "language": types.STRING, "country": types.STRING, "albumstatus": types.STRING, "media": types.STRING, "albumdisambig": types.STRING, "releasegroupdisambig": types.STRING, "disctitle": types.STRING, "encoder": types.STRING, "rg_track_gain": types.NULL_FLOAT, "rg_track_peak": types.NULL_FLOAT, "rg_album_gain": types.NULL_FLOAT, "rg_album_peak": types.NULL_FLOAT, "r128_track_gain": types.NULL_FLOAT, "r128_album_gain": types.NULL_FLOAT, "original_year": types.PaddedInt(4), "original_month": types.PaddedInt(2), "original_day": types.PaddedInt(2), "initial_key": MusicalKey(), "length": DurationType(), "bitrate": types.ScaledInt(1000, "kbps"), "bitrate_mode": types.STRING, "encoder_info": types.STRING, "encoder_settings": types.STRING, "format": types.STRING, "samplerate": types.ScaledInt(1000, "kHz"), "bitdepth": types.INTEGER, "channels": types.INTEGER, "mtime": DateType(), "added": DateType(), } _search_fields = ("artist", "title", "comments", "album", "albumartist", "genre") _types = { "data_source": types.STRING, } # Set of item fields that are backed by `MediaFile` fields. # Any kind of field (fixed, flexible, and computed) may be a media # field. Only these fields are read from disk in `read` and written in # `write`. _media_fields = set(MediaFile.readable_fields()).intersection(_fields.keys()) # Set of item fields that are backed by *writable* `MediaFile` tag # fields. # This excludes fields that represent audio data, such as `bitrate` or # `length`. _media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys()) _formatter = FormattedItemMapping _sorts = {"artist": SmartArtistSort} _queries = {"singleton": SingletonQuery} _format_config_key = "format_item" # Cached album object. Read-only. __album = None @property def _cached_album(self): """The Album object that this item belongs to, if any, or None if the item is a singleton or is not associated with a library. The instance is cached and refreshed on access. DO NOT MODIFY! If you want a copy to modify, use :meth:`get_album`. """ if not self.__album and self._db: self.__album = self._db.get_album(self) elif self.__album: self.__album.load() return self.__album @_cached_album.setter def _cached_album(self, album): self.__album = album @classmethod def _getters(cls): getters = plugins.item_field_getters() getters["singleton"] = lambda i: i.album_id is None getters["filesize"] = Item.try_filesize # In bytes. return getters @classmethod def from_path(cls, path): """Create a new item from the media file at the specified path.""" # Initiate with values that aren't read from files. i = cls(album_id=None) i.read(path) i.mtime = i.current_mtime() # Initial mtime. return i def __setitem__(self, key, value): """Set the item's value for a standard field or a flexattr.""" # Encode unicode paths and read buffers. if key == "path": if isinstance(value, str): value = bytestring_path(value) elif isinstance(value, BLOB_TYPE): value = bytes(value) elif key == "album_id": self._cached_album = None changed = super()._setitem(key, value) if changed and key in MediaFile.fields(): self.mtime = 0 # Reset mtime on dirty. def __getitem__(self, key): """Get the value for a field, falling back to the album if necessary. Raise a KeyError if the field is not available. """ try: return super().__getitem__(key) except KeyError: if self._cached_album: return self._cached_album[key] raise def __repr__(self): # This must not use `with_album=True`, because that might access # the database. When debugging, that is not guaranteed to succeed, and # can even deadlock due to the database lock. return "{}({})".format( type(self).__name__, ", ".join( "{}={!r}".format(k, self[k]) for k in self.keys(with_album=False) ), ) def keys(self, computed=False, with_album=True): """Get a list of available field names. `with_album` controls whether the album's fields are included. """ keys = super().keys(computed=computed) if with_album and self._cached_album: keys = set(keys) keys.update(self._cached_album.keys(computed=computed)) keys = list(keys) return keys def get(self, key, default=None, with_album=True): """Get the value for a given key or `default` if it does not exist. Set `with_album` to false to skip album fallback. """ try: return self._get(key, default, raise_=with_album) except KeyError: if self._cached_album: return self._cached_album.get(key, default) return default def update(self, values): """Set all key/value pairs in the mapping. If mtime is specified, it is not reset (as it might otherwise be). """ super().update(values) if self.mtime == 0 and "mtime" in values: self.mtime = values["mtime"] def clear(self): """Set all key/value pairs to None.""" for key in self._media_tag_fields: setattr(self, key, None) def get_album(self): """Get the Album object that this item belongs to, if any, or None if the item is a singleton or is not associated with a library. """ if not self._db: return None return self._db.get_album(self) # Interaction with file metadata. def read(self, read_path=None): """Read the metadata from the associated file. If `read_path` is specified, read metadata from that file instead. Update all the properties in `_media_fields` from the media file. Raise a `ReadError` if the file could not be read. """ if read_path is None: read_path = self.path else: read_path = normpath(read_path) try: mediafile = MediaFile(syspath(read_path)) except UnreadableFileError as exc: raise ReadError(read_path, exc) for key in self._media_fields: value = getattr(mediafile, key) if isinstance(value, int): if value.bit_length() > 63: value = 0 self[key] = value # Database's mtime should now reflect the on-disk value. if read_path == self.path: self.mtime = self.current_mtime() self.path = read_path def write(self, path=None, tags=None, id3v23=None): """Write the item's metadata to a media file. All fields in `_media_fields` are written to disk according to the values on this object. `path` is the path of the mediafile to write the data to. It defaults to the item's path. `tags` is a dictionary of additional metadata the should be written to the file. (These tags need not be in `_media_fields`.) `id3v23` will override the global `id3v23` config option if it is set to something other than `None`. Can raise either a `ReadError` or a `WriteError`. """ if path is None: path = self.path else: path = normpath(path) if id3v23 is None: id3v23 = beets.config["id3v23"].get(bool) # Get the data to write to the file. item_tags = dict(self) item_tags = { k: v for k, v in item_tags.items() if k in self._media_fields } # Only write media fields. if tags is not None: item_tags.update(tags) plugins.send("write", item=self, path=path, tags=item_tags) # Open the file. try: mediafile = MediaFile(syspath(path), id3v23=id3v23) except UnreadableFileError as exc: raise ReadError(path, exc) # Write the tags to the file. mediafile.update(item_tags) try: mediafile.save() except UnreadableFileError as exc: raise WriteError(self.path, exc) # The file has a new mtime. if path == self.path: self.mtime = self.current_mtime() plugins.send("after_write", item=self, path=path) def try_write(self, *args, **kwargs): """Call `write()` but catch and log `FileOperationError` exceptions. Return `False` an exception was caught and `True` otherwise. """ try: self.write(*args, **kwargs) return True except FileOperationError as exc: log.error("{0}", exc) return False def try_sync(self, write, move, with_album=True): """Synchronize the item with the database and, possibly, update its tags on disk and its path (by moving the file). `write` indicates whether to write new tags into the file. Similarly, `move` controls whether the path should be updated. In the latter case, files are *only* moved when they are inside their library's directory (if any). Similar to calling :meth:`write`, :meth:`move`, and :meth:`store` (conditionally). """ if write: self.try_write() if move: # Check whether this file is inside the library directory. if self._db and self._db.directory in util.ancestry(self.path): log.debug( "moving {0} to synchronize path", util.displayable_path(self.path) ) self.move(with_album=with_album) self.store() # Files themselves. def move_file(self, dest, operation=MoveOperation.MOVE): """Move, copy, link or hardlink the item depending on `operation`, updating the path value if the move succeeds. If a file exists at `dest`, then it is slightly modified to be unique. `operation` should be an instance of `util.MoveOperation`. """ if not util.samefile(self.path, dest): dest = util.unique_path(dest) if operation == MoveOperation.MOVE: plugins.send( "before_item_moved", item=self, source=self.path, destination=dest ) util.move(self.path, dest) plugins.send("item_moved", item=self, source=self.path, destination=dest) elif operation == MoveOperation.COPY: util.copy(self.path, dest) plugins.send("item_copied", item=self, source=self.path, destination=dest) elif operation == MoveOperation.LINK: util.link(self.path, dest) plugins.send("item_linked", item=self, source=self.path, destination=dest) elif operation == MoveOperation.HARDLINK: util.hardlink(self.path, dest) plugins.send( "item_hardlinked", item=self, source=self.path, destination=dest ) elif operation == MoveOperation.REFLINK: util.reflink(self.path, dest, fallback=False) plugins.send( "item_reflinked", item=self, source=self.path, destination=dest ) elif operation == MoveOperation.REFLINK_AUTO: util.reflink(self.path, dest, fallback=True) plugins.send( "item_reflinked", item=self, source=self.path, destination=dest ) else: assert False, "unknown MoveOperation" # Either copying or moving succeeded, so update the stored path. self.path = dest def current_mtime(self): """Return the current mtime of the file, rounded to the nearest integer. """ return int(os.path.getmtime(syspath(self.path))) def try_filesize(self): """Get the size of the underlying file in bytes. If the file is missing, return 0 (and log a warning). """ try: return os.path.getsize(syspath(self.path)) except (OSError, Exception) as exc: log.warning("could not get filesize: {0}", exc) return 0 # Model methods. def remove(self, delete=False, with_album=True): """Remove the item. If `delete`, then the associated file is removed from disk. If `with_album`, then the item's album (if any) is removed if the item was the last in the album. """ super().remove() # Remove the album if it is empty. if with_album: album = self.get_album() if album and not album.items(): album.remove(delete, False) # Send a 'item_removed' signal to plugins plugins.send("item_removed", item=self) # Delete the associated file. if delete: util.remove(self.path) util.prune_dirs(os.path.dirname(self.path), self._db.directory) self._db._memotable = {} def move( self, operation=MoveOperation.MOVE, basedir=None, with_album=True, store=True ): """Move the item to its designated location within the library directory (provided by destination()). Subdirectories are created as needed. If the operation succeeds, the item's path field is updated to reflect the new location. Instead of moving the item it can also be copied, linked or hardlinked depending on `operation` which should be an instance of `util.MoveOperation`. `basedir` overrides the library base directory for the destination. If the item is in an album and `with_album` is `True`, the album is given an opportunity to move its art. By default, the item is stored to the database if it is in the database, so any dirty fields prior to the move() call will be written as a side effect. If `store` is `False` however, the item won't be stored and it will have to be manually stored after invoking this method. """ self._check_db() dest = self.destination(basedir=basedir) # Create necessary ancestry for the move. util.mkdirall(dest) # Perform the move and store the change. old_path = self.path self.move_file(dest, operation) if store: self.store() # If this item is in an album, move its art. if with_album: album = self.get_album() if album: album.move_art(operation) if store: album.store() # Prune vacated directory. if operation == MoveOperation.MOVE: util.prune_dirs(os.path.dirname(old_path), self._db.directory) # Templating. def destination( self, fragment=False, basedir=None, platform=None, path_formats=None, replacements=None, ): """Return the path in the library directory designated for the item (i.e., where the file ought to be). fragment makes this method return just the path fragment underneath the root library directory; the path is also returned as Unicode instead of encoded as a bytestring. basedir can override the library's base directory for the destination. """ self._check_db() platform = platform or sys.platform basedir = basedir or self._db.directory path_formats = path_formats or self._db.path_formats if replacements is None: replacements = self._db.replacements # Use a path format based on a query, falling back on the # default. for query, path_format in path_formats: if query == PF_KEY_DEFAULT: continue query, _ = parse_query_string(query, type(self)) if query.match(self): # The query matches the item! Use the corresponding path # format. break else: # No query matched; fall back to default. for query, path_format in path_formats: if query == PF_KEY_DEFAULT: break else: assert False, "no default path format" if isinstance(path_format, Template): subpath_tmpl = path_format else: subpath_tmpl = template(path_format) # Evaluate the selected template. subpath = self.evaluate_template(subpath_tmpl, True) # Prepare path for output: normalize Unicode characters. if platform == "darwin": subpath = unicodedata.normalize("NFD", subpath) else: subpath = unicodedata.normalize("NFC", subpath) if beets.config["asciify_paths"]: subpath = util.asciify_path( subpath, beets.config["path_sep_replace"].as_str() ) maxlen = beets.config["max_filename_length"].get(int) if not maxlen: # When zero, try to determine from filesystem. maxlen = util.max_filename_length(self._db.directory) subpath, fellback = util.legalize_path( subpath, replacements, maxlen, os.path.splitext(self.path)[1], fragment ) if fellback: # Print an error message if legalization fell back to # default replacements because of the maximum length. log.warning( "Fell back to default replacements when naming " "file {}. Configure replacements to avoid lengthening " "the filename.", subpath, ) if fragment: return util.as_string(subpath) else: return normpath(os.path.join(basedir, subpath)) class Album(LibModel): """Provide access to information about albums stored in a library. Reflects the library's "albums" table, including album art. """ _table = "albums" _flex_table = "album_attributes" _always_dirty = True _fields = { "id": types.PRIMARY_ID, "artpath": PathType(True), "added": DateType(), "albumartist": types.STRING, "albumartist_sort": types.STRING, "albumartist_credit": types.STRING, "albumartists": types.MULTI_VALUE_DSV, "albumartists_sort": types.MULTI_VALUE_DSV, "albumartists_credit": types.MULTI_VALUE_DSV, "album": types.STRING, "genre": types.STRING, "style": types.STRING, "discogs_albumid": types.INTEGER, "discogs_artistid": types.INTEGER, "discogs_labelid": types.INTEGER, "year": types.PaddedInt(4), "month": types.PaddedInt(2), "day": types.PaddedInt(2), "disctotal": types.PaddedInt(2), "comp": types.BOOLEAN, "mb_albumid": types.STRING, "mb_albumartistid": types.STRING, "albumtype": types.STRING, "albumtypes": types.SEMICOLON_SPACE_DSV, "label": types.STRING, "mb_releasegroupid": types.STRING, "release_group_title": types.STRING, "asin": types.STRING, "catalognum": types.STRING, "script": types.STRING, "language": types.STRING, "country": types.STRING, "albumstatus": types.STRING, "albumdisambig": types.STRING, "releasegroupdisambig": types.STRING, "rg_album_gain": types.NULL_FLOAT, "rg_album_peak": types.NULL_FLOAT, "r128_album_gain": types.NULL_FLOAT, "original_year": types.PaddedInt(4), "original_month": types.PaddedInt(2), "original_day": types.PaddedInt(2), } _search_fields = ("album", "albumartist", "genre") _types = { "path": PathType(), "data_source": types.STRING, } _sorts = { "albumartist": SmartArtistSort, "artist": SmartArtistSort, } # List of keys that are set on an album's items. item_keys = [ "added", "albumartist", "albumartists", "albumartist_sort", "albumartists_sort", "albumartist_credit", "albumartists_credit", "album", "genre", "style", "discogs_albumid", "discogs_artistid", "discogs_labelid", "year", "month", "day", "disctotal", "comp", "mb_albumid", "mb_albumartistid", "albumtype", "albumtypes", "label", "mb_releasegroupid", "asin", "catalognum", "script", "language", "country", "albumstatus", "albumdisambig", "releasegroupdisambig", "release_group_title", "rg_album_gain", "rg_album_peak", "r128_album_gain", "original_year", "original_month", "original_day", ] _format_config_key = "format_album" @classmethod def _getters(cls): # In addition to plugin-provided computed fields, also expose # the album's directory as `path`. getters = plugins.album_field_getters() getters["path"] = Album.item_dir getters["albumtotal"] = Album._albumtotal return getters def items(self): """Return an iterable over the items associated with this album. This method conflicts with :meth:`LibModel.items`, which is inherited from :meth:`beets.dbcore.Model.items`. Since :meth:`Album.items` predates these methods, and is likely to be used by plugins, we keep this interface as-is. """ return self._db.items(dbcore.MatchQuery("album_id", self.id)) def remove(self, delete=False, with_items=True): """Remove this album and all its associated items from the library. If delete, then the items' files are also deleted from disk, along with any album art. The directories containing the album are also removed (recursively) if empty. Set with_items to False to avoid removing the album's items. """ super().remove() # Send a 'album_removed' signal to plugins plugins.send("album_removed", album=self) # Delete art file. if delete: artpath = self.artpath if artpath: util.remove(artpath) # Remove (and possibly delete) the constituent items. if with_items: for item in self.items(): item.remove(delete, False) def move_art(self, operation=MoveOperation.MOVE): """Move, copy, link or hardlink (depending on `operation`) any existing album art so that it remains in the same directory as the items. `operation` should be an instance of `util.MoveOperation`. """ old_art = self.artpath if not old_art: return if not os.path.exists(syspath(old_art)): log.error( "removing reference to missing album art file {}", util.displayable_path(old_art), ) self.artpath = None return new_art = self.art_destination(old_art) if new_art == old_art: return new_art = util.unique_path(new_art) log.debug( "moving album art {0} to {1}", util.displayable_path(old_art), util.displayable_path(new_art), ) if operation == MoveOperation.MOVE: util.move(old_art, new_art) util.prune_dirs(os.path.dirname(old_art), self._db.directory) elif operation == MoveOperation.COPY: util.copy(old_art, new_art) elif operation == MoveOperation.LINK: util.link(old_art, new_art) elif operation == MoveOperation.HARDLINK: util.hardlink(old_art, new_art) elif operation == MoveOperation.REFLINK: util.reflink(old_art, new_art, fallback=False) elif operation == MoveOperation.REFLINK_AUTO: util.reflink(old_art, new_art, fallback=True) else: assert False, "unknown MoveOperation" self.artpath = new_art def move(self, operation=MoveOperation.MOVE, basedir=None, store=True): """Move, copy, link or hardlink (depending on `operation`) all items to their destination. Any album art moves along with them. `basedir` overrides the library base directory for the destination. `operation` should be an instance of `util.MoveOperation`. By default, the album is stored to the database, persisting any modifications to its metadata. If `store` is `False` however, the album is not stored automatically, and it will have to be manually stored after invoking this method. """ basedir = basedir or self._db.directory # Ensure new metadata is available to items for destination # computation. if store: self.store() # Move items. items = list(self.items()) for item in items: item.move(operation, basedir=basedir, with_album=False, store=store) # Move art. self.move_art(operation) if store: self.store() def item_dir(self): """Return the directory containing the album's first item, provided that such an item exists. """ item = self.items().get() if not item: raise ValueError("empty album for album id %d" % self.id) return os.path.dirname(item.path) def _albumtotal(self): """Return the total number of tracks on all discs on the album.""" if self.disctotal == 1 or not beets.config["per_disc_numbering"]: return self.items()[0].tracktotal counted = [] total = 0 for item in self.items(): if item.disc in counted: continue total += item.tracktotal counted.append(item.disc) if len(counted) == self.disctotal: break return total def art_destination(self, image, item_dir=None): """Return a path to the destination for the album art image for the album. `image` is the path of the image that will be moved there (used for its extension). The path construction uses the existing path of the album's items, so the album must contain at least one item or item_dir must be provided. """ image = bytestring_path(image) item_dir = item_dir or self.item_dir() filename_tmpl = template(beets.config["art_filename"].as_str()) subpath = self.evaluate_template(filename_tmpl, True) if beets.config["asciify_paths"]: subpath = util.asciify_path( subpath, beets.config["path_sep_replace"].as_str() ) subpath = util.sanitize_path(subpath, replacements=self._db.replacements) subpath = bytestring_path(subpath) _, ext = os.path.splitext(image) dest = os.path.join(item_dir, subpath + ext) return bytestring_path(dest) def set_art(self, path, copy=True): """Set the album's cover art to the image at the given path. The image is copied (or moved) into place, replacing any existing art. Send an 'art_set' event with `self` as the sole argument. """ path = bytestring_path(path) oldart = self.artpath artdest = self.art_destination(path) if oldart and samefile(path, oldart): # Art already set. return elif samefile(path, artdest): # Art already in place. self.artpath = path return # Normal operation. if oldart == artdest: util.remove(oldart) artdest = util.unique_path(artdest) if copy: util.copy(path, artdest) else: util.move(path, artdest) self.artpath = artdest plugins.send("art_set", album=self) def store(self, fields=None, inherit=True): """Update the database with the album information. `fields` represents the fields to be stored. If not specified, all fields will be. The album's tracks are also updated when the `inherit` flag is enabled. This applies to fixed attributes as well as flexible ones. The `id` attribute of the album will never be inherited. """ # Get modified track fields. track_updates = {} track_deletes = set() for key in self._dirty: if inherit: if key in self.item_keys: # is a fixed attribute track_updates[key] = self[key] elif key not in self: # is a fixed or a flexible attribute track_deletes.add(key) elif key != "id": # is a flexible attribute track_updates[key] = self[key] with self._db.transaction(): super().store(fields) if track_updates: for item in self.items(): for key, value in track_updates.items(): item[key] = value item.store() if track_deletes: for item in self.items(): for key in track_deletes: if key in item: del item[key] item.store() def try_sync(self, write, move, inherit=True): """Synchronize the album and its items with the database. Optionally, also write any new tags into the files and update their paths. `write` indicates whether to write tags to the item files, and `move` controls whether files (both audio and album art) are moved. """ self.store(inherit=inherit) for item in self.items(): item.try_sync(write, move) # Query construction helpers. def parse_query_parts(parts, model_cls): """Given a beets query string as a list of components, return the `Query` and `Sort` they represent. Like `dbcore.parse_sorted_query`, with beets query prefixes and ensuring that implicit path queries are made explicit with 'path::<query>' """ # Get query types and their prefix characters. prefixes = { ":": dbcore.query.RegexpQuery, "=~": dbcore.query.StringQuery, "=": dbcore.query.MatchQuery, } prefixes.update(plugins.queries()) # Special-case path-like queries, which are non-field queries # containing path separators (/). parts = [f"path:{s}" if PathQuery.is_path_query(s) else s for s in parts] case_insensitive = beets.config["sort_case_insensitive"].get(bool) return dbcore.parse_sorted_query(model_cls, parts, prefixes, case_insensitive) def parse_query_string(s, model_cls): """Given a beets query string, return the `Query` and `Sort` they represent. The string is split into components using shell-like syntax. """ message = f"Query is not unicode: {s!r}" assert isinstance(s, str), message try: parts = shlex.split(s) except ValueError as exc: raise dbcore.InvalidQueryError(s, exc) return parse_query_parts(parts, model_cls) def _sqlite_bytelower(bytestring): """A custom ``bytelower`` sqlite function so we can compare bytestrings in a semi case insensitive fashion. This is to work around sqlite builds are that compiled with ``-DSQLITE_LIKE_DOESNT_MATCH_BLOBS``. See ``https://github.com/beetbox/beets/issues/2172`` for details. """ return bytestring.lower() # The Library: interface to the database. class Library(dbcore.Database): """A database of music containing songs and albums.""" _models = (Item, Album) def __init__( self, path="library.blb", directory="~/Music", path_formats=((PF_KEY_DEFAULT, "$artist/$album/$track $title"),), replacements=None, ): timeout = beets.config["timeout"].as_number() super().__init__(path, timeout=timeout) self.directory = bytestring_path(normpath(directory)) self.path_formats = path_formats self.replacements = replacements self._memotable = {} # Used for template substitution performance. def _create_connection(self): conn = super()._create_connection() conn.create_function("bytelower", 1, _sqlite_bytelower) return conn # Adding objects to the database. def add(self, obj): """Add the :class:`Item` or :class:`Album` object to the library database. Return the object's new id. """ obj.add(self) self._memotable = {} return obj.id def add_album(self, items): """Create a new album consisting of a list of items. The items are added to the database if they don't yet have an ID. Return a new :class:`Album` object. The list items must not be empty. """ if not items: raise ValueError("need at least one item") # Create the album structure using metadata from the first item. values = {key: items[0][key] for key in Album.item_keys} album = Album(self, **values) # Add the album structure and set the items' album_id fields. # Store or add the items. with self.transaction(): album.add(self) for item in items: item.album_id = album.id if item.id is None: item.add(self) else: item.store() return album # Querying. def _fetch(self, model_cls, query, sort=None): """Parse a query and fetch. If an order specification is present in the query string the `sort` argument is ignored. """ # Parse the query, if necessary. try: parsed_sort = None if isinstance(query, str): query, parsed_sort = parse_query_string(query, model_cls) elif isinstance(query, (list, tuple)): query, parsed_sort = parse_query_parts(query, model_cls) except dbcore.query.InvalidQueryArgumentValueError as exc: raise dbcore.InvalidQueryError(query, exc) # Any non-null sort specified by the parsed query overrides the # provided sort. if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort): sort = parsed_sort return super()._fetch(model_cls, query, sort) @staticmethod def get_default_album_sort(): """Get a :class:`Sort` object for albums from the config option.""" return dbcore.sort_from_strings(Album, beets.config["sort_album"].as_str_seq()) @staticmethod def get_default_item_sort(): """Get a :class:`Sort` object for items from the config option.""" return dbcore.sort_from_strings(Item, beets.config["sort_item"].as_str_seq()) def albums(self, query=None, sort=None): """Get :class:`Album` objects matching the query.""" return self._fetch(Album, query, sort or self.get_default_album_sort()) def items(self, query=None, sort=None): """Get :class:`Item` objects matching the query.""" return self._fetch(Item, query, sort or self.get_default_item_sort()) # Convenience accessors. def get_item(self, id): """Fetch a :class:`Item` by its ID. Return `None` if no match is found. """ return self._get(Item, id) def get_album(self, item_or_id): """Given an album ID or an item associated with an album, return a :class:`Album` object for the album. If no such album exists, return `None`. """ if isinstance(item_or_id, int): album_id = item_or_id else: album_id = item_or_id.album_id if album_id is None: return None return self._get(Album, album_id) # Default path template resources. def _int_arg(s): """Convert a string argument to an integer for use in a template function. May raise a ValueError. """ return int(s.strip()) class DefaultTemplateFunctions: """A container class for the default functions provided to path templates. These functions are contained in an object to provide additional context to the functions -- specifically, the Item being evaluated. """ _prefix = "tmpl_" def __init__(self, item=None, lib=None): """Parametrize the functions. If `item` or `lib` is None, then some functions (namely, ``aunique``) will always evaluate to the empty string. """ self.item = item self.lib = lib def functions(self): """Return a dictionary containing the functions defined in this object. The keys are function names (as exposed in templates) and the values are Python functions. """ out = {} for key in self._func_names: out[key[len(self._prefix) :]] = getattr(self, key) return out @staticmethod def tmpl_lower(s): """Convert a string to lower case.""" return s.lower() @staticmethod def tmpl_upper(s): """Convert a string to upper case.""" return s.upper() @staticmethod def tmpl_title(s): """Convert a string to title case.""" return string.capwords(s) @staticmethod def tmpl_left(s, chars): """Get the leftmost characters of a string.""" return s[0 : _int_arg(chars)] @staticmethod def tmpl_right(s, chars): """Get the rightmost characters of a string.""" return s[-_int_arg(chars) :] @staticmethod def tmpl_if(condition, trueval, falseval=""): """If ``condition`` is nonempty and nonzero, emit ``trueval``; otherwise, emit ``falseval`` (if provided). """ try: int_condition = _int_arg(condition) except ValueError: if condition.lower() == "false": return falseval else: condition = int_condition if condition: return trueval else: return falseval @staticmethod def tmpl_asciify(s): """Translate non-ASCII characters to their ASCII equivalents.""" return util.asciify_path(s, beets.config["path_sep_replace"].as_str()) @staticmethod def tmpl_time(s, fmt): """Format a time value using `strftime`.""" cur_fmt = beets.config["time_format"].as_str() return time.strftime(fmt, time.strptime(s, cur_fmt)) def tmpl_aunique(self, keys=None, disam=None, bracket=None): """Generate a string that is guaranteed to be unique among all albums in the library who share the same set of keys. A fields from "disam" is used in the string if one is sufficient to disambiguate the albums. Otherwise, a fallback opaque value is used. Both "keys" and "disam" should be given as whitespace-separated lists of field names, while "bracket" is a pair of characters to be used as brackets surrounding the disambiguator or empty to have no brackets. """ # Fast paths: no album, no item or library, or memoized value. if not self.item or not self.lib: return "" if isinstance(self.item, Item): album_id = self.item.album_id elif isinstance(self.item, Album): album_id = self.item.id if album_id is None: return "" memokey = self._tmpl_unique_memokey("aunique", keys, disam, album_id) memoval = self.lib._memotable.get(memokey) if memoval is not None: return memoval album = self.lib.get_album(album_id) return self._tmpl_unique( "aunique", keys, disam, bracket, album_id, album, album.item_keys, # Do nothing for singletons. lambda a: a is None, ) def tmpl_sunique(self, keys=None, disam=None, bracket=None): """Generate a string that is guaranteed to be unique among all singletons in the library who share the same set of keys. A fields from "disam" is used in the string if one is sufficient to disambiguate the albums. Otherwise, a fallback opaque value is used. Both "keys" and "disam" should be given as whitespace-separated lists of field names, while "bracket" is a pair of characters to be used as brackets surrounding the disambiguator or empty to have no brackets. """ # Fast paths: no album, no item or library, or memoized value. if not self.item or not self.lib: return "" if isinstance(self.item, Item): item_id = self.item.id else: raise NotImplementedError("sunique is only implemented for items") if item_id is None: return "" return self._tmpl_unique( "sunique", keys, disam, bracket, item_id, self.item, Item.all_keys(), # Do nothing for non singletons. lambda i: i.album_id is not None, initial_subqueries=[dbcore.query.NoneQuery("album_id", True)], ) def _tmpl_unique_memokey(self, name, keys, disam, item_id): """Get the memokey for the unique template named "name" for the specific parameters. """ return (name, keys, disam, item_id) def _tmpl_unique( self, name, keys, disam, bracket, item_id, db_item, item_keys, skip_item, initial_subqueries=None, ): """Generate a string that is guaranteed to be unique among all items of the same type as "db_item" who share the same set of keys. A field from "disam" is used in the string if one is sufficient to disambiguate the items. Otherwise, a fallback opaque value is used. Both "keys" and "disam" should be given as whitespace-separated lists of field names, while "bracket" is a pair of characters to be used as brackets surrounding the disambiguator or empty to have no brackets. "name" is the name of the templates. It is also the name of the configuration section where the default values of the parameters are stored. "skip_item" is a function that must return True when the template should return an empty string. "initial_subqueries" is a list of subqueries that should be included in the query to find the ambiguous items. """ memokey = self._tmpl_unique_memokey(name, keys, disam, item_id) memoval = self.lib._memotable.get(memokey) if memoval is not None: return memoval if skip_item(db_item): self.lib._memotable[memokey] = "" return "" keys = keys or beets.config[name]["keys"].as_str() disam = disam or beets.config[name]["disambiguators"].as_str() if bracket is None: bracket = beets.config[name]["bracket"].as_str() keys = keys.split() disam = disam.split() # Assign a left and right bracket or leave blank if argument is empty. if len(bracket) == 2: bracket_l = bracket[0] bracket_r = bracket[1] else: bracket_l = "" bracket_r = "" # Find matching items to disambiguate with. subqueries = [] if initial_subqueries is not None: subqueries.extend(initial_subqueries) for key in keys: value = db_item.get(key, "") # Use slow queries for flexible attributes. fast = key in item_keys subqueries.append(dbcore.MatchQuery(key, value, fast)) query = dbcore.AndQuery(subqueries) ambigous_items = ( self.lib.items(query) if isinstance(db_item, Item) else self.lib.albums(query) ) # If there's only one item to matching these details, then do # nothing. if len(ambigous_items) == 1: self.lib._memotable[memokey] = "" return "" # Find the first disambiguator that distinguishes the items. for disambiguator in disam: # Get the value for each item for the current field. disam_values = {s.get(disambiguator, "") for s in ambigous_items} # If the set of unique values is equal to the number of # items in the disambiguation set, we're done -- this is # sufficient disambiguation. if len(disam_values) == len(ambigous_items): break else: # No disambiguator distinguished all fields. res = f" {bracket_l}{item_id}{bracket_r}" self.lib._memotable[memokey] = res return res # Flatten disambiguation value into a string. disam_value = db_item.formatted(for_path=True).get(disambiguator) # Return empty string if disambiguator is empty. if disam_value: res = f" {bracket_l}{disam_value}{bracket_r}" else: res = "" self.lib._memotable[memokey] = res return res @staticmethod def tmpl_first(s, count=1, skip=0, sep="; ", join_str="; "): """Get the item(s) from x to y in a string separated by something and join then with something. Args: s: the string count: The number of items included skip: The number of items skipped sep: the separator. Usually is '; ' (default) or '/ ' join_str: the string which will join the items, default '; '. """ skip = int(skip) count = skip + int(count) return join_str.join(s.split(sep)[skip:count]) def tmpl_ifdef(self, field, trueval="", falseval=""): """If field exists return trueval or the field (default) otherwise, emit return falseval (if provided). Args: field: The name of the field trueval: The string if the condition is true falseval: The string if the condition is false Returns: The string, based on condition. """ if field in self.item: return trueval if trueval else self.item.formatted().get(field) else: return falseval # Get the name of tmpl_* functions in the above class. DefaultTemplateFunctions._func_names = [ s for s in dir(DefaultTemplateFunctions) if s.startswith(DefaultTemplateFunctions._prefix) ]
configmanager
configmanager_constants
# Copyright (C) 2011 Chris Dekter # Copyright (C) 2018 Thomas Hess <thomas.hess@udo.edu> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """This file holds constants used in the configuration manager.""" import os.path from autokey import common # Configuration file location CONFIG_FILE = os.path.join(common.CONFIG_DIR, "autokey.json") CONFIG_DEFAULT_FOLDER = os.path.join(common.CONFIG_DIR, "data") CONFIG_FILE_BACKUP = CONFIG_FILE + "~" DEFAULT_ABBR_FOLDER = "Imported Abbreviations" RECENT_ENTRIES_FOLDER = "Recently Typed" # JSON Key names used in the configuration file INTERFACE_TYPE = "interfaceType" IS_FIRST_RUN = "isFirstRun" SERVICE_RUNNING = "serviceRunning" MENU_TAKES_FOCUS = "menuTakesFocus" SHOW_TRAY_ICON = "showTrayIcon" SORT_BY_USAGE_COUNT = "sortByUsageCount" PROMPT_TO_SAVE = "promptToSave" INPUT_SAVINGS = "inputSavings" ENABLE_QT4_WORKAROUND = "enableQT4Workaround" UNDO_USING_BACKSPACE = "undoUsingBackspace" WINDOW_DEFAULT_SIZE = "windowDefaultSize" HPANE_POSITION = "hPanePosition" COLUMN_WIDTHS = "columnWidths" SHOW_TOOLBAR = "showToolbar" NOTIFICATION_ICON = "notificationIcon" WORKAROUND_APP_REGEX = "workAroundApps" DISABLED_MODIFIERS = "disabledModifiers" TRIGGER_BY_INITIAL = "triggerItemByInitial" SCRIPT_GLOBALS = "scriptGlobals" GTK_THEME = "gtkTheme" GTK_TREE_VIEW_EXPANDED_ROWS = "gtkExpandedRows" PATH_LAST_OPEN = "pathLastOpen"
src
class_sqlThread
""" sqlThread is defined here """ import os import shutil # used for moving the messages.dat file import sqlite3 import sys import threading import time try: import helper_sql import helper_startup import paths import queues import state from addresses import encodeAddress from bmconfigparser import config, config_ready from debug import logger from tr import _translate except ImportError: from . import helper_sql, helper_startup, paths, queues, state from .addresses import encodeAddress from .bmconfigparser import config, config_ready from .debug import logger from .tr import _translate class sqlThread(threading.Thread): """A thread for all SQL operations""" def __init__(self): threading.Thread.__init__(self, name="SQL") def run( self, ): # pylint: disable=too-many-locals, too-many-branches, too-many-statements """Process SQL queries from `.helper_sql.sqlSubmitQueue`""" helper_sql.sql_available = True config_ready.wait() self.conn = sqlite3.connect(state.appdata + "messages.dat") self.conn.text_factory = str self.cur = self.conn.cursor() self.cur.execute("PRAGMA secure_delete = true") # call create_function for encode address self.create_function() try: self.cur.execute( """CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text,""" """ received text, message text, folder text, encodingtype int, read bool, sighash blob,""" """ UNIQUE(msgid) ON CONFLICT REPLACE)""" ) self.cur.execute( """CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text,""" """ message text, ackdata blob, senttime integer, lastactiontime integer,""" """ sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)""" ) self.cur.execute( """CREATE TABLE subscriptions (label text, address text, enabled bool)""" ) self.cur.execute( """CREATE TABLE addressbook (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)""" ) self.cur.execute( """CREATE TABLE blacklist (label text, address text, enabled bool)""" ) self.cur.execute( """CREATE TABLE whitelist (label text, address text, enabled bool)""" ) self.cur.execute( """CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int,""" """ usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)""" ) self.cur.execute( """CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob,""" """ expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)""" ) self.cur.execute( """INSERT INTO subscriptions VALUES""" """('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)""" ) self.cur.execute( """CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)""" ) self.cur.execute("""INSERT INTO settings VALUES('version','11')""") self.cur.execute( """INSERT INTO settings VALUES('lastvacuumtime',?)""", (int(time.time()),), ) self.cur.execute( """CREATE TABLE objectprocessorqueue""" """ (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)""" ) self.conn.commit() logger.info("Created messages database file") except Exception as err: if str(err) == "table inbox already exists": logger.debug("Database file already exists.") else: sys.stderr.write( "ERROR trying to create database file (message.dat). Error message: %s\n" % str(err) ) os._exit(0) # If the settings version is equal to 2 or 3 then the # sqlThread will modify the pubkeys table and change # the settings version to 4. settingsversion = config.getint("bitmessagesettings", "settingsversion") # People running earlier versions of PyBitmessage do not have the # usedpersonally field in their pubkeys table. Let's add it. if settingsversion == 2: item = """ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' """ parameters = "" self.cur.execute(item, parameters) self.conn.commit() settingsversion = 3 # People running earlier versions of PyBitmessage do not have the # encodingtype field in their inbox and sent tables or the read field # in the inbox table. Let's add them. if settingsversion == 3: item = """ALTER TABLE inbox ADD encodingtype int DEFAULT '2' """ parameters = "" self.cur.execute(item, parameters) item = """ALTER TABLE inbox ADD read bool DEFAULT '1' """ parameters = "" self.cur.execute(item, parameters) item = """ALTER TABLE sent ADD encodingtype int DEFAULT '2' """ parameters = "" self.cur.execute(item, parameters) self.conn.commit() settingsversion = 4 config.set("bitmessagesettings", "settingsversion", str(settingsversion)) config.save() helper_startup.updateConfig() # From now on, let us keep a 'version' embedded in the messages.dat # file so that when we make changes to the database, the database # version we are on can stay embedded in the messages.dat file. Let us # check to see if the settings table exists yet. item = ( """SELECT name FROM sqlite_master WHERE type='table' AND name='settings';""" ) parameters = "" self.cur.execute(item, parameters) if self.cur.fetchall() == []: # The settings table doesn't exist. We need to make it. logger.debug("In messages.dat database, creating new 'settings' table.") self.cur.execute( """CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)""" ) self.cur.execute("""INSERT INTO settings VALUES('version','1')""") self.cur.execute( """INSERT INTO settings VALUES('lastvacuumtime',?)""", (int(time.time()),), ) logger.debug( "In messages.dat database, removing an obsolete field from the pubkeys table." ) self.cur.execute( """CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int,""" """ usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);""" ) self.cur.execute( """INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;""" ) self.cur.execute("""DROP TABLE pubkeys""") self.cur.execute( """CREATE TABLE pubkeys""" """ (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)""" ) self.cur.execute( """INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;""" ) self.cur.execute("""DROP TABLE pubkeys_backup;""") logger.debug( "Deleting all pubkeys from inventory." " They will be redownloaded and then saved with the correct times." ) self.cur.execute("""delete from inventory where objecttype = 'pubkey';""") logger.debug( "replacing Bitmessage announcements mailing list with a new one." ) self.cur.execute( """delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' """ ) self.cur.execute( """INSERT INTO subscriptions VALUES""" """('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)""" ) logger.debug("Commiting.") self.conn.commit() logger.debug( "Vacuuming message.dat. You might notice that the file size gets much smaller." ) self.cur.execute(""" VACUUM """) # After code refactoring, the possible status values for sent messages # have changed. self.cur.execute( """update sent set status='doingmsgpow' where status='doingpow' """ ) self.cur.execute( """update sent set status='msgsent' where status='sentmessage' """ ) self.cur.execute( """update sent set status='doingpubkeypow' where status='findingpubkey' """ ) self.cur.execute( """update sent set status='broadcastqueued' where status='broadcastpending' """ ) self.conn.commit() # Let's get rid of the first20bytesofencryptedmessage field in # the inventory table. item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) if int(self.cur.fetchall()[0][0]) == 2: logger.debug( "In messages.dat database, removing an obsolete field from" " the inventory table." ) self.cur.execute( """CREATE TEMPORARY TABLE inventory_backup""" """(hash blob, objecttype text, streamnumber int, payload blob,""" """ receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);""" ) self.cur.execute( """INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime""" """ FROM inventory;""" ) self.cur.execute("""DROP TABLE inventory""") self.cur.execute( """CREATE TABLE inventory""" """ (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer,""" """ UNIQUE(hash) ON CONFLICT REPLACE)""" ) self.cur.execute( """INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime""" """ FROM inventory_backup;""" ) self.cur.execute("""DROP TABLE inventory_backup;""") item = """update settings set value=? WHERE key='version';""" parameters = (3,) self.cur.execute(item, parameters) # Add a new column to the inventory table to store tags. item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 1 or currentVersion == 3: logger.debug( "In messages.dat database, adding tag field to" " the inventory table." ) item = """ALTER TABLE inventory ADD tag blob DEFAULT '' """ parameters = "" self.cur.execute(item, parameters) item = """update settings set value=? WHERE key='version';""" parameters = (4,) self.cur.execute(item, parameters) # Add a new column to the pubkeys table to store the address version. # We're going to trash all of our pubkeys and let them be redownloaded. item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 4: self.cur.execute("""DROP TABLE pubkeys""") self.cur.execute( """CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int,""" """usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)""" ) self.cur.execute("""delete from inventory where objecttype = 'pubkey';""") item = """update settings set value=? WHERE key='version';""" parameters = (5,) self.cur.execute(item, parameters) # Add a new table: objectprocessorqueue with which to hold objects # that have yet to be processed if the user shuts down Bitmessage. item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 5: self.cur.execute("""DROP TABLE knownnodes""") self.cur.execute( """CREATE TABLE objectprocessorqueue""" """ (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)""" ) item = """update settings set value=? WHERE key='version';""" parameters = (6,) self.cur.execute(item, parameters) # changes related to protocol v3 # In table inventory and objectprocessorqueue, objecttype is now # an integer (it was a human-friendly string previously) item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 6: logger.debug( "In messages.dat database, dropping and recreating" " the inventory table." ) self.cur.execute("""DROP TABLE inventory""") self.cur.execute( """CREATE TABLE inventory""" """ (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer,""" """ tag blob, UNIQUE(hash) ON CONFLICT REPLACE)""" ) self.cur.execute("""DROP TABLE objectprocessorqueue""") self.cur.execute( """CREATE TABLE objectprocessorqueue""" """ (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)""" ) item = """update settings set value=? WHERE key='version';""" parameters = (7,) self.cur.execute(item, parameters) logger.debug("Finished dropping and recreating the inventory table.") # The format of data stored in the pubkeys table has changed. Let's # clear it, and the pubkeys from inventory, so that they'll # be re-downloaded. item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 7: logger.debug( "In messages.dat database, clearing pubkeys table" " because the data format has been updated." ) self.cur.execute("""delete from inventory where objecttype = 1;""") self.cur.execute("""delete from pubkeys;""") # Any sending messages for which we *thought* that we had # the pubkey must be rechecked. self.cur.execute( """UPDATE sent SET status='msgqueued' WHERE status='doingmsgpow' or status='badkey';""" ) query = """update settings set value=? WHERE key='version';""" parameters = (8,) self.cur.execute(query, parameters) logger.debug("Finished clearing currently held pubkeys.") # Add a new column to the inbox table to store the hash of # the message signature. We'll use this as temporary message UUID # in order to detect duplicates. item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 8: logger.debug( "In messages.dat database, adding sighash field to" " the inbox table." ) item = """ALTER TABLE inbox ADD sighash blob DEFAULT '' """ parameters = "" self.cur.execute(item, parameters) item = """update settings set value=? WHERE key='version';""" parameters = (9,) self.cur.execute(item, parameters) # We'll also need a `sleeptill` field and a `ttl` field. Also we # can combine the pubkeyretrynumber and msgretrynumber into one. item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 9: logger.info( "In messages.dat database, making TTL-related changes:" " combining the pubkeyretrynumber and msgretrynumber" " fields into the retrynumber field and adding the" " sleeptill and ttl fields..." ) self.cur.execute( """CREATE TEMPORARY TABLE sent_backup""" """ (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,""" """ ackdata blob, lastactiontime integer, status text, retrynumber integer,""" """ folder text, encodingtype int)""" ) self.cur.execute( """INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress,""" """ subject, message, ackdata, lastactiontime,""" """ status, 0, folder, encodingtype FROM sent;""" ) self.cur.execute("""DROP TABLE sent""") self.cur.execute( """CREATE TABLE sent""" """ (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,""" """ ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text,""" """ retrynumber integer, folder text, encodingtype int, ttl int)""" ) self.cur.execute( """INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata,""" """ lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;""" ) self.cur.execute("""DROP TABLE sent_backup""") logger.info( "In messages.dat database, finished making TTL-related changes." ) logger.debug( "In messages.dat database, adding address field to the pubkeys table." ) # We're going to have to calculate the address for each row in the pubkeys # table. Then we can take out the hash field. self.cur.execute("""ALTER TABLE pubkeys ADD address text DEFAULT '' ;""") # replica for loop to update hashed address self.cur.execute( """UPDATE pubkeys SET address=(enaddr(pubkeys.addressversion, 1, hash)); """ ) # Now we can remove the hash field from the pubkeys table. self.cur.execute( """CREATE TEMPORARY TABLE pubkeys_backup""" """ (address text, addressversion int, transmitdata blob, time int,""" """ usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)""" ) self.cur.execute( """INSERT INTO pubkeys_backup""" """ SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;""" ) self.cur.execute("""DROP TABLE pubkeys""") self.cur.execute( """CREATE TABLE pubkeys""" """ (address text, addressversion int, transmitdata blob, time int, usedpersonally text,""" """ UNIQUE(address) ON CONFLICT REPLACE)""" ) self.cur.execute( """INSERT INTO pubkeys SELECT""" """ address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;""" ) self.cur.execute("""DROP TABLE pubkeys_backup""") logger.debug( "In messages.dat database, done adding address field to the pubkeys table" " and removing the hash field." ) self.cur.execute("""update settings set value=10 WHERE key='version';""") # Update the address colunm to unique in addressbook table item = """SELECT value FROM settings WHERE key='version';""" parameters = "" self.cur.execute(item, parameters) currentVersion = int(self.cur.fetchall()[0][0]) if currentVersion == 10: logger.debug( "In messages.dat database, updating address column to UNIQUE" " in the addressbook table." ) self.cur.execute("""ALTER TABLE addressbook RENAME TO old_addressbook""") self.cur.execute( """CREATE TABLE addressbook""" """ (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)""" ) self.cur.execute( """INSERT INTO addressbook SELECT label, address FROM old_addressbook;""" ) self.cur.execute("""DROP TABLE old_addressbook""") self.cur.execute("""update settings set value=11 WHERE key='version';""") # Are you hoping to add a new option to the keys.dat file of existing # Bitmessage users or modify the SQLite database? Add it right # above this line! try: testpayload = "\x00\x00" t = ("1234", 1, testpayload, "12345678", "no") self.cur.execute("""INSERT INTO pubkeys VALUES(?,?,?,?,?)""", t) self.conn.commit() self.cur.execute( """SELECT transmitdata FROM pubkeys WHERE address='1234' """ ) queryreturn = self.cur.fetchall() for row in queryreturn: (transmitdata,) = row self.cur.execute("""DELETE FROM pubkeys WHERE address='1234' """) self.conn.commit() if transmitdata == "": logger.fatal( "Problem: The version of SQLite you have cannot store Null values." " Please download and install the latest revision of your version of Python" " (for example, the latest Python 2.7 revision) and try again.\n" ) logger.fatal( "PyBitmessage will now exit very abruptly." " You may now see threading errors related to this abrupt exit" " but the problem you need to solve is related to SQLite.\n\n" ) os._exit(0) except Exception as err: if str(err) == "database or disk is full": logger.fatal( "(While null value test) Alert: Your disk or data storage volume is full." " sqlThread will now exit." ) queues.UISignalQueue.put( ( "alert", ( _translate("MainWindow", "Disk full"), _translate( "MainWindow", "Alert: Your disk or data storage volume is full. Bitmessage will now exit.", ), True, ), ) ) os._exit(0) else: logger.error(err) # Let us check to see the last time we vaccumed the messages.dat file. # If it has been more than a month let's do it now. item = """SELECT value FROM settings WHERE key='lastvacuumtime';""" parameters = "" self.cur.execute(item, parameters) queryreturn = self.cur.fetchall() for row in queryreturn: (value,) = row if int(value) < int(time.time()) - 86400: logger.info( "It has been a long time since the messages.dat file has been vacuumed. Vacuuming now..." ) try: self.cur.execute(""" VACUUM """) except Exception as err: if str(err) == "database or disk is full": logger.fatal( "(While VACUUM) Alert: Your disk or data storage volume is full." " sqlThread will now exit." ) queues.UISignalQueue.put( ( "alert", ( _translate("MainWindow", "Disk full"), _translate( "MainWindow", "Alert: Your disk or data storage volume is full. Bitmessage will now exit.", ), True, ), ) ) os._exit(0) item = """update settings set value=? WHERE key='lastvacuumtime';""" parameters = (int(time.time()),) self.cur.execute(item, parameters) helper_sql.sql_ready.set() while True: item = helper_sql.sqlSubmitQueue.get() if item == "commit": try: self.conn.commit() except Exception as err: if str(err) == "database or disk is full": logger.fatal( "(While committing) Alert: Your disk or data storage volume is full." " sqlThread will now exit." ) queues.UISignalQueue.put( ( "alert", ( _translate("MainWindow", "Disk full"), _translate( "MainWindow", "Alert: Your disk or data storage volume is full. Bitmessage will now exit.", ), True, ), ) ) os._exit(0) elif item == "exit": self.conn.close() logger.info("sqlThread exiting gracefully.") return elif item == "movemessagstoprog": logger.debug( "the sqlThread is moving the messages.dat file to the local program directory." ) try: self.conn.commit() except Exception as err: if str(err) == "database or disk is full": logger.fatal( "(while movemessagstoprog) Alert: Your disk or data storage volume is full." " sqlThread will now exit." ) queues.UISignalQueue.put( ( "alert", ( _translate("MainWindow", "Disk full"), _translate( "MainWindow", "Alert: Your disk or data storage volume is full. Bitmessage will now exit.", ), True, ), ) ) os._exit(0) self.conn.close() shutil.move( paths.lookupAppdataFolder() + "messages.dat", paths.lookupExeFolder() + "messages.dat", ) self.conn = sqlite3.connect(paths.lookupExeFolder() + "messages.dat") self.conn.text_factory = str self.cur = self.conn.cursor() elif item == "movemessagstoappdata": logger.debug( "the sqlThread is moving the messages.dat file to the Appdata folder." ) try: self.conn.commit() except Exception as err: if str(err) == "database or disk is full": logger.fatal( "(while movemessagstoappdata) Alert: Your disk or data storage volume is full." " sqlThread will now exit." ) queues.UISignalQueue.put( ( "alert", ( _translate("MainWindow", "Disk full"), _translate( "MainWindow", "Alert: Your disk or data storage volume is full. Bitmessage will now exit.", ), True, ), ) ) os._exit(0) self.conn.close() shutil.move( paths.lookupExeFolder() + "messages.dat", paths.lookupAppdataFolder() + "messages.dat", ) self.conn = sqlite3.connect( paths.lookupAppdataFolder() + "messages.dat" ) self.conn.text_factory = str self.cur = self.conn.cursor() elif item == "deleteandvacuume": self.cur.execute("""delete from inbox where folder='trash' """) self.cur.execute("""delete from sent where folder='trash' """) self.conn.commit() try: self.cur.execute(""" VACUUM """) except Exception as err: if str(err) == "database or disk is full": logger.fatal( "(while deleteandvacuume) Alert: Your disk or data storage volume is full." " sqlThread will now exit." ) queues.UISignalQueue.put( ( "alert", ( _translate("MainWindow", "Disk full"), _translate( "MainWindow", "Alert: Your disk or data storage volume is full. Bitmessage will now exit.", ), True, ), ) ) os._exit(0) else: parameters = helper_sql.sqlSubmitQueue.get() rowcount = 0 try: self.cur.execute(item, parameters) rowcount = self.cur.rowcount except Exception as err: if str(err) == "database or disk is full": logger.fatal( "(while cur.execute) Alert: Your disk or data storage volume is full." " sqlThread will now exit." ) queues.UISignalQueue.put( ( "alert", ( _translate("MainWindow", "Disk full"), _translate( "MainWindow", "Alert: Your disk or data storage volume is full. Bitmessage will now exit.", ), True, ), ) ) os._exit(0) else: logger.fatal( "Major error occurred when trying to execute a SQL statement within the sqlThread." " Please tell Atheros about this error message or post it in the forum!" ' Error occurred while trying to execute statement: "%s" Here are the parameters;' " you might want to censor this data with asterisks (***)" " as it can contain private information: %s." " Here is the actual error message thrown by the sqlThread: %s", str(item), str(repr(parameters)), str(err), ) logger.fatal("This program shall now abruptly exit!") os._exit(0) helper_sql.sqlReturnQueue.put((self.cur.fetchall(), rowcount)) # helper_sql.sqlSubmitQueue.task_done() def create_function(self): # create_function try: self.conn.create_function( "enaddr", 3, func=encodeAddress, deterministic=True ) except (TypeError, sqlite3.NotSupportedError) as err: logger.debug( "Got error while pass deterministic in sqlite create function {}, Passing 3 params".format( err ) ) self.conn.create_function("enaddr", 3, encodeAddress)
migrations
0102_dashboarditem_filters_hash
# Generated by Django 3.0.6 on 2020-11-24 11:39 import hashlib from django.db import migrations, models from posthog.models.filters import Filter def generate_cache_key(stringified: str) -> str: return "cache_" + hashlib.md5(stringified.encode("utf-8")).hexdigest() def forward(apps, schema_editor): DashboardItem = apps.get_model("posthog", "DashboardItem") for item in DashboardItem.objects.filter( filters__isnull=False, dashboard__isnull=False ).exclude(filters={}): filter = Filter(data=item.filters) item.filters_hash = generate_cache_key(f"{filter.toJSON()}_{item.team_id}") item.save() def reverse(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [ ("posthog", "0101_org_owners"), ] operations = [ migrations.AddField( model_name="dashboarditem", name="filters_hash", field=models.CharField(blank=True, max_length=400, null=True), ), migrations.RunPython(forward, reverse, elidable=True), ]
extractor
livestream
from __future__ import unicode_literals import itertools import re from ..compat import compat_str, compat_urlparse from ..utils import ( determine_ext, find_xpath_attr, float_or_none, int_or_none, orderedSet, parse_iso8601, update_url_query, xpath_attr, xpath_text, xpath_with_ns, ) from .common import InfoExtractor class LivestreamIE(InfoExtractor): IE_NAME = "livestream" _VALID_URL = r"https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?" _TESTS = [ { "url": "http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370", "md5": "53274c76ba7754fb0e8d072716f2292b", "info_dict": { "id": "4719370", "ext": "mp4", "title": "Live from Webster Hall NYC", "timestamp": 1350008072, "upload_date": "20121012", "duration": 5968.0, "like_count": int, "view_count": int, "thumbnail": r"re:^http://.*\.jpg$", }, }, { "url": "http://new.livestream.com/tedx/cityenglish", "info_dict": { "title": "TEDCity2.0 (English)", "id": "2245590", }, "playlist_mincount": 4, }, { "url": "http://new.livestream.com/chess24/tatasteelchess", "info_dict": { "title": "Tata Steel Chess", "id": "3705884", }, "playlist_mincount": 60, }, { "url": "https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640", "only_matching": True, }, { "url": "http://livestream.com/bsww/concacafbeachsoccercampeonato2015", "only_matching": True, }, ] _API_URL_TEMPLATE = "http://livestream.com/api/accounts/%s/events/%s" def _parse_smil_formats( self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None, ): base_ele = find_xpath_attr( smil, self._xpath_ns(".//meta", namespace), "name", "httpBase" ) base = ( base_ele.get("content") if base_ele is not None else "http://livestreamvod-f.akamaihd.net/" ) formats = [] video_nodes = smil.findall(self._xpath_ns(".//video", namespace)) for vn in video_nodes: tbr = int_or_none(vn.attrib.get("system-bitrate"), 1000) furl = update_url_query( compat_urlparse.urljoin(base, vn.attrib["src"]), { "v": "3.0.3", "fp": "WIN% 14,0,0,145", }, ) if "clipBegin" in vn.attrib: furl += "&ssek=" + vn.attrib["clipBegin"] formats.append( { "url": furl, "format_id": "smil_%d" % tbr, "ext": "flv", "tbr": tbr, "preference": -1000, } ) return formats def _extract_video_info(self, video_data): video_id = compat_str(video_data["id"]) FORMAT_KEYS = ( ("sd", "progressive_url"), ("hd", "progressive_url_hd"), ) formats = [] for format_id, key in FORMAT_KEYS: video_url = video_data.get(key) if video_url: ext = determine_ext(video_url) if ext == "m3u8": continue bitrate = int_or_none( self._search_regex( r"(\d+)\.%s" % ext, video_url, "bitrate", default=None ) ) formats.append( { "url": video_url, "format_id": format_id, "tbr": bitrate, "ext": ext, } ) smil_url = video_data.get("smil_url") if smil_url: formats.extend(self._extract_smil_formats(smil_url, video_id, fatal=False)) m3u8_url = video_data.get("m3u8_url") if m3u8_url: formats.extend( self._extract_m3u8_formats( m3u8_url, video_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False ) ) f4m_url = video_data.get("f4m_url") if f4m_url: formats.extend( self._extract_f4m_formats(f4m_url, video_id, f4m_id="hds", fatal=False) ) self._sort_formats(formats) comments = [ { "author_id": comment.get("author_id"), "author": comment.get("author", {}).get("full_name"), "id": comment.get("id"), "text": comment["text"], "timestamp": parse_iso8601(comment.get("created_at")), } for comment in video_data.get("comments", {}).get("data", []) ] return { "id": video_id, "formats": formats, "title": video_data["caption"], "description": video_data.get("description"), "thumbnail": video_data.get("thumbnail_url"), "duration": float_or_none(video_data.get("duration"), 1000), "timestamp": parse_iso8601(video_data.get("publish_at")), "like_count": video_data.get("likes", {}).get("total"), "comment_count": video_data.get("comments", {}).get("total"), "view_count": video_data.get("views"), "comments": comments, } def _extract_stream_info(self, stream_info): broadcast_id = compat_str(stream_info["broadcast_id"]) is_live = stream_info.get("is_live") formats = [] smil_url = stream_info.get("play_url") if smil_url: formats.extend(self._extract_smil_formats(smil_url, broadcast_id)) m3u8_url = stream_info.get("m3u8_url") if m3u8_url: formats.extend( self._extract_m3u8_formats( m3u8_url, broadcast_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False, ) ) rtsp_url = stream_info.get("rtsp_url") if rtsp_url: formats.append( { "url": rtsp_url, "format_id": "rtsp", } ) self._sort_formats(formats) return { "id": broadcast_id, "formats": formats, "title": self._live_title(stream_info["stream_title"]) if is_live else stream_info["stream_title"], "thumbnail": stream_info.get("thumbnail_url"), "is_live": is_live, } def _extract_event(self, event_data): event_id = compat_str(event_data["id"]) account_id = compat_str(event_data["owner_account_id"]) feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + "/feed.json" stream_info = event_data.get("stream_info") if stream_info: return self._extract_stream_info(stream_info) last_video = None entries = [] for i in itertools.count(1): if last_video is None: info_url = feed_root_url else: info_url = "{root}?&id={id}&newer=-1&type=video".format( root=feed_root_url, id=last_video ) videos_info = self._download_json( info_url, event_id, "Downloading page {0}".format(i) )["data"] videos_info = [v["data"] for v in videos_info if v["type"] == "video"] if not videos_info: break for v in videos_info: v_id = compat_str(v["id"]) entries.append( self.url_result( "http://livestream.com/accounts/%s/events/%s/videos/%s" % (account_id, event_id, v_id), "Livestream", v_id, v.get("caption"), ) ) last_video = videos_info[-1]["id"] return self.playlist_result(entries, event_id, event_data["full_name"]) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group("id") event = mobj.group("event_id") or mobj.group("event_name") account = mobj.group("account_id") or mobj.group("account_name") api_url = self._API_URL_TEMPLATE % (account, event) if video_id: video_data = self._download_json( api_url + "/videos/%s" % video_id, video_id ) return self._extract_video_info(video_data) else: event_data = self._download_json(api_url, video_id) return self._extract_event(event_data) # The original version of Livestream uses a different system class LivestreamOriginalIE(InfoExtractor): IE_NAME = "livestream:original" _VALID_URL = r"""(?x)https?://original\.livestream\.com/ (?P<user>[^/\?#]+)(?:/(?P<type>video|folder) (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)? """ _TESTS = [ { "url": "http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb", "info_dict": { "id": "pla_8aa4a3f1-ba15-46a4-893b-902210e138fb", "ext": "mp4", "title": "Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital", "duration": 771.301, "view_count": int, }, }, { "url": "https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3", "info_dict": { "id": "a07bf706-d0e4-4e75-a747-b021d84f2fd3", }, "playlist_mincount": 4, }, { # live stream "url": "http://original.livestream.com/znsbahamas", "only_matching": True, }, ] def _extract_video_info(self, user, video_id): api_url = ( "http://x%sx.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id=%s" % (user, video_id) ) info = self._download_xml(api_url, video_id) item = info.find("channel").find("item") title = xpath_text(item, "title") media_ns = {"media": "http://search.yahoo.com/mrss"} thumbnail_url = xpath_attr( item, xpath_with_ns("media:thumbnail", media_ns), "url" ) duration = float_or_none( xpath_attr(item, xpath_with_ns("media:content", media_ns), "duration") ) ls_ns = {"ls": "http://api.channel.livestream.com/2.0"} view_count = int_or_none( xpath_text(item, xpath_with_ns("ls:viewsCount", ls_ns)) ) return { "id": video_id, "title": title, "thumbnail": thumbnail_url, "duration": duration, "view_count": view_count, } def _extract_video_formats(self, video_data, video_id): formats = [] progressive_url = video_data.get("progressiveUrl") if progressive_url: formats.append( { "url": progressive_url, "format_id": "http", } ) m3u8_url = video_data.get("httpUrl") if m3u8_url: formats.extend( self._extract_m3u8_formats( m3u8_url, video_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False ) ) rtsp_url = video_data.get("rtspUrl") if rtsp_url: formats.append( { "url": rtsp_url, "format_id": "rtsp", } ) self._sort_formats(formats) return formats def _extract_folder(self, url, folder_id): webpage = self._download_webpage(url, folder_id) paths = orderedSet( re.findall( r'''(?x)(?: <li\s+class="folder">\s*<a\s+href="| <a\s+href="(?=https?://livestre\.am/) )([^"]+)"''', webpage, ) ) entries = [ { "_type": "url", "url": compat_urlparse.urljoin(url, p), } for p in paths ] return self.playlist_result(entries, folder_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) user = mobj.group("user") url_type = mobj.group("type") content_id = mobj.group("id") if url_type == "folder": return self._extract_folder(url, content_id) else: # this url is used on mobile devices stream_url = ( "http://x%sx.api.channel.livestream.com/3.0/getstream.json" % user ) info = {} if content_id: stream_url += "?id=%s" % content_id info = self._extract_video_info(user, content_id) else: content_id = user webpage = self._download_webpage(url, content_id) info = { "title": self._og_search_title(webpage), "description": self._og_search_description(webpage), "thumbnail": self._search_regex( r'channelLogo\.src\s*=\s*"([^"]+)"', webpage, "thumbnail", None ), } video_data = self._download_json(stream_url, content_id) is_live = video_data.get("isLive") info.update( { "id": content_id, "title": self._live_title(info["title"]) if is_live else info["title"], "formats": self._extract_video_formats(video_data, content_id), "is_live": is_live, } ) return info # The server doesn't support HEAD request, the generic extractor can't detect # the redirection class LivestreamShortenerIE(InfoExtractor): IE_NAME = "livestream:shortener" IE_DESC = False # Do not list _VALID_URL = r"https?://livestre\.am/(?P<id>.+)" def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) id = mobj.group("id") webpage = self._download_webpage(url, id) return self.url_result(self._og_search_url(webpage))
Stats
setup
# # Copyright (C) 2009 Ian Martin <ianmartin@cantab.net> # Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com> # # Basic plugin template created by: # Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com> # Copyright (C) 2007-2008 Andrew Resch <andrewresch@gmail.com> # # This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with # the additional special exception to link portions of this program with the OpenSSL library. # See LICENSE for more details. # from setuptools import find_packages, setup __plugin_name__ = "Stats" __author__ = "Ian Martin" __author_email__ = "ianmartin@cantab.net" __version__ = "0.4" __url__ = "http://deluge-torrent.org" __license__ = "GPLv3" __description__ = "Display stats graphs" __long_description__ = """ Records lots of extra stats and produces time series graphs""" __pkg_data__ = {"deluge_" + __plugin_name__.lower(): ["template/*", "data/*"]} setup( name=__plugin_name__, version=__version__, description=__description__, author=__author__, author_email=__author_email__, url=__url__, license=__license__, long_description=__long_description__, packages=find_packages(), package_data=__pkg_data__, entry_points=""" [deluge.plugin.core] %s = deluge_%s:CorePlugin [deluge.plugin.gtk3ui] %s = deluge_%s:GtkUIPlugin [deluge.plugin.web] %s = deluge_%s:WebUIPlugin """ % ((__plugin_name__, __plugin_name__.lower()) * 3), )
core
disable
# # Copyright 2013, 2018, 2019 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # """ Disable blocks module """ import logging import os import re import sys from ..tools import CMakeFileEditor from .base import ModTool, ModToolException logger = logging.getLogger(__name__) class ModToolDisable(ModTool): """Disable block (comments out CMake entries for files)""" name = "disable" description = "Disable selected block in module." def __init__(self, blockname=None, **kwargs): ModTool.__init__(self, blockname, **kwargs) self.info["pattern"] = blockname def validate(self): """Validates the arguments""" ModTool._validate(self) if not self.info["pattern"] or self.info["pattern"].isspace(): raise ModToolException("Invalid pattern!") def run(self): """Go, go, go!""" def _handle_py_qa(cmake, fname): """Do stuff for py qa""" cmake.comment_out_lines("GR_ADD_TEST.*" + fname) self.scm.mark_file_updated(cmake.filename) return True def _handle_py_mod(cmake, fname): """Do stuff for py extra files""" try: with open(self._file["pyinit"]) as f: initfile = f.read() except IOError: logger.warning("Could not edit __init__.py, that might be a problem.") return False pymodname = os.path.splitext(fname)[0] initfile = re.sub( r"((from|import)\s+\b" + pymodname + r"\b)", r"#\1", initfile ) with open(self._file["pyinit"], "w") as f: f.write(initfile) self.scm.mark_file_updated(self._file["pyinit"]) return False def _handle_cc_qa(cmake, fname): """Do stuff for cc qa""" if self.info["version"] == "37": cmake.comment_out_lines(r"\$\{CMAKE_CURRENT_SOURCE_DIR\}/" + fname) fname_base = os.path.splitext(fname)[0] # Abusing the CMakeFileEditor... ed = CMakeFileEditor(self._file["qalib"]) ed.comment_out_lines(rf'#include\s+"{fname_base}.h"', comment_str="//") ed.comment_out_lines(rf"{fname_base}::suite\(\)", comment_str="//") ed.write() self.scm.mark_file_updated(self._file["qalib"]) elif self.info["version"] in ["38", "310"]: fname_qa_cc = f'qa_{self.info["blockname"]}.cc' cmake.comment_out_lines(fname_qa_cc) elif self.info["version"] == "36": cmake.comment_out_lines("add_executable.*" + fname) cmake.comment_out_lines( "target_link_libraries.*" + os.path.splitext(fname)[0] ) cmake.comment_out_lines("GR_ADD_TEST.*" + os.path.splitext(fname)[0]) self.scm.mark_file_updated(cmake.filename) return True # This portion will be covered by the CLI if not self.cli: self.validate() else: from ..cli import cli_input # List of special rules: 0: subdir, 1: filename re match, 2: callback special_treatments = ( ("python", r"qa.+py$", _handle_py_qa), ("python", r"^(?!qa).+py$", _handle_py_mod), ("lib", r"qa.+\.cc$", _handle_cc_qa), ) for subdir in self._subdirs: if self.skip_subdirs[subdir]: continue if self.info["version"] in ("37", "38") and subdir == "include": subdir = f'include/{self.info["modname"]}' try: cmake = CMakeFileEditor(os.path.join(subdir, "CMakeLists.txt")) except IOError: continue logger.info(f"Traversing {subdir}...") filenames = [] if self.info["blockname"]: if subdir == "python": blockname_pattern = f"^(qa_)?{self.info['blockname']}.py$" elif subdir == "python/bindings": blockname_pattern = f"^{self.info['blockname']}_python.cc$" elif subdir == "python/bindings/docstrings": blockname_pattern = f"^{self.info['blockname']}_pydoc_template.h$" elif subdir == "lib": blockname_pattern = f"^{self.info['blockname']}_impl(\\.h|\\.cc)$" elif subdir == self.info["includedir"]: blockname_pattern = f"^{self.info['blockname']}.h$" elif subdir == "grc": blockname_pattern = ( f"^{self.info['modname']}_{self.info['blockname']}.block.yml$" ) if blockname_pattern: filenames = cmake.find_filenames_match(blockname_pattern) elif self.info["pattern"]: filenames = cmake.find_filenames_match(self.info["pattern"]) yes = self.info["yes"] for fname in filenames: file_disabled = False if not yes: ans = ( cli_input(f"Really disable {fname}? [Y/n/a/q]: ") .lower() .strip() ) if ans == "a": yes = True if ans == "q": sys.exit(0) if ans == "n": continue for special_treatment in special_treatments: if special_treatment[0] == subdir and re.match( special_treatment[1], fname ): file_disabled = special_treatment[2](cmake, fname) if not file_disabled: cmake.disable_file(fname) cmake.write() self.scm.mark_files_updated((os.path.join(subdir, "CMakeLists.txt"),)) logger.warning("Careful: 'gr_modtool disable' does not resolve dependencies.")
Resources
widgets
import math import wx from pyo.lib._wxwidgets import BACKGROUND_COLOUR from Resources.constants import * from wx.lib.embeddedimage import PyEmbeddedImage KNOB = PyEmbeddedImage( "iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw" "SFlzAAALEgAACxIB0t1+/AAAABR0RVh0Q3JlYXRpb24gVGltZQA4LzcvMTC+wSrGAAAAH3RF" "WHRTb2Z0d2FyZQBNYWNyb21lZGlhIEZpcmV3b3JrcyA4tWjSeAAAApxwclZXeJztl0F22zAM" "RBXXZtIIjmq6RFOHOYXvxHWuo8v0AH26S2/QdgBKdbvte2Q282lJsbMYDAGC1Pef334Mb8Pb" "L2PxYbfFr2WZMZZ5nrdrnstcFtzmUsq8FP9eylL8UcoVo1yv1+0aCCGEEEIIIYQQQgghhPwf" "8k66YRXWd9IXHQKiGDTYM/efhgxhfMRuIVsQfYFwgLrYTdXnohNIvXlekTWKjvqqPvVBjkn1" "UxTogyHkTiFAyDKvIJ0RgkbTl25FuKnHJHGKEvE35FX75KBmHepTkhoAbpqO6Sg9AvDSg3xK" "5ykmk5/i6FOgHfRNpLqvAfj04zbi+xH/E20bwYePSHPQBHUVc29jFHwEP8qrNq9B2EfJJxQf" "KiBaCWAGRgw9a4cMWPHVZZdW/xbBaCGo6bcugLX4YH9NANyfJrEUWD9orX93CPdJ66jV7xmQ" "k/kX128ZQTiE8b62vfOUPP9xEis/jOT+mwagX9R2O/fv69/7z0m8AL0ha9sVUHdbm/zb+p9Q" "ApYANAD335L97vHB5Df73oJR/uY/avP8D7v9w6Opu/26Aq39PUldf839h8F3umQVWP37BuQV" "qCk39z8E1zf5LQFRPpv7E37Ozdd/0Lz6RwR/+ccSSJDPrSNQCVsC3L13X4lPtgF+NfHG+5+v" "QOifbQXeCsAPADlL6/Lz48dWABaA9/8Rc/BS577H+Ue8068NuPb/ZO5zn1M4AqiNduv/0Ro/" "5HOvA+iLHfbWGKwXYuG7Oiagg/yQj2Hbayr5AnV/H+vi/+6wLoLK8+W5p7pRz+C56l+QjddV" "vd87oL0B/XkHDXna91T3APJNfpDLrqN2ZdrvbqOveWdvp4Gqa8/3YbdehBBCCCGEEEIIIYQQ" "QgghhPzDb4qlcnxk/7mEAAAASG1rQkb63sr+AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAppDOhAAAmhG1rVFN4" "nO1923fbOJI3t3f64lw7M3tmH/bF53znO/vUXBK8P1qWaHvaF40kJ3FecnRjR9+kk17HcU8+" "Hf3vWyiAF0Dg1bLkbDPpDiWCBMBfVf2qUAChs587t8twcLhYmqvB388XS98IiEWi1evTw2hp" "rK7Y4dVJN1raxur4ZITHQW8YLT2yGgwvF1DcOYIKIvyz6p+e3i47ffjn8GD0aantaRNtqr3V" "+tpc+wCfFtr71cn5GZQ8gpIPUGJqP0HpQvsnXPF+NeheTGiVB+dY88FwsbTm/qrTPaF97Jyd" "QUtwgNPjVWfYw4uGIZYNj/HQOcWTnZ/xcHjOK+iF+H0wwovCDn4LB3g4ZyeH/cUycFedESsc" "sdpHQ9bIGauPHU4OaC/Paa+MVffCBOTgQGg13QsLDyGcJHAg7GDRw6oSMv+uQkbb1w7g7A18" "/wk+vYfjWPsEJbNtYmbeETPzvjB7zDE70q4Bl9+0d1B2o80LsbEZNvMCbAwlNtOpgI1RgM3U" "Z9hYpDY6ps3gGTN4xgwen8HjM3j81bD/BlqZrIZDfuxfAGrOGE7wD9UAfMEBPNQ+gnJ9BBhB" "tUDpsldmwYSKEU3iFKE5roCmpGlFaEqaNt6sdSKErquAcNjvsJIhO2Yh/YFD2kF7XGhTDuhz" "DugQwIxAF/e1AXz6DOdmpVarxNKM7M3arTVtaLfjunabh9ETjtExnL9GdTuF0g/3Zbc1/EAN" "bIz7web5GjZ31J8Ne8rdI/RkzcIaa49sW3X0Z8e2FVN6F9F5h7ohalCmRECxCCXyQBnI3DhK" "PypR6qKtTcAVFmvTQ8WJbAmnEXDTAuKtrxUna+M4PVPixFm8PkY792tFFucwhByGkHMnXjqG" "41i71b58lZGRKcbsDoPJYTCNGUxjBpM6Il+HaS+B6SM4t5stBo2FIfjUbujZDAaNwaAxGDQG" "g8Zg0BgCNE84NAfAMNfg3Tvw72cKEgfoBw5QNY9GZZrBx7UZQFheCpBvFwCEY7sMRLM7RkaE" "IUQYQjZDyGYI2WzAR8UqjPjoY+BwBU7UwTAeN49Avf4JSvaZDfyKkVQSVTGQZsChJJOiwbMM" "Za3hc2MwSVANzGcczENQuPeYgvklSTd8qaSU3oxh6U8ZlPMNUb5RRPmkocESvwqEdQGKaW0A" "pRP4+1H7UAiRGWwWo82G6/eD0Xcco1dgizdKdLw4v6dm/aLRDL1VwIfsHB/Mr2ASq3Mno/s7" "8NeCXiVgZjsMM3MigRZnRQ2G2rSIwKJxFa1CR5HBzeXOkt59P8C9oY3y3JVlzlICa2qXlLh+" "Rbv8WJxZ5iCaVuUsg2nY9U3TMRiEGMlQDKOitN9mQMxi9zjB7h3G8FNAZryWd67jP9U2i4nU" "fOimpD50ifd0CIPOJNPK/tOcV7Zc5i/hA4lqQBcr3CFCRpWuOPSXYMNnUZCdhJs3rY+bzdPM" "Y0531syuDFtzu+3ESWeu7NWR/F5MhNVBETmvdJyAWCOKNA6riKI754aLStfj2fuNojgA8Mh8" "Dh8666RXTfv6GKfdlE8+StoX2RX4LrIbwBa7DK58aMSb1z2qcsM4+KU+g0XB8QfBrPHDQKGW" "ewnE1Jt8wViuydhMHJrhEKFhxroKssTn3hgHJRWxtYgKW5dh6zJsXYYts2L6YRJJ2NIYZ3C8" "RpTlOI5wRnOq/aOOklYCslpgo0YSdZMS5GTjpo1KigoYAznIaivT3yaIfp+MP8baP0qylj6D" "M2BwYnCXgdO4q17aDE1GkErFpLOsFFBSAKjDEyswUGK5uYDrJsfU5aC6dp56JnOdMZjRWMmk" "IS5EoBMtI5r0lQBlKvphsxNTFfMGxmYWJKh1012bcu9w8Iad9Sn2YrDi3OcpxuDvsPwM3RPo" "onZdONabKtMupGCol/rtBtHPvaGX1bg09InxNP15DTyz2XaKYT6aObZdhypNZ14lH7hlODtx" "OnB97FwNu2Oui+8TnZSxixd74CgEHY0hMCMbu6wxo6FiRgyKqo79xveKXRaq7zhUF4o1MTRp" "SmNHOgk/xMmuMc/IF6vYWBlqV4hqaExbkGSYjhlKhPuPKZEi7Rkf5rEotcQhA1syrHwOls/Q" "mvKlRlOO19SX/Qf9MIg/rC+aGSQf4hiyP+AufDCIo8rheuCuEsWjjCgWGLCXzTiO67qa2HPT" "WKZMPy3Jc8eBUBysIyuv+W2zwgCbng25NEImDcQYXfMBumaEmq5Goq7adhmIZlQZwyGgR6Mi" "upLkUxWOjFM88ZyJOZ7fdc6E4+hIOIqGTv04RdEs0F6Xz7jhzJvFjhRHa8pwxKOFR8QRcRsg" "XnHciPiRcvweJ/htgQKsWSUKkDI9CQMUJsly4seKCDJLVlo7+iFq5DHKVFurmvhf1Kk0PmlV" "zei9hkZfKbrk4Xo0lhKSXFvtiWDztsrmY22Vudbh86AOnwiFozg1xQY+A6avwwHnA/6d6rEZ" "KXngKQf6FerrnKdBcIFng/A94nwQiXwQjTdEq3d1+2pKJaqxJYs3s84rGVLKGeByDOuN0omv" "0lIWgZahSDO5NQfpVpz8pdHrnQbplgpIXBabt3C2GYzVloJI09BJDFptyUMNIOPhZEKuyM13" "SqMrV3EnObk0b4RxffxhcJFxXwYze2kxcpz2GHD3/lH7VTLzKxweFZu5Xz/VGcNaZWk859GJ" "ciGJxKJKM8/z+XkZDzHRTmFD9hyujZfy4Utjp/8GxzRG11QcOzlNp61raCZzQZn4s0aKHYBW" "po3o+ZCfD9l5DDzjxRGmx1XSYw4pdkTo6o+Zp6qGaRxP0VcOPmgRneum77eoUDUdm6Hqih5o" "XMMBuUpcDSWumH9rsoQnJk7xpQ3CmROPFh6ZFbvcjGWXU27HbFVYsQpaKhWUZrnVOmhUN2Pk" "3XWsJnZ55jKxY0kFKdEyHSQ89qQfWEDkz3lARD/0YwyHsQIOYq5kQ6NqmMbLLUIac6oQndhC" "NB9rX51wvpAYEUNVRlg9HuKYWipMp746wgx48iPgcxWBm5r1IE79dlJzjlPBOevJLgGwD+jI" "P+NYMg7inyaR0EdU0HcwVuITbMUzF5tbIFw0YPeVa6FoQJbBldQZZxJfMV4/jl3NcN20yxD8" "liNoad0Gqwi8KqsZawDmKp00cL9iqFNtWJkHGA166uD0KMUJhohHGEj+XoyYkg+rLQCNITPL" "CXGq5ENzrrJdcyeQXcD5G0z3lr0sswnInOpDagmyoDwUjKdlJX+7IcCeJBHKO1ygQrPo87UV" "izJobtPMo1k9M56MSSy3/lx2PCYpe7P0OB7NHa+P5sqge5H41Pe4KK9u7lw92XV3W8W1PJnk" "+VitevPqmUfZ0yqVL45YBkkGfD1AqQopXbKywHfB7x3StcUBlhLTwBcnJCKl11AnxaVlFlM1" "BarCF/nN53Qqgscv2TwZQx8e0HOYX3brgf9XDv5LzERO8fWwT/hSPuVR+o70fiqY+mJgq9mq" "arZSCqLjlrwQJoHWZVDLc1s8kWbxN6LgiDLgizSyat7nEaWQu8ARZCaF6YsZzJqR5rNMpPlP" "zLrtY2qpdqyJi7NqvMGQSMEuH0g23k6CjhQrUDRmi6aeYpURTgvjifhDjHWf5z/oI7I5uExC" "BBP1q/C0e7sMs2/gRiiLISbsFplXlCKUwTluGPArSucyt4TLJGSAhMzThUwbwx6CFg66eMlg" "wMqO2eE1PazC7FiOdYi/8EsTMlKXsiWXuSXNukRYl+BwlPToBfRnmmxAMeOccJPZT+FTMkSa" "8qlMStxT7R+gtvF2FeHRSwD+/JBVfgKfj/p0B5aQbbFi4J9VpsiMi/j+K7TsipYZd6/HbFhF" "XET/hNnFCBFfkrXA3O9nfH5ZbgOO3roqZUuayc1icrNaudWR2zMutwEgM4WnpdmTXyTpPUtk" "pLrmssI1zSQ6ZhIdtxKtI9FHiSXSxCt1ndkxYpRJysZllwVlzSRnM8nZreSa2CKTwA1Godcx" "XpItqq+5rHDNndjVNFuR1hFpGluNcXevdOV3xGec4vOXOeebicth4nJaaTWRVh+DyGnmXfSI" "j83j85c555tJy2PS8lppNZFWiIjMEjxiqaTnL3PON5OWz6Tlt9KqI62nXFo9/o7qb8hv2eDk" "KZeP6orL0iuayTJgsgxaWdaR5fdclh1cK/EpmbyOkrfCrhOrk882k9OUyWnayqmOnPaSwR21" "FbaPkzwgT0vkAXla0kxmMyazWSuzJl7tFS6Sm695tfT8Zc75ZtKaM2nNW2k1GWz301maJMZ/" "lESH2bLLgrJmkouY5CKhY08SNZprE62LoniHk0jx5GesNnL5ZUl5s06aPNNLj10zg2jYJcI3" "S/hmC99GtNbV6ggT2K2aFqtpI4B+zAD0Ga4a4CL9K5wbZ2swUpgsZd8Cw5iQbN8M3UkfsOjZ" "p9hxCcP7amRXAL+QAM5AG5/Lg1hPOzExyHic9/yzCIrFwkC+sxDmDTe0K6hjY09MPDX7MmOP" "LbGoV1WMvVo9uwLoGQeIlkzoIAoXHaxroavul+WLj2boxE9LSTD2xFLbSUsnrnyvW3SrV3Cr" "KfdIlszX1/1ds38PV0PhfDwu7TnCFfFwV5lSEMOain3LompOAs+c5KFqzu0IvqlRdcfzqWHm" "obpesSn3qJJSPOju70op9rhSDHHztVuMAUvUILACcNF5ONqE/s3DceJMrImZhyP+ycNxNqd/" "lSD4Bv1bUQ0edPd3rQa0hOYFRDWwlTh6tmtK0YOVuMbJzJ9K9uInpe6UzE1X+SDzaDaZztZF" "uZsu7Eocj7g4WIC+7rNzwpH8/vCQWkb1DvXsGprM5l9QVkJZzDjzbJ5an+zxU5t3XTnKy9g8" "vdGL8myeNZwzWPHgv4qU9aC7vytFeM4Vga7q/IiLu2+0C745wi/l1kICY+wEeQ+njCruUM+u" "QWJEkkR6ZYPRfCrIjBANg/6f221VsH5fjewK3sdJ9PQbXxlFt+j+tZyO1p+YZAvXzDntMBkX" "DE5krMDjZY05N3GiHvR+ZX3fRPK3F3Zvl70wM685R1U4wZXMdNTUgX9vccvJeF5snrwsMdZu" "Vr3+8HbZPezRf36G8v+E0dY/YAAeYoA9x4zFNc9YXMCd77UvGts441dQoo+octdwrqdF+F4t" "VapLuP6cvcWy6h6+RMVLe7Sf6dMq0/JjUMt3qMA0/ZQoL6/he+3/aEb6V7jzudD6CfhZtnR+" "wX9VhdXwJ67uc6nV7L1nuPnSidbl9/xfbal5WOpqJvw1NKL9BJ+ncIZ+oudm+Ot5PpzzoIT1" "z8ErPfjXhBL6bSW0updiAFh+oebIW/wX6dkeZa58hS+33Gjv+LXf0B4JVz/NXB0vuLpmkUdy" "j6c50j09qBVcEO4pgW+bwxFXJeT06Sn+PuAvqB/xLyJ9wPs+JXdYwh1PcK+0T6BZedfLLaS7" "1nX5hixjXNUS4/SvKA0Zq/SupIeZ6y3pyfegN+9xBDEvkcLzzJVnuJD0hv8WzQLHHvFdpnQX" "e/1NsEOlZs8AE5VmP8OXr3/nHpBa7mzt/j3RMuApIwnLY7Tf4hqizF+5hsdYw3vOAqr+Z+5e" "s0y2wzfIGjhljJZT9gTrch2iRv4OeE60/8dsld/7LbRJM0Of1uy6g9jfoBUMUX9ucuX7In6r" "k195nat38p17yZ2ydeKVlKJrsPMeZ+c3qMdvoO1fWoZuGbplaAVDWy1Dtwy9dYb+M2foIaAW" "7zvBrsdktMZ+PKvl7JazW85e52zScnbL2Vvn7EfrUTVoYsvRLUe3HL3O0XbL0S1Hb52j47z0" "AN9ea7MeLTu37KxiZ6dl55add5aXHkDfKZpUf1uGbhm6Zeh1hnZbhm4ZemcMnclLtwzdMnTL" "0AqGlrm2ZeiWoTfF0Aqt2PHKu7RHLTsXs7PZsvMDYOd25V3Lzttn512uvGsZumXor4mh25V3" "LUNvn6Ef1sq7lrNbzv6aOLtdeddy9vY5e7cr71qObjn6a+LoduVdy9G7y0tve+Vdy84tO39N" "7NyuvGvZeXd56V2svGsZumXor4mh25V3LUPvjqF3sfKuZeiWob8mhm5X3rUMfV8M3YX6qS5l" "+C7ZVZIxdPpDG2+Fq7bF1nEP94XW12USs3XxyqhiZpelP5ZmgMSW2P6aqcXZkr1kr82ToFfQ" "u3XesIA55BH1ffqbMTB0oNnwdwb1+c39TU3di7NrnKH/ELrmSpF4sa75tXWNemjyB9S2Z1zb" "suwvR6M/cH2j6yKAvbemcU/SFkHnMj1so1FFNGpJ9pEfjcoz919vLCo/x65j0Xad8e5iUVOy" "0P8tsWgdhh5Auwt8qu0w9NO0RW3WcnQpR8vxcsvRLUf/kTj6f2u+4LnAb/tYM/tdsfdCzmCI" "+CywLHuHTv9uibPLelGPQScwJjGgNEAmnCOD2qj1MYPSkcoY/kbAmnHegF7tw/cIOGsG14sM" "+h/QUgeeLMLnZlb2Fp7wGi2N2uXv8P0mQYV6g/+fPN+32PI+/Veo9VvoZbF1yNr5rTaWuOYb" "6KvIND9os6LxZAMteSK8+779nFJ++9sZ6Y8lhP9oWSWHRxoRxhg0EvHgrw3XxxZlwacJ2tQs" "Qc3HKCZC26ORiTzOr6Z7jwTZx2Xb0Tt129vDnSCmPmANlosYU4ainFUT95pRmytpV3lEJa/p" "rBqL5Hnh+/GRtqSBT8C2ZuDjP2O9+5keMt37LvvezZZ0Tmyznr7MQDcckDvVlgDHCdReZyBP" "eezgJ8xG7Zla8gz+p/FccH+erxb2f4VWr5MIm+ub9l+yzm41TqnXp3qys0AKFP0JWjBj1gBk" "Ywq2TstnUIuBkY3B5Uxl7KBMtxG1vIA+rKPwFpH6CPL8kDDJ+shmKiGVe1ctXdmDsVjMMp+2" "qhGqluvarAGSn8M1VOrMt5rQT0dhs2T7NitEMp9QUtc4rnrLWfYtfP9Fm+RKXLznHdcb8a5/" "Rd6Sfc4MNeumYktPM9dXb2UPyqk8f4V/5fqrPv08N3dR9PTpXdWfPr+lvKcvbkV++mz94tP/" "mPP0v2jxr1TnjXXzEJDvVPXvmQKFshafK5Go0tpjAY31duR8ixqRicZ+w1HdOxnHtIfifar+" "PVWgUdzaMyUW5S09EpCQ26jryV9ox/R+8JNDzIt8Rm6l7EN9wTbZukpP7uK1Z4nXJg/Oaz8C" "2X/G68v9NLXadXyq3PkjZnHqRwZP4I73eG2SEZGeKpN5qxkbHOJz3GJvthsbrLd8F+2aJ9pl" "PUDtQt+JNX3UFhjjfVod9UFMR/3R7fL16SH92c8rdlil54jjsLP0w2qtTjqHs8k6n6S8vtF6" "H8csefdaa2n3Yyj/jHnn/WwkurX1N+rW7xIBm0kEbLcRcBsBtxFwGwF/9RHwo/QMlDIOfr+1" "HLaqbVGO95nDnmEcYmOGeoqZ6zGuVQikHDaNa8bCegacG8aVhXOJ3b/NnRdW5ZK/B/R+w9lg" "iuWXROrrc/p7mXlrGrHORL0SZP4t1Er3ZppnpByi32erTtgqh215YnXb9SQV4ZoSAyRBcac+" "N8JZ0XSWx0A/TOWxAz9cgv6PPAsb75K1z+OSA6j5NzrPtCVJlPejnlQIyoOgFY1xXDmFo41z" "2NnoyMFZn2rrge5LKn9K5kKZTNLv28E+ba+u5ptQFqFHspMINB5x7TwCLcR4L/4OV58hUtta" "aadq+S64k68K9x+1I+jNZ8xwLHBOcj/BY5tZhvJ+3GU0ZiSjMethyCRT679hjJZ98jhSu8ZY" "fozrYD4ns9R/hj7q6N/y/7olUn+GmMUZsd1IvLgP9aTtgLR89BMu9NNFnx/xGWMbbTHCiM3E" "FVUOrqyiq1PpuMDHK8Zr0dl9zTr+mnnqrKTXV0QFUlz3b7n35q+UkuO9dU0YcA2jecldaUJR" "H+raPeViC9cWMS4mWLePmkDnoR3UBGrdEerCDCMTG5kgQsufbkUT/pzYNn1qURPkPPM30ENR" "F/6Se/d/w3GsvRdGCd9QjivRhCfaG42+Ef/rjrQgv/16GhDw97io9btJZpmgN6bveE2QCyhb" "uDiemyAPRDjCIziOozHnNjTgR7iGPXFd6b9Q3llV8t/zFWvXuDb7Q/Luo3h2O1KXW60n6yn6" "bjq6o2vM2FiPvVGwPtbzdhx5qVF/Ip7devSb3349SdCVegT/d/iarQCt0FuLt+I1fg9NEs8B" "nQ/4Hgcr2U9WC253DXlxL+4yMrEf2shE8Gefhed+iznLT/j2f+MYuGL9KsZ1JMb9a8H9N0Xv" "AEn5nnOcVaPzv7H9H6Dn2U9Ltux189qvp2lz1BkD1+tOMNc2RW2y0OtO+ciL/uvgW0XxCmDq" "a+fosWk8th2vO8YnfguYxU9cTQf+orzzMz/K70Dkz/V/px3ilZ+hrk/J6hK6LphhfI1MTG12" "X7hyW6tLyntSNyYn6H+pXw5QN1yMuNPcuY+6QcdkgfAmC/0/wmu3Mzp7DL2Ln3JdojQ7H7OL" "Jb2tt4dc8KXgrgjz0EEFXfhuB1L/7g7yjTDLYnO/PuVxdpCZ+WZvKhmgAbt9U2n78v0zzmR8" "4RbE3g35Ap9tjgNdcdDjcs++00DHQOwNwG16g6Ie1LV5H7Mtc5Qxi8x9zM5kI3MX8/0WZmDo" "v+y7jTxxrzbfWEZ7GI9dsznFLa/DWm95uzKZ4bmHJ5PnUiZxhIjSFrcbuRf3Ytuycu6TUwVZ" "/QVnBRd8/DgEbBf8E33be4yzh6m0fkhndrcqn/V270MiHsjBw9yjhzlI+q+LkY+DXnFbI1y2" "s4X4DmK89+U5YkbHL9tavfEn5VuH7d4VbJQovnW4+d0u5fcgq+xeYUp3lO1eQfOM9d62XN8X" "rN3BIl7t1e5g8fXuYOFU4uJ4p8FjfI6PLQ//IXhYvuch8LD8WxktC7cs/FBZ2K/MwrhHwups" "CDS86h+Mbpedw9PFMooM/LMK2TfLCoIoWnX6CVvvYfb6rdbXWOZzkewE9+9QQhmbck4fzv+T" "Z0kO0NYXcJaNgsaYD56tLnl+X74+3lt20L2YLI1V5+B8gYfhYmnN/VWne7JYmqvO2Rl0EA5w" "erzqDHt40TDEsuExHjqneLLzMx4Oz3kFvRC/D0Z4UdhhhwGePGcnh/3FMnBXnVEHz45Y7aMh" "a+SM1ccOJwcTuOOc9spYdS/MxdKHA6HVdC8sPIRwksCBsINFD6swi2mEmJ7iuHBOZZxkvDLn" "EJt0V/7L0is4giF7ipA9d0i7b8I3fNJw0MVLBgNWdkz7Fg5f4+Eo6eEL6B/z0fFc0xylmq45" "/5T0eKqJu1O/whX5VL/Do5e3y/D8kFV+Ap+P+vQtqfDokCqbE7m2CzaaFplxUTAbu1NrRcuu" "aJlx93rMoiq4FaiqiIvon9Hrzu2SPfe3HBuQBVjVz7fLV30A0TdWx/w4Gr4BwOHO0QnAOjrp" "LpZeNLMjg1UUbqaiVe91H3p8NqIPdng6oIf+KYq6f4C2form1KdFtJL+iH+/oGp80D9lhyHV" "ioODQ/x20MXDEKqZw5VdesMRrdRY/a3/98XSocch+3rBDn16/1F4Qg9/G9JrxnDssa8jWt3f" "hh3UvNM+at457dzR8JSeOx1e0kOXHU6HqKKHwzN6W+9wSB/m/GpIv50O8dvxCG3xeDT6FO8W" "Pce47nc84hrt1esQu/j6DPs/GmB1cCc9vO6iFffC11CBtjo/s2+X8M9i6a7wELGDyQ6GdIBj" "SK8H+3JWeABf26PKZa5GB0hCo/4rbHyExrc6O4cbzs672Nrq9Aoe/vQANLvz8xF91ssBw4FH" "wqfwDF80tjuRSTmbFp4doqi6JwjE4Sm11R6t5/BnWtw7Pbtd9kfHyYk6JP8DJ/kOkvVCmyaT" "sYzi40BpPyYfTkZ7ipIsjc9NNY/PBR43I1tgcuOuTG5NGzL5uBKTP0AW6Az6aPIjNNLwYkRF" "MDiHi0DUtjN2VqOL19HyJwvuv7iKwGX1D0P69P0hnLYd3TICK3DgKxRarh4YQWBAaU8s7Yml" "oVgaiqUd6Ge/A+x0MjxEjRieUsbt95GE+534RVmL6I5HLM9lr8t6umdbnumy7jtTB0Q07EZL" "uMoBJQYRv4GKO2/Q2joHb4BGVtnaDN1zA+JYrLZAd2zXt4KkNkN5P9RwSLtLTYJ3N+5nv6yf" "/cGQuouXPfp4ukmcVfcSVTJ9gItzJNZsZTndLK3MSCqD0G90AL3vXYSs8tb4W+MvMn4SMOP3" "BOO3PN00PcPwmfH/5Og+tWePGX+mtCeWhmJpKJZWNX5fNz3fdW1mBq7uB2BTvsr2S+w+0AMf" "qvJYRb7uBhsy+5weNrL6nE62Rt8a/XaNnjh64DnE5x7/J9PQiWH5cDFafaa4JxWHYnEoFVe1" "exusyrQCk1mCCQYOztBWOv0Sw3d1h3jE5/ZpWroDNuVvwvLz+tjI9PO62dp+a/v3Zfsmt31T" "NH5QPsO0CXf4tu55puG4zPLTsp5QFgploVBW1eZNm9oAGBGzATpUsL3AUBm9HQQ0HVdo+Kaj" "O4ZlW3ybHcum9mlkgofmlp/b00amn9vR1vZb299ysJ+1fAjXCfED2+ahftbyM2VhtiwUyyqP" "8XXfdk3uSYmrgx+1bCfH8INSj08MYhgk3l5LJ57vwNcNDPHV3Wzo8NW9bI2+Nfr7Tu+JRm/b" "uksMh7pqavZEtxxiuD7P7qWFPaEwFAtDobCyy7d00wxcCESY69MNYlvEapjbA7dsBBCBc0fq" "6JZvEAhuNuDwc/rZzN/ndPMeLN/CP9u3fNNtLf9BWb7a3Zt2Mr7n4b2djOh5TG8nY/jqVk30" "WLfNQIcBshHYDQbu4BHzKmluyKquNTJiRe9az93a7z3Zb6CaljM93bMIsUxmxK6t276XOO5M" "aU8sDcXSUCytauSBblomhKzcI5q66duW4TUwdE+3iO/GdmkRPfA9G3p2d2vP62Mji8/rZmv2" "rdnfk9mbyngdjNUNbNOL43VT91zPc3luPlPaE0tDsTQUSysP1U0d4uBk92rL0C0LjCpoOFi3" "LN0GHvKCxKxMy6OphA2M1vN62mxGPq+jrf239n9f9q/Mzzsw/DRcx+fBOwwnPWL4xGH2nynt" "iaWhWBqKpZWDe1e3A9dxLTFbrYrwq9g/8XTbI7Zh5yX97xDr5/W0kf3ndrS1/9b+t+v/iWD/" "ovlnCntCYSgWhkJhZecPtwReEHh5ufXqUT9dG+RYvh/kJf3v4PjzetnM8ed1tDX81vC3avhW" "oBu24/iEZ+2IQTXTSdbipcU9qTgUi0OpuIb5gw/0TCtvMY2Yr7fKKMDVfRhSG0HeKp87UYC6" "p80oIK+jLQW0FLBVCnB0iwQkydvD+N11fZ7xS8t6QlkolIVCWY2o33BsxxLnrFQz9GV+H+Jo" "wzIsP2fG707hvrKLTaN9VS/bKbrW3rdr75Y0Q0dPCDN09ETtGTrLgQgZR+el03SVkni+btHr" "/M1P2OX2tJkjz+toa9itYd/X3DufvPsJgtGsacN5btoWM216ohefCPmJMD5R2VMbOveAdJlJ" "w7n3tBITGGBTc+/rPWvmm9c7dw/mGzDW2aD5VtjAoLXRLdrodOKOZ/FSeCPHSMFYktWwPMXG" "zvWSc2FyLkzOVX/PxfPSBSm+CWNMHgu7k5nB7NXUA6rRJRar+y7JqWk6U6+cq/aSi6KDxeaG" "PVeYrbKHJVXRrreW21quZLlBRMwdW67l6cT37EwombW5yDKz1ksvKwmhYbyawwSRbTa339xu" "Fhse9l8RPyt7WVIV7X5dG3bxT7vtULvtULvt0Ne47VCWvc2YvYVt/C+1c/h/qPW0brKNf/bc" "tn7KPt32bj/d+K7d7lGx3aMpbXSXv93jv0g/BlBls0df2k6yymaP8vaQZZs9yltWlm/1KLdQ" "ttXjN4Ct/JPiZRs9mpIEHspGj5kfB/qDbvSYbf9hb/RYfbtdys/b4la68dpH/FGuDIMqUIy5" "tVjfi3lY1jhX4iCxpfinHOOWXEnDs9fmYU4Kerdu6XRze2eL3mEMTBTgFvr0Z+X85t5hxcJ4" "y5haUZIv8f141EWnHbITFnxRMp+usPh4i8SzFWyo5azNVZjCQMsWXg+IswZQyVWfT87ZpgdD" "wKshm44PHJ94wepw8JLWxIYd85nF852GbhBi22VDMFp90uDrvlDz66HQrtQQqdeQRXRPeqS0" "pavClsxmj8RafC2C97oQPKNKS2sjUJPLzxRenMgIsGSsSIWmfPXCdPwkeYQbI1le6cBzPrPF" "WhwjMKx4vxZPty3DNsomiKEWR6zFJxYx+Noyk77G7TtG2TJwqMXN1uIDJmZgx0/k6IblGX7Z" "ghKoxRNr8Xzft7xkkUtAiOOUrXODWnxx1ymbeHYyrCe675mWWQHdQHirFS5xzeSJTN33A8+r" "gMtYqiXwbcuJ98FJvpZWMxGqSZq/EvpWWstUqCWB4krAqbSWmVBLIpYrQWaltcylTUK4ilwJ" "+lNaSyTUkqjrlaDLZbXMDaGWxHSuBLsqrUV8DTox4yvBxktrIeJkUIa0S8Egqry0qVuG5RKv" "Zj+AqbK1EN1xAyeoi4lIUcTSvcC0441lKstHpChi64FLoJ66uiJQFF1P43m27dbVW4GiwN2Y" "xHfjd/Cq25BAUcTXIdAghNS1Z4GiSKAbpuPa9cllLFXjW75jB7WpTuAoy6D7hTrEqs27AklZ" "pk5M03St2k5gJlXj2b7h1PdIAk3R16I81wvc2u4xkqqxAyNwvbq+WiQqqMb1Idix6wYOIlOJ" "8VrpkygXnog9STmnAmXl45KSTj2ykqSUsk49tpJ0JqWdenQlaXDKO/X4SrKnlHhqEpZo3Snz" "1GQskWvSrzUpSyS+tHM1KUtk4RSqepQluYRUcPUoS/JPqRrVoyzJWaZKXY+yJM+dmlg9ypLC" "iNTg61GWOGIr7YJqkk4KrKr3ZCZWI0Z5NXARKEsKOWtISaAsKf6toTMCZUnBeA0NFihLGhnU" "sCeBsqRhSg3rFihLGjTV4BpfqiYzgqtBfAJjiaPJGiwsEJY4sq3hEiZiLdlRdg3/JNCVOOKv" "4SxnYi3Z7EMNzz0Xa8lmQmqEEZFYS7OMjDx7D39enoS3y5fsRydWq7UfrTD5j1bsaa+0uTbB" "NTX8xyvYD1eY7IcrjOIfrlhraHU06N4uj2gu0lgd0UwkHPAdKReOV+woLCVYHXWHcEcXGzvq" "/swmUen/8O2Y/thI9yVt6GKI09QXwwN6WP0POy3D/1CKIuIAAAC+bWtCU3icXU7LDoIwEOzN" "3/ATAIPAUcqrYasGagRvaGzCVZMmZrP/bsvDg3OZyczOZmSdGiwaPqJPHXCNHvUzXUWmMQj3" "VAml0Y8CavJWo+P2MtqDtLQtvYCgB4Nw6A2mdXm38aUBR3CUb2QbBmxgH/ZkL7ZlPsl2CjnY" "Es9dk9fOyEEaFLL8Gd2pmDbN9Lfw3NnZnkeVE8ODVHsbMfZICftRiWzESCc6imnRg46eq97F" "j3DVYRgnRJk6GKQFX7oeX6ZDsdxFAAAEeW1rQlT6zsr+AH84xQAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeJzt" "molt6zAQBV1IGkkhKSSNpJAUkkZSiD82+GM8bEjZsWT4mgcMdJDisctDIrXfK6WUUkoppZRS" "Sv3X9/f3/uvra0qF34OyHpdM+xLpX1NVn91uN+Xz83P/+vr6c37LdaceVdYtVb5/eXk52GPr" "9K+t9P/7+/svSnWsej+j/2n7z+D/mT4+Pn7aAHMBbaOuK4x2wXWF1ZH4Fc69WZp1zDiztPqz" "dU4Z0j+kV1A+yjFKc6SKV2lW/+f8kf1fdUvwRR//ic+4iC9ynMz5o8KIX+KaZ0uVV13XsZ6Z" "zUVZHvJjbMrzLFumn1ScWRtIu1S+z+D/Drab+f/t7e3wjoh9eKb3x0wjfUGbILzS4pz2R/ye" "Vh3LN7yXkV73fT6TadKeurIt5xz46P6faeb/7Dt9nkxK+LDsWO0mx1TKUPcz/VTeI6/036gd" "Z/+u8EofH9b5bA4gHmXk/SfvPYrW+D+FzZhv6ef5boDtsWH26+yb9L18NxiNFfk+mv0/x5D0" "VZYlyzur7xKPoq38jy/xbfa1nk5/L+jjSY612fdm81HWg/x6e8jxPNNkzOk26WSZbvk76K/a" "yv+lslG+A5Zt+3t79zXtJP3A+wRp0aZ45hT/ZzzGJPIizV6+JT3q/K+UUkoppZ5Tl9rnzXTv" "ZS/51pTrIJewYX0bzb5r+vfUX7X2ebU/rDnUmslszXqN0v99bSO/80ff/EtrIayb9PNrKMs5" "6kf84zG7v5Te6HqW1yytUb8m7mzNaVbmv4r9stz7I1/WPPKc9sIzuc6ebST3XjlnDZd7OSaw" "d7MmvNs6y5nriXWP9WbWmvq6UoX3Ota9TCttV8f0GZBXXqMep8R6JfdJl73upTKfo+6XbG+j" "/s9aG7ZmP75rNPZXvNzHLegjrPOtCT9WL+yXY17/tyH3IRB7GXXMtcq0VabZ8xrZt/8TQZzR" "/ZH/R2U+R33+P8X/GX/2/pB24py9GY74M//JWBN+ar36nJd7Avh6VKf0QbdPXs/yyrDRPhP3" "sz9znXmPynyutvB/30cpn1CmPC8x1jF+MpbRnteGn1Ivwhg3+I8AG9O+EHNt938fc3KP8pj/" "+X8i8yj1+93/szKfq2P+z7kdO/R+knUt9fEpfYO/iMs8tlX4MbtnGLbk/TrnYcZw4mLntDV7" "nfgz9yiPlYN/a/EhbSdtyp7ZyP+jMp/zLsh+W9YpfUffzrpij9FYRdxMr+fX/dn7wZpwwpbq" "lWHUg7mk+zfn8tE3GM/350Z59TDaQN+LTBsTP/Oelbn3tUtoab1APb70v1JKKaWUUkoppZRS" "Sl1NOxERERERERERERERERERERERERERERERERERERERERERERERERERERERERERERERERER" "ERERERERERERERERERERERERERERERERERERERERERGRO+Qfh5eOajemXSYAAAFTbWtCVPrO" "yv4Af1WJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAB4nO3W4WmDYBSGUQdxEQdxEBdxEAdxEQexvIELt6Yh/4oJ" "54FDm0/7601szlOSJEmSJEmSJEmSJEmSJEmSJEkf0XEc577vT+c5y7V397+6T/dvXddzHMdz" "mqbHz+wY/Sz31L11FsuyPF7HMAx/vod077JjlX2zYXatzfs9tX/VN7/+je5ftut7Vjnrn+V6" "nX37xtm/ul7T/ctzvu9f/9fneX7aP9fs/31l23ru1+/btv36zPfnv/2/r/oe1/er90Cu1Xf7" "nEXVnx3Xa5IkSZIkSZIkSfr3BgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAA+EA/CvmsuD1UqYgAAA7XbWtCVPrOyv4Af594AAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4nO2djZEc" "KQyFHYgTcSAOxIk4EAfiRBzIXunqPte7Z0lAz8/+WK9qame7aRASCNCDnpeXwWAwGAwGg8Fg" "MBgMBoPB4D/8+vXr5efPn3984jr3qufic6WsAGX498H/Uen5iv4zfP/+/eXTp09/fOI69zJ8" "+fLl388uvn379jvvsDdlBPT7R0bU+7SelZ5P9b8CNtH+rvZf9VH6dpWmk9ft3/mdXVTyrOQE" "XRq9XqXLrmftvHs+cGrnq3rr7B/la991ubRvex6aD3kFqv6veWX1jvufP3/+93voLdL9+PHj" "9714hrqoLwtEOr0e6TNE/p4m8oi8uRdlq15IF9f1eeqgaSMvT0cd9Hr8jc+q/8ffr1+//n7u" "Cjr7c01l0fIjTZTPM1mfIz33Mvu7DFGe2wibx9/QmaaJ74xbXHM9RRqd8zi0fUU+pEcXyKnp" "VO74oAvassod11Qfqmctn/F91/76zBWs/H9WZtb/6X+dvIHM/upvqFNWd+wcelZ90S7igy/Q" "Pqh+gTxWcna6QD7KIT/3FVWd/fmQz8vfGf/vMRe4xf7oPPoj9e7kpf6V/X0d4sC22D3+Rlsg" "f/73foas9FHai0LzoU6ZLvC3LivtkbleZX9k1Oe9/ExvK1tcxS32px1ru+/kDWT2V3+H7836" "KH3d/Y/qNu5x3f0kviOzP3rQNpbpQtOpzWkXyO/2xz/yTPzlGc03riHjM+xPX1F90J8BdfXv" "6m8Z3xyaHpnpW/o9nqUPdGulyIv7+E3A/5HG7yEnfS8D9caHZLrQcjL5yV/HQ/qH/++yqPw6" "l6n06bodDAaDwWAwGAw6OPeX3X/N8m/BPbiEKzgt8zR9xduewmPlxKVYz2RxgXtiVf7q2RWf" "1nGYj8Kpzq7ouOJt7yGrxrarZyrOqvIfVVx6t/xb+bRHQeXWPRNepytydfH8e7XrTFbl1fz+" "CedVpT8p/1Y+rdKT84bOKfoeBed4kIV8nANZ6azSgcYVu2ceaX/045xcxXlp3F5j5lX60/Jv" "4dMqPRGjC8CzwvMh88r+xO1UFpWz01mlA7U/cmbyZ/7/yh6aE/tXnJdz1sq9VhzZbvnU9Sqf" "Vtkf7lj5I+UUPf/MRsjc/X+qA8+rkn+XK1uhGqvgRvR+xXkFSKtcTJd+t/xb+bTOT9KHo4xo" "D/Q1nt21v44ZnvZUB6f2vxXqb+AalHevfFNmF6773MHTn5R/K5/W6Smzt847GRe07MxGAeUW" "s7Q7OngN++vYycf34ikviE9Tzgt5sutV+pPyb+HTMt7OZQPKKVZlMyd3rpTnkWdHZ5mOPe9K" "/q5eg8FgMBgMBoPBCsS+iPmcgnUga5hVLKpLE3PbHf7nHtiRNYBuHlnmriz3BudiWHd7DH8F" "4h+sv3fWJt369Zn7GTOuUdeUgfhOrPBRZXbXHwmPXQeor8a3uvavZ2NIr/rLnucZ7mm9nfeK" "e+6X9MxBpjOe6fRJf/M4hsdos/J38spkzNJ113fLyPS4g1UcSffkV+dxlIPwOK3u1dfnSaM+" "B50rl6PxQOXslA9wmfQcUcWf4fPIR2P+Wpeq/J3yXMaqzOr6jrzEG1XGE6zs3523BF3M0vkv" "+Drt/+jKzzNk5zvJqzpnQjnIUp2NyPTvfEdXfpWX7td3Gasyq+s78mZ6PEHHj5Hfimfs7F/p" "f+dsEfn6p8sXedD9js/S/p7F4rPyPa+ds4RVmdX1HXkzPZ4gG/+VW/Q2X+37udr/M11V/V/L" "7uzvHPSq/2veXf+v5n9d/9eyqzKr6zvy3mr/gI4tPobhn3R86fgrl2k1/qvcbv+AnuGrzp9n" "ulrNWXw89TFOecWsfEU3/mv6qszq+o6897A/9a7W/3ova5vc1z7kPJrP/z2NzpF9Tp/N5bsY" "gc6F+Z4BGfw+5XXlV3mtZKzKrK6v0mR6HAwGg8FgMBgMKujcXD9XOMBHo5LL1x8fAc/iAlm7" "+x7M1TqC/dLPRBVnq/Zjvmc8iwvM9jIrsriA7tnV/f8n61e1FbE2vZ5xbtife54Hcuh15yJ3" "uDzSVGv0zi6ZHvRcoHKklb5u5RtP4Pvv1T5V7I+YE35jhyNUP6PxK67rnnn273u8UfnCLI8s" "Xp1xRh0vWMX7dji6LtapZxPh1zN97ci44gJPUPl/7I8Mfm4l42hVB95HNA6n5/goX/uFc258" "V31UZyZ4XmPr9JMsRu39hbbH+RWww9GtuA7yq/S1K+OKCzzByv8jK30v41V3OELOUmhfz8rv" "5NF8uzMzIQ9tlnJcN1U5jG3q3yh7xdGdcJ2ZvnZl3OUCd9DpW/us+niv6w5HqO+1zPq/jt9d" "/9+xP2c79Sznbt/SvQPab3c4ul2us9LXlf6vz99if/f/yO7jP/rHT1bpvD35uFrZX/POxv8d" "+6Mjv3Zl/D/h6Ha5zk5fV8b/nbOOFar1v3LeWUyA69pvO44Q+bCfzjGzZ7I5cFZelUe1fj6Z" "W1/h6Ha4Tk+3U/cdGZ8VMxgMBoPBYDAYvH/A5+ja71G4kre+W+Me777X2MAJdmV/T1wUa144" "ANaUj6gDdjwB61pierqvstsHXAGO4RQaT+xwpY6vBWIWvm4kfhbwfay+Dsdv6HqVMxjx0ZgN" "bUvjC+ir43ZVxs7+XV67abROug/e5bhXHUH2uyO093iO65Sr6QKR5mrfynTE9ewcC3ELjbM6" "B6O/z0U90A16JdaF33H5KUNj8dVZAbVFxdHtpHGZtK7KeVJH/S2hK3UMKA9LXA/7aKxQ0xEn" "pdwqXtihsr9er+yv8XHaPW0SPXl8S/Py+HbFq2X8idtc/ZhyyIqdNAG1n8cfPY6b8XtX6rj6" "3THS+/sEnTs93bfl8ngc2usTcPs7b0A++puUyJjpBlRc1I79Kx5DsZMGPSrvmcmrfJi/R/BK" "HU+4Q8rlA1dd+ZYVeI4xLrOZ77WgDzlfRZ/QsaniDb39Vv1xx/4B9X/K4yl20ijnqOOgypF9" "z+y/W0flBPH5HXeonJ/ux7oCHdv043st4oNv9L0c3FMdZNeVX8ue787Xg8r++DLl1B07aVQm" "n3cq3853+oe3mZM6BtQGuqfHx2fXrbaTU/5PoeMHc8zs3mqP3eq67yVajVt+X8uvZOnWrrek" "8bIrnZzW8fS5zHdd2f83GAwGg8FgMPi7oOsYXc/cax7Z7UmMdZC+K2WnTF2rEu/O1oLvAW9B" "Xo/nsO47PUdSobM/nADpduyvsRbWOzz3FvR5grcgbxaPJE7uMRvntIg9Ot+lUO5W4xUBnnWf" "ozy0xyA8Jqv8v+ozS6t5E0OpuBgvF/k0lqMccscpaT21/iovfM6OXpBdy1G5TtCdMXGOR7kI" "jaV3PsO5e+WV4Qs8Rqr18/ONzsFW/p9ysjK9btnebG//2I3Yp8d8sW22b5u2AificWLsre2i" "04vL7nKdYGV/7OplZrH/FY/oNgowB6hsepKfc0HeX7K8qxiw7g/SeDex1uy3oyruVX2N7q1S" "riXzGSu9uL9DrhOs/L/bX+cJt9qffklc/VH2136xa3/8BnmpzyNft/9qbwd+RHlV5Q/Arl6q" "+p5gNf+jnnCMugflFvtrue6Hb7U/OqQc1cuu/clDxw61ue532ckHf678n8vrPj/TS3bP5TpB" "tv7zfUU6t8jOX6tuHCt70f51/8M97K/zv+rccqCzm/dxzZO+zLNdPj7/y2TRfRgrvfj8z+Ua" "fEy8hfXi4PUw9v+7Mfz+YDAYDO6FbP23imWAt/Su+Y5nOoWu17rxtoqdnmBX1/csM8tP4z+r" "vZEBXZe+BVw5+1CB+Nfufs1bsKNrT/8I+1f5aexHYxV+xinjCB3ELTyeDnemvC79jzNxzH2V" "D+Oefyd2qnXwdyRWsZKsbhqT0Xbh8iiycrK6wv+4rjWO7zKpvYhTO1e4i8r/a4xfz0vRz5Tz" "rThCLwfdwZ1o+ehFz9WgH5cniznqdz9/SzvSeDryeBvwugU8lux8QLYP22OzxM+9rhWHp/lW" "+uB54sYVB7tjf/f/QNuWjlMed804QgcclfJxrsPu/137oxc9j+kyB/Rsj0LTZTZWfWX297mI" "nq2r8lL9KLfY6cPL4d4JVv7fZcr2WlQcoeuENN37H+9hf2SirWUyB96S/Stu8Vn2z+Z/+EL1" "l7qPAp9UcYSuU/x/1/8Du/4O35TpPJvD7/h/rVsmzz38f2b/jlt8hv/3D/X3c7B67lDnKRlH" "6OXo2cGqfXta14XOM6uzmW43xWr+F3D7V/O/zndm5XT277hFv3fP+d9bx73XO4P3hbH/YGw/" "GAwGg8FgMBgMBoPBYDAYDAaDwWDw9+ERe9HZ+/SRwX4T/6z2vbPH0t9pEWBvTPZ5hD51b6nD" "32lccYnsS/N8ff8I7wDSD/s3nslTdnU5zUf37fGp7K+/Y8K+I/bZ6T63LM9qb/Ct8nd79dWG" "+h4Qh9Yb3bKHTPsE+T2rbVfo6vLIMnVfpPaNrP842K+W5emfam+eP7vaG7Jrf97LRPr439+x" "ofZ/bbyG/f13B9Q+9MMO7COuoH2p28sW1/W3RTqs7E/boU87PP+s/3Od/HmXm+6h1H2bAdqb" "vmuJfX76jO6x1Xy1TZKG7yc4GUNUF/6uoaxvK6hbV576gsz2jL34hlWZ5Knv71GZ9f1yJ/b3" "ve5c53+tJ+eSdJxUWbjPd/SKzHouRPOlPajcV3zTyX5xPV+hvgB5qr5Nu9zx59nZAc3H95av" "5MePa/4BdKfvYlM9Mub7fKXSsc95tE7aX31Pr+5l1/mU5pG924/24P3wdEzgnFM2n3FgQ//t" "zGocZv20M5Yjy+ncsLM/etUxC//p7Ujtr/5d95qT54n99Vwi7VfLzN5d5fOsyv78Tzu+MidA" "vuzjQH50RxvO/Dq6q/yq53vl3XWByv7qNwFtMYsV6JlRXd9QV50fVucbMvtTro7lel3PpXqf" "0nMfnf2RydvXM9DFXXbnFpHuqtzdeHfSnvTdOtqXPtp5isFg8KHxD4gkaqI/dFX5AAAKtW1r" "QlT6zsr+AH+vfgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeJztnY2R2zgMRlNIGkkhKSSNpJAUkkZSSG6Qm3fz" "7gtIyVmvHdt4M57V6oekCBKiAJD6+XMYhmEYhmEYhmEYhmF4Sb5///7b78ePH/8duydVjnuX" "4dn58OHDb7+vX7/+qvfavmf9VzmqDMP7gbzP4vbwlv65u7aO1W8nf65HVw17Pn782NbVSv7u" "/2x/+vTp199v3779/PLly3/6ovYXta/yKSovzuUY55FO/Vyu2s+x2m/5k3adW2laX9WxYc9K" "zp3+Lzr5f/78+dc29U//LbmUDJA5MmI/51T+yBSZ1/5sF/RrziU/txPaAuUb9uzkXzLy+K/o" "5M8x5EJ/tQyRc7UV91nkxzXgPr46hj4AymM9MezZyf+s/k/5d+8M6HnkXn+rLSDX2rYs/cxY" "yd96AOj7lZ51w9BzTfkj15JVXes+SF/3mMB5+FmSx3a6IduJ9YzlX23EaQz/UnXi/nO0H13N" "WJxtH6dfZ/spWVneKQ/6beZd13ksl7KsbdogeoYxyeqaYRiGYRiGYXhFGMffk0ew16f/828v" "71ny3foeXOprujb1rniEy+jtagfP5mdInfCW9r67lvfznfzP2PGPfIZ5nvd1vsQuvZX8/4b+" "8xZc/vSzYc/Dpo5NJv136dvDF+Rr6SOdz5D6JD/OXfkDTedvpIxcj/3IvizbL+3f2qWX8rcf" "4lHbQMrffjYfcz8pfYnOLLkgG2y+7Oec9AvYZ1ggI+x2BedR57QPk/Zntx3aDPdCnpkW8u7s" "2Zleyt919Kjjga7/A3VoveC+bT+OfXtdjNAufsh90HZf9/9KO+t452/MZ0r26/RZXZLes+t/" "QLbpAy7sqymZ4W9xf0OW/L+TP33fPkDH+1ifwM7fmPInLfwA5NPJ/yi9V5E/z/b6m7KxvIv0" "xdsX5/re6Qb0idsJusW6GHb+xpS/z+vkT5zKmfRS/pzX+cP+duxbSz9bQX2lPy39d/bt5bXU" "bdHVkf19PEfIY+VLhJW/MX2IvKd15fF45kx63qYeHlX+wzAMwzAMw1BjW+yb/Dw+v2dcPfaA" "GWO/H7Z98bNNvosLvRV/w/zDZ2dn0+r84NYJ6A7HhOfcwPQtQl7r82tfZz/M8qCvRj+co7Or" "IP+V3dd2MHx82I7QG9h/PcenSL9Qxu7bZ+dz7LfjL8doH9iR8UkNx3T93H4X13uR8uf6bl6n" "fYG271rm+A+6eUSe65fzz+y38zXoiOn/51jJf6X/V3bw9KWnTx0bKe0i+7FjMM4cy3ZZ4JPY" "xQsM/+da8u98fuC5XyUvzwUszvR/cFyAy8m5ec6w51ryL9DJ6TsveIYX1uHOc/X8X+kGtzk/" "/x2rUMzcrzXdu1ztW73jeXze2QIYw+f1xI04ndTP3fifZwDk+7/LyrFMe+Q/DMMwDMMwDOcY" "X+BrM77A54Y+tJLj+AKfG9vcxhf4euQaq8n4Al+DnfzHF/j8XFP+4wt8PK4p/2J8gY/Fyuc3" "vsBhGIZhGIZheG4utZV064YcYX8SP2zE915D45XfEXZrrazYvSOu4P3cfmX7kO4p/7QzPDNe" "1wfbG7a5wmvwrGRs+WN/wSa3aksrm5zlb38iZfL6PC7jyp5gm8HqXigzeszyz/bodQqfwaZs" "2ys2u/rfdrTumzyZhtcQw6+HDb5rN13/L2zTYxtbYP1P2vb50G59vdfn8pqEq+8LkUfK3+uO" "sQaa18R6dJARuF523+QyKX8/O1dtxnL1NZ38HW/kY/Yfs5/+SXrsP/q+mI+RT+73enj3jHu5" "JtjHIfuFZbl6Lv6p/Lv9nfzTF9TFItGv0e2kf/QNud0x/BTW8+TB8Udn1//teyvSjwO3kn/X" "Hmz7dzwB/T19R9297NpGxqiQXvopH/WdgbbsekkdcORHv5X8C6/jS+wArNacznvNe9nJ32XI" "7wv7mkeVf5ExMunH262vz3Gvp5lpdW1mF5eTPr8uv9X+3X2srs3r8pyufp5h7D8MwzAMwzAM" "sJpbdbS/myvwN/hTdnGsw+/s5tat9nnOhecKHb0/3oKRf499GLah5ZwaWPnnd+3FtpHadsw/" "3+Ww36nw90Tw/4GP+Vrbk/AtcS+WP9+z8T2/6jwRy8x+toybhyP939nmrf/Z5rs+ttPZRmv/" "jNsicf74erABcq2/UehvCTnGxHKmLPiI7q2nbs1ZWzsc7adv5joBKX9AD7gtYNenLdg3i/wo" "e84bsd+vm1PS7afd+rtAr8K15d/1n0vk7zkf6O781qC/ybiTfz4POp9uwTPpFecKX1v/Xyp/" "6210sGNt7MNDPuRxpP9T/rSNTJP4EMcIPLI/5xI8bqKP0a9uIf/CPj3359088rw2x387+ePH" "q/Rz/Pfo/txhGIZhGIZhGIZ74HjLjJlcxX/eit376nAdeOe2PzDXi7wXI/81nt/g+Hrmx9GP" "mYNjv12ms7KheA5e+upsh/K8oJUP0McoE9dm+bH/On4fn6bL09mjXgFsoGkPxW7nNRo5r7Op" "F55Xx89+t1w7FNs/dv5ujpftu/bnkjZlzHKl39H9v/NVYlN+dvmn/qNeufdVDE83TyjpfDsr" "+VPP6Uf0/DR8P9hm7R+0/9D3tio/x3KOl/dXfs8yz2/FTv6W2Z/Kf6X/U/45/9d+ZI5hq+eY" "5/Lu1ofcyd9tFEiLNvbsbcBY/1v/3Ur+hf2Qfs5zLuMS2gN5nNH/kG2DNNm2T9zt7xV8Qh7/" "rWT8nvL3+C/n+NkHmP7BYjX+28m/yHn+3fjvVeQ/DMMwDMMwDMMwDMMwDMMwDMMwDMMwvC7E" "UBaXfg8EH/4q1s4xQEdc4p+/5NxLyvDeEN9yS1j/mLVzMn/isSjfpfLnuo5K6+y3Fro4lI6M" "Jz7iklhA4pa8Ds5RrPtR/Rpio+DacfSOnfJ3eIkL7GL3KZO/6+64X8pLfJWPkXbOFyDe3DHn" "jtVNvDYQawhln2UtMseb7/o1+Z85l/MdP0tejkW6pH6JOfLPsVHvsa5ZrtdGuTiW638RD04/" "5X47Oj1KPJfv29/+oS3sdADxusSSeU5B3hvH6We7/kP+jglc4ftO/eJYykvql3MpJ+leS/9n" "XH7i5zJ9mzbtfdSzv7fh7ym5HtxuXU+7+3LeHV4bzPezaod+hiK37nsfcOa54vkyOXeANpQc" "1S/QLhyfei127Tr7K/3H/6Pzsk173leXHv2P+0pZua9a963K6rWiYCW3jA3t0qRsOY+FvBLn" "le2etpkc1a/PI0/PVXor6MFV/z877v0T+XOO59xkmn4edvHgTrebh0Sd5zcqLlnnqxsrdjrT" "eWU79Pg4y32mfun/3XyFt7Irw5HehU7+OX+j4N3AfZV7QsaeI3QGr+mY13jukOPVrXOPWMm/" "a6+MU6wfVu2b/C/V57t1Sj1v6gxH/b/wPIvVu0wn/6Oy80ys8joP5ERdsjbcaqxmnZnyZ0yY" "6wR6nS+vK9i9W3uOmd8dunLw3UP0Ta5Z13GmfuHoW7sce495i7yjrvLNeRoJYwXIekG/p970" "u/SR3jvT7nfvhKuxgMc5l6wTeslzele/lPtIrpzz7PNWh2F4M/8AoIL6IK3Xo8IAACoXbWtC" "VPrOyv4Af9TwAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4nO19K7jsKNb2kkgsEonEIpFIJBYZicQiI5FYJBIZ" "iY2MjIyNLJl/Ufuc7p6e6fnU/9SIWnPpPlV71wmwLu+7LlTm5302ngDas5EtxtdGYIejwwJw" "XcUFawDfhX7D82Id4IEKEAG2ChvQniTBd92T2bGEwfHNfHP88UNvAJWb3UEr1XEztr5sTxUU" "4HidQOEo6TDwYbmvKz/3CRKg3FQspF+NA683gbhzXJ3b3s+YXkJsMSn8QxHzldIPDyvUa9so" "7kZ5TiI49ZZkUEPMXzkWyNI+TwYwJmyrNLiPSW0r/u7rbpB37ttHF49yxbD4jZngATxRqoNx" "CQ/RFAkrr5eyhUiTfQz6oa7BZaG3HX9xj7mufn6CWykuozVjg4k2LNb6uMXAwYJtDp4dBHVP" "oPjvqDlwXPjT/TwvGw8vP7z8t7hOxDoSnpNNwpsFcCm2FSAV9sScLRzVHjJwwCcPh3VLcWAC" "vrTNX7fg2ubAH9UvuJn7Nvw0HTx+AIULtB43N1PqG4HH4U7d1UJR1+HW7fPrp6iUdU3g93uP" "jvs1yCUuQqZOyYoLGGs6GAlrm07AvG2BOdgP/OcCKqd1gVXFfDKohtklO9HvEYGbqx24XUbh" "YdeSKc8LqlJFJUhXYzBNZwPGPrv4KS90aWiTZpj11QnRuFiGPsrKHKgSy0XLxfLjKRWW1DwP" "LOk29nM0xeHAf9Y1m3rgYvA/pKJKH/Dg9lwbPBlPHE0lTyMoN+Q24DqnFj0Jnarq/dOLB1lB" "o/fCg0gNtqsIkEygczabzgNNg1jqyPlCY1idJseYSr0TdARluy7K9hL8qM8JMy4YamUolM8/" "1Dw/nS0x6SRwnU8BPQD9f3gUGhKMC//a/QkfXTxKdMKht1Znm5pgfEksPOS4lX3gRvMOUWpd" "0G8lW1Bh0f0BiDb9GFgSWb/NPOEXqj8QqFlvaACARp4X/DA2N+GBrR82Skbxl0db8IUFd3Yp" "ms83Pywc5EB3jgqNBm5N4Mem3RNtzAXKaz4/9ejJTNpq7w+zFT2A3Q/aJXeDWohpekZUeAaB" "EPSEJBGBr2tQ9jibRbeQbfL4CWpBT5nx1Nf63oCrnhw+fv6ShuXc4NiGkboG6UI5+rXiCYYL" "1qQCOFWtq0scDkPDdrRqYusPTAvo5edDvALvgHmvBaEL5x6NO6RtF2oLUC7UBSCX+OPvRGvx" "FcLqd/6hVf9FwsKAM/TcqMGUkZWSOHjrVcCFSsr8uXMSj6MSiZ5chLMIDujJn44rOwZ9BwRz" "rRhGEOMdUSgeS0mt7vemWN2bhMaoCrkxC8v6/itLj/qo6GRYjB9dO0rEo47vYwiIeCSdp0TR" "17feDxCeohNYYGnXHiDsqOvREEBszI/7cm6wbSSBqMZe1znOhO96QkfPnqBRPRXGbmYQ5GuE" "ROr2rGU7Cjyo/fgWYdP8Piy14qKem2rG72uHMEKfW3Ao9eIkvx0AuofHoJHb9sxw/TQMbssZ" "y3FglFjGk/kJ+nbPtfboGNkuePVIboz7jW9yn0q+gM81rPHB4P9I4Bx1qYnx6uuHl48LZuCn" "Fgzt19dh7BiVholbWhcZOj48x01ASqM58wL9AqziJNNxXRUBoQB9PUiFFgxrBND+M8bKGLrj" "r/npsrp0v1GTPX+CASwJN8bHBrXfu/3s6udzDcQ+kOOiM/i2797cNlum0WeVqJcMUkyN2I2q" "qPkRrT8XtygMjSZ33S43QyN+QnsIgl2v0wrX4pdV1FcCsgw3mdIxf2prfoJllGNHu79yFsvH" "+R/Q40TYLhsSPfTLS7Tc7usIxUDdV93HsU0SA/sw5YCQA+P77ejkvDDOXAba8nh/kPOuds9x" "305aogs+IwTGDYOEjOBCRZcJmaUplYK6JnnYQX105T9C++oLWextKMJXSXDhgcmx8oDxC7h8" "vTKXK+j94Fwyt/Yg7d4pkGzcOLfWdGwYBRzBQFouQr2Ao+8YBJVl8YWLjYNSU9/0gcaDbT5k" "mEmB6f5s/vTyJ04NYYZkxKJHM7kljYa8I6spP+i8zyQFAXMfHN8JA181PROy7Vkcx0JSIy1r" "InFHUC3QZRL+IudmrcEIwuEl1qktz5MzHjfq0OTMyDjUTTmZGYHPihmKLBus6ORfKm47SILB" "+sZFFkLGsYYd1mNsv374zu6x5w3LnVuDji9zYZ9nuEkVF0UIMuUsegPSMdoXdIEbOpJrTMbT" "587BBqHN7RzImQgP5aOLRynmHNR7EjfKb/DLxW5kqPik6Lfw4ZV7QHL1UJg+EMZrwneMa9e9" "vqELI7gPa1gXZnmREtZFx/eayEGpzULCOcJ1TRCw2940UD25XwTTbJKQxmdXj67Yh91OlRTV" "I5ZfbpmHR++kcANwCyxahR4S/1V1mzbIk/fDVqab07C45TBFS5E3Kny3/Rhdr3ud/Dc1Rlzp" "1La7+npR2BWgeiHhgscHCXUVSIA+7v/zpnVwmrLa9vVU2aO7bzNQKYj4tFvgXtU249ba8+Ng" "IC2aZCYS4So9tiXEwMpmWZI8v16Sg9i3YF82najfyHxoHbjM6wUz2KE+gIQyIBlQuhD6cf/X" "NwcVz46zC/3VDvwsTnO+artGmT1CtYr8YAuo7YGzlUOn8vYEaY5VkikBUumQj0BMxd8G0q6E" "i/+JHQK3x6dtYjwyE0ZIk1JxsLIcw7lGvR7l4/j3WBy6aY3kjrL1T22sR0H93RC39NJ9OrYq" "Gr7LE3UMxGYF2DodQMqrUkiZLgPy2e+KsDbC8byxwzaOapDlAadj5kdPcE8tDRD6rTYdSBfS" "/frcyn9LnclK5ttVwM7sFjq6SseDvp2K/cl2PGd6juOM6ATxIPH/CDFGKnFtmS07kw1J8o0U" "ADcNPwPeHuJP7ChZcg3ZZGXHCs/JRgbKFw3lmQnS+tGl/5ZyxdhIlhAfy8Fh7MfH26HopT4Y" "xhAALKGVuK8z/4sbROxaCIu5RfHKxq4B0nFx8OzYN3AbgT+4g8iM3kusBpD3xSUOyKckgTsP" "4rw/Hv1RrHIYjTazcFADN2C8YZmGuOlePYQHhP3JUue2XxeG9ZmzKW2jhMc+wEQzIx7Cowy8" "XycN50n+wh3JrXUPzYtDwcotUo1uEGXjr4Szss/zH3NzlcDuTM/MPMitLxO14BtSKXxMdF8x" "u+nywTx19X1FCkTIemzC8SQUSNMRDivvTggdXxUy7L9zB2MB268t8nJIkVYuoBmzpYj0Gv/O" "1NaPJ4CR74yZhSh9C+BvCbLtOl3orKfbNqdGaGx3sYa8QIzSesZ7NrpQX5k/DAG2DUXrG9Ld" "GNBos6L237mjg8N2ouZLqwwv+0LpIk3S/rJoO8DX8fH6F+cE0LGhb7/rKWdSAm0gwySsNb8s" "IJRFg3j8KD+qOhO2Z8BV67WFF0a8NJ6Z6sAgCejgFgjztd+5w0U0jIEGIZazcT8QbOSYB5D1" "Qa71DoifFll2tO5zOm1SHqooRwf/sFrfedpHcYQrdzARKU56+/bn4XWIWfQtxSaVp4/owCKi" "WRAJPSdJhv3OHYM48LfoGHu7mW2IG0wvfoS5jxmDwiH+j8f7/y7jQu+u4NjRzEE9qJ7457yx" "WZnLDHx6BPTwOmaJGyPCrH9vaLkyWGqB+Me8SXwx1thpMxNBKHz5p3YQZjHFAxOl1g1OS4CI" "mkzAzasa2i6f69PrP9Jy2V3DcUJToF4jbxby/i5sgCUEegLi4oGLDa/E91nS435piOSUg1Cu" "AIhxEB7rdSY3KIQFHPlVO0ICoZJsIHpG63jXjgazgaKLTZv3y/ILLHxQZgxW9dag9muCkSeb" "Trr0YsyUL6EkRU6VuaoKSANB12ne+1ELPYJ1LR8vVOZRQUQ5k6Oo0mfV7Fft8OAlWVrvrlyA" "n9ph1KWk4zWQT61qcqgPy9Hxqfh1Ijnj1kLYenCDzKzWdmylrWw9C4MQjx4VybhZ7OjHeZ8V" "3L41dAP9habSEQvXbUWDgXqeK/yqHe9NG7G+iz6oTL9rxz2LcnIMNI0D+ezqp/wUL2f9D5pF" "wHIS/sB+UIYYpm5C31ugrlxnWxV7oauHkmcao+NZ2wN2Up9XJxuGhwp7RmWwbTHv3gGMewsC" "3Xe+BwNM/9U7kB03qCYkkef+ePpj2vjD0DCfC4GOnm7d9onz7SYR+tp1xUA1c0PoFEPVsW2c" "8R84SBiD42Vm8e+5xnQMks48UEpa//SOsECDj++Q+cjc/+gdobsWNJ1LfK6PI2AOF30XYZ9r" "EVJO4v+gJ5d+SVUhwmvyVwGAgUyMm1rX9USYBE5LlcGlBffMoVXjBgyjnM/E9/3dO7SaZ8wS" "70x+YShd5a/eIUJqdugo0Wbyx/Ufo7+59Fy380LlBX2SQXVI91KhpKARBs4CANVn6/eY7hpN" "H+4LqDw3hwxPi7c6yO3KW/dtNnXtdvaO3cc7M47mtT3I/O53Hemnd4xuHuj7r//4+o+XBKSk" "M3BL/s5NoqS2pYOoq3vzLgB0C64ioQPzbnSaGj8T4OuNZGnxsGLMQzaz8z2wykUJsxmgHq0e" "1Q6FLIClG9GuT8gKspz1MLlo/naHy0cXj5I7Hj267/VNViWlE/b3m8qqiHL8pwDA5MI0nUgY" "DR04cuTZ1AZL7I2AyXi67UEc9DrKMg3aEWXALqmsAdfdnzBOPGed6+SD+JkniKbK7s02o+mH" "JcHDR8wx1ta3bX3uoV5qrm7t0r3TU/0wDEN6AYvH7UxYhjP9nMhVg/aETTteBeL+XhV+WGOw" "vY6AAWEBGuh2A0dIBXUi4ecNMYrza07XS/1Ugj8siNnncoM97tyOhlh9NkNCEFc227sAkEbf" "F6hc7jOWbXs0IV05/+G7rdfcSjRu6RTYEzVK03OEd4LcXgyqRJ/3aKgPgo30jHr2gru2o9/9" "OP+V4BxQ65Rdl3qdF/DzujG2G3il4n4XAPy1SjgjY74lgc++E663Y0Z7ZPOXG93fAx26vW8d" "94hAd8UwiVFzUK/juRKaXxXMgc4gPwgzeUIyxJB7fL7/BTWzp7iHfcs+eHtxKGG/stvRgmGh" "PwWAjtD+UZMl8qfMbMGs9jT0gqTPgnhtV0nXhoBH7a+mQ+ga0vTsMRLqEpII2xJr11HW/Ywz" "aUpoG9wsx/+A+uP6iRpLuppSiPfFxPCiFcTCyPbITwFg+sjnhcqyu4aPPCHzjVsQnrhOd9n0" "tmHE3Pi2olqAjsB4iVxSdHaaAdJeWkrt3WFcKAHKHshamVBFlo/r/+4gMYqa3qMFoWiO4Ped" "7HkGMPdTAJBMIch5Ds1RA1APzJ4Q7SNSQNOxJjSvYZ85EAInMskBnsSL4LZJFaxFxzhYyfhJ" "ctXECjSoE5YqeZ79Yh/Pf4vLvNMaLyOJDXiw3dHcO8YyUn4XAKqLAfXiGdbhTzfP7aJo75PV" "mFWO814Ip2sE9A27mqXjpyjkvqAspYifMhiH/Ncpz0MH9zoo2ZA7lxxRMz69/jThKfoliPnU" "YjbuF0I4Af1coBQfswBwtfWayeyrZTzquu1T6bkQkILY7Nor02pz8MRwjIS4CN8lPCYZdHsz" "P4yjCKx8TgYpcDcRYpnUAn/u4+k/1GGkaeREE7VXbAh/khYBob3wiFiXnwLAWto+O3X4nSmk" "a28DKSNX4cjNU5purmNSvXj0lHtbwHNYdjGkrDk1iRFfrBqsMEvpGPXBGIoRttWZN9o+ngBU" "cKE1h4u42bSkbBozpVP8Itid6kzuvYhYkOqF552rW+E1bfah+A4Mur9RAD0idX32kcZwz5gq" "eI1i9tWJuu7jl+MjaU0rs/lAu1ohkAn+t8+ufmrg0lmU3awVGJGhtNIkHj81ipWgbQZ06nWI" "XSCHJY5AjvfdhToONGg424O4mKG7dHXsFzPAO/oKzpFPpDFBL3KLvwS+mQUKG8YRz1IqNcDH" "+//L7GncJmojBFkeMjq6JFoIKGGtZOZA3z4negqeFAaE10wQrK+zrNsCF+uHtqm9NlqQ0cA4" "fGAbxjbdIgLljFgBMd9fgA96BScQDe5GLan3u9GP+z+w+lheAvILQTo/MQiiBzvYzGgvSxie" "VkIn9QcM/HZPbhIfGc8ERlPygrzJDPUGxqTqsO/M3lF7PWtoN5nAF03lr8B3WFH5cPxcdu/N" "k85PL/+2LsX22vG5CvSNTjO3zUhLUvDJbIpLliKbcR0P8pQeiV5X3ASzaIG8MXd0+R7joAto" "QAcCp6zRM/BlEh82/k58lpIXtsGpi0k7ee6P8z8fAzh0WwaDW+khkQv6pbUkLB/Orkytt2WW" "Io8FeqblJUnehkHqa9zMFxFS5GwhM3X6OODagXkT3+s/E1+eV8XpvSmDQWJD0vXp9U/5IXJ6" "v4RhoqQ1U7HNbtaXo7OIESPCFDz9NDN5j9w2IqoVoNJS/erR9N+DQ4GCUQTlvyY+uFuPvCMK" "QgBIzce933t2oWXgBddrT8PXVMlscSiPVUgD8M21aI8PDLvdlDgQuixAdLC19sjD1YJM23tw" "CLQZlfwfiS/YKstMIo0UZF95DB/vf59rLDTuC0fMlv3RYkQ+LMHPLm9rEiL9RDuGfDeWWy4V" "HLVE1kPtF0GcnxHkI4lpx+bpbP/8r4nPn6FJ1qzQFvII4vPeH0S/cb1dK94YZUUJlfKWX6st" "LaCZg6YL2rBjqRybs+jngF74v6VM9BKYcbExfhHrEEOQ30OT/5T4nkOTOaGOCGdOjRHk8/3/" "+xqT9UjIBDhCFmto6uerSsGOI1qkLWD6VoFvp5lNy2EgOXIYERckABPu1boUA1otvGjza2jy" "HwofP0OTJLcJ+16W8XTEj/e/OWQokTgWUN2FXdq2mqPXd1sSogF3bBjpzzu1jGSV1G6X14b0" "b85Lq+iNZPkMSBqm3oQoRPqvha+foUlu/EnMIE3v4/xfKAD5gbwOGfAanJIY7vA1KTYSSC/2" "9cxZzTGHuCCxUVLmjGsfLG7L1vtYSL2tBsqJ8A6Rg8rLPxQ+/xiaZGaTBAHnJjazf/z8vV5F" "fxVKlm2LEhSq6XTeyHulQ5e1m73MQ6wCY2C97tkwyoV2HjUdw8J4POSD81w5WQK33f9j4fvX" "0OR9MdowNiLXtCHWj/Of6znqZGw6J5YM+zFIIsE8SE62AiZdC8Q1z/aPNrY5xyEWSe0xOyKQ" "yR747ll4Qc/XSy2XefV/bXxofx+aDGQcDaIiXfDP1//b67kIVbkuYWurZ2JidzI0rI2m/ZiD" "wGotuSBRDqrMwgBPZJYt1gTWwTpOihQJZEenl8ulTdn+pfHl+PehSQlW+Ec9s1f4fyEBcjbp" "m3fRSDPzsRi7FvvScCLxHdfbixcMAbmhgqMjZzYqeKU5H/CuhO9re0iQrjxXkKj2CO3cQhZR" "341P578PTVYEEfmFe0to9Z9ePMxGfxWJVw0dPOS1TMCGx/06dyR8sG9ZgJwtUV08E8qrzdoh" "4SHlnrn78EbPHnFAEH0zZqFS+CUdu5iNbxXEvw9NjqPQBnKvRPXy8f4PK8tOfOxZzVn8mY42" "/Wobl3IDMdExFWs0+PppJ1jJGfxmg1w63GWu3rz3INx+uVA5muXSMe3fjY+zCvYfhiY3jjhR" "oWFwZfXH8e+G6PaINSA5b3OmTdp5lwn1SwQt0dt1iqR1Fjnm3AdCZHg3SIdWmb7W2CamXw+o" "r50hQ/KjbAEYZ0wOIP8wNImxf7d5U/cCpX18/nHZs95r0PDsAdn6zGKuczoBZronL9D8gsAO" "HeO8s0Ah/l0luYPceiPXPcRKpHPHYDOXf1cgZXo8jVBJR/IPQ5OCrvswqEDoNO3H+78LA9Xe" "Hvs1uAI1Z7WVeP9jju1Uv0f03PtVGfQjr1LUG0NDxj90ZHjHHPSG+ExgjMaBOKf16+lkZ3NU" "4j8PTTZ9LAwCX52akyAfllyCa9msBN74nmx0zoRsr3OgizptIjLX4zW3YgFlXF0IXPIMy5vc" "5Ht4Yd9Mb7mLUdN/bFB3SzeN7Ok/D03upYkAXmEs1R9f/mxiKNTAMYc/8b/rgwbt8w7PM5Md" "hN2MXjei2/Y68BCFy96Dw8NeunVzrM+acUK5OCrBjehogEd4jB+wWf4PQ5NtNQKDTX7te1Mf" "Z8A5buiRUliWHUN9W/mrixefaAdPznRDm5cxI1cz6Acqmvs6O70mXxiHRxTb24K0JpxIfInd" "0ODB6DWCTJGJ/zw0yYPv8lxiBab7x/u/hhGXRD9dZk17VjYqglPkPIeb2dtlmY0wLKAhq9gN" "QbTL2L685/aF5KH2jEu4CJ9tpJxtncHG343DcoudvU/3b0OTraSa/LwyiQoIH/d/1uEjg8Nw" "JyS0RpDLv0Ah0nswnhdWhBGmWVep2MJvZa0sqYonqotIJ7q/92Dncv0xzuLa6BWDI5rNvw9N" "UlOWGt0QE1m6j99/klpCHdBoxHyWeLK3SPNADTbbWXppVx9shHdRE8EMERzhfYJ5cQ8Xc+Ct" "7LMhYKuzH355I6ItTxjdC9WRqva3oUmiWJX3kG3WyxEUf7z+B/GozHnP8YHR9Z987/wqMG9A" "ooEbXduTiV4oYFAPEcpx7avCg3a2rWVmtwHpz3buJ5pPQT1CgPsejIPdgnDk70OTSiMKvKgQ" "DNaeno+n/3GV5jWxDVLRw+4XuoDrgXdWJu2FKQzUqYPZbkBwb++N57Jd3cx7M6x2tjoL+g4Y" "x/q1ht7DWZHozWYqYVfv0l+HJicKSmswbqWJoq9EuHjoj/t/C5RcL0iT3MzJRAzhdQPOcQ9a" "llzajEcr5ZW1WAt/7FqlVD56JxE3+VGHgXERm4S5jr65yYztAiNL4lIu8i9Dk7sHVtbcZ8dR" "18isqOXp4/MfXAviEOxguLc/ZNzbFzF5s5TldU3bNsa1OFpYXTjD+F5whap3UesWRb7nDSYI" "74yHrTEWZnITUpoDwUtp+/Hn0CQQR6QWzhPT8NTdnJ2P28cB0JUYHoyv8GgzJ4HArsL4lLeT" "Bsd7vBwUAbGaHh47O9Z+RqD2S+4zN9BrmhSWzHU8CHD2tWTKjuXoiCtDqH8ZmqQImQyNUuEP" "kfdNernGj+e/NxspbgDSgAip5gT21CBsRQMORx0bec1svYc6EsyR/0mN3u2Sbx+xQuw8QVyO" "jJpcNo9k8Oj9RqbgcR/gz6HJhVGJW+K1MTxrqO7dTsM+3v+XUyV864LO0JXvcwFUdcZsZcH1" "kmKaQX1BuOvm7RaezbT+MeP9GzDAQXsfyUv5k8qYGxTTurx0atEH8sfQZBZMST1yngkRD6JQ" "Umfz+8fzX0xiuFKzo+kNxZ7rEGw/q+KQlJ4pIbDWW6uJRsLmCG/W5wt3aSYCa16UQ1YodEBw" "/Fcy0/eyDvN7aNJ4gUiXR1JusgTNiYxlEQRDYvp4BdSJsIGq6TZHwbOp9x2RrI1RhdZkMjdc" "zNirZJxTkRvJPVy7RgKnZiq8MOmRHQPbowDcDk9QA5D6xzUocoRa35kTeFGREFoWPgilfkeg" "QWUeTi314/n/aln03DeX0r5uO/puP9O5IlC3r3jSfRaHt5UaFhAdL+BO5PYYAN5XOt2KJrSX" "176G2Tp4IgzqraXRgxA7hsRS5xTtjpS5FwyBrmPkm4XRmfWx8dwV/fz9F0VsbUfCp2E9jwsX" "aAjyFsKoQkdf5nWFs9dZblrsq61GWXMg9FXptSIVek0bJss6y91HbrgBz3XtLvVEWIkag8k1" "WG4UHJrBofYCmzvefbbUqyVYTz+9fjIm+d3YHO64B0ZyamqiERiiHYU4iJsLeUHKxuQXKrFX" "EAkRobMTiYCp0hBJkNIRmPcEkzkvuad1gmIp9YFas2wYOusMc+G8DrkgOLIINcDASvWaPn7/" "abSBnIGQ0POYSTyQa53tDsK2DYjZpONeolPXeJpbi+gHstZzDoCtR0QXuOEWwOMohgAriZci" "RaO5s0hu1oZBX5vhXEawC1r5vdkZJdLMG4uSxNI/3v80YLUErKx3ndceX3vZN6EcHBK5ECL0" "3TCrWe0G8a5Ak2Z9mKW2yf/nxVBFaq9tyNp2Ou9RyB4diL8E79Leck6+r1t3zPSdeuAq9rGK" "NRwIi2M/omofn//lGJSslGadN7W1lz9LX9EaUJ3RJywgc1oob1QNfJHqw5NcLSXq6JSS+2iE" "kux5g8H4xfPKXAljSy8XCcunWUfUu9qQ/oaNEtF6JmMiDCrHKCzf0X/c/7d57UWfcSiaeQeY" "W/W8shxxYOVhoDdYxLzd4H4Q/8H+pL5SrqXQL+bJe2iSaIXxzCKmZ/jDGhE9dwiYjvfdoPvV" "l4iKhD/60+n/zLaRdRJOHWh73GcXD/P6P3Rxqp6Ibe0s5aJ1olv3WcLz2m90/wahK/SAFCGr" "aGba5y4yXezduT+HJpWcd0HhUoi0vkbDxL7rtr4RVWWtgqsHJf2dZM/LbAIbs2n4gYva/nH+" "l01zJuc2mVibdxYtJs4eFlntvoUzKKWtmUc5kax7Y9eBzNasx78PTebdO6Oirekcdt7w+oBu" "gSKXzggB7WK1HbkpBL08g9e+zdzxh2Vf8DG2FR38nHDo6PfnfferMTH03UYjkd9ZWIOBcBWk" "cRQaXZfcc45/H5osW8IlKiYcoQaxQIMdRLxm88PSuUGH2Zlmc5QMvcssqIPePr/+M1nPHNSV" "Fwg75zojaEVMrNedWwFST2SLyhFeR+maQY3LqWbfflkh/cvQ5EXl6hjxCG4Xtw70/DCvfsXg" "L6tBDt3ygQqWS+Vt94IBsRA+Xv/dV1micYYitQESE6XiPBgI0YZGirLO6ypjB7m9Ohp423eE" "fKTNnnetlyX9ZWhSZ7Dl2PoB5tzmZL8557T8zJWqy8N2njPAdg1EZ5mNaOc+Pj//8jPpiWif" "WURrkGdD4ygDyrkQwoOq1JWN9NdTyQG3hqzUnHzoDREyUcH8OTSpKPG9P09HFJVRMzSFDWbr" "Y2OztlBvcANUgFlhg5ZXKKM+H8f/QK1041g0iGDwTEem2Z5wlQiLyYTjYe/jmsWwbB5cpFs5" "gmP7Mjbz4lUOfwxNNmYsuoryvMsAJ5sXpBGFBp5D0NbxNPhpPET3bgSy76Ej+Hj8l9CzDUh6" "Nee+D1uqCrJfqc/Bt+gbtFF0nMFtiXZOy0NfzPFgoId46NH84n4NTWIIDXMAFtcUUEV4u4bH" "2Ic74sD3Y1fBF4wqblwCmNY/mf+P1792gzpPCPWxM0Bmvh+DwtJSzybGZdvy9fMdFe/HbQWW" "W23ZnEMHhIfqNWYXKPwMTdbk1tlOaQO/jllY0HjQqBOl5tU9pzQKecRIGE+RPOSeMHyaj+d/" "HBMz9KXMEAjMW//2Qgk6f2QxkSJa2U8kK0t492nMkj3vc5jlSrj+gNRnpojIDAV+32lbUnon" "hhi8mgfGRxWeI692kZd92j6lP1d+cB+vc8+gP57/a7PeQffXS8NyxbXExc5rQJZJ8Hw+Xnjw" "c7g//VzV8GAsRBvo5PXMkgGpjLCO+zWvB+mdVwMXj9v8yV6jE+j453cLgETTGbVNB4jhFvhY" "Zl84PCV8HgATOF/smYlwElDzMYaF4+6EV/7AbG3fg5iTimY/NJ79vLs6vfLMgQ+TX6PUlHYg" "+48d+03gO2ueOnDN1n+yHw7iHI1f1vnhc2rYjnF3XSRGh6N9HP+iFbt5qw3X1/ssYhgn1eiw" "TofO/j3Ub7n21vTUMCwK9ajH/7q74n6Wxk2LHoPE+wpZlVK0iaU04jYrIY+UfUB+dYdqsGN0" "nUPU+uD1UC7FWSj9eP/Xjo+gvdd6tT83EjDGV1hG3KO+bxsDjBu9t6+LM3oOi4GKgDAIf7AW" "rhDBYzioUqPqR7GiZx+bMOD2EwwCplSXVesa+PKEvbsEi513rSIvNLPe1o+P97++7kO+UWBb" "BXtPs5MEumPIbq9dlQO2K5V723ut57ze1c4LThEhgTOVgTyu3sdW7YLseXjpLCFDCuaZYrIu" "oOoIbGbW1+XB+CcOhNLBXCDXn87P7ePrZ3UsEM68t7iady0vFvTfM9ul+brx7U6w7eJYKJtj" "DYOO0+Jv9U0RRPCRc8oZomG3I/wjMHtjDcHIwPAltXVEV0NCAROlWoBB6c1aNrss2I/n+3j9" "CyhaJYextdjnd4DRwOGKSGIGaFRiMvn+PCT3xipjwLzmCG5r97OUX/fXkJXwq9D3vyN7RCtC" "EDyZIeLH/FMvvGf/A8OPYPg5lK0uXgddn4/Dn5nGQ+3MKz6Z7DPvgyuVBf01xutdpAZxnYeE" "xHCmaicKcq85tbxGRMisKX46DOPoE7qflzlHbdzsk3gykqX5LT9zBpZyYUcieXZVs4FwYTtS" "Dw8Cq+fj+PfEg5wXIMxBn1wmF/q5kwr/P40jxAfsbgnb7TDaZWWNvbSTZH5vknHltq2vIQAh" "x7JQXkgpPr5vtevIkS6uxLwIkdS2PUh5uxk3tFO0LU0CvQrhP97/9Dh5o2O2zhGZ36dxE4R8" "3CMI3jUi+TLQkQuHbLVtI5f9VYnRyg677P1l/M6kzlaGzshiF02QFIOkzZgF92pBzGM3Br5a" "HwrkXT4LNL1nYvYKxBX98fVzCTJXUnMVS2cD7TbeCObnDSdzOHEfG3rxVFRblFKbW3fEAM0p" "SYuXOfg1eKWO3Fdq/doNI5Qhbk4relCSxNqUE+IJwUsQZ+Kywd5URYwsB8IBwfnH6z+zpXvp" "XlJ/qETdpT20BFKldV56w65jr5Kns8wHpSZEDrwEiSdpNzT4UxXLSr0c35SP7SZIpeZVqRtH" "4LscWxH7guFjcgjDzaaBijz6kouhHte/fh7+iTR92oUYnu1oorDOO6/88mxwQVrwtCWSWNRa" "Fjt0rlE/hBOx9/cdDp7zeZnvazErxrN1NsIdW6upzNbohgzhRPWZYzS/xpza89DdKmSElUIj" "IX3e/2U+x3NhbWihuf/qRzNjXuce5pc4dTnzvLWVG+K4iN+Cz1XpeYeHQjtmCyJZkGk91kSn" "Cz3K4hyCwTSR7YomoY6S3td8vkP9k9Izu8T3mmdd2H78/ptXZ2oGaFNJWFUOk5EiMUE1Rh5/" "cjQG1xJ7/OHc60Hkl+lsap93uFTwzuGW3XQ2PB3vL07BoCCNXPuk9fOrUqV0x/sOmGF8DMZp" "qMzNPolULppXbz4+/3iMlc+vvFm85sh757e3AG0sB0qye2dnfcl2finqXQ8X0eZzIT93+Oj3" "WJuJgebomB5Hl0awpWwhN46GVZzWfENu4RZm77OFOi5AbXElrsHoh5Sxf9z/01IGF3U/By6W" "jzqv6GFC67zWuszMD0UjRxyDZyd5WKtE5f91h1NXuuSZx4pEKYyYMjHX0bUZiVa1iGFnV6zg" "UI6zsnGNveerz8iSzwsDzRZzlB8/f8K2lUDlZyIpqu2q56lzXNZU8uL0e94B6qtmM2f3iW8C" "0f7PHV4Qdzpe67wiAJXde7kYqmQjsxUYIc+GdOB9qSxuxnlXRkt2CI/ChFiUEjSWg3w8+41C" "KwSg6K7COIhpPY8tO7QIs1gJNRxsPS94bOrzjneVluX3HW6zXewgChngK1Pb07wse9WeAK8v" "0JTiVgCh+7srPDwN2MwIpK7AbyAen+Le5+jUh2VOcPleT//+FrzZ+Y5PdgtxUrYgoxN3SAFG" "M/vdgd89b/2PO/xgfmuSUs8Dd0Pfz+2ylHXCpuMZa6FqRZgTfPuJcc+pjtQUBIJLVizPC+DP" "Kj/e//54a+HcfVGQeMFVuekTBpwvTdv83gPEwuGBPZ0LpNWwcP2+yuY954qQCB7OXnj6QhbL" "j/cX3tpLeKun00DwW5DyzkmZvtRZQl0WVKqm4p6QB5mP5//60UtxBckuAuG9gFDW23cb/7zD" "00FHXPSaV8LPi4HY4jn54w7PMlMes5flQVzok1lcnN95Pceo8Edq977M6cf11aLCTe5AGuKM" "dNSCtoR2A0R/vvyDDnrOK7LZzEIOxLpct5+s/LzD1ayF99nrNsvba5k2TP64yqbaUt9fcv1u" "nWx8VUHPrxA8EQqiuct8prIhgrg7uhLBOJlfMdxn6XPejfnGQ5+H/7/kIAs+6lZCiX7mLLa5" "rhmgy5hf/yZmmeTVanDxL1fZ1I3Kd2EA+U8gvJqwSAwSM8nb+/6+AUlgmMjyddj5Fbv1uDHq" "zaTJ+7cIyM/3/3/lK1/5yle+8pWvfOUrX/nKV77yla985Stf+cpXvvKVr3zlK1/5yle+8pWv" "fOUrX/nKV77yla985Stf+cpXvvKVr3zlK1/5yle+8pWvfOUrX/nKV77yla985Stf+cpXvvKV" "r3zlK1/5yle+8pWvfOUrX/nKV77yla985Stf+cpXvvKVr3zlK1/5yle+8pWvfOUrX/nKV77y" "la985Stf+cpXvvKVr3zlK1/5yle+8hWA/wfdmhmZdymm9wAACEVJREFUWIXNmGtsHFcVx393" "XruzXsd2/G7tpIbEpapUIRFRKZFQ1YBUCVSFQKUIlbYiKhUSVZEA9UMpBfohH0gUtXwDRENb" "SqgQuJZCH0lMyyOKSGr6MnUeTuzY3SSt147ttXdn5s69fNiZ9XjspClxJI50tLOzs3d+8z/3" "3jnnCK01/89mrPaAmzZtemI1x7sqQLFohhBisxDiy0IIRwiREUK4Qohc5HVa661CiLwQoi5x" "3o2udYQQthDCjMYSQghxTYDRACK61gRywAOADTiRZ4Bs5EHqe3zcB2wHrMjNaMwrQlpXARZ7" "POC/gRC4A/hX4rwBiDAMzwJ1gI5cAb1AD3AweqAwchV9aiGEBrROLQqx0iJJwcXKmYmn37Jr" "167NW7Zs2S6E2KiUIgzDmsfflVL4vn967969w0eOHBkEfgXISGUZeRJ2GeQywBTcF4DPAM9H" "YM7hw4fva2ho+HYmk9mglEJKSRAESCkJwxAp5RJAKSVSSkql0sj4+Pi+xx577AXAjyD9FUCX" "QK4EaCSUuxl4Anjn4MGDlba2tgdd190ohCAMQzzP46233mPs3DgzMzPIUFbngmVTv2YN7a0t" "3NDZShAEBEFApVKhVCqNjI2N7du9e/fzUVSeB74GTEegKgJUywAj9eL5ZFFdCB3t7e2vHjhw" "4Kbm5mYAisUpXn7lIIUPxlEqxLSyWI6LZWUQhiCUPjKoIIMypmnT0trBuq4OlJJ4noeUkoGB" "gaf6+/s/B7wC/BooJ0KvAKW11lYKLrkgLMAeGBi4u6Oj46ZsNotSioG//p3jx45imA6ZfBuO" "k8cwLQzTxDAMDMOM3EAYgsr8NFNTBT68MEH3uh5yroXv+3R2dj5yyy239L///vv7E2LohAsh" "xKKCCfXiheC89tpr32pvb9/tui5SSl7q/wujZ0ew3SbsbD2mYS0CrQBYPWcihGBhbpLihdOs" "be0glxEEQYBSihMnTvxk//79z0UKVlLzUiX3wXR4nYaGhocsyyIIAvpeOsDZs6exc62Ydl10" "+dWZEAb1jZ3ccNNnmbw4wXw5rC2u7u7u+1ncL53o3rVtKw34DeBFYMczzzzzkOM4G5VSvP63" "fzI6egYr24ww7KsGS1s210jXpzZxafojNA5hGGKaZs+2bdt2AA3R/X9BYhNPA/YBTwOfP3r0" "6MNCCKamp3n7rTcx7Pw1wcWWy6+lpWMjxeJFwCAIAjo7O+8DXgU2A7+7HKBBNe5v9vf3D+/c" "ubNFKcXhw6+DsBBm5prhYmts7sK2s2BUVXRdd/3WrVufBX4EDCUAjZXmoJnL5R7UWuP7Phcv" "nEeYLp9kzn2cGYZJS8enmS/NoLVGSsmtt976dRbf0ZdV0ABMIcRGKSVvv/MeWiswnFWDi62x" "uQsZeDiZPGEY4jhOD8sTiRUVNOJX1rlz4yDMVYeD6sp2MnUgDLTWhGEISxMPAxC1jbq3t/fG" "IAjcIAhyQRBgmiazs7PXDRDArWsg8Mq1JKOpqanNcZw1tm1XyuXyeLFY9GqA+Xz+t01NTeu0" "1lahUGD9+vXxU103s50cXmUOKSWWZdHb2/vjxsbGdWEYBoVC4YfFYvHlGuDg4OCXHn/88S96" "nlff2tr63PWGq5pGa2qZUKlU+untt9/eYxjGh4cOHXoDUgnrk08++Q8g19fXRxiGGIYB1xHU" "9+YxDLMW4qGhoUtDQ0NvAKX4mhVT/vgPdfk86OsHWFmYAxYVZDFRqFkSsJZJxKu4rbUlAlz9" "0lSpEM+bZ2F+Ht/304A1TyuoATU1NXWmUqnQ2dGGEAYof9UBZ6cL2HaGwgdnCYKAiYmJAtWC" "TLGY/i9T8GbgB48++mhmdHQUz/PI1zegwzKrqaJSIR9dOI0wbMrlMkopjh07VgD2At+hWnQp" "Ugoq4KvAzMzMzLampqaRSqVC140doEO0rKwa4PTkOQK/wulT/4nrmbHZ2dmHge+zmDQrUvmg" "Bn5GNd25MDIysq+angc0rW1ByRJ6FUI9PzfJ5IXTVDzJpekphBCMjY29SDXdLwB7gEkuo2Bc" "p8o9e/a8UCqVRjzPo87NkKurR1aKqPB/h1woFZk4cxyNxdC7g0gpAcaGh4f/xGKlF1d4KyoY" "AwaAf+bMmX1RbUs+l8HN1SPLk0h/Dj5B00kpyfRHo5wfexsMl8HjR2rF0/nz5/8QwcUeJAB1" "siaJC3SLauqdBdw777zze93d3Y+0t7ejtSaQUF6YQxg2mdxanOwazCsUTeVSkdnpAmHgMV/2" "GXp3EN/3cV2XwcHBgpTyN8DvgQWW1yXhFYsm4AZg14YNG/Q999xzt+d5hGGI1qAwkL6H1iGG" "6WDZLpadRQhBKL2o7KxgWg5KG5w6OcTMpek4QaVQKPx8dHT0deBhYIBqfbysaEoDJiFt4BDV" "uvXZ7du339vV1XW/YRg9QRBEoBrLzkbCK9AajUAIgQIWSnOcL5yjXC4jpayqKsTYxYsX9586" "deqPEVAz4AGnUvNwaYhTKsaQTdF8sKKQZ+66664dPT099woheuI2R9zqSLY/pJT4vk8QBGit" "UUqNTUxMvHjy5Mk/R0CV1GcaTmmt9ce1PmIl41ZbJnJn8+bND9i2/c3bbrutNdk4SjeRFhYW" "Rk+cONE3PDy8A/hKYjF4KbAlyq3Y+kiFOu4wJLtaTgJ2B9U3z14SNQSLxUuy/aaAp6hWjX18" "gubRsv6g1lon+om1PyRuFA94RwQ3x9I0PWkq4U8DDwEvcC3tt8somWxgxqp+F/glqQbmFRSM" "IdJQSRGuroF5BVDjCp6Eiy0NuZKvCHbVgAlIUqDp4+TvSbj4WLEUuPb7kpCm2tX/BV0HrAK/" "xpabAAAAAElFTkSuQmCC" ) def interpFloat(t, v1, v2): "interpolator for a single value; interprets t in [0-1] between v1 and v2" return (v2 - v1) * t + v1 def tFromValue(value, v1, v2): "returns a t (in range 0-1) given a value in the range v1 to v2" return float(value - v1) / (v2 - v1) def clamp(v, minv, maxv): "clamps a value within a range" if v < minv: v = minv if v > maxv: v = maxv return v def toLog(t, v1, v2): return math.log10(t / v1) / math.log10(v2 / v1) def toExp(t, v1, v2): return math.pow(10, t * (math.log10(v2) - math.log10(v1)) + math.log10(v1)) class ControlKnob(wx.Panel): def __init__( self, parent, minvalue, maxvalue, init=None, pos=(0, 0), size=(44, 70), log=False, outFunction=None, integer=False, backColour=None, label="", ): wx.Panel.__init__( self, parent=parent, id=wx.ID_ANY, pos=pos, size=size, style=wx.NO_BORDER | wx.WANTS_CHARS, ) self.parent = parent self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.SetBackgroundColour(BACKGROUND_COLOUR) self.SetMinSize(self.GetSize()) self.knobBitmap = KNOB.GetBitmap() self.outFunction = outFunction self.integer = integer self.log = log self.label = label self.SetRange(minvalue, maxvalue) self.borderWidth = 1 self.selected = False self._enable = True self.midictl = None self.new = "" self.floatPrecision = "%.3f" if backColour: self.backColour = backColour else: self.backColour = BACKGROUND_COLOUR if init != None: self.SetValue(init) self.init = init else: self.SetValue(minvalue) self.init = minvalue self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown) self.Bind(wx.EVT_LEFT_UP, self.MouseUp) self.Bind(wx.EVT_LEFT_DCLICK, self.DoubleClick) self.Bind(wx.EVT_MOTION, self.MouseMotion) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_KEY_DOWN, self.keyDown) self.Bind(wx.EVT_KILL_FOCUS, self.LooseFocus) def setMidiCtl(self, x, propagate=True): self.propagate = propagate self.midictl = x self.Refresh() def getMidiCtl(self): return self.midictl def setFloatPrecision(self, x): self.floatPrecision = "%." + "%df" % x self.Refresh() def getMinValue(self): return self.minvalue def getMaxValue(self): return self.maxvalue def Enable(self): self._enable = True self.Refresh() def Disable(self): self._enable = False self.Refresh() def getInit(self): return self.init def getLabel(self): return self.label def getLog(self): return self.log def SetRange(self, minvalue, maxvalue): self.minvalue = minvalue self.maxvalue = maxvalue def getRange(self): return [self.minvalue, self.maxvalue] def SetValue(self, value): if self.HasCapture(): self.ReleaseMouse() value = clamp(value, self.minvalue, self.maxvalue) if self.log: t = toLog(value, self.minvalue, self.maxvalue) self.value = interpFloat(t, self.minvalue, self.maxvalue) else: t = tFromValue(value, self.minvalue, self.maxvalue) self.value = interpFloat(t, self.minvalue, self.maxvalue) if self.integer: self.value = int(self.value) self.selected = False self.Refresh() def GetValue(self): if self.log: t = tFromValue(self.value, self.minvalue, self.maxvalue) val = toExp(t, self.minvalue, self.maxvalue) else: val = self.value if self.integer: val = int(val) return val def LooseFocus(self, event): self.selected = False self.Refresh() def keyDown(self, event): if self.selected: char = "" if event.GetKeyCode() in range(324, 334): char = str(event.GetKeyCode() - 324) elif event.GetKeyCode() == 390: char = "-" elif event.GetKeyCode() == 391: char = "." elif event.GetKeyCode() == wx.WXK_BACK: if self.new != "": self.new = self.new[0:-1] elif event.GetKeyCode() < 256: char = chr(event.GetKeyCode()) if char in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]: self.new += char elif char == "." and not "." in self.new: self.new += char elif char == "-" and len(self.new) == 0: self.new += char elif event.GetKeyCode() in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]: if self.new != "": self.SetValue(eval(self.new)) self.new = "" self.selected = False self.Refresh() event.Skip() def MouseDown(self, evt): if evt.ShiftDown(): self.DoubleClick(evt) return if self._enable: rec = wx.Rect(5, 13, 45, 45) pos = evt.GetPosition() if rec.Contains(pos): self.clickPos = wx.GetMousePosition() self.oldValue = self.value self.CaptureMouse() self.selected = False self.Refresh() evt.Skip() def MouseUp(self, evt): if self.HasCapture(): self.ReleaseMouse() def DoubleClick(self, event): if self._enable: w, h = self.GetSize() pos = event.GetPosition() reclab = wx.Rect(5, 55, w - 10, 13) recpt = wx.Rect(self.knobPointPos[0] - 3, self.knobPointPos[1] - 3, 9, 9) if reclab.Contains(pos): self.selected = True self.Refresh() event.Skip() def MouseMotion(self, evt): if self._enable: if evt.Dragging() and evt.LeftIsDown() and self.HasCapture(): pos = wx.GetMousePosition() offY = self.clickPos[1] - pos[1] off = offY off *= 0.005 * (self.maxvalue - self.minvalue) self.value = clamp(self.oldValue + off, self.minvalue, self.maxvalue) self.selected = False self.Refresh() def setbackColour(self, colour): self.backColour = colour self.Refresh() def OnPaint(self, evt): w, h = self.GetSize() dc = wx.AutoBufferedPaintDC(self) dc.SetBrush(wx.Brush(self.backColour, wx.SOLID)) dc.Clear() # Draw background dc.SetPen(wx.Pen(self.backColour, width=self.borderWidth, style=wx.SOLID)) dc.DrawRectangle(0, 0, w, h) if sys.platform == "darwin": dc.SetFont( wx.Font( 10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) ) else: dc.SetFont( wx.Font( 7, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) ) dc.SetTextForeground("#000000") # Draw text label reclab = wx.Rect(0, 1, w, 9) dc.DrawLabel(self.label, reclab, wx.ALIGN_CENTER_HORIZONTAL) recval = wx.Rect(5, 55, w - 10, 13) if self.selected: dc.SetBrush(wx.Brush("#FFFFFF", wx.SOLID)) dc.SetPen(wx.Pen("#FFFFFF", width=self.borderWidth, style=wx.SOLID)) dc.DrawRoundedRectangle(recval, 3) dc.DrawBitmap(self.knobBitmap, 2, 13, True) r = 0.17320508075688773 # math.sqrt(.03) val = tFromValue(self.value, self.minvalue, self.maxvalue) * 0.87 ph = val * math.pi * 2 - (3 * math.pi / 2.2) X = int(round(r * math.cos(ph) * 45)) Y = int(round(r * math.sin(ph) * 45)) dc.SetPen(wx.Pen("#000000", width=1, style=wx.SOLID)) dc.SetBrush(wx.Brush("#000000", wx.SOLID)) self.knobPointPos = (X + 22, Y + 33) dc.DrawCircle(X + 22, Y + 33, 2) # Draw text value if self.selected and self.new: val = self.new else: if self.integer: val = "%d" % self.GetValue() else: val = self.floatPrecision % self.GetValue() if sys.platform == "linux2": width = len(val) * (dc.GetCharWidth() - 3) else: width = len(val) * dc.GetCharWidth() dc.SetTextForeground("#000000") dc.DrawLabel(val, recval, wx.ALIGN_CENTER) # Send value if self.outFunction: self.outFunction(self.GetValue()) evt.Skip()
filesystem-ajax
helpers
# This file is part of Archivematica. # # Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com> # # Archivematica is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Archivematica is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Archivematica. If not, see <http://www.gnu.org/licenses/>. import logging import os from archivematicaFunctions import b64encode_string from components import helpers logger = logging.getLogger("archivematica.dashboard") def sorted_directory_list(path): entries = os.listdir(path) return sorted(entries, key=helpers.keynat) def directory_to_dict(path, directory={}, entry=False): # if starting traversal, set entry to directory root if entry is False: entry = directory # remove leading slash entry["parent"] = b64encode_string(os.path.dirname(path)[1:]) # set standard entry properties entry["name"] = b64encode_string(os.path.basename(path)) entry["children"] = [] # define entries entries = sorted_directory_list(path) for file in entries: new_entry = None if file[0] != ".": new_entry = {} new_entry["name"] = b64encode_string(file) entry["children"].append(new_entry) # if entry is a directory, recurse child_path = os.path.join(path, file) if ( new_entry is not None and os.path.isdir(child_path) and os.access(child_path, os.R_OK) ): directory_to_dict(child_path, directory, new_entry) # return fully traversed data return directory def check_filepath_exists(filepath): error = None if filepath == "": error = "No filepath provided." # check if exists if error is None and not os.path.exists(filepath): error = "Filepath " + filepath + " does not exist." # check if is file or directory # check for trickery try: filepath.index("..") error = "Illegal path." except: pass return error
PathTests
TestRefactoredMach3Mach4Post
# -*- coding: utf-8 -*- # *************************************************************************** # * Copyright (c) 2022 sliptonic <shopinthewoods@gmail.com> * # * Copyright (c) 2022 Larry Woestman <LarryWoestman2@gmail.com> * # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU Lesser General Public License (LGPL) * # * as published by the Free Software Foundation; either version 2 of * # * the License, or (at your option) any later version. * # * for detail see the LICENCE text file. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this program; if not, write to the Free Software * # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * # * USA * # * * # *************************************************************************** from importlib import reload import FreeCAD import Path import PathTests.PathTestUtils as PathTestUtils from Path.Post.scripts import refactored_mach3_mach4_post as postprocessor Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule()) Path.Log.trackModule(Path.Log.thisModule()) class TestRefactoredMach3Mach4Post(PathTestUtils.PathTestBase): @classmethod def setUpClass(cls): """setUpClass()... This method is called upon instantiation of this test class. Add code and objects here that are needed for the duration of the test() methods in this class. In other words, set up the 'global' test environment here; use the `setUp()` method to set up a 'local' test environment. This method does not have access to the class `self` reference, but it is able to call static methods within this same class. """ # Open existing FreeCAD document with test geometry FreeCAD.newDocument("Unnamed") @classmethod def tearDownClass(cls): """tearDownClass()... This method is called prior to destruction of this test class. Add code and objects here that cleanup the test environment after the test() methods in this class have been executed. This method does not have access to the class `self` reference. This method is able to call static methods within this same class. """ # Close geometry document without saving FreeCAD.closeDocument(FreeCAD.ActiveDocument.Name) # Setup and tear down methods called before and after each unit test def setUp(self): """setUp()... This method is called prior to each `test()` method. Add code and objects here that are needed for multiple `test()` methods. """ self.doc = FreeCAD.ActiveDocument self.con = FreeCAD.Console self.docobj = FreeCAD.ActiveDocument.addObject("Path::Feature", "testpath") reload( postprocessor ) # technical debt. This shouldn't be necessary but here to bypass a bug def tearDown(self): """tearDown()... This method is called after each test() method. Add cleanup instructions here. Such cleanup instructions will likely undo those in the setUp() method. """ FreeCAD.ActiveDocument.removeObject("testpath") def test000(self): """Test Output Generation. Empty path. Produces only the preamble and postable. """ self.docobj.Path = Path.Path([]) postables = [self.docobj] # Test generating with header # Header contains a time stamp that messes up unit testing. # Only test length of result. args = "--no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) self.assertTrue(len(gcode.splitlines()) == 14) # Test without header expected = """(Begin preamble) G17 G54 G40 G49 G80 G90 G21 (Begin operation: testpath) (Machine: mach3_4, mm/min) (Finish operation: testpath) (Begin postamble) M05 G17 G54 G90 G80 G40 M2 """ self.docobj.Path = Path.Path([]) postables = [self.docobj] args = "--no-header --no-show-editor" # args = ("--no-header --no-comments --no-show-editor --precision=2") gcode = postprocessor.export(postables, "gcode.tmp", args) self.assertEqual(gcode, expected) # test without comments expected = """G17 G54 G40 G49 G80 G90 G21 M05 G17 G54 G90 G80 G40 M2 """ args = "--no-header --no-comments --no-show-editor" # args = ("--no-header --no-comments --no-show-editor --precision=2") gcode = postprocessor.export(postables, "gcode.tmp", args) self.assertEqual(gcode, expected) def test010(self): """Test command Generation. Test Precision """ c = Path.Command("G0 X10 Y20 Z30") self.docobj.Path = Path.Path([c]) postables = [self.docobj] args = "--no-header --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[5] expected = "G0 X10.000 Y20.000 Z30.000" self.assertEqual(result, expected) args = "--no-header --precision=2 --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[5] expected = "G0 X10.00 Y20.00 Z30.00" self.assertEqual(result, expected) def test020(self): """ Test Line Numbers """ c = Path.Command("G0 X10 Y20 Z30") self.docobj.Path = Path.Path([c]) postables = [self.docobj] args = "--no-header --line-numbers --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[5] expected = "N150 G0 X10.000 Y20.000 Z30.000" self.assertEqual(result, expected) def test030(self): """ Test Pre-amble """ self.docobj.Path = Path.Path([]) postables = [self.docobj] args = "--no-header --no-comments --preamble='G18 G55' --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[0] self.assertEqual(result, "G18 G55") def test040(self): """ Test Post-amble """ self.docobj.Path = Path.Path([]) postables = [self.docobj] args = "--no-header --no-comments --postamble='G0 Z50\nM2' --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[-2] self.assertEqual(result, "G0 Z50") self.assertEqual(gcode.splitlines()[-1], "M2") def test050(self): """ Test inches """ c = Path.Command("G0 X10 Y20 Z30") self.docobj.Path = Path.Path([c]) postables = [self.docobj] args = "--no-header --inches --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) self.assertEqual(gcode.splitlines()[2], "G20") result = gcode.splitlines()[5] expected = "G0 X0.3937 Y0.7874 Z1.1811" self.assertEqual(result, expected) args = "--no-header --inches --precision=2 --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[5] expected = "G0 X0.39 Y0.79 Z1.18" self.assertEqual(result, expected) def test060(self): """ Test test modal Suppress the command name if the same as previous """ c = Path.Command("G0 X10 Y20 Z30") c1 = Path.Command("G0 X10 Y30 Z30") self.docobj.Path = Path.Path([c, c1]) postables = [self.docobj] args = "--no-header --modal --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[6] expected = "X10.000 Y30.000 Z30.000" self.assertEqual(result, expected) def test070(self): """ Test axis modal Suppress the axis coordinate if the same as previous """ c = Path.Command("G0 X10 Y20 Z30") c1 = Path.Command("G0 X10 Y30 Z30") self.docobj.Path = Path.Path([c, c1]) postables = [self.docobj] args = "--no-header --axis-modal --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[6] expected = "G0 Y30.000" self.assertEqual(result, expected) def test080(self): """ Test tool change """ c = Path.Command("M6 T2") c2 = Path.Command("M3 S3000") self.docobj.Path = Path.Path([c, c2]) postables = [self.docobj] args = "--no-header --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) self.assertEqual(gcode.splitlines()[6], "M5") self.assertEqual(gcode.splitlines()[7], "M6 T2") self.assertEqual(gcode.splitlines()[8], "G43 H2") self.assertEqual(gcode.splitlines()[9], "M3 S3000") # suppress TLO args = "--no-header --no-tlo --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) self.assertEqual(gcode.splitlines()[8], "M3 S3000") def test090(self): """ Test comment """ c = Path.Command("(comment)") self.docobj.Path = Path.Path([c]) postables = [self.docobj] args = "--no-header --no-show-editor" gcode = postprocessor.export(postables, "gcode.tmp", args) result = gcode.splitlines()[5] expected = "(comment)" self.assertEqual(result, expected)
projectedFit
add
import eos.db import gui.mainFrame import wx from gui import globalEvents as GE from gui.fitCommands.calc.projectedFit.add import CalcAddProjectedFitCommand from gui.fitCommands.helpers import InternalCommandHistory from service.fit import Fit class GuiAddProjectedFitsCommand(wx.Command): def __init__(self, fitID, projectedFitIDs, amount): wx.Command.__init__(self, True, "Add Projected Fits") self.internalHistory = InternalCommandHistory() self.fitID = fitID self.projectedFitIDs = projectedFitIDs self.amount = amount def Do(self): results = [] for projectedFitID in self.projectedFitIDs: cmd = CalcAddProjectedFitCommand( fitID=self.fitID, projectedFitID=projectedFitID, amount=self.amount ) results.append(self.internalHistory.submit(cmd)) success = any(results) sFit = Fit.getInstance() eos.db.flush() sFit.recalc(self.fitID) sFit.fill(self.fitID) eos.db.commit() wx.PostEvent( gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)) ) return success def Undo(self): success = self.internalHistory.undoAll() sFit = Fit.getInstance() eos.db.flush() sFit.recalc(self.fitID) sFit.fill(self.fitID) eos.db.commit() wx.PostEvent( gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)) ) return success
neubot
privacy
# neubot/privacy.py # # Copyright (c) 2011, 2013 # Nexa Center for Internet & Society, Politecnico di Torino (DAUIN) # and Simone Basso <bassosimone@gmail.com> # # This file is part of Neubot <http://www.neubot.org/>. # # Neubot is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Neubot is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Neubot. If not, see <http://www.gnu.org/licenses/>. # """ Initialize and manage privacy settings """ import getopt import logging import os import sys import xml.dom.minidom if __name__ == "__main__": sys.path.insert(0, ".") from neubot import system, utils, utils_hier from neubot.config import CONFIG, ConfigError from neubot.database import DATABASE, table_config def count_valid(updates, prefix): """Return the number of valid privacy settings found and return -1 in case of error""" count = 0 for setting in ("informed", "can_collect", "can_publish"): name = "%s%s" % (prefix, setting) if name in updates: value = utils.intify(updates[name]) if not value: return -1 count += 1 return count def check(updates, check_all=False, prefix="privacy."): """Raises ConfigError if the ``updates`` dictionary does not contain valid privacy settings""" count = count_valid(updates, prefix) if count < 0: raise ConfigError( "Invalid privacy settings. Neubot is not allowed to use the distributed " "M-Lab platform to perform tests unless you (i) assert that you have " "read the privacy policy and you provide the permission to (ii) collect " "and (iii) publish your Internet address." ) elif check_all and count != 3: raise ConfigError("Not all privacy settings were specified") def collect_allowed(message): """We are allowed to collect a result in the database if the user is informed and has provided the permission to collect her Internet address""" return utils.intify(message["privacy_informed"]) and utils.intify( message["privacy_can_collect"] ) def allowed_to_run(): """We are allowed to run if and only if we have all permissions""" return count_valid(CONFIG.conf, prefix="privacy.") == 3 def complain(): """Complain with the user about privacy settings""" logging.warning("Neubot is disabled because privacy settings are not OK.") logging.warning("Please, set privacy settings via web user interface.") logging.warning("Alternatively, you can use the `neubot privacy` command.") def complain_if_needed(): """Complain with the user if privacy settings are not OK""" if not allowed_to_run(): complain() USAGE = """usage: neubot privacy [-Pt] [-D setting=value] [-f database]""" POLICY = os.sep.join([utils_hier.WWWDIR, "privacy.html"]) def main(args): """Wrapper for the real main""" try: __main(args) except (SystemExit, KeyboardInterrupt): raise except: logging.error("unhandled exception\n", exc_info=1) sys.exit(1) def print_policy(): """Print privacy policy and exit""" filep = open(POLICY, "rb") body = "".join(["<HTML>", filep.read(), "</HTML>"]) filep.close() # Adapted from scripts/make_lang_en.py document = xml.dom.minidom.parseString(body) for element in document.getElementsByTagName("textarea"): if element.getAttribute("class") != "i18n i18n_privacy_policy": continue element.normalize() for node in element.childNodes: if node.nodeType == node.TEXT_NODE: for line in node.data.splitlines(): sys.stdout.write(line.strip()) sys.stdout.write("\n") return 0 sys.stderr.write("ERROR cannot extract policy from privacy.html\n") return 1 def test_settings(connection): """Test privacy settings and exit, setting properly the exit value""" settings = table_config.dictionarize(connection) if count_valid(settings, "privacy.") == 3: return 0 return 1 def update_settings(connection, settings): """Update database privacy settings and exit""" for name in settings.keys(): if name not in ( "privacy.informed", "privacy.can_collect", "privacy.can_publish", ): sys.stderr.write("WARNING unknown setting: %s\n" % name) del settings[name] table_config.update(connection, settings.items()) # Live with that or provide a patch sys.stdout.write("*** Database changed. Please, restart Neubot.\n") return 0 def print_settings(connection, database_path): """Print privacy settings and exit""" sys.stdout.write("database: %s\n" % database_path) sys.stdout.write("settings:\n") dictionary = table_config.dictionarize(connection) for name, value in dictionary.items(): if name.startswith("privacy."): name = name.replace("privacy.", "") sys.stdout.write(" %-12s: %d\n" % (name, utils.intify(value))) sys.stdout.write("\n") return 0 def __main(args): """Initialize privacy settings""" try: options, arguments = getopt.getopt(args[1:], "D:f:Pt") except getopt.error: sys.exit(USAGE) if arguments: sys.exit(USAGE) settings = {} database_path = system.get_default_database_path() pflag = False testmode = False for name, value in options: if name == "-D": name, value = value.split("=", 1) if not name.startswith("privacy."): name = "privacy." + name settings[name] = value elif name == "-f": database_path = value elif name == "-P": pflag = True elif name == "-t": testmode = True if pflag: sys.exit(print_policy()) DATABASE.set_path(database_path) connection = DATABASE.connection() if testmode: sys.exit(test_settings(connection)) if settings: if DATABASE.readonly: sys.exit("ERROR: readonly database") sys.exit(update_settings(connection, settings)) sys.exit(print_settings(connection, database_path)) if __name__ == "__main__": main(sys.argv)
mopidy
httpclient
"""Helpers for configuring HTTP clients used in Mopidy extensions.""" from __future__ import annotations import platform from typing import TYPE_CHECKING, Optional import mopidy if TYPE_CHECKING: from mopidy.config import ProxyConfig def format_proxy(proxy_config: ProxyConfig, auth: bool = True) -> Optional[str]: """Convert a Mopidy proxy config to the commonly used proxy string format. Outputs ``scheme://host:port``, ``scheme://user:pass@host:port`` or :class:`None` depending on the proxy config provided. You can also opt out of getting the basic auth by setting ``auth`` to :class:`False`. .. versionadded:: 1.1 """ if not proxy_config.get("hostname"): return None scheme = proxy_config.get("scheme") or "http" username = proxy_config.get("username") password = proxy_config.get("password") hostname = proxy_config["hostname"] port = proxy_config.get("port") if not port or port < 0: port = 80 if username and password and auth: return f"{scheme}://{username}:{password}@{hostname}:{port}" return f"{scheme}://{hostname}:{port}" def format_user_agent(name: Optional[str] = None) -> str: """Construct a User-Agent suitable for use in client code. This will identify use by the provided ``name`` (which should be on the format ``dist_name/version``), Mopidy version and Python version. .. versionadded:: 1.1 """ parts = [ f"Mopidy/{mopidy.__version__}", f"{platform.python_implementation()}/{platform.python_version()}", ] if name: parts.insert(0, name) return " ".join(parts)
requests
adapters
# -*- coding: utf-8 -*- """ requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. """ import socket from .auth import _basic_auth_str from .compat import basestring, urlparse from .cookies import extract_cookies_to_jar from .exceptions import ( ConnectionError, ConnectTimeout, ProxyError, ReadTimeout, RetryError, SSLError, ) from .models import Response from .packages.urllib3.exceptions import ConnectTimeoutError from .packages.urllib3.exceptions import HTTPError as _HTTPError from .packages.urllib3.exceptions import MaxRetryError, ProtocolError from .packages.urllib3.exceptions import ProxyError as _ProxyError from .packages.urllib3.exceptions import ReadTimeoutError, ResponseError from .packages.urllib3.exceptions import SSLError as _SSLError from .packages.urllib3.poolmanager import PoolManager, proxy_from_url from .packages.urllib3.response import HTTPResponse from .packages.urllib3.util import Timeout as TimeoutSauce from .packages.urllib3.util.retry import Retry from .structures import CaseInsensitiveDict from .utils import ( DEFAULT_CA_BUNDLE_PATH, get_auth_from_url, get_encoding_from_headers, prepend_scheme_if_needed, urldefragauth, ) DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 class BaseAdapter(object): """The Base Transport Adapter""" def __init__(self): super(BaseAdapter, self).__init__() def send(self): raise NotImplementedError def close(self): raise NotImplementedError class HTTPAdapter(BaseAdapter): """The built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session <Session>` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param int max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. :param pool_block: Whether the connection pool should block for connections. Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = [ "max_retries", "config", "_pool_connections", "_pool_maxsize", "_pool_block", ] def __init__( self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK, ): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: self.max_retries = Retry.from_int(max_retries) self.config = {} self.proxy_manager = {} super(HTTPAdapter, self).__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because # because self.poolmanager uses a lambda function, which isn't pickleable. self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager( self._pool_connections, self._pool_maxsize, block=self._pool_block ) def init_poolmanager( self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs ): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. """ # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager( num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs, ) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. :returns: ProxyManager """ if not proxy in self.proxy_manager: proxy_headers = self.proxy_headers(proxy) self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs, ) return self.proxy_manager[proxy] def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Whether we should actually verify the certificate. :param cert: The SSL certificate to verify. """ if url.lower().startswith("https") and verify: cert_loc = None # Allow self-specified cert location. if verify is not True: cert_loc = verify if not cert_loc: cert_loc = DEFAULT_CA_BUNDLE_PATH if not cert_loc: raise Exception("Could not find a suitable SSL CA certificate bundle.") conn.cert_reqs = "CERT_REQUIRED" conn.ca_certs = cert_loc else: conn.cert_reqs = "CERT_NONE" conn.ca_certs = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. :param resp: The urllib3 response object. """ response = Response() # Fallback to None if there's no status_code, for whatever reason. response.status_code = getattr(resp, "status", None) # Make headers case-insensitive. response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode("utf-8") else: response.url = req.url # Add new cookies from the server. extract_cookies_to_jar(response.cookies, req, resp) # Give the Response some context. response.request = req response.connection = self return response def get_connection(self, url, proxies=None): """Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. """ proxies = proxies or {} proxy = proxies.get(urlparse(url.lower()).scheme) if proxy: proxy = prepend_scheme_if_needed(proxy, "http") proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): """Disposes of any internal state. Currently, this just closes the PoolManager, which closes pooled connections. """ self.poolmanager.clear() def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes to proxy URLs. """ proxies = proxies or {} scheme = urlparse(request.url).scheme proxy = proxies.get(scheme) if proxy and scheme != "https": url = urldefragauth(request.url) else: url = request.path_url return url def add_headers(self, request, **kwargs): """Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ pass def proxy_headers(self, proxy): """Returns a dictionary of the headers to add to any request sent through a proxy. This works with urllib3 magic to ensure that they are correctly sent to the proxy, rather than in a tunnelled request if CONNECT is being used. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxies: The url of the proxy being used for this request. :param kwargs: Optional additional keyword arguments. """ headers = {} username, password = get_auth_from_url(proxy) if username and password: headers["Proxy-Authorization"] = _basic_auth_str(username, password) return headers def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple. :type timeout: float or tuple :param verify: (optional) Whether to verify SSL certificates. :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ conn = self.get_connection(request.url, proxies) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers(request) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError as e: # this may raise a string formatting error. err = ( "Invalid timeout {0}. Pass a (connect, read) " "timeout tuple, or a single float to set " "both timeouts to the same value".format(timeout) ) raise ValueError(err) else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, ) # Send the request. else: if hasattr(conn, "proxy_pool"): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=timeout) try: low_conn.putrequest(request.method, url, skip_accept_encoding=True) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode("utf-8")) low_conn.send(b"\r\n") low_conn.send(i) low_conn.send(b"\r\n") low_conn.send(b"0\r\n\r\n") r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False, ) except: # If we hit any problems here, clean up the connection. # Then, reraise so that we can handle the actual exception. low_conn.close() raise else: # All is well, return the connection to the pool. conn._put_conn(low_conn) except (ProtocolError, socket.error) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) else: raise return self.build_response(request, resp)
ui
infostatus
# -*- coding: utf-8 -*- # # Picard, the next-generation MusicBrainz tagger # # Copyright (C) 2013, 2018, 2020-2022 Laurent Monin # Copyright (C) 2016-2017 Sambhav Kothari # Copyright (C) 2019, 2021-2022 Philipp Wolfer # Copyright (C) 2021 Gabriel Ferreira # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import time from picard.ui.ui_infostatus import Ui_InfoStatus from picard.util import icontheme from picard.util.time import get_timestamp from PyQt5 import QtCore, QtGui, QtWidgets class InfoStatus(QtWidgets.QWidget, Ui_InfoStatus): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent) Ui_InfoStatus.__init__(self) self.setupUi(self) self._size = QtCore.QSize(16, 16) self._create_icons() self._init_labels() self.reset_counters() def _init_labels(self): size = self._size self.label1.setPixmap(self.icon_eta.pixmap(size)) self.label1.hide() self.label2.setPixmap(self.icon_file.pixmap(size)) self.label3.setPixmap(self.icon_cd.pixmap(size)) self.label4.setPixmap(self.icon_file_pending.pixmap(size)) self.label5.setPixmap( self.icon_download.pixmap(size, QtGui.QIcon.Mode.Disabled) ) self._init_tooltips() def _create_icons(self): self.icon_eta = QtGui.QIcon(":/images/22x22/hourglass.png") self.icon_cd = icontheme.lookup("media-optical") self.icon_file = QtGui.QIcon(":/images/file.png") self.icon_file_pending = QtGui.QIcon(":/images/file-pending.png") self.icon_download = QtGui.QIcon(":/images/16x16/action-go-down-16.png") def _init_tooltips(self): t1 = _("Estimated Time") t2 = _("Files") t3 = _("Albums") t4 = _("Pending files") t5 = _("Pending requests") self.val1.setToolTip(t1) self.label1.setToolTip(t1) self.val2.setToolTip(t2) self.label2.setToolTip(t2) self.val3.setToolTip(t3) self.label3.setToolTip(t3) self.val4.setToolTip(t4) self.label4.setToolTip(t4) self.val5.setToolTip(t5) self.label5.setToolTip(t5) def update( self, files=0, albums=0, pending_files=0, pending_requests=0, progress=0 ): self.set_files(files) self.set_albums(albums) self.set_pending_files(pending_files) self.set_pending_requests(pending_requests) # estimate eta total_pending = pending_files + pending_requests last_pending = self._last_pending_files + self._last_pending_requests # Reset the counters if we had no pending progress before and receive new pending items. # This resets the starting timestamp and starts a new round of measurement. if total_pending > 0 and last_pending == 0: self.reset_counters() previous_done_files = max(0, self._max_pending_files - self._last_pending_files) previous_done_requests = max( 0, self._max_pending_requests - self._last_pending_requests ) self._max_pending_files = max( self._max_pending_files, previous_done_files + pending_files ) self._max_pending_requests = max( self._max_pending_requests, previous_done_requests + pending_requests ) self._last_pending_files = pending_files self._last_pending_requests = pending_requests if total_pending == 0 or ( self._max_pending_files + self._max_pending_requests <= 1 ): self.reset_counters() self.hide_eta() return if total_pending != last_pending: current_time = time.time() # time since we started processing this batch diff_time = max( 0.1, current_time - self._prev_time ) # denominator can't be 0 previous_done_files = max(1, previous_done_files) # denominator can't be 0 # we estimate based on the time per file * number of pending files + 1 second per additional request file_eta_seconds = ( diff_time / previous_done_files ) * pending_files + pending_requests # we assume additional network requests based on the ratio of requests/files * pending files # to estimate an upper bound (e.g. fetch cover, lookup, scan) network_eta_seconds = ( pending_requests + (previous_done_requests / previous_done_files) * pending_files ) # general eta (biased towards whatever takes longer) eta_seconds = max(network_eta_seconds, file_eta_seconds) # estimate progress self._last_progress = diff_time / (diff_time + eta_seconds) self.set_eta(eta_seconds) def reset_counters(self): self._last_progress = 0 self._max_pending_requests = 0 self._last_pending_requests = 0 self._max_pending_files = 0 self._last_pending_files = 0 self._prev_time = time.time() def get_progress(self): return self._last_progress def set_eta(self, eta_seconds): if eta_seconds > 0: self.val1.setText(get_timestamp(eta_seconds)) self.val1.show() self.label1.show() else: self.hide_eta() def hide_eta(self): self.val1.hide() self.label1.hide() def set_files(self, num): self.val2.setText(str(num)) def set_albums(self, num): self.val3.setText(str(num)) def set_pending_files(self, num): self.val4.setText(str(num)) def set_pending_requests(self, num): if num <= 0: enabled = QtGui.QIcon.Mode.Disabled else: enabled = QtGui.QIcon.Mode.Normal self.label5.setPixmap(self.icon_download.pixmap(self._size, enabled)) self.val5.setText(str(num))
qt
webkit
# pylint: disable=wildcard-import """Wrapped Qt imports for Qt WebKit. All code in qutebrowser should use this module instead of importing from PyQt/PySide directly. This allows supporting both Qt 5 and Qt 6 (though WebKit is only supported with Qt 5). See machinery.py for details on how Qt wrapper selection works. Any API exported from this module is based on the QtWebKit 5.212 API: https://qtwebkit.github.io/doc/qtwebkit/qtwebkit-index.html """ import typing from qutebrowser.qt import machinery machinery.init_implicit() if machinery.USE_PYSIDE6: # pylint: disable=no-else-raise raise machinery.Unavailable() elif machinery.USE_PYQT5 or typing.TYPE_CHECKING: # If we use mypy (even on Qt 6), we pretend to have WebKit available. # This avoids central API (like BrowserTab) being Any because the webkit part of # the unions there is missing. # This causes various issues inside browser/webkit/, but we ignore those in # .mypy.ini because we don't really care much about QtWebKit anymore. from PyQt5.QtWebKit import * elif machinery.USE_PYQT6: raise machinery.Unavailable() else: raise machinery.UnknownWrapper()
classes
waveform
""" @file @brief This file has code to generate audio waveform data structures @author Jonathan Thomas <jonathan@openshot.org> @section LICENSE Copyright (c) 2008-2018 OpenShot Studios, LLC (http://www.openshotstudios.com). This file is part of OpenShot Video Editor (http://www.openshot.org), an open-source project dedicated to delivering high quality video editing and animation solutions to the world. OpenShot Video Editor is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OpenShot Video Editor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>. """ import threading import uuid import openshot from classes.app import get_app from classes.logger import log from classes.query import Clip, File from PyQt5.QtCore import Qt from PyQt5.QtGui import QCursor # Get settings s = get_app().get_settings() # resolution of audio waveform SAMPLES_PER_SECOND = 20 def get_audio_data(files: dict, transaction_id=None): """Get a Clip object form libopenshot, and grab audio data For for the given files and clips, start threads to gather audio data. arg1: a dict of clip_ids grouped by their file_id """ for file_id in files: clip_list = files[file_id] log.info("Clip loaded, start thread") t = threading.Thread( target=get_waveform_thread, args=[file_id, clip_list, transaction_id], daemon=True, ) t.start() def get_waveform_thread(file_id, clip_list, transaction_id): """ For the given file ID and clip IDs, update audio data. arg1: file id to get the audio data of. arg2: list of clips to update when the audio data is ready. arg3: tid: transaction id to group waveform saves together """ def getAudioData(file, channel=-1, tid=None): """ Update the file query object with audio data (if found). """ # Ensure that UI attribute exists file_data = file.data file_audio_data = file_data.get("ui", {}).get("audio_data", []) if file_audio_data and channel == -1: log.info("Audio Data already retrieved (or being retrieved).") return # Open file and access audio data (if audio data is found, otherwise return) temp_clip = openshot.Clip(file_data["path"]) if temp_clip.Reader().info.has_audio == False: log.info(f"file: {file_data['path']} has no audio_data. Skipping") return # Show waiting cursor get_app().setOverrideCursor(QCursor(Qt.WaitCursor)) # Extract audio waveform data (for all channels) # Use max RMS (root mean squared) value for each sample # NOTE: we also have the average RMS value calculated, although we do # not use it yet waveformer = openshot.AudioWaveformer(temp_clip.Reader()) file_audio_data = waveformer.ExtractSamples(channel, SAMPLES_PER_SECOND, True) samples_vectors = file_audio_data.vectors() max_samples_vector = samples_vectors[0] # max sample value dataset rms_samples_vector = samples_vectors[1] # average RMS sample value dataset # Clear data file_audio_data.clear() # Update file with audio data (only if all channels requested) if channel == -1: get_app().window.timeline.fileAudioDataReady.emit( file.id, {"ui": {"audio_data": max_samples_vector}}, tid ) # Restore cursor get_app().restoreOverrideCursor() # Return audio sample dataset return max_samples_vector # Get file query object file = File.get(id=file_id) # Only generate audio for readers that actually contain audio if not file.data.get("has_audio", False): log.info("File does not have audio. Skipping") return # Transaction id to group all deletes together if transaction_id: tid = transaction_id else: tid = str(uuid.uuid4()) # If the file doesn't have audio data, generate it. # A pending audio_data process will have audio_data == [-999] file_audio_data = file.data.get("ui", {}).get("audio_data", []) if not file_audio_data: log.debug("Generating audio data for file %s" % file.id) # Save empty 'audio_data' property before we get audio samples get_app().window.timeline.fileAudioDataReady.emit( file.id, {"ui": {"audio_data": None}}, tid ) # Generate audio data for a specific file file_audio_data = getAudioData(file, tid=tid) if not file_audio_data: log.info("No audio data found. Aborting") return log.debug("Audio data found for file: %s" % file.data.get("path")) # Loop through each selected clip (which uses this file) for clip_id in clip_list: clip = Clip.get(id=clip_id) if not clip: # Ignore null clip log.debug(f"No clip found for ID: {clip_id}. Skipping waveform generation.") continue # Check for channel mapping and filters channel_filter = int( clip.data.get("channel_filter", {}) .get("Points", [])[0] .get("co", {}) .get("Y", -1) ) if channel_filter != -1: # Some kind of filtering is happening, so we need to re-generate waveform data for this clip file_audio_data = getAudioData(file, channel_filter, tid=tid) # Get File's audio data (since it has changed) if not file_audio_data: log.info("File has no audio, so we cannot find any waveform audio data") continue # Save empty 'audio_data' property before we get audio samples get_app().window.timeline.clipAudioDataReady.emit( clip.id, {"ui": {"audio_data": None}}, tid ) # Loop through samples from the file, applying this clip's volume curve clip_audio_data = [] clip_instance = get_app().window.timeline_sync.timeline.GetClip(clip.id) num_frames = clip_instance.info.video_length # Determine best guess # of samples (based on duration) # We don't want to use the len(file_audio_data) due to padding at EOF # from libopenshot sample_count = round(clip_instance.info.duration * SAMPLES_PER_SECOND) # Determine sample ratio to FPS sample_ratio = float(sample_count / num_frames) # Loop through file samples and adjust time/volume values # Copy adjusted samples into clip data for sample_index in range(sample_count): frame_num = round(sample_index / sample_ratio) + 1 volume = clip_instance.volume.GetValue(frame_num) if clip_instance.time.GetCount() > 1: # Override sample # using time curve (if set) # Don't exceed array size sample_index = min( round(clip_instance.time.GetValue(frame_num) * sample_ratio), sample_count - 1, ) clip_audio_data.append(file_audio_data[sample_index] * volume) # Save this data to the clip object get_app().window.timeline.clipAudioDataReady.emit( clip.id, {"ui": {"audio_data": clip_audio_data}}, tid )
cli
translation
# encoding: utf-8 from __future__ import annotations import logging import os import re from typing import Any, cast import click import polib from ckan.common import config from ckan.lib.i18n import build_js_translations ckan_path = os.path.join(os.path.dirname(__file__), "..") log = logging.getLogger(__name__) @click.group(name="translation", short_help="Translation management") def translation(): pass @translation.command("js", short_help="Generate the javascript translations.") def js(): build_js_translations() click.secho("JS translation build: SUCCESS", fg="green", bold=True) @translation.command("mangle", short_help="Mangle the zh_TW translations for testing.") def mangle(): """This will mangle the zh_TW translations for translation coverage testing. NOTE: This will destroy the current translations fot zh_TW """ i18n_path = get_i18n_path() pot_path = os.path.join(i18n_path, "ckan.pot") po = polib.pofile(pot_path) # we don't want to mangle the following items in strings # %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc # sprintf bit after % spf_reg_ex = "\\+?(0|'.)?-?\\d*(.\\d*)?[\\%bcdeufosxX]" extract_reg_ex = ( "(\\%\\([^\\)]*\\)" + spf_reg_ex + "|\\[\\d*\\:[^\\]]*\\]" + "|\\{[^\\}]*\\}" + "|<[^>}]*>" + "|\\%((\\d)*\\$)?" + spf_reg_ex + ")" ) for entry in po: msg = entry.msgid matches = re.finditer(extract_reg_ex, msg) length = len(msg) position = 0 translation = "" for match in matches: translation += "-" * (match.start() - position) position = match.end() translation += match.group(0) translation += "-" * (length - position) entry.msgstr = translation out_dir = os.path.join(i18n_path, "zh_TW", "LC_MESSAGES") try: os.makedirs(out_dir) except OSError: pass po.metadata["Plural-Forms"] = "nplurals=1; plural=0\n" out_po = os.path.join(out_dir, "ckan.po") out_mo = os.path.join(out_dir, "ckan.mo") po.save(out_po) po.save_as_mofile(out_mo) click.secho("zh_TW has been mangled", fg="green", bold=True) @translation.command("check-po", short_help="Check po files for common mistakes") @click.argument("files", nargs=-1, type=click.Path(exists=True)) def check_po(files: list[str]): for file in files: errors = check_po_file(file) for msgid, msgstr in errors: click.echo("Format specifiers don't match:") click.echo("\t{} -> {}".format(msgid, msgstr.encode("ascii", "replace"))) @translation.command( "sync-msgids", short_help="Update the msgids on the po files " "with the ones on the pot file", ) @click.argument("files", nargs=-1, type=click.Path(exists=True)) def sync_po_msgids(files: list[str]): i18n_path = get_i18n_path() pot_path = os.path.join(i18n_path, "ckan.pot") po = polib.pofile(pot_path) entries_to_change = {} for entry in po.untranslated_entries(): entries_to_change[normalize_string(entry.msgid)] = entry.msgid for path in files: sync_po_file_msgids(entries_to_change, path) def normalize_string(s: str): return re.sub(r"\s\s+", " ", s).strip() def sync_po_file_msgids(entries_to_change: dict[str, Any], path: str): po = polib.pofile(path) cnt = 0 for entry in po.translated_entries() + po.untranslated_entries(): normalized = normalize_string(entry.msgid) if ( normalized in entries_to_change and entry.msgid != entries_to_change[normalized] ): entry.msgid = entries_to_change[normalized] cnt += 1 po.save() click.echo("Entries updated in {} file: {}".format(po.metadata["Language"], cnt)) def get_i18n_path() -> str: return config.get("ckan.i18n_directory") or os.path.join(ckan_path, "i18n") def simple_conv_specs(s: str): """Return the simple Python string conversion specifiers in the string s. e.g. ['%s', '%i'] See http://docs.python.org/library/stdtypes.html#string-formatting """ simple_conv_specs_re = re.compile("\\%\\w") return simple_conv_specs_re.findall(s) def mapping_keys(s: str): """Return a sorted list of the mapping keys in the string s. e.g. ['%(name)s', '%(age)i'] See http://docs.python.org/library/stdtypes.html#string-formatting """ mapping_keys_re = re.compile("\\%\\([^\\)]*\\)\\w") return sorted(mapping_keys_re.findall(s)) def replacement_fields(s: str): """Return a sorted list of the Python replacement fields in the string s. e.g. ['{}', '{2}', '{object}', '{target}'] See http://docs.python.org/library/string.html#formatstrings """ repl_fields_re = re.compile("\\{[^\\}]*\\}") return sorted(repl_fields_re.findall(s)) def check_translation(validator: Any, msgid: str, msgstr: str): if not validator(msgid) == validator(msgstr): return msgid, msgstr def check_po_file(path: str): errors: list[tuple[str, str]] = [] po = polib.pofile(path) for entry in po.translated_entries(): if entry.msgid_plural and entry.msgstr_plural: for function in (simple_conv_specs, mapping_keys, replacement_fields): # typechecker thinks it's a list of strings plurals = cast("dict[str, str]", entry.msgstr_plural) for key in plurals.keys(): if key == "0": error = check_translation(function, entry.msgid, plurals[key]) else: error = check_translation( function, entry.msgid_plural, plurals[key] ) if error: errors.append(error) elif entry.msgstr: for function in (simple_conv_specs, mapping_keys, replacement_fields): error = check_translation(function, entry.msgid, entry.msgstr) if error: errors.append(error) return errors
zeromq
qa_zeromq_sub
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2014 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # import time import numpy import zmq from gnuradio import blocks, eng_notation, gr, gr_unittest, zeromq class qa_zeromq_sub(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block() self.zmq_context = zmq.Context() self.pub_socket = self.zmq_context.socket(zmq.PUB) self.pub_socket.bind("tcp://127.0.0.1:0") self._address = self.pub_socket.getsockopt(zmq.LAST_ENDPOINT).decode() def tearDown(self): self.pub_socket.close() self.zmq_context.term() self.tb = None def test_001(self): vlen = 10 src_data = numpy.array(list(range(vlen)) * 100, "float32") zeromq_sub_source = zeromq.sub_source(gr.sizeof_float, vlen, self._address) sink = blocks.vector_sink_f(vlen) self.tb.connect(zeromq_sub_source, sink) self.tb.start() time.sleep(0.05) self.pub_socket.send(src_data.tostring()) time.sleep(0.5) self.tb.stop() self.tb.wait() self.assertFloatTuplesAlmostEqual(sink.data(), src_data) def test_002(self): vlen = 10 # Construct multipart source data to publish raw_data = [ numpy.array(range(vlen), "float32") * 100, numpy.array(range(vlen, 2 * vlen), "float32") * 100, ] src_data = [a.tostring() for a in raw_data] zeromq_sub_source = zeromq.sub_source(gr.sizeof_float, vlen, self._address) sink = blocks.vector_sink_f(vlen) self.tb.connect(zeromq_sub_source, sink) self.tb.start() time.sleep(0.05) self.pub_socket.send_multipart(src_data) time.sleep(0.5) self.tb.stop() self.tb.wait() # Source block will concatenate everything together expected_data = numpy.concatenate(raw_data) self.assertFloatTuplesAlmostEqual(sink.data(), expected_data) def test_003(self): # Check that message is received when correct key is used # Construct multipart source data to publish vlen = 10 raw_data = [ numpy.array(range(vlen), "float32") * 100, numpy.array(range(vlen, 2 * vlen), "float32") * 100, ] src_data = [a.tostring() for a in raw_data] src_data = [b"filter_key"] + src_data zeromq_sub_source = zeromq.sub_source( gr.sizeof_float, vlen, self._address, key="filter_key" ) sink = blocks.vector_sink_f(vlen) self.tb.connect(zeromq_sub_source, sink) self.tb.start() time.sleep(0.05) self.pub_socket.send_multipart(src_data) time.sleep(0.5) self.tb.stop() self.tb.wait() # Source block will concatenate everything together expected_data = numpy.concatenate(raw_data) self.assertFloatTuplesAlmostEqual(sink.data(), expected_data) def test_004(self): # Test that no message is received when wrong key is used vlen = 10 raw_data = [ numpy.array(range(vlen), "float32") * 100, numpy.array(range(vlen, 2 * vlen), "float32") * 100, ] src_data = [a.tostring() for a in raw_data] src_data = [b"filter_key"] + src_data zeromq_sub_source = zeromq.sub_source( gr.sizeof_float, vlen, self._address, key="wrong_filter_key" ) sink = blocks.vector_sink_f(vlen) self.tb.connect(zeromq_sub_source, sink) self.tb.start() time.sleep(0.05) self.pub_socket.send_multipart(src_data) time.sleep(0.5) self.tb.stop() self.tb.wait() assert len(sink.data()) == 0 if __name__ == "__main__": gr_unittest.run(qa_zeromq_sub)
tools
util_functions
# # Copyright 2013, 2018, 2019 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # """ Utility functions for gr_modtool """ import re import sys try: import readline have_readline = True except ImportError: have_readline = False # None of these must depend on other modtool stuff! def append_re_line_sequence(filename, linepattern, newline): """Detects the re 'linepattern' in the file. After its last occurrence, paste 'newline'. If the pattern does not exist, append the new line to the file. Then, write.""" with open(filename, "r") as f: oldfile = f.read() lines = re.findall(linepattern, oldfile, flags=re.MULTILINE) if len(lines) == 0: with open(filename, "a") as f: f.write(newline) return last_line = lines[-1] newfile = oldfile.replace(last_line, last_line + newline + "\n") with open(filename, "w") as f: f.write(newfile) def remove_pattern_from_file(filename, pattern): """Remove all occurrences of a given pattern from a file.""" with open(filename, "r") as f: oldfile = f.read() pattern = re.compile(pattern, re.MULTILINE) with open(filename, "w") as f: f.write(pattern.sub("", oldfile)) def str_to_fancyc_comment(text): """Return a string as a C formatted comment.""" l_lines = text.splitlines() if len(l_lines[0]) == 0: outstr = "/*\n" else: outstr = "/* " + l_lines[0] + "\n" for line in l_lines[1:]: if len(line) == 0: outstr += " *\n" else: outstr += " * " + line + "\n" outstr += " */\n" return outstr def str_to_python_comment(text): """Return a string as a Python formatted comment.""" l_lines = text.splitlines() if len(l_lines[0]) == 0: outstr = "#\n" else: outstr = "# " + l_lines[0] + "\n" for line in l_lines[1:]: if len(line) == 0: outstr += "#\n" else: outstr += "# " + line + "\n" outstr += "#\n" return outstr def strip_default_values(string): """Strip default values from a C++ argument list.""" return re.sub(" *=[^,)]*", "", string) def strip_arg_types(string): """ " Strip the argument types from a list of arguments. Example: "int arg1, double arg2" -> "arg1, arg2" Note that some types have qualifiers, which also are part of the type, e.g. "const std::string &name" -> "name", or "const char *str" -> "str". """ string = strip_default_values(string) return ( ", ".join([part.strip().split(" ")[-1] for part in string.split(",")]) .replace("*", "") .replace("&", "") ) def strip_arg_types_grc(string): """ " Strip the argument types from a list of arguments for GRC make tag. Example: "int arg1, double arg2" -> "$arg1, $arg2" """ if len(string) == 0: return "" else: string = strip_default_values(string) return ", ".join( ["${" + part.strip().split(" ")[-1] + "}" for part in string.split(",")] ) def get_modname(): """Grep the current module's name from gnuradio.project or CMakeLists.txt""" modname_trans = {"howto-write-a-block": "howto"} try: with open("gnuradio.project", "r") as f: prfile = f.read() regexp = r"projectname\s*=\s*([a-zA-Z0-9-_]+)$" return re.search(regexp, prfile, flags=re.MULTILINE).group(1).strip() except IOError: pass # OK, there's no gnuradio.project. So, we need to guess. with open("CMakeLists.txt", "r") as f: cmfile = f.read() regexp = r'(project\s*\(\s*|GR_REGISTER_COMPONENT\(")gr-(?P<modname>[a-zA-Z0-9-_]+)(\s*(CXX)?|" ENABLE)' try: modname = re.search(regexp, cmfile, flags=re.MULTILINE).group("modname").strip() if modname in list(modname_trans.keys()): modname = modname_trans[modname] return modname except AttributeError: return None def get_block_names(pattern, modname): """Return a list of block names belonging to modname that matches the regex pattern.""" blocknames = [] reg = re.compile(pattern) fname_re = re.compile(r"[a-zA-Z]\w+\.\w{1,5}$") with open(f"include/gnuradio/{modname}/CMakeLists.txt", "r") as f: for line in f.read().splitlines(): if len(line.strip()) == 0 or line.strip()[0] == "#": continue for word in re.split("[ /)(\t\n\r\f\v]", line): if fname_re.match(word) and reg.search(word): blocknames.append(word.strip(".h")) return blocknames def is_number(s): """Return True if the string s contains a number.""" try: float(s) return True except ValueError: return False def ask_yes_no(question, default): """Asks a binary question. Returns True for yes, False for no. default is given as a boolean.""" question += {True: " [Y/n] ", False: " [y/N] "}[default] if input(question).lower() != {True: "n", False: "y"}[default]: return default else: return not default class SequenceCompleter(object): """A simple completer function wrapper to be used with readline, e.g. option_iterable = ("search", "seek", "destroy") readline.set_completer(SequenceCompleter(option_iterable).completefunc) Typical usage is with the `with` statement. Restores the previous completer at exit, thus nestable. """ def __init__(self, sequence=None): self._seq = sequence or [] self._tmp_matches = [] def completefunc(self, text, state): if not text and state < len(self._seq): return self._seq[state] if not state: self._tmp_matches = [ candidate for candidate in self._seq if candidate.startswith(text) ] if state < len(self._tmp_matches): return self._tmp_matches[state] def __enter__(self): if have_readline: self._old_completer = readline.get_completer() readline.set_completer(self.completefunc) readline.parse_and_bind("tab: complete") def __exit__(self, exception_type, exception_value, traceback): if have_readline: readline.set_completer(self._old_completer)
migrations
0002_squashed_initial
# Generated by Django 3.2.5 on 2022-05-31 14:46 import django.db.models.deletion import django_migration_linter as linter from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ("user_management", "0001_squashed_initial"), ("slack", "0001_squashed_initial"), ] operations = [ linter.IgnoreMigration(), migrations.AddField( model_name="slackmessage", name="organization", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="slack_message", to="user_management.organization", ), ), migrations.AddField( model_name="slackchannel", name="slack_team_identity", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name="cached_channels", to="slack.slackteamidentity", ), ), migrations.AddField( model_name="slackactionrecord", name="organization", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="actions", to="user_management.organization", ), ), migrations.AddField( model_name="slackactionrecord", name="user", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="actions", to="user_management.user", ), ), migrations.AddConstraint( model_name="slackuseridentity", constraint=models.UniqueConstraint( fields=("slack_id", "slack_team_identity", "counter"), name="unique_slack_identity_per_team", ), ), migrations.AlterUniqueTogether( name="slackusergroup", unique_together={("slack_id", "slack_team_identity")}, ), migrations.AddConstraint( model_name="slackmessage", constraint=models.UniqueConstraint( fields=("slack_id", "channel_id", "_slack_team_identity"), name="unique slack_id", ), ), migrations.AlterUniqueTogether( name="slackchannel", unique_together={("slack_id", "slack_team_identity")}, ), ]
PyObjCTest
test_NULL
import objc from PyObjCTools.TestSupport import * setSignature = objc.setSignatureForSelector # setSignature(b"OCTestNULL", "callOut:", "i@:o^i") # setSignature(b"OCTestNULL", "callList:andInOut2:", "i@:@^i") # setSignature(b"OCTestNULL", "callList:andInOut:", "i@:@N^i") # setSignature(b"OCTestNULL", "callList:andIn:", "i@:@n^i") # setSignature(b"OCTestNULL", "callList:andOut:", "i@:@o^i") # setSignature(b"OCTestNULL", "on:callList:andInOut:", "i@:@@N^i") # setSignature(b"OCTestNULL", "on:callList:andIn:", "i@:@@n^i") # setSignature(b"OCTestNULL", "on:callList:andOut:", "i@:@@N^i") # 'N' is by design # setSignature(b"OCTestNULL", "on:callOut:", "v@:@N^i") # 'N' is by design objc.registerMetaDataForSelector( b"OCTestNULL", b"callOut:", dict( arguments={ 2: dict(type_modifier=b"o", null_accepted=True), }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"callList:andInOut2:", dict( arguments={ 3: dict(type_modifier=b"o", null_accepted=True), }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"callList:andInOut:", dict( arguments={ 3: dict(type_modifier=b"N", null_accepted=True), }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"callList:andIn:", dict( arguments={ 3: dict(type_modifier=b"n", null_accepted=True), }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"callList:andOut:", dict( arguments={ 3: dict(type_modifier=b"o", null_accepted=True), }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"on:callList:andInOut:", dict( arguments={ 4: dict(type_modifier=b"N", null_accepted=True), }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"on:callList:andIn:", dict( arguments={ 4: dict(type_modifier=b"n", null_accepted=True), }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"on:callList:andOut:", dict( arguments={ 4: dict(type_modifier=b"N", null_accepted=True), # N is by design }, ), ) objc.registerMetaDataForSelector( b"OCTestNULL", b"on:callOut:", dict( arguments={ 3: dict(type_modifier=b"N", null_accepted=True), # N is by design }, ), ) from PyObjCTest.NULL import * class TestNULL(TestCase): def testNULL(self): self.assertHasAttr(objc, "NULL") self.assertEqual(repr(objc.NULL), "objc.NULL") self.assertRaises(TypeError, type(objc.NULL)) class TestNullArgumentsHelper(objc.lookUpClass("NSObject")): def callList_andInOut_(self, lst, value): lst.append(str(value)) if value is objc.NULL: return (13, value) else: return (13, value * 2) callList_andInOut_ = objc.selector(callList_andInOut_, signature=b"i@:@N^i") def callList_andInOut2_(self, lst, value): lst.append(repr(value)) if value is objc.NULL: return 29 else: return 29 callList_andInOut2_ = objc.selector(callList_andInOut2_, signature=b"i@:@^i") def callList_andIn_(self, lst, value): lst.append(repr(value)) return 26 callList_andIn_ = objc.selector(callList_andIn_, signature=b"i@:@n^i") def callList_andOut_(self, lst, value): assert value is None or value is objc.NULL lst.append("Nothing here") return (27, 99) callList_andOut_ = objc.selector(callList_andOut_, signature=b"i@:@o^i") def callOut_(self, value): assert value is None or value is objc.NULL return 441 callOut_ = objc.selector(callOut_, signature=b"v@:o^i") class TestNULLArguments(TestCase): def testCallInOutNULL(self): obj = OCTestNULL.alloc().init() v = [] rv = obj.callList_andInOut_(v, 42) self.assertEqual(v, ["42"]) self.assertEqual(rv, (12, 21)) v = [] rv = obj.callList_andInOut_(v, objc.NULL) self.assertEqual(v, ["NULL"]) self.assertEqual(rv, (12, objc.NULL)) def testCallInOutNULL2(self): # If nothing is specified the bridge assumes the argument behaves # like an 'in' argument. obj = OCTestNULL.alloc().init() v = [] self.assertRaises(ValueError, obj.callList_andInOut2_, v, 42) self.assertEqual(v, []) v = [] rv = obj.callList_andInOut2_(v, objc.NULL) self.assertEqual(v, ["NULL"]) self.assertEqual(rv, (12, objc.NULL)) def testCallInNULL(self): obj = OCTestNULL.alloc().init() v = [] rv = obj.callList_andIn_(v, 42) self.assertEqual(v, ["42"]) self.assertEqual(rv, 24) v = [] rv = obj.callList_andIn_(v, objc.NULL) self.assertEqual(v, ["NULL"]) self.assertEqual(rv, 24) def testCalledInOutNULL(self): helper = OCTestNULL.alloc().init() obj = TestNullArgumentsHelper.alloc().init() v = [] rv = helper.on_callList_andInOut_(obj, v, 42) self.assertEqual(v, ["42"]) self.assertEqual(rv, (13, 84)) v = [] rv = helper.on_callList_andInOut_(obj, v, objc.NULL) self.assertEqual(v, ["objc.NULL"]) self.assertEqual(rv, (13, objc.NULL)) def testCalledInNULL(self): helper = OCTestNULL.alloc().init() obj = TestNullArgumentsHelper.alloc().init() v = [] rv = helper.on_callList_andIn_(obj, v, 42) self.assertEqual(v, ["42"]) self.assertEqual(rv, 26) v = [] rv = helper.on_callList_andIn_(obj, v, objc.NULL) self.assertEqual(v, ["objc.NULL"]) self.assertEqual(rv, 26) def testCalledOutNULL(self): helper = OCTestNULL.alloc().init() obj = TestNullArgumentsHelper.alloc().init() v = [] rv = helper.on_callList_andOut_(obj, v, 42) self.assertEqual(v, ["Nothing here"]) self.assertEqual(rv, (27, 99)) v = [] rv = helper.on_callList_andOut_(obj, v, objc.NULL) self.assertEqual(v, ["Nothing here"]) self.assertEqual(rv, (27, objc.NULL)) rv = helper.on_callOut_(obj, 42) self.assertEqual(rv, 441) rv = helper.on_callOut_(obj, objc.NULL) self.assertEqual(rv, objc.NULL) def dont_testCalledOutNULL(self): """ XXX: I'm not happy about these semantics! Current semantics: called method doesn't know about the NULL argument, the result from Python is ignored. New semantics: - If the last argument is 'out' use new semantics, otherwise keep current semantics - If function has an optional last param stuf this with objc.NULL if the argument is NULL, otherwise don't provide - If the functioin has a required last param: stuff with objc.NULL or None """ def dont_testCallOutNULL(self): """ Call a method with an 'out' argument with an additional method - if not objc.NULL: raise TypeError - argument should be NULL in objC - result should be objc.NULL """ if __name__ == "__main__": main()
ui
collectionmenu
# -*- coding: utf-8 -*- # # Picard, the next-generation MusicBrainz tagger # # Copyright (C) 2013 Michael Wiencek # Copyright (C) 2014-2015, 2018, 2020-2022 Laurent Monin # Copyright (C) 2016-2017 Sambhav Kothari # Copyright (C) 2018 Vishal Choudhary # Copyright (C) 2018, 2022-2023 Philipp Wolfer # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. from picard.collection import load_user_collections, user_collections from picard.util import strxfrm from PyQt5 import QtCore, QtGui, QtWidgets class CollectionMenu(QtWidgets.QMenu): def __init__(self, albums, *args): super().__init__(*args) self.ids = set(a.id for a in albums) self._ignore_update = False self.update_collections() def update_collections(self): self._ignore_update = True self.clear() self.actions = [] for id_, collection in sorted( user_collections.items(), key=lambda k_v: (strxfrm(str(k_v[1])), k_v[0]) ): action = QtWidgets.QWidgetAction(self) action.setDefaultWidget(CollectionMenuItem(self, collection)) self.addAction(action) self.actions.append(action) self._ignore_update = False self.addSeparator() self.refresh_action = self.addAction(_("Refresh List")) self.hovered.connect(self.update_highlight) def refresh_list(self): self.refresh_action.setEnabled(False) load_user_collections(self.update_collections) def mouseReleaseEvent(self, event): # Not using self.refresh_action.triggered because it closes the menu if ( self.actionAt(event.pos()) == self.refresh_action and self.refresh_action.isEnabled() ): self.refresh_list() def update_highlight(self, action): if self._ignore_update: return for a in self.actions: a.defaultWidget().set_active(a == action) def update_active_action_for_widget(self, widget): if self._ignore_update: return for action in self.actions: action_widget = action.defaultWidget() is_active = action_widget == widget if is_active: self._ignore_hover = True self.setActiveAction(action) self._ignore_hover = False action_widget.set_active(is_active) class CollectionMenuItem(QtWidgets.QWidget): def __init__(self, menu, collection): super().__init__() self.menu = menu self.active = False self._setup_layout(menu, collection) self._setup_colors() def _setup_layout(self, menu, collection): layout = QtWidgets.QVBoxLayout(self) style = self.style() layout.setContentsMargins( style.pixelMetric(QtWidgets.QStyle.PixelMetric.PM_LayoutLeftMargin), style.pixelMetric(QtWidgets.QStyle.PixelMetric.PM_FocusFrameVMargin), style.pixelMetric(QtWidgets.QStyle.PixelMetric.PM_LayoutRightMargin), style.pixelMetric(QtWidgets.QStyle.PixelMetric.PM_FocusFrameVMargin), ) self.checkbox = CollectionCheckBox(self, menu, collection) layout.addWidget(self.checkbox) def _setup_colors(self): palette = self.palette() self.text_color = palette.text().color() self.highlight_color = palette.highlightedText().color() def set_active(self, active): self.active = active palette = self.palette() textcolor = self.highlight_color if active else self.text_color palette.setColor(QtGui.QPalette.ColorRole.WindowText, textcolor) self.checkbox.setPalette(palette) def enterEvent(self, e): self.menu.update_active_action_for_widget(self) def leaveEvent(self, e): self.set_active(False) def paintEvent(self, e): painter = QtWidgets.QStylePainter(self) option = QtWidgets.QStyleOptionMenuItem() option.initFrom(self) option.state = QtWidgets.QStyle.StateFlag.State_None if self.isEnabled(): option.state |= QtWidgets.QStyle.StateFlag.State_Enabled if self.active: option.state |= QtWidgets.QStyle.StateFlag.State_Selected painter.drawControl(QtWidgets.QStyle.ControlElement.CE_MenuItem, option) class CollectionCheckBox(QtWidgets.QCheckBox): def __init__(self, parent, menu, collection): self.menu = menu self.collection = collection super().__init__(self.label(), parent) releases = collection.releases & menu.ids if len(releases) == len(menu.ids): self.setCheckState(QtCore.Qt.CheckState.Checked) elif not releases: self.setCheckState(QtCore.Qt.CheckState.Unchecked) else: self.setCheckState(QtCore.Qt.CheckState.PartiallyChecked) def nextCheckState(self): ids = self.menu.ids if ids & self.collection.pending: return diff = ids - self.collection.releases if diff: self.collection.add_releases(diff, self.updateText) self.setCheckState(QtCore.Qt.CheckState.Checked) else: self.collection.remove_releases( ids & self.collection.releases, self.updateText ) self.setCheckState(QtCore.Qt.CheckState.Unchecked) def updateText(self): self.setText(self.label()) def label(self): c = self.collection return ngettext( "%(name)s (%(count)i release)", "%(name)s (%(count)i releases)", c.size ) % { "name": c.name, "count": c.size, }
mainwindow
windowundo
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # SPDX-License-Identifier: GPL-3.0-or-later """Code for :undo --window.""" import collections import dataclasses from typing import TYPE_CHECKING, MutableSequence, cast from qutebrowser.config import config from qutebrowser.mainwindow import mainwindow from qutebrowser.misc import objects from qutebrowser.qt.core import QByteArray, QObject if TYPE_CHECKING: from qutebrowser.mainwindow import tabbedbrowser instance = cast("WindowUndoManager", None) @dataclasses.dataclass class _WindowUndoEntry: """Information needed for :undo -w.""" geometry: QByteArray tab_stack: "tabbedbrowser.UndoStackType" class WindowUndoManager(QObject): """Manager which saves/restores windows.""" def __init__(self, parent=None): super().__init__(parent) self._undos: MutableSequence[_WindowUndoEntry] = collections.deque() objects.qapp.window_closing.connect(self._on_window_closing) config.instance.changed.connect(self._on_config_changed) @config.change_filter("tabs.undo_stack_size") def _on_config_changed(self): self._update_undo_stack_size() def _on_window_closing(self, window): if window.tabbed_browser.is_private: return self._undos.append( _WindowUndoEntry( geometry=window.saveGeometry(), tab_stack=window.tabbed_browser.undo_stack, ) ) def _update_undo_stack_size(self): newsize = config.instance.get("tabs.undo_stack_size") if newsize < 0: newsize = None self._undos = collections.deque(self._undos, maxlen=newsize) def undo_last_window_close(self): """Restore the last window to be closed. It will have the same tab and undo stack as when it was closed. """ entry = self._undos.pop() window = mainwindow.MainWindow( private=False, geometry=entry.geometry, ) window.tabbed_browser.undo_stack = entry.tab_stack window.tabbed_browser.undo() window.show() def init(): global instance instance = WindowUndoManager(parent=objects.qapp)
misc
miscwidgets
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # SPDX-License-Identifier: GPL-3.0-or-later """Misc. widgets used at different places.""" from typing import Optional from qutebrowser.browser import inspector from qutebrowser.config import config, configfiles from qutebrowser.keyinput import keyutils, modeman from qutebrowser.misc import cmdhistory from qutebrowser.qt.core import QSize, Qt, QTimer, pyqtSignal, pyqtSlot from qutebrowser.qt.gui import QPainter, QResizeEvent, QValidator from qutebrowser.qt.widgets import ( QHBoxLayout, QLabel, QLayout, QLineEdit, QSplitter, QStyle, QStyleOption, QWidget, ) from qutebrowser.utils import debug, log, qtutils, usertypes, utils class CommandLineEdit(QLineEdit): """A QLineEdit with a history and prompt chars. Attributes: history: The command history object. _validator: The current command validator. _promptlen: The length of the current prompt. """ def __init__(self, parent=None): super().__init__(parent) self.history = cmdhistory.History(parent=self) self._validator = _CommandValidator(self) self.setValidator(self._validator) self.textEdited.connect(self.on_text_edited) self.cursorPositionChanged.connect(self.__on_cursor_position_changed) self._promptlen = 0 def __repr__(self): return utils.get_repr(self, text=self.text()) @pyqtSlot(str) def on_text_edited(self, _text): """Slot for textEdited. Stop history browsing.""" self.history.stop() @pyqtSlot(int, int) def __on_cursor_position_changed(self, _old, new): """Prevent the cursor moving to the prompt. We use __ here to avoid accidentally overriding it in subclasses. """ if new < self._promptlen: self.cursorForward(self.hasSelectedText(), self._promptlen - new) def set_prompt(self, text): """Set the current prompt to text. This updates the validator, and makes sure the user can't move the cursor behind the prompt. """ self._validator.prompt = text self._promptlen = len(text) class _CommandValidator(QValidator): """Validator to prevent the : from getting deleted. Attributes: prompt: The current prompt. """ def __init__(self, parent=None): super().__init__(parent) self.prompt = None def validate(self, string, pos): """Override QValidator::validate. Args: string: The string to validate. pos: The current cursor position. Return: A tuple (status, string, pos) as a QValidator should. """ if self.prompt is None or string.startswith(self.prompt): return (QValidator.State.Acceptable, string, pos) else: return (QValidator.State.Invalid, string, pos) class DetailFold(QWidget): """A "fold" widget with an arrow to show/hide details. Attributes: _folded: Whether the widget is currently folded or not. _hbox: The HBoxLayout the arrow/label are in. _arrow: The FoldArrow widget. Signals: toggled: Emitted when the widget was folded/unfolded. arg 0: bool, if the contents are currently visible. """ toggled = pyqtSignal(bool) def __init__(self, text, parent=None): super().__init__(parent) self._folded = True self._hbox = QHBoxLayout(self) self._hbox.setContentsMargins(0, 0, 0, 0) self._arrow = _FoldArrow() self._hbox.addWidget(self._arrow) label = QLabel(text) self._hbox.addWidget(label) self._hbox.addStretch() def toggle(self): """Toggle the fold of the widget.""" self._folded = not self._folded self._arrow.fold(self._folded) self.toggled.emit(not self._folded) def mousePressEvent(self, e): """Toggle the fold if the widget was pressed. Args: e: The QMouseEvent. """ if e.button() == Qt.MouseButton.LeftButton: e.accept() self.toggle() else: super().mousePressEvent(e) class _FoldArrow(QWidget): """The arrow shown for the DetailFold widget. Attributes: _folded: Whether the widget is currently folded or not. """ def __init__(self, parent=None): super().__init__(parent) self._folded = True def fold(self, folded): """Fold/unfold the widget. Args: folded: The new desired state. """ self._folded = folded self.update() def paintEvent(self, _event): """Paint the arrow. Args: _event: The QPaintEvent (unused). """ opt = QStyleOption() opt.initFrom(self) painter = QPainter(self) if self._folded: elem = QStyle.PrimitiveElement.PE_IndicatorArrowRight else: elem = QStyle.PrimitiveElement.PE_IndicatorArrowDown style = self.style() assert style is not None style.drawPrimitive(elem, opt, painter, self) def minimumSizeHint(self): """Return a sensible size.""" return QSize(8, 8) class WrapperLayout(QLayout): """A Qt layout which simply wraps a single widget. This is used so the widget is hidden behind a defined API and can't easily be accidentally accessed. """ def __init__(self, parent=None): super().__init__(parent) self._widget: Optional[QWidget] = None self._container: Optional[QWidget] = None def addItem(self, _widget): raise utils.Unreachable def sizeHint(self): """Get the size of the underlying widget.""" if self._widget is None: return QSize() return self._widget.sizeHint() def itemAt(self, _index): return None def takeAt(self, _index): raise utils.Unreachable def setGeometry(self, rect): """Pass through setGeometry calls to the underlying widget.""" if self._widget is None: return self._widget.setGeometry(rect) def wrap(self, container, widget): """Wrap the given widget in the given container.""" self._container = container self._widget = widget container.setFocusProxy(widget) widget.setParent(container) def unwrap(self): """Remove the widget from this layout. Does nothing if it nothing was wrapped before. """ if self._widget is None: return assert self._container is not None self._widget.setParent(qtutils.QT_NONE) self._widget.deleteLater() self._widget = None self._container.setFocusProxy(qtutils.QT_NONE) class FullscreenNotification(QLabel): """A label telling the user this page is now fullscreen.""" def __init__(self, parent=None): super().__init__(parent) self.setStyleSheet( """ background-color: rgba(50, 50, 50, 80%); color: white; border-radius: 20px; padding: 30px; """ ) all_bindings = config.key_instance.get_reverse_bindings_for("normal") bindings = all_bindings.get("fullscreen --leave") if bindings: key = bindings[0] self.setText("Press {} to exit fullscreen.".format(key)) else: self.setText("Page is now fullscreen.") self.resize(self.sizeHint()) if config.val.content.fullscreen.window: parent = self.parentWidget() assert parent is not None geom = parent.geometry() else: window = self.window() assert window is not None handle = window.windowHandle() assert handle is not None screen = handle.screen() assert screen is not None geom = screen.geometry() self.move((geom.width() - self.sizeHint().width()) // 2, 30) def set_timeout(self, timeout): """Hide the widget after the given timeout.""" QTimer.singleShot(timeout, self._on_timeout) @pyqtSlot() def _on_timeout(self): """Hide and delete the widget.""" self.hide() self.deleteLater() class InspectorSplitter(QSplitter): """Allows putting an inspector inside the tab. Attributes: _main_idx: index of the main webview widget _position: position of the inspector (right/left/top/bottom) _preferred_size: the preferred size of the inpector widget in pixels Class attributes: _PROTECTED_MAIN_SIZE: How much space should be reserved for the main content (website). _SMALL_SIZE_THRESHOLD: If the window size is under this threshold, we consider this a temporary "emergency" situation. """ _PROTECTED_MAIN_SIZE = 150 _SMALL_SIZE_THRESHOLD = 300 def __init__( self, win_id: int, main_webview: QWidget, parent: QWidget = None ) -> None: super().__init__(parent) self._win_id = win_id self.addWidget(main_webview) self.setFocusProxy(main_webview) self.splitterMoved.connect(self._on_splitter_moved) self._main_idx: Optional[int] = None self._inspector_idx: Optional[int] = None self._position: Optional[inspector.Position] = None self._preferred_size: Optional[int] = None def cycle_focus(self): """Cycle keyboard focus between the main/inspector widget.""" if self.count() == 1: raise inspector.Error("No inspector inside main window") assert self._main_idx is not None assert self._inspector_idx is not None main_widget = self.widget(self._main_idx) inspector_widget = self.widget(self._inspector_idx) assert main_widget is not None assert inspector_widget is not None if not inspector_widget.isVisible(): raise inspector.Error("No inspector inside main window") if main_widget.hasFocus(): inspector_widget.setFocus() modeman.enter( self._win_id, usertypes.KeyMode.insert, reason="Inspector focused", only_if_normal=True, ) elif inspector_widget.hasFocus(): main_widget.setFocus() def set_inspector( self, inspector_widget: inspector.AbstractWebInspector, position: inspector.Position, ) -> None: """Set the position of the inspector.""" assert position != inspector.Position.window if position in [inspector.Position.right, inspector.Position.bottom]: self._main_idx = 0 self._inspector_idx = 1 else: self._inspector_idx = 0 self._main_idx = 1 self.setOrientation( Qt.Orientation.Horizontal if position in [inspector.Position.left, inspector.Position.right] else Qt.Orientation.Vertical ) self.insertWidget(self._inspector_idx, inspector_widget) self._position = position self._load_preferred_size() self._adjust_size() def _save_preferred_size(self) -> None: """Save the preferred size of the inspector widget.""" assert self._position is not None size = str(self._preferred_size) configfiles.state["inspector"][self._position.name] = size def _load_preferred_size(self) -> None: """Load the preferred size of the inspector widget.""" assert self._position is not None full = ( self.width() if self.orientation() == Qt.Orientation.Horizontal else self.height() ) # If we first open the inspector with a window size of < 300px # (self._SMALL_SIZE_THRESHOLD), we don't want to default to half of the # window size as the small window is likely a temporary situation and # the inspector isn't very usable in that state. self._preferred_size = max(self._SMALL_SIZE_THRESHOLD, full // 2) try: size = int(configfiles.state["inspector"][self._position.name]) except KeyError: # First start pass except ValueError as e: log.misc.error("Could not read inspector size: {}".format(e)) else: self._preferred_size = int(size) def _adjust_size(self) -> None: """Adjust the size of the inspector similarly to Chromium. In general, we want to keep the absolute size of the inspector (rather than the ratio) the same, as it's confusing when the layout of its contents changes. We're essentially handling three different cases: 1) We have plenty of space -> Keep inspector at the preferred absolute size. 2) We're slowly running out of space. Make sure the page still has 150px (self._PROTECTED_MAIN_SIZE) left, give the rest to the inspector. 3) The window is very small (< 300px, self._SMALL_SIZE_THRESHOLD). Keep Qt's behavior of keeping the aspect ratio, as all hope is lost at this point. """ sizes = self.sizes() total = sizes[0] + sizes[1] assert self._main_idx is not None assert self._inspector_idx is not None assert self._preferred_size is not None if total >= self._preferred_size + self._PROTECTED_MAIN_SIZE: # Case 1 above sizes[self._inspector_idx] = self._preferred_size sizes[self._main_idx] = total - self._preferred_size self.setSizes(sizes) elif ( sizes[self._main_idx] < self._PROTECTED_MAIN_SIZE and total >= self._SMALL_SIZE_THRESHOLD ): # Case 2 above handle_size = self.handleWidth() sizes[self._main_idx] = self._PROTECTED_MAIN_SIZE - handle_size // 2 sizes[self._inspector_idx] = ( total - self._PROTECTED_MAIN_SIZE + handle_size // 2 ) self.setSizes(sizes) else: # Case 3 above pass @pyqtSlot() def _on_splitter_moved(self) -> None: assert self._inspector_idx is not None sizes = self.sizes() self._preferred_size = sizes[self._inspector_idx] self._save_preferred_size() def resizeEvent(self, e: Optional[QResizeEvent]) -> None: """Window resize event.""" assert e is not None super().resizeEvent(e) if self.count() == 2: self._adjust_size() class KeyTesterWidget(QWidget): """Widget displaying key presses.""" def __init__(self, parent=None): super().__init__(parent) self.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose) self._layout = QHBoxLayout(self) self._label = QLabel(text="Waiting for keypress...") self._layout.addWidget(self._label) def keyPressEvent(self, e): """Show pressed keys.""" lines = [ str(keyutils.KeyInfo.from_event(e)), "", f"key: {debug.qenum_key(Qt, e.key(), klass=Qt.Key)}", f"modifiers: {debug.qflags_key(Qt, e.modifiers())}", "text: {!r}".format(e.text()), ] self._label.setText("\n".join(lines))
lib
count
# The contents of this file are subject to the Common Public Attribution # License Version 1.0. (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # http://code.reddit.com/LICENSE. The License is based on the Mozilla Public # License Version 1.1, but Sections 14 and 15 have been added to cover use of # software over a computer network and provide for limited attribution for the # Original Developer. In addition, Exhibit A has been modified to be consistent # with Exhibit B. # # Software distributed under the License is distributed on an "AS IS" basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for # the specific language governing rights and limitations under the License. # # The Original Code is reddit. # # The Original Developer is the Initial Developer. The Initial Developer of # the Original Code is reddit Inc. # # All portions of the code written by reddit are Copyright (c) 2006-2015 reddit # Inc. All Rights Reserved. ############################################################################### from pylons import app_globals as g from pylons import config from r2.lib import utils from r2.lib.db.operators import desc from r2.models import Link, Subreddit count_period = g.rising_period # stubs def incr_counts(wrapped): pass def get_link_counts(period=count_period): links = Link._query(Link.c._date >= utils.timeago(period), limit=50, data=True) return dict((l._fullname, (0, l.sr_id)) for l in links) def get_sr_counts(): srs = utils.fetch_things2(Subreddit._query(sort=desc("_date"))) return dict((sr._fullname, sr._ups) for sr in srs) if config["r2.import_private"]: from r2admin.lib.count import *
extractors
SevenZip
# -*- coding: utf-8 -*- import os import re import subprocess from pyload import PKGDIR from pyload.core.utils.convert import to_str from pyload.plugins.base.extractor import ( ArchiveError, BaseExtractor, CRCError, PasswordError, ) from pyload.plugins.helpers import renice class SevenZip(BaseExtractor): __name__ = "SevenZip" __type__ = "extractor" __version__ = "0.39" __status__ = "testing" __description__ = """7-Zip extractor plugin""" __license__ = "GPLv3" __authors__ = [ ("Walter Purcaro", "vuolter@gmail.com"), ("Michael Nowak", None), ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"), ] CMD = "7z" EXTENSIONS = [ ("7z", r"7z(?:\.\d{3})?"), "xz", "gz", "gzip", "tgz", "bz2", "bzip2", "tbz2", "tbz", "tar", "wim", "swm", "lzma", "rar", "cab", "arj", "z", "taz", "cpio", "rpm", "deb", "lzh", "lha", "chm", "chw", "hxs", "iso", "msi", "doc", "xls", "ppt", "dmg", "xar", "hfs", "exe", "ntfs", "fat", "vhd", "mbr", "squashfs", "cramfs", "scap", ] _RE_PART = re.compile( r"\.7z\.\d{3}|\.(part|r)\d+(\.rar|\.rev)?(\.bad)?|\.rar$", re.I ) _RE_FILES = re.compile( r"([\d\-]+)\s+([\d:]+)\s+([RHSA.]+)\s+(\d+)\s+(?:(\d+)\s+)?(.+)" ) _RE_ENCRYPTED_HEADER = re.compile(r"encrypted archive") _RE_ENCRYPTED_FILES = re.compile(r"Encrypted\s+=\s+\+") _RE_BADPWD = re.compile(r"Wrong password", re.I) _RE_BADCRC = re.compile(r"CRC Failed|Can not open file", re.I) _RE_VERSION = re.compile( r"7-Zip\s(?:\(\w+\)\s)?(?:\[(?:32|64)\]\s)?(\d+\.\d+)", re.I ) @classmethod def find(cls): try: if os.name == "nt": cls.CMD = os.path.join(PKGDIR, "lib", "7z.exe") p = subprocess.Popen( [cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", ) out, err = (r.strip() if r else "" for r in p.communicate()) except OSError: return False else: m = cls._RE_VERSION.search(out) if m is not None: cls.VERSION = m.group(1) return True @classmethod def ismultipart(cls, filename): return cls._RE_PART.search(filename) is not None def init(self): self.smallest = None self.archive_encryption = None def verify(self, password=None): #: First we check if the header (file list) is protected #: if the header is protected, we cen verify the password very fast without hassle #: otherwise, we find the smallest file in the archive and then try to extract it encrypted_header, encrypted_files = self._check_archive_encryption() if encrypted_header: p = self.call_cmd("l", "-slt", self.filename, password=password) out, err = (r.strip() if r else "" for r in p.communicate()) if err: if self._RE_ENCRYPTED_HEADER.search(err): raise PasswordError else: raise ArchiveError(err) elif encrypted_files: #: search for smallest file and try to extract it to verify password smallest = self._find_smallest_file(password=password)[0] if smallest is None: raise ArchiveError("Cannot find smallest file") try: extracted = os.path.join( self.dest, smallest if self.fullpath else os.path.basename(smallest) ) try: os.remove(extracted) except OSError as exc: pass self.extract(password=password, file=smallest) #: Extraction was successful so exclude the file from further extraction if smallest not in self.excludefiles: self.excludefiles.append(smallest) except (PasswordError, CRCError, ArchiveError) as exc: try: os.remove(extracted) except OSError as exc: pass raise exc def progress(self, process): s = "" while True: c = process.stdout.read(1) #: Quit loop on eof if not c: break #: Reading a percentage sign -> set progress and restart if c == "%" and s: self.pyfile.set_progress(int(s)) s = "" #: Not reading a digit -> therefore restart elif not c.isdigit(): s = "" #: Add digit to progress string else: s += c def extract(self, password=None, file=None): command = "x" if self.fullpath else "e" p = self.call_cmd( command, "-o" + self.dest, self.filename, file, password=password ) #: Communicate and retrieve stderr self.progress(p) out, err = (r.strip() if r else "" for r in p.communicate()) if err: if self._RE_BADPWD.search(err): raise PasswordError elif self._RE_BADCRC.search(err): raise CRCError(err) else: #: Raise error if anything is on stderr raise ArchiveError(err) if p.returncode > 1: raise ArchiveError(self._("Process return code: {}").format(p.returncode)) def chunks(self): files = [] dir, name = os.path.split(self.filename) #: eventually multi-part files files.extend( os.path.join(dir, os.path.basename(_f)) for _f in filter(self.ismultipart, os.listdir(dir)) if self._RE_PART.sub("", name) == self._RE_PART.sub("", _f) ) #: Actually extracted file if self.filename not in files: files.append(self.filename) return files def list(self, password=None): if not self.files: self._find_smallest_file(password=password) return self.files def call_cmd(self, command, *xargs, **kwargs): args = [] #: Use UTF8 for console encoding args.append("-scsUTF-8") args.append("-sccUTF-8") #: Progress output if self.VERSION and float(self.VERSION) >= 15.08: #: Disable all output except progress and errors args.append("-bso0") args.append("-bsp1") #: Overwrite flag if self.overwrite: if self.VERSION and float(self.VERSION) >= 15.08: args.append("-aoa") else: args.append("-y") else: if self.VERSION and float(self.VERSION) >= 15.08: args.append("-aos") #: Exclude files for word in self.excludefiles: args.append("-xr!{}".format(word.strip())) #: Set a password password = kwargs.get("password") if password: args.append("-p{}".format(password)) else: args.append("-p-") call = [self.CMD, command] + args + [arg for arg in xargs if arg] self.log_debug("EXECUTE " + " ".join(call)) call = [to_str(cmd) for cmd in call] p = subprocess.Popen( call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8" ) renice(p.pid, self.priority) return p def _check_archive_encryption(self): if self.archive_encryption is None: p = self.call_cmd("l", "-slt", self.filename) out, err = (r.strip() if r else "" for r in p.communicate()) encrypted_header = self._RE_ENCRYPTED_HEADER.search(err) is not None encrypted_files = self._RE_ENCRYPTED_FILES.search(out) is not None self.archive_encryption = (encrypted_header, encrypted_files) return self.archive_encryption def _find_smallest_file(self, password=None): if not self.smallest: p = self.call_cmd("l", self.filename, password=password) out, err = (r.strip() if r else "" for r in p.communicate()) if any(e in err for e in ("Can not open", "cannot find the file")): raise ArchiveError(self._("Cannot open file")) if p.returncode > 1: raise ArchiveError( self._("Process return code: {}").format(p.returncode) ) smallest = (None, 0) files = set() for groups in self._RE_FILES.findall(out): s = int(groups[3]) f = groups[-1].strip() if smallest[1] == 0 or smallest[1] > s > 0: smallest = (f, s) if not self.fullpath: f = os.path.basename(f) f = os.path.join(self.dest, f) files.add(f) self.smallest = smallest self.files = list(files) return self.smallest
contrib
pyopenssl
"""SSL with SNI_-support for Python 2. Follow these instructions if you would like to verify SSL certificates in Python 2. Note, the default libraries do *not* do certificate checking; you need to do additional work to validate certificates yourself. This needs the following packages installed: * pyOpenSSL (tested with 0.13) * ndg-httpsclient (tested with 0.3.2) * pyasn1 (tested with 0.1.6) You can install them with the following command: pip install pyopenssl ndg-httpsclient pyasn1 To activate certificate checking, call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code before you begin making HTTP requests. This can be done in a ``sitecustomize`` module, or at any other time before your application begins using ``urllib3``, like this:: try: import urllib3.contrib.pyopenssl urllib3.contrib.pyopenssl.inject_into_urllib3() except ImportError: pass Now you can use :mod:`urllib3` as you normally would, and it will support SNI when the required modules are installed. Activating this module also has the positive side effect of disabling SSL/TLS compression in Python 2 (see `CRIME attack`_). If you want to configure the default list of supported cipher suites, you can set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. Module Variables ---------------- :var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites. .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) """ try: from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName except SyntaxError as e: raise ImportError(e) import select import ssl from socket import _fileobject, timeout import OpenSSL.SSL from pyasn1.codec.der import decoder as der_decoder from pyasn1.type import constraint, univ from .. import connection, util __all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI only *really* works if we can read the subjectAltName of certificates. HAS_SNI = SUBJ_ALT_NAME_SUPPORT # Map from urllib3 to PyOpenSSL compatible parameter-values. _openssl_versions = { ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } try: _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) except AttributeError: pass _openssl_verify = { ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS orig_util_HAS_SNI = util.HAS_SNI orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket def inject_into_urllib3(): "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support." connection.ssl_wrap_socket = ssl_wrap_socket util.HAS_SNI = HAS_SNI def extract_from_urllib3(): "Undo monkey-patching by :func:`inject_into_urllib3`." connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket util.HAS_SNI = orig_util_HAS_SNI ### Note: This is a slightly bug-fixed version of same from ndg-httpsclient. class SubjectAltName(BaseSubjectAltName): """ASN.1 implementation for subjectAltNames support""" # There is no limit to how many SAN certificates a certificate may have, # however this needs to have some limit so we'll set an arbitrarily high # limit. sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, 1024) ### Note: This is a slightly bug-fixed version of same from ndg-httpsclient. def get_subj_alt_name(peer_cert): # Search through extensions dns_name = [] if not SUBJ_ALT_NAME_SUPPORT: return dns_name general_names = SubjectAltName() for i in range(peer_cert.get_extension_count()): ext = peer_cert.get_extension(i) ext_name = ext.get_short_name() if ext_name != "subjectAltName": continue # PyOpenSSL returns extension data in ASN.1 encoded form ext_dat = ext.get_data() decoded_dat = der_decoder.decode(ext_dat, asn1Spec=general_names) for name in decoded_dat: if not isinstance(name, SubjectAltName): continue for entry in range(len(name)): component = name.getComponentByPosition(entry) if component.getName() != "dNSName": continue dns_name.append(str(component.getComponent())) return dns_name class WrappedSocket(object): """API-compatibility wrapper for Python OpenSSL's Connection-class. Note: _makefile_refs, _drop() and _reuse() are needed for the garbage collector of pypy. """ def __init__(self, connection, socket, suppress_ragged_eofs=True): self.connection = connection self.socket = socket self.suppress_ragged_eofs = suppress_ragged_eofs self._makefile_refs = 0 def fileno(self): return self.socket.fileno() def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) def recv(self, *args, **kwargs): try: data = self.connection.recv(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): return b"" else: raise except OpenSSL.SSL.ZeroReturnError as e: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: return b"" else: raise except OpenSSL.SSL.WantReadError: rd, wd, ed = select.select([self.socket], [], [], self.socket.gettimeout()) if not rd: raise timeout("The read operation timed out") else: return self.recv(*args, **kwargs) else: return data def settimeout(self, timeout): return self.socket.settimeout(timeout) def _send_until_done(self, data): while True: try: return self.connection.send(data) except OpenSSL.SSL.WantWriteError: _, wlist, _ = select.select( [], [self.socket], [], self.socket.gettimeout() ) if not wlist: raise timeout() continue def sendall(self, data): while len(data): sent = self._send_until_done(data) data = data[sent:] def close(self): if self._makefile_refs < 1: return self.connection.shutdown() else: self._makefile_refs -= 1 def getpeercert(self, binary_form=False): x509 = self.connection.get_peer_certificate() if not x509: return x509 if binary_form: return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509) return { "subject": ((("commonName", x509.get_subject().CN),),), "subjectAltName": [("DNS", value) for value in get_subj_alt_name(x509)], } def _reuse(self): self._makefile_refs += 1 def _drop(self): if self._makefile_refs < 1: self.close() else: self._makefile_refs -= 1 def _verify_callback(cnx, x509, err_no, err_depth, return_code): return err_no == 0 def ssl_wrap_socket( sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ): ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version]) if certfile: keyfile = ( keyfile or certfile ) # Match behaviour of the normal python ssl library ctx.use_certificate_file(certfile) if keyfile: ctx.use_privatekey_file(keyfile) if cert_reqs != ssl.CERT_NONE: ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback) if ca_certs: try: ctx.load_verify_locations(ca_certs, None) except OpenSSL.SSL.Error as e: raise ssl.SSLError("bad ca_certs: %r" % ca_certs, e) else: ctx.set_default_verify_paths() # Disable TLS compression to migitate CRIME attack (issue #309) OP_NO_COMPRESSION = 0x20000 ctx.set_options(OP_NO_COMPRESSION) # Set list of supported ciphersuites. ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST) cnx = OpenSSL.SSL.Connection(ctx, sock) cnx.set_tlsext_host_name(server_hostname) cnx.set_connect_state() while True: try: cnx.do_handshake() except OpenSSL.SSL.WantReadError: rd, _, _ = select.select([sock], [], [], sock.gettimeout()) if not rd: raise timeout("select timed out") continue except OpenSSL.SSL.Error as e: raise ssl.SSLError("bad handshake", e) break return WrappedSocket(cnx, sock)
lib
floodfill
# This file is part of MyPaint. # Copyright (C) 2018-2019 by the MyPaint Development Team. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. """This module implements tile-based floodfill and related operations.""" import logging import threading import lib.fill_common as fc import lib.helpers import lib.modes import lib.morphology import lib.mypaintlib as myplib import lib.surface import lib.tiledsurface import numpy as np from lib.fill_common import _EMPTY_TILE, _FULL_TILE, _OPAQUE from lib.gettext import C_ from lib.gibindings import GLib from lib.pycompat import iteritems logger = logging.getLogger(__name__) TILE_SIZE = N = myplib.TILE_SIZE INF_DIST = 2 * N * N # This should point to the array transparent_tile.rgba # defined in tiledsurface.py _EMPTY_RGBA = None # Distance data for tiles with no detected distances _GAPLESS_TILE = fc.new_full_tile(INF_DIST) _GAPLESS_TILE.flags.writeable = False EDGE = myplib.edges class GapClosingOptions: """Container of parameters for gap closing fill operations to avoid updates to the call chain in case the parameter set is altered. """ def __init__(self, max_gap_size, retract_seeps): self.max_gap_size = max_gap_size self.retract_seeps = retract_seeps def enqueue_overflows(queue, tile_coord, seeds, tiles_bbox, *p): """Conditionally add (coordinate, seed list, data...) tuples to a queue. :param queue: the queue which may be appended :type queue: list :param tile_coord: the 2d coordinate in the middle of the seed coordinates :type tile_coord: (int, int) :param seeds: 4-tuple of seed lists for n, e, s, w, relative to tile_coord :type seeds: (list, list, list, list) :param tiles_bbox: the bounding box of the fill operation :type tiles_bbox: lib.fill_common.TileBoundingBox :param p: tuples of length >= 4, items added to queue items w. same index NOTE: This function improves readability significantly in exchange for a small performance hit. Replace with explicit queueing if too slow. """ for edge in zip(*(fc.orthogonal(tile_coord), seeds) + p): edge_coord = edge[0] edge_seeds = edge[1] if edge_seeds and not tiles_bbox.outside(edge_coord): queue.append(edge) def starting_coordinates(x, y): """Get the coordinates of starting tile and pixel (tx, ty, px, py)""" init_tx, init_ty = int(x // N), int(y // N) init_x, init_y = int(x % N), int(y % N) return init_tx, init_ty, init_x, init_y def seeds_by_tile(seeds): """Partition and convert seed coordinates Partition a list of model-space seed coordinates into lists of in-tile coordinates associated to their respective tile coordinate in a dictionary. """ tile_seeds = dict() for x, y in seeds: tx, ty, px, py = starting_coordinates(x, y) seed_list = tile_seeds.get((tx, ty), []) seed_list.append((px, py)) tile_seeds[(tx, ty)] = seed_list return tile_seeds def get_target_color(src, tx, ty, px, py): """Get the pixel color for the given tile/pixel coordinates""" with src.tile_request(tx, ty, readonly=True) as start: targ_r, targ_g, targ_b, targ_a = [int(c) for c in start[py][px]] if targ_a == 0: targ_r, targ_g, targ_b = 0, 0, 0 return targ_r, targ_g, targ_b, targ_a # Main fill interface class FillHandler: """Handles fill status and cancellation The fill is run in a separate thread, and this controller is used to start and (optionally) cancel the fill, as well as provide information about its current status. """ # Stages that the fill operation can be in FILL = 0 MORPH = 1 BLUR = 2 COMPOSITE = 3 FINISHING = 4 STAGE_STRINGS = [ C_("floodfill status message: use active tense", "Filling"), C_("floodfill status message: use active tense", "Morphing"), C_("floodfill status message: use active tense", "Blurring"), C_("floodfill status message: use active tense", "Compositing"), C_("floodfill status message: use active tense", "Finishing up"), ] TILES_STRING = C_("uniform square region of pixels, plural noun", "tiles") TILES_TEMPLATE = "{t} " + TILES_STRING def __init__(self): # An object with a "keep running" flag and "tiles processed" # data, for easier C++ access in morph/blur stages self.controller = myplib.Controller() # Separate "keep running" flag checked in Python code self.run = True self.stage = None self.set_stage(self.FILL) # When morphing, blurring or compositing, # this is the total amount of tiles to process self.tiles_max = 0 self.fill_thread = None @property def tiles_processed(self): """The number of tiles processed for the current stage""" return self.controller.num_processed() def inc_processed(self): """Increment the number of tiles processed by 1""" self.controller.inc_processed(1) def set_stage(self, stage, num_tiles_to_process=None): """Change stage, updating strings and tile data""" self.stage = stage self.controller.reset() self.stage_string = self.STAGE_STRINGS[stage] if num_tiles_to_process: self.tiles_max = num_tiles_to_process self.stage_string += " " + self.TILES_TEMPLATE.format( t=num_tiles_to_process ) @property def progress_string(self): """Progress for the current stage""" if self.stage == self.FILL: return self.TILES_TEMPLATE.format(t=self.tiles_processed) elif self.stage < self.FINISHING: return str(int(100 * self.tiles_processed / self.tiles_max)) + "%" else: return "" def wait(self, t=None): """Wait t seconds for the fill to complete""" self.fill_thread.join(t) def running(self): """Check if the fill is still running""" return self.fill_thread.is_alive() def cancel(self): """Tell the fill to stop as soon as possible""" self.controller.stop() self.run = False class FloodFillArguments(object): """Container holding a set of flood fill arguments The purpose of this class is to avoid unnecessary call chain updates when changing/adding parameters. """ def __init__( self, target_pos, seeds, color, tolerance, offset, feather, gap_closing_options, mode, lock_alpha, opacity, framed, bbox, ): """Create a new fill argument set :param target_pos: pixel coordinate of target color :type target_pos: tuple :param seeds: set of seed pixel coordinates {(x, y)...} :type seeds: set :param color: an RGB color :type color: tuple :param tolerance: how much filled pixels are permitted to vary :type tolerance: float [0.0, 1.0] :param offset: the post-fill expansion/contraction radius in pixels :type offset: int [-TILE_SIZE, TILE_SIZE] :param feather: the amount to blur the fill, after offset is applied :type feather: int [0, TILE_SIZE] :param gap_closing_options: parameters for gap closing fill, or None :type gap_closing_options: GapClosingOptions :param mode: Fill blend mode - normal, erasing or alpha locked :type mode: int (Any of the Combine* modes in mypaintlib) :param lock_alpha: Lock alpha of the destination layer :type lock_alpha: bool :param opacity: opacity of the fill :type opacity: float :param framed: Whether the frame is enabled or not. :type framed: bool :param bbox: Bounding box: limits the fill :type bbox: lib.helpers.Rect or equivalent 4-tuple """ self.target_pos = target_pos self.seeds = seeds self.color = color self.tolerance = tolerance self.offset = offset self.feather = feather self.gap_closing_options = gap_closing_options self.mode = mode self.lock_alpha = lock_alpha self.opacity = opacity self.framed = framed self.bbox = bbox def skip_empty_dst(self): """If true, compositing to empty tiles does nothing""" return self.lock_alpha or self.mode in [ myplib.CombineSourceAtop, myplib.CombineDestinationOut, myplib.CombineDestinationIn, ] def no_op(self): """If true, compositing will never alter the output layer These are comp modes for which alpha locking does not really make any sense, as any visible change caused by them requires the alpha of the destination to change as well. """ return self.lock_alpha and ( self.mode in lib.modes.MODES_DECREASING_BACKDROP_ALPHA ) def flood_fill(src, fill_args, dst): """Top-level fill interface Delegates actual fill in separate thread and returns a FillHandler :param src: source, surface-like object :type src: anything supporting readonly tile_request() :param fill_args: arguments common to all fill calls :type fill_args: FloodFillArguments :param dst: target surface :type dst: lib.tiledsurface.MyPaintSurface """ handler = FillHandler() fill_function_args = (src, fill_args, dst, handler) fill_thread = threading.Thread(target=_flood_fill, args=fill_function_args) handler.fill_thread = fill_thread fill_thread.start() return handler def _flood_fill(src, args, dst, handler): """Main flood fill function The fill is performed with reference to src. The resulting tiles are composited into dst. :param src: source, surface-like object :type src: anything supporting readonly tile_request() :param args: arguments common to all fill calls :type args: FloodFillArguments :param dst: target surface :type dst: lib.tiledsurface.MyPaintSurface :param handler: controller used to track state and cancel fill :type handler: FillHandler """ _, _, width, height = args.bbox if width <= 0 or height <= 0 or args.no_op(): return tiles_bbox = fc.TileBoundingBox(args.bbox) # Basic safety clamping tolerance = lib.helpers.clamp(args.tolerance, 0.0, 1.0) offset = lib.helpers.clamp(args.offset, -TILE_SIZE, TILE_SIZE) feather = lib.helpers.clamp(args.feather, 0, TILE_SIZE) # Initial parameters target_color = get_target_color(src, *starting_coordinates(*args.target_pos)) filler = myplib.Filler(*(target_color + (tolerance,))) seed_lists = seeds_by_tile(args.seeds) fill_args = (handler, src, seed_lists, tiles_bbox, filler) if args.gap_closing_options: fill_args += (args.gap_closing_options,) filled = gap_closing_fill(*fill_args) else: filled = scanline_fill(*fill_args) # Dilate/Erode (Grow/Shrink) if offset != 0 and handler.run: filled = lib.morphology.morph(handler, offset, filled) # Feather (Gaussian blur) if feather != 0 and handler.run: filled = lib.morphology.blur(handler, feather, filled) # When dilating or blurring the fill, only respect the # bounding box limits if they are set by an active frame trim_result = args.framed and (offset > 0 or feather != 0) if handler.run: composite(handler, args, trim_result, filled, tiles_bbox, dst) def update_bbox(bbox, tx, ty): """Update given the min/max, x/y bounding box If a coordinate lies outside of the current bounds, set the bounds based on that coordinate """ if bbox: min_tx, min_ty, max_tx, max_ty = bbox if tx < min_tx: min_tx = tx elif tx > max_tx: max_tx = tx if ty < min_ty: min_ty = ty elif ty > max_ty: max_ty = ty return min_tx, min_ty, max_tx, max_ty else: return tx, ty, tx, ty def composite(handler, fill_args, trim_result, filled, tiles_bbox, dst): """Composite the filled tiles into the destination surface""" handler.set_stage(handler.COMPOSITE, len(filled)) fill_col = fill_args.color # Prepare opaque color rgba tile for copying full_rgba = myplib.rgba_tile_from_alpha_tile( _FULL_TILE, *(fill_col + (0, 0, N - 1, N - 1)) ) # Bounding box of tiles that need updating dst_changed_bbox = None dst_tiles = dst.get_tiles() skip_empty_dst = fill_args.skip_empty_dst() mode = fill_args.mode lock_alpha = fill_args.lock_alpha opacity = fill_args.opacity tile_combine = myplib.tile_combine # Composite filled tiles into the destination surface for tile_coord, src_tile in iteritems(filled): if not handler.run: break handler.inc_processed() # Omit tiles outside of the bounding box _if_ the frame is enabled # Note:filled tiles outside bbox only originates from dilation/blur if trim_result and tiles_bbox.outside(tile_coord): continue # Skip empty destination tiles for erasing and alpha locking # Avoids completely unnecessary tile allocation and copying if skip_empty_dst and tile_coord not in dst_tiles: continue with dst.tile_request(*tile_coord, readonly=False) as dst_tile: # Only at this point might the bounding box need to be updated dst_changed_bbox = update_bbox(dst_changed_bbox, *tile_coord) # Under certain conditions, direct copies and dict manipulation # can be used instead of compositing operations. cut_off = trim_result and tiles_bbox.crossing(tile_coord) full_inner = src_tile is _FULL_TILE and not cut_off if full_inner: if mode == myplib.CombineNormal and opacity == 1.0: myplib.tile_copy_rgba16_into_rgba16(full_rgba, dst_tile) continue elif mode == myplib.CombineDestinationOut and opacity == 1.0: dst_tiles.pop(tile_coord) continue elif mode == myplib.CombineDestinationIn and opacity == 1.0: continue # Even if opacity != 1.0, we can reuse the full rgba tile src_tile_rgba = full_rgba else: if trim_result: tile_bounds = tiles_bbox.tile_bounds(tile_coord) else: tile_bounds = (0, 0, N - 1, N - 1) src_tile_rgba = myplib.rgba_tile_from_alpha_tile( src_tile, *(fill_col + tile_bounds) ) # If alpha locking is enabled in combination with a mode other than # CombineNormal, we need to copy the dst tile to mask the result if lock_alpha and mode != myplib.CombineSourceAtop: mask = np.copy(dst_tile) mask_mode = myplib.CombineDestinationAtop tile_combine(mode, src_tile_rgba, dst_tile, True, opacity) tile_combine(mask_mode, mask, dst_tile, True, 1.0) else: tile_combine(mode, src_tile_rgba, dst_tile, True, opacity) # Handle dst-out and dst-atop: clear untouched tiles if mode in [myplib.CombineDestinationIn, myplib.CombineDestinationAtop]: for tile_coord in list(dst_tiles.keys()): if not handler.run: break if tile_coord not in filled: dst_changed_bbox = update_bbox(dst_changed_bbox, *tile_coord) with dst.tile_request(*tile_coord, readonly=False): dst_tiles.pop(tile_coord) if dst_changed_bbox and handler.run: min_tx, min_ty, max_tx, max_ty = dst_changed_bbox bbox = ( min_tx * N, min_ty * N, (1 + max_tx - min_tx) * N, (1 + max_ty - min_ty) * N, ) # Even for large fills on slow machines, this stage # will almost always be too short to even notice. # It is not cancellable once entered. handler.set_stage(FillHandler.FINISHING) # The observers may directly or indirectly use the # Gtk API, so the call is scheduled on the gui thread. GLib.idle_add(dst.notify_observers, *bbox) def scanline_fill(handler, src, seed_lists, tiles_bbox, filler): """Perform a scanline fill and return the filled tiles Perform a scanline fill using the given starting point and tile, with reference to the src surface and given bounding box, using the provided filler instance. :param handler: updates fill status and permits cancelling :type handler: FillHandler :param src: Source surface-like object :param seed_lists: dictionary, pairing tile coords with lists of seeds :type seed_lists: dict :param tiles_bbox: Bounding box for the fill :type tiles_bbox: lib.fill_common.TileBoundingBox :param filler: filler instance performing the per-tile fill operation :type filler: myplib.Filler :returns: a dictionary of coord->tile mappings for the filled tiles """ # Dict of coord->tile data populated during the fill filled = {} inv_edges = (EDGE.south, EDGE.west, EDGE.north, EDGE.east) # Starting coordinates + direction of origin (from within) tileq = [] for seed_tile_coord, seeds in iteritems(seed_lists): tileq.append((seed_tile_coord, seeds, myplib.edges.none)) tfs = _TileFillSkipper(tiles_bbox, filler, set({})) while len(tileq) > 0 and handler.run: tile_coord, seeds, from_dir = tileq.pop(0) # Skip if the tile has been fully processed already if tile_coord in tfs.final: continue # Flood-fill one tile with src.tile_request(*tile_coord, readonly=True) as src_tile: # See if the tile can be skipped overflows = tfs.check(tile_coord, src_tile, filled, from_dir) if overflows is None: if tile_coord not in filled: handler.inc_processed() filled[tile_coord] = np.zeros((N, N), "uint16") overflows = filler.fill( src_tile, filled[tile_coord], seeds, from_dir, *tiles_bbox.tile_bounds(tile_coord), ) else: handler.inc_processed() enqueue_overflows(tileq, tile_coord, overflows, tiles_bbox, inv_edges) return filled class _TileFillSkipper: """Provides checking for, and handling of, uniform tiles""" FULL_OVERFLOWS = [ ((), [(0, N - 1)], [(0, N - 1)], [(0, N - 1)]), # from north ([(0, N - 1)], (), [(0, N - 1)], [(0, N - 1)]), # from east ([(0, N - 1)], [(0, N - 1)], (), [(0, N - 1)]), # from south ([(0, N - 1)], [(0, N - 1)], [(0, N - 1)], ()), # from west ([(0, N - 1)], [(0, N - 1)], [(0, N - 1)], [(0, N - 1)]), # from within ] def __init__(self, tiles_bbox, filler, final): self.uniform_tiles = {} self.final = final self.tiles_bbox = tiles_bbox self.filler = filler # Dict of alpha->tile, used for uniform non-opaque tile fills # NOTE: these are usually not a result of an intentional fill, but # clicking a pixel with color very similar to the intended target pixel def uniform_tile(self, alpha): """Return a reference to a uniform alpha tile If no uniform tile with the given alpha value exists, one is created """ if alpha not in self.uniform_tiles: self.uniform_tiles[alpha] = fc.new_full_tile(alpha) return self.uniform_tiles[alpha] def check(self, tile_coord, src_tile, filled, from_dir): """Check if the tile can be handled without using the fill loop. The first time the tile is encountered, check if it is uniform and if so, handle it immediately depending on whether it is fillable or not. If the tile can be handled immediately, returns the overflows (new seed ranges), otherwise return None to indicate that the fill algorithm needs to be invoked. """ if tile_coord in filled or self.tiles_bbox.crossing(tile_coord): return None # Returns the alpha of the fill for the tile's color if # the tile is uniform, otherwise returns None is_empty = src_tile is _EMPTY_RGBA alpha = self.filler.tile_uniformity(is_empty, src_tile) if alpha is None: # No shortcut can be taken, create new tile return None # Tile is uniform, so there is no need to process # it again in the fill loop, either set as # a uniformly filled alpha tile or skip it if it # cannot be filled at all (unlikely, but not impossible) self.final.add(tile_coord) if alpha == 0: return [(), (), (), ()] elif alpha == _OPAQUE: filled[tile_coord] = _FULL_TILE else: filled[tile_coord] = self.uniform_tile(alpha) return self.FULL_OVERFLOWS[from_dir] def gap_closing_fill(handler, src, seed_lists, tiles_bbox, filler, gap_closing_options): """Fill loop that finds and uses gap data to avoid unwanted leaks Gaps are defined as distances of fillable pixels enclosed on two sides by unfillable pixels. Each tile considered, and their neighbours, are flooded with alpha values based on the target color and threshold values. The resulting alphas are then searched for gaps, and the size of these gaps are marked in separate tiles - one for each tile filled. """ unseep_queue = [] filled = {} final = set({}) seed_queue = [] for seed_tile_coord, seeds in iteritems(seed_lists): seed_queue.append((seed_tile_coord, seeds)) options = gap_closing_options max_gap_size = lib.helpers.clamp(options.max_gap_size, 1, TILE_SIZE) gc_filler = myplib.GapClosingFiller(max_gap_size, options.retract_seeps) gc_handler = _GCTileHandler(final, max_gap_size, tiles_bbox, filler, src) total_px = 0 skip_unseeping = False while len(seed_queue) > 0 and handler.run: tile_coord, seeds = seed_queue.pop(0) if tile_coord in final: continue # Create distance-data and alpha output tiles for the fill # and check if the tile can be skipped directly alpha_t, dist_t, overflows = gc_handler.get_gc_data(tile_coord, seeds) if overflows: handler.inc_processed() filled[tile_coord] = _FULL_TILE else: # Complement data for initial seeds (if they are initial seeds) seeds, any_not_max = complement_gc_seeds(seeds, dist_t) # If the fill is starting at a point with a detected distance, # disable seep retraction - otherwise it is very likely # that the result will be completely empty. if any_not_max: skip_unseeping = True # Pixel limits within tiles can vary at the bounding box edges px_bounds = tiles_bbox.tile_bounds(tile_coord) # Create new output tile if not already present if tile_coord not in filled: handler.inc_processed() filled[tile_coord] = np.zeros((N, N), "uint16") # Run the gap-closing fill for the tile result = gc_filler.fill( alpha_t, dist_t, filled[tile_coord], seeds, *px_bounds ) overflows = result[0:4] fill_edges, px_f = result[4:6] # The entire tile was filled, despite potential gaps; # replace data w. constant and mark tile as final. if px_f == N * N: final.add(tile_coord) # When seep inversion is enabled, track total pixels filled # and coordinates where the fill stopped due to distance conditions total_px += px_f if not skip_unseeping and fill_edges: unseep_queue.append((tile_coord, fill_edges, True)) # Enqueue overflows, whether skipping or not enqueue_overflows(seed_queue, tile_coord, overflows, tiles_bbox) # If enabled, pull the fill back into the gaps to stop before them if not skip_unseeping and handler.run: unseep( unseep_queue, filled, gc_filler, total_px, tiles_bbox, gc_handler.distances ) return filled class _GCTileHandler(object): """Gap-closing-fill Tile Handler Manages input alpha tiles and distance tiles necessary to perform gap closing fill operations. """ OVERFLOWS = [ [(), (EDGE.west,), (EDGE.north,), (EDGE.east,)], [(EDGE.south,), (), (EDGE.north,), (EDGE.east,)], [(EDGE.south,), (EDGE.west,), (), (EDGE.east,)], [(EDGE.south,), (EDGE.west,), (EDGE.north,), ()], [(EDGE.south,), (EDGE.west,), (EDGE.north,), (EDGE.east,)], ] def __init__(self, final, max_gap_size, tiles_bbox, filler, src): self._src = src self.final = final self.distances = dict() self._alpha_tiles = dict() self._dist_data = None self._bbox = tiles_bbox self._filler = filler self._distbucket = myplib.DistanceBucket(max_gap_size) def get_gc_data(self, tile_coord, seeds): """Get the data necessary to run a gap-closing fill For the given tile coordinate, prepare the data necessary to run a gap-closing fill operation for that tile, namely the corresponding input alpha tile and distance tile. The first time a coordinate is reached, also check if it can be skipped directly, and return the overflows if that is the case. :returns: (alpha_tile, distance_tile, overflows) :rtype: tuple """ if tile_coord not in self.distances: # Ensure that alpha data exists for the tile and its neighbours grid, all_full = self.alpha_grid(tile_coord) # The search is skipped if we have a 9-grid of only full tiles # since there cannot be any gaps in that case. Otherwise, if no # gaps were found during the search, use a constant distance tile if all_full or not self.find_gaps(*grid): self.distances[tile_coord] = _GAPLESS_TILE # Check if fill can be skipped directly can_skip_fill = ( (all_full or grid[0] is _FULL_TILE) and not self._bbox.crossing(tile_coord) and gc_seeds_skippable(seeds) ) if can_skip_fill: self.final.add(tile_coord) if isinstance(seeds, list): overflows = self.OVERFLOWS[EDGE.none] else: overflows = self.OVERFLOWS[seeds[0]] return _FULL_TILE, _GAPLESS_TILE, overflows else: self.distances[tile_coord] = self._dist_data self._dist_data = None # The distance data is already present, meaning the skip checks have # already been tried, no skipping possible. return self._alpha_tiles[tile_coord], self.distances[tile_coord], () def find_gaps(self, *grid): """Search for and mark gaps, given a nine-grid of alpha tiles :param grid: nine-grid of alpha tiles :return: True if any gaps were found, otherwise false :rtype: bool """ if self._dist_data is None: self._dist_data = fc.new_full_tile(INF_DIST) return myplib.find_gaps(self._distbucket, self._dist_data, *grid) def alpha_grid(self, tile_coord): """When needed, create and calculate alpha tiles for distance searching. For the tile of the given coordinate, ensure that a corresponding tile of alpha values (based on the tolerance function) exists in the full_alphas dict for both the tile and all of its neighbors :returns: Tuple with the grid and a boolean value indicating whether every tile in the grid is the constant full alpha tile """ all_full = True alpha_tiles = self._alpha_tiles grid = [] for ntc in fc.nine_grid(tile_coord): if ntc not in alpha_tiles: with self._src.tile_request(ntc[0], ntc[1], readonly=True) as src_tile: is_empty = src_tile is _EMPTY_RGBA alpha = self._filler.tile_uniformity(is_empty, src_tile) if alpha == _OPAQUE: alpha_tiles[ntc] = _FULL_TILE elif alpha == 0: alpha_tiles[ntc] = _EMPTY_TILE elif alpha: alpha_tiles[ntc] = fc.new_full_tile(alpha) else: alpha_tile = np.empty((N, N), "uint16") self._filler.flood(src_tile, alpha_tile) alpha_tiles[ntc] = alpha_tile tile = alpha_tiles[ntc] grid.append(tile) all_full = all_full and tile is _FULL_TILE return grid, all_full def unseep(seed_queue, filled, gc_filler, total_px, tiles_bbox, distances): """Seep inversion is basically a four-way 0-alpha fill with different conditions. It only backs off into the original fill and therefore does not require creation of new tiles or use of an input alpha tile. """ backup = {} while len(seed_queue) > 0: tile_coord, seeds, is_initial = seed_queue.pop(0) if tile_coord not in distances or tile_coord not in filled: continue if tile_coord not in backup: if filled[tile_coord] is _FULL_TILE: backup[tile_coord] = _FULL_TILE filled[tile_coord] = fc.new_full_tile(1 << 15) else: backup[tile_coord] = np.copy(filled[tile_coord]) result = gc_filler.unseep( distances[tile_coord], filled[tile_coord], seeds, is_initial ) overflows = result[0:4] num_erased_pixels = result[4] total_px -= num_erased_pixels enqueue_overflows(seed_queue, tile_coord, overflows, tiles_bbox, (False,) * 4) if total_px <= 0: # For small areas, when starting on a distance-marked pixel, # backing off may remove the entire fill, in which case we # roll back the tiles that were processed for tile_coord, tile in iteritems(backup): filled[tile_coord] = tile def complement_gc_seeds(seeds, distance_tile): """Add distances to initial seeds, check if all seeds lie on detected gaps If the input seeds are not initial seeds, they are returned unchanged. Returns a tuple with complemented seeds and a boolean indicating whether all seeds lie on detected gaps (this check is only done for initial seeds) """ if isinstance(seeds, list) and len(seeds[0]) < 3: # Fetch distance for initial seed coord complemented_seeds = [] any_not_max = False for px, py in seeds: distance = distance_tile[py][px] if distance < INF_DIST: any_not_max = True complemented_seeds.append((px, py, distance)) return complemented_seeds, any_not_max else: return seeds, False def gc_seeds_skippable(seeds): return ( isinstance(seeds, tuple) # edge constant - a full edge of seeds or len(seeds[0]) == 2 # initial seeds or any([s[2] == INF_DIST for s in seeds]) # one seed can fill everything )
extractor
markiza
# coding: utf-8 from __future__ import unicode_literals import re from ..compat import compat_str from ..utils import orderedSet, parse_duration, try_get from .common import InfoExtractor class MarkizaIE(InfoExtractor): _VALID_URL = r"https?://(?:www\.)?videoarchiv\.markiza\.sk/(?:video/(?:[^/]+/)*|embed/)(?P<id>\d+)(?:[_/]|$)" _TESTS = [ { "url": "http://videoarchiv.markiza.sk/video/oteckovia/84723_oteckovia-109", "md5": "ada4e9fad038abeed971843aa028c7b0", "info_dict": { "id": "139078", "ext": "mp4", "title": "Oteckovia 109", "description": "md5:d41d8cd98f00b204e9800998ecf8427e", "thumbnail": r"re:^https?://.*\.jpg$", "duration": 2760, }, }, { "url": "http://videoarchiv.markiza.sk/video/televizne-noviny/televizne-noviny/85430_televizne-noviny", "info_dict": { "id": "85430", "title": "Televízne noviny", }, "playlist_count": 23, }, { "url": "http://videoarchiv.markiza.sk/video/oteckovia/84723", "only_matching": True, }, { "url": "http://videoarchiv.markiza.sk/video/84723", "only_matching": True, }, { "url": "http://videoarchiv.markiza.sk/video/filmy/85190_kamenak", "only_matching": True, }, { "url": "http://videoarchiv.markiza.sk/video/reflex/zo-zakulisia/84651_pribeh-alzbetky", "only_matching": True, }, { "url": "http://videoarchiv.markiza.sk/embed/85295", "only_matching": True, }, ] def _real_extract(self, url): video_id = self._match_id(url) data = self._download_json( "http://videoarchiv.markiza.sk/json/video_jwplayer7.json", video_id, query={"id": video_id}, ) info = self._parse_jwplayer_data(data, m3u8_id="hls", mpd_id="dash") if info.get("_type") == "playlist": info.update( { "id": video_id, "title": try_get(data, lambda x: x["details"]["name"], compat_str), } ) else: info["duration"] = parse_duration( try_get(data, lambda x: x["details"]["duration"], compat_str) ) return info class MarkizaPageIE(InfoExtractor): _VALID_URL = r"https?://(?:www\.)?(?:(?:[^/]+\.)?markiza|tvnoviny)\.sk/(?:[^/]+/)*(?P<id>\d+)_" _TESTS = [ { "url": "http://www.markiza.sk/soubiz/zahranicny/1923705_oteckovia-maju-svoj-den-ti-slavni-nie-su-o-nic-menej-rozkosni", "md5": "ada4e9fad038abeed971843aa028c7b0", "info_dict": { "id": "139355", "ext": "mp4", "title": "Oteckovia 110", "description": "md5:d41d8cd98f00b204e9800998ecf8427e", "thumbnail": r"re:^https?://.*\.jpg$", "duration": 2604, }, "params": { "skip_download": True, }, }, { "url": "http://dajto.markiza.sk/filmy-a-serialy/1774695_frajeri-vo-vegas", "only_matching": True, }, { "url": "http://superstar.markiza.sk/aktualne/1923870_to-je-ale-telo-spevacka-ukazala-sexy-postavicku-v-bikinach", "only_matching": True, }, { "url": "http://hybsa.markiza.sk/aktualne/1923790_uzasna-atmosfera-na-hybsa-v-poprade-superstaristi-si-prve-koncerty-pred-davom-ludi-poriadne-uzili", "only_matching": True, }, { "url": "http://doma.markiza.sk/filmy/1885250_moja-vysnivana-svadba", "only_matching": True, }, { "url": "http://www.tvnoviny.sk/domace/1923887_po-smrti-manzela-ju-cakalo-poriadne-prekvapenie", "only_matching": True, }, ] @classmethod def suitable(cls, url): return ( False if MarkizaIE.suitable(url) else super(MarkizaPageIE, cls).suitable(url) ) def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage( # Downloading for some hosts (e.g. dajto, doma) fails with 500 # although everything seems to be OK, so considering 500 # status code to be expected. url, playlist_id, expected_status=500, ) entries = [ self.url_result("http://videoarchiv.markiza.sk/video/%s" % video_id) for video_id in orderedSet( re.findall( r'(?:initPlayer_|data-entity=["\']|id=["\']player_)(\d+)', webpage ) ) ] return self.playlist_result(entries, playlist_id)
mixins
simplify
from typing import TYPE_CHECKING, Any, Dict, List, Literal, TypeVar, cast from posthog.constants import PropertyOperatorType from posthog.models.property import GroupTypeIndex, PropertyGroup if TYPE_CHECKING: # Avoid circular import from posthog.models import Property, Team T = TypeVar("T") class SimplifyFilterMixin: # :KLUDGE: A lot of this logic ignores typing since generics w/ mixins are hard to get working properly def simplify(self: T, team: "Team", **kwargs) -> T: """ Expands this filter to not refer to external resources of the team. Actions taken: - if filter.filter_test_accounts, adds property filters to `filter.properties` - if aggregating by groups, adds property filter to remove blank groups - for cohort properties, replaces them with more concrete lookups or with cohort conditions """ if self._data.get("is_simplified"): # type: ignore return self # :TRICKY: Make a copy to avoid caching issues result: Any = self.shallow_clone({"is_simplified": True}) # type: ignore if getattr(result, "filter_test_accounts", False): new_group = {"type": "AND", "values": team.test_account_filters} prop_group = ( {"type": "AND", "values": [new_group, result.property_groups.to_dict()]} if result.property_groups.to_dict() else new_group ) result = result.shallow_clone( {"properties": prop_group, "filter_test_accounts": False} ) updated_entities = {} if hasattr(result, "entities_to_dict"): for entity_type, entities in result.entities_to_dict().items(): updated_entities[entity_type] = [ self._simplify_entity(team, entity_type, entity, **kwargs) for entity in entities ] # type: ignore from posthog.models.property.util import clear_excess_levels prop_group = clear_excess_levels( self._simplify_property_group(team, result.property_groups, **kwargs), skip=True, ) # type: ignore prop_group = prop_group.to_dict() # type: ignore new_group_props = [] if getattr(result, "aggregation_group_type_index", None) is not None: new_group_props.append( self._group_set_property( cast(int, result.aggregation_group_type_index) ).to_dict() ) # type: ignore if new_group_props: new_group = {"type": "AND", "values": new_group_props} prop_group = ( {"type": "AND", "values": [new_group, prop_group]} if prop_group else new_group ) return result.shallow_clone({**updated_entities, "properties": prop_group}) def _simplify_entity( self, team: "Team", entity_type: Literal["events", "actions", "exclusions"], entity_params: Dict, **kwargs, ) -> Dict: from posthog.models.entity import Entity, ExclusionEntity EntityClass = ExclusionEntity if entity_type == "exclusions" else Entity entity = EntityClass(entity_params) # TODO: when we support AND-ORs in entities, unflatten them here properties = self._simplify_properties( team, entity.property_groups.flat, **kwargs ).flat if entity.math == "unique_group": properties.append( self._group_set_property( cast(GroupTypeIndex, entity.math_group_type_index) ) ) return EntityClass({**entity_params, "properties": properties}).to_dict() def _simplify_properties( self, team: "Team", properties: List["Property"], **kwargs ) -> "PropertyGroup": simplified_properties_values = [] for prop in properties: simplified_properties_values.append( self._simplify_property(team, prop, **kwargs) ) return PropertyGroup( type=PropertyOperatorType.AND, values=simplified_properties_values ) def _simplify_property_group( self, team: "Team", prop_group: "PropertyGroup", **kwargs ) -> "PropertyGroup": from posthog.models.property import Property, PropertyGroup new_groups = [] for group in prop_group.values: if isinstance(group, PropertyGroup): new_groups.append(self._simplify_property_group(team, group)) elif isinstance(group, Property): new_groups.append(self._simplify_property(team, group)) prop_group.values = new_groups return prop_group def _simplify_property( self, team: "Team", property: "Property", **kwargs ) -> "PropertyGroup": if property.type == "cohort": from posthog.models import Cohort from posthog.models.cohort.util import simplified_cohort_filter_properties try: cohort = Cohort.objects.get(pk=property.value, team_id=team.pk) except Cohort.DoesNotExist: # :TODO: Handle non-existing resource in-query instead return PropertyGroup(type=PropertyOperatorType.AND, values=[property]) return simplified_cohort_filter_properties(cohort, team, property.negation) # PropertyOperatorType doesn't really matter here, since only one value. return PropertyGroup(type=PropertyOperatorType.AND, values=[property]) def _group_set_property(self, group_type_index: GroupTypeIndex) -> "Property": from posthog.models.property import Property return Property(key=f"$group_{group_type_index}", value="", operator="is_not") @property def is_simplified(self) -> bool: return self._data.get("is_simplified", False) # type: ignore
gtk3
menubar
# # Copyright (C) 2007, 2008 Andrew Resch <andrewresch@gmail.com> # Copyright (C) 2011 Pedro Algarvio <pedro@algarvio.me> # # This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with # the additional special exception to link portions of this program with the OpenSSL library. # See LICENSE for more details. # import logging import os.path import deluge.common import deluge.component as component from deluge.configmanager import ConfigManager from deluge.ui.client import client from gi.repository import Gtk from .dialogs import CopyMagnetDialog, ErrorDialog, OtherDialog from .path_chooser import PathChooser log = logging.getLogger(__name__) default_main_window_accelmap = { "<Deluge-MainWindow>/File/Add Torrent": "<Primary>o", "<Deluge-MainWindow>/File/Create Torrent": "<Primary>n", "<Deluge-MainWindow>/File/Quit & Shutdown Daemon": "<Primary><Shift>q", "<Deluge-MainWindow>/File/Quit": "<Primary>q", "<Deluge-MainWindow>/Edit/Preferences": "<Primary>p", "<Deluge-MainWindow>/Edit/Connection Manager": "<Primary>m", "<Deluge-MainWindow>/View/Find ...": "<Primary>f", "<Deluge-MainWindow>/Help/FAQ": "F1", } class MenuBar(component.Component): def __init__(self): log.debug("MenuBar init..") component.Component.__init__(self, "MenuBar") self.mainwindow = component.get("MainWindow") self.main_builder = self.mainwindow.get_builder() self.config = ConfigManager("gtk3ui.conf") self._magnet_copied = False self.builder = Gtk.Builder() # Get the torrent menu from the gtk builder file self.builder.add_from_file( deluge.common.resource_filename( __package__, os.path.join("glade", "torrent_menu.ui") ) ) # Get the torrent options menu from the gtk builder file self.builder.add_from_file( deluge.common.resource_filename( __package__, os.path.join("glade", "torrent_menu.options.ui") ) ) # Get the torrent queue menu from the gtk builder file self.builder.add_from_file( deluge.common.resource_filename( __package__, os.path.join("glade", "torrent_menu.queue.ui") ) ) # Attach queue torrent menu torrent_queue_menu = self.builder.get_object("queue_torrent_menu") self.builder.get_object("menuitem_queue").set_submenu(torrent_queue_menu) # Attach options torrent menu torrent_options_menu = self.builder.get_object("options_torrent_menu") self.builder.get_object("menuitem_options").set_submenu(torrent_options_menu) self.builder.get_object("download-limit-image").set_from_file( deluge.common.get_pixmap("downloading16.png") ) self.builder.get_object("upload-limit-image").set_from_file( deluge.common.get_pixmap("seeding16.png") ) for menuitem in ( "menuitem_down_speed", "menuitem_up_speed", "menuitem_max_connections", "menuitem_upload_slots", ): submenu = Gtk.Menu() item = Gtk.MenuItem.new_with_label(_("Set Unlimited")) item.set_name(menuitem) item.connect("activate", self.on_menuitem_set_unlimited) submenu.append(item) item = Gtk.MenuItem.new_with_label(_("Other...")) item.set_name(menuitem) item.connect("activate", self.on_menuitem_set_other) submenu.append(item) submenu.show_all() self.builder.get_object(menuitem).set_submenu(submenu) submenu = Gtk.Menu() item = Gtk.MenuItem.new_with_label(_("On")) item.connect("activate", self.on_menuitem_set_automanaged_on) submenu.append(item) item = Gtk.MenuItem.new_with_label(_("Off")) item.connect("activate", self.on_menuitem_set_automanaged_off) submenu.append(item) submenu.show_all() self.builder.get_object("menuitem_auto_managed").set_submenu(submenu) submenu = Gtk.Menu() item = Gtk.MenuItem.new_with_label(_("Disable")) item.connect("activate", self.on_menuitem_set_stop_seed_at_ratio_disable) submenu.append(item) item = Gtk.MenuItem.new_with_label(_("Enable...")) item.set_name("menuitem_stop_seed_at_ratio") item.connect("activate", self.on_menuitem_set_other) submenu.append(item) submenu.show_all() self.builder.get_object("menuitem_stop_seed_at_ratio").set_submenu(submenu) self.torrentmenu = self.builder.get_object("torrent_menu") self.menu_torrent = self.main_builder.get_object("menu_torrent") # Attach the torrent_menu to the Torrent file menu self.menu_torrent.set_submenu(self.torrentmenu) # Set keyboard shortcuts for accel_path, accelerator in default_main_window_accelmap.items(): accel_key, accel_mods = Gtk.accelerator_parse(accelerator) Gtk.AccelMap.change_entry(accel_path, accel_key, accel_mods, True) # Make sure the view menuitems are showing the correct active state self.main_builder.get_object("menuitem_toolbar").set_active( self.config["show_toolbar"] ) self.main_builder.get_object("menuitem_sidebar").set_active( self.config["show_sidebar"] ) self.main_builder.get_object("menuitem_statusbar").set_active( self.config["show_statusbar"] ) self.main_builder.get_object("sidebar_show_zero").set_active( self.config["sidebar_show_zero"] ) self.main_builder.get_object("sidebar_show_trackers").set_active( self.config["sidebar_show_trackers"] ) self.main_builder.get_object("sidebar_show_owners").set_active( self.config["sidebar_show_owners"] ) # Connect main window Signals # self.mainwindow.connect_signals(self) # Connect menubar signals self.builder.connect_signals(self) self.change_sensitivity = ["menuitem_addtorrent"] def magnet_copied(self): """ lets the caller know whether a magnet was copied internally the `mainwindow` checks every time the data in the clipboard, so it will automatically open the AddTorrentURL dialog in case it contains a valid link (URL to a torrent or a magnet URI). """ val = self._magnet_copied self._magnet_copied = False return val def start(self): for widget in self.change_sensitivity: self.main_builder.get_object(widget).set_sensitive(True) # Only show open_folder menuitem and separator if connected to a localhost daemon. localhost_items = ["menuitem_open_folder", "separator4"] if client.is_localhost(): for widget in localhost_items: self.builder.get_object(widget).show() self.builder.get_object(widget).set_no_show_all(False) else: for widget in localhost_items: self.builder.get_object(widget).hide() self.builder.get_object(widget).set_no_show_all(True) self.main_builder.get_object("separatormenuitem").set_visible( not self.config["standalone"] ) self.main_builder.get_object("menuitem_quitdaemon").set_visible( not self.config["standalone"] ) self.main_builder.get_object("menuitem_connectionmanager").set_visible( not self.config["standalone"] ) # Show the Torrent menu because we're connected to a host self.menu_torrent.show() if client.get_auth_level() == deluge.common.AUTH_LEVEL_ADMIN: # Get known accounts to allow changing ownership client.core.get_known_accounts().addCallback( self._on_known_accounts ).addErrback(self._on_known_accounts_fail) client.register_event_handler( "TorrentStateChangedEvent", self.on_torrentstatechanged_event ) client.register_event_handler( "TorrentResumedEvent", self.on_torrentresumed_event ) client.register_event_handler("SessionPausedEvent", self.on_sessionpaused_event) client.register_event_handler( "SessionResumedEvent", self.on_sessionresumed_event ) def stop(self): log.debug("MenuBar stopping") client.deregister_event_handler( "TorrentStateChangedEvent", self.on_torrentstatechanged_event ) client.deregister_event_handler( "TorrentResumedEvent", self.on_torrentresumed_event ) client.deregister_event_handler( "SessionPausedEvent", self.on_sessionpaused_event ) client.deregister_event_handler( "SessionResumedEvent", self.on_sessionresumed_event ) for widget in self.change_sensitivity: self.main_builder.get_object(widget).set_sensitive(False) # Hide the Torrent menu self.menu_torrent.hide() self.main_builder.get_object("separatormenuitem").hide() self.main_builder.get_object("menuitem_quitdaemon").hide() def update_menu(self): selected = component.get("TorrentView").get_selected_torrents() if not selected or len(selected) == 0: # No torrent is selected. Disable the 'Torrents' menu self.menu_torrent.set_sensitive(False) return self.menu_torrent.set_sensitive(True) # XXX: Should also update Pause/Resume/Remove menuitems. # Any better way than duplicating toolbar.py:update_buttons in here? def add_torrentmenu_separator(self): sep = Gtk.SeparatorMenuItem() self.torrentmenu.append(sep) sep.show() return sep # Callbacks # def on_torrentstatechanged_event(self, torrent_id, state): if state == "Paused": self.update_menu() def on_torrentresumed_event(self, torrent_id): self.update_menu() def on_sessionpaused_event(self): self.update_menu() def on_sessionresumed_event(self): self.update_menu() # File Menu # def on_menuitem_addtorrent_activate(self, data=None): log.debug("on_menuitem_addtorrent_activate") component.get("AddTorrentDialog").show() def on_menuitem_createtorrent_activate(self, data=None): log.debug("on_menuitem_createtorrent_activate") from .createtorrentdialog import CreateTorrentDialog CreateTorrentDialog().show() def on_menuitem_quitdaemon_activate(self, data=None): log.debug("on_menuitem_quitdaemon_activate") self.mainwindow.quit(shutdown=True) def on_menuitem_quit_activate(self, data=None): log.debug("on_menuitem_quit_activate") self.mainwindow.quit() # Edit Menu # def on_menuitem_preferences_activate(self, data=None): log.debug("on_menuitem_preferences_activate") component.get("Preferences").show() def on_menuitem_connectionmanager_activate(self, data=None): log.debug("on_menuitem_connectionmanager_activate") component.get("ConnectionManager").show() # Torrent Menu # def on_menuitem_pause_activate(self, data=None): log.debug("on_menuitem_pause_activate") client.core.pause_torrents(component.get("TorrentView").get_selected_torrents()) def on_menuitem_resume_activate(self, data=None): log.debug("on_menuitem_resume_activate") client.core.resume_torrents( component.get("TorrentView").get_selected_torrents() ) def on_menuitem_copymagnet_activate(self, data=None): log.debug("on_menuitem_copymagnet_activate") torrent_ids = component.get("TorrentView").get_selected_torrents() if torrent_ids: def _on_magnet_uri(magnet_uri): def update_copied(response_id): if dialog.copied: self._magnet_copied = True dialog = CopyMagnetDialog(magnet_uri) dialog.run().addCallback(update_copied) client.core.get_magnet_uri(torrent_ids[0]).addCallback(_on_magnet_uri) def on_menuitem_updatetracker_activate(self, data=None): log.debug("on_menuitem_updatetracker_activate") client.core.force_reannounce( component.get("TorrentView").get_selected_torrents() ) def on_menuitem_edittrackers_activate(self, data=None): log.debug("on_menuitem_edittrackers_activate") from .edittrackersdialog import EditTrackersDialog dialog = EditTrackersDialog( component.get("TorrentView").get_selected_torrent(), self.mainwindow.window ) dialog.run() def on_menuitem_remove_activate(self, data=None): log.debug("on_menuitem_remove_activate") torrent_ids = component.get("TorrentView").get_selected_torrents() if torrent_ids: from .removetorrentdialog import RemoveTorrentDialog RemoveTorrentDialog(torrent_ids).run() def on_menuitem_recheck_activate(self, data=None): log.debug("on_menuitem_recheck_activate") client.core.force_recheck(component.get("TorrentView").get_selected_torrents()) def on_menuitem_open_folder_activate(self, data=None): log.debug("on_menuitem_open_folder") def _on_torrent_status(status): timestamp = component.get("MainWindow").get_timestamp() path = os.path.join( status["download_location"], status["files"][0]["path"].split("/")[0] ) deluge.common.show_file(path, timestamp=timestamp) for torrent_id in component.get("TorrentView").get_selected_torrents(): component.get("SessionProxy").get_torrent_status( torrent_id, ["download_location", "files"] ).addCallback(_on_torrent_status) def on_menuitem_move_activate(self, data=None): log.debug("on_menuitem_move_activate") component.get("SessionProxy").get_torrent_status( component.get("TorrentView").get_selected_torrent(), ["download_location"] ).addCallback(self.show_move_storage_dialog) def show_move_storage_dialog(self, status): log.debug("show_move_storage_dialog") builder = Gtk.Builder() builder.add_from_file( deluge.common.resource_filename( __package__, os.path.join("glade", "move_storage_dialog.ui") ) ) # Keep it referenced: # https://bugzilla.gnome.org/show_bug.cgi?id=546802 self.move_storage_dialog = builder.get_object("move_storage_dialog") self.move_storage_dialog.set_transient_for(self.mainwindow.window) self.move_storage_dialog_hbox = builder.get_object("hbox_entry") self.move_storage_path_chooser = PathChooser( "move_completed_paths_list", self.move_storage_dialog ) self.move_storage_dialog_hbox.add(self.move_storage_path_chooser) self.move_storage_dialog_hbox.show_all() self.move_storage_path_chooser.set_text(status["download_location"]) def on_dialog_response_event(widget, response_id): def on_core_result(result): # Delete references self.move_storage_dialog.hide() del self.move_storage_dialog del self.move_storage_dialog_hbox if response_id == Gtk.ResponseType.CANCEL: on_core_result(None) if response_id == Gtk.ResponseType.OK: log.debug( "Moving torrents to %s", self.move_storage_path_chooser.get_text() ) path = self.move_storage_path_chooser.get_text() client.core.move_storage( component.get("TorrentView").get_selected_torrents(), path ).addCallback(on_core_result) self.move_storage_dialog.connect("response", on_dialog_response_event) self.move_storage_dialog.show() def on_menuitem_queue_top_activate(self, value): log.debug("on_menuitem_queue_top_activate") client.core.queue_top(component.get("TorrentView").get_selected_torrents()) def on_menuitem_queue_up_activate(self, value): log.debug("on_menuitem_queue_up_activate") client.core.queue_up(component.get("TorrentView").get_selected_torrents()) def on_menuitem_queue_down_activate(self, value): log.debug("on_menuitem_queue_down_activate") client.core.queue_down(component.get("TorrentView").get_selected_torrents()) def on_menuitem_queue_bottom_activate(self, value): log.debug("on_menuitem_queue_bottom_activate") client.core.queue_bottom(component.get("TorrentView").get_selected_torrents()) # View Menu # def on_menuitem_toolbar_toggled(self, value): log.debug("on_menuitem_toolbar_toggled") component.get("ToolBar").visible(value.get_active()) def on_menuitem_sidebar_toggled(self, value): log.debug("on_menuitem_sidebar_toggled") component.get("SideBar").visible(value.get_active()) def on_menuitem_statusbar_toggled(self, value): log.debug("on_menuitem_statusbar_toggled") component.get("StatusBar").visible(value.get_active()) # Help Menu # def on_menuitem_homepage_activate(self, data=None): log.debug("on_menuitem_homepage_activate") deluge.common.open_url_in_browser("http://deluge-torrent.org") def on_menuitem_faq_activate(self, data=None): log.debug("on_menuitem_faq_activate") deluge.common.open_url_in_browser("http://dev.deluge-torrent.org/wiki/Faq") def on_menuitem_community_activate(self, data=None): log.debug("on_menuitem_community_activate") deluge.common.open_url_in_browser("http://forum.deluge-torrent.org/") def on_menuitem_about_activate(self, data=None): log.debug("on_menuitem_about_activate") from .aboutdialog import AboutDialog AboutDialog().run() def on_menuitem_set_unlimited(self, widget): log.debug("widget name: %s", widget.get_name()) funcs = { "menuitem_down_speed": "max_download_speed", "menuitem_up_speed": "max_upload_speed", "menuitem_max_connections": "max_connections", "menuitem_upload_slots": "max_upload_slots", } if widget.get_name() in funcs: torrent_ids = component.get("TorrentView").get_selected_torrents() client.core.set_torrent_options(torrent_ids, {funcs[widget.get_name()]: -1}) def on_menuitem_set_other(self, widget): log.debug("widget name: %s", widget.get_name()) status_map = { "menuitem_down_speed": ["max_download_speed", "max_download_speed"], "menuitem_up_speed": ["max_upload_speed", "max_upload_speed"], "menuitem_max_connections": ["max_connections", "max_connections_global"], "menuitem_upload_slots": ["max_upload_slots", "max_upload_slots_global"], "menuitem_stop_seed_at_ratio": ["stop_ratio", "stop_seed_ratio"], } other_dialog_info = { "menuitem_down_speed": [ _("Download Speed Limit"), _("Set the maximum download speed"), _("KiB/s"), "downloading.svg", ], "menuitem_up_speed": [ _("Upload Speed Limit"), _("Set the maximum upload speed"), _("KiB/s"), "seeding.svg", ], "menuitem_max_connections": [ _("Incoming Connections"), _("Set the maximum incoming connections"), "", "network-transmit-receive-symbolic", ], "menuitem_upload_slots": [ _("Peer Upload Slots"), _("Set the maximum upload slots"), "", "view-sort-descending-symbolic", ], "menuitem_stop_seed_at_ratio": [ _("Stop Seed At Ratio"), "Stop torrent seeding at ratio", "", None, ], } core_key = status_map[widget.get_name()][0] core_key_global = status_map[widget.get_name()][1] def _on_torrent_status(status): other_dialog = other_dialog_info[widget.get_name()] # Add the default using status value if status: other_dialog.append(status[core_key_global]) def set_value(value): if value is not None: if value == 0: value += -1 options = {core_key: value} if core_key == "stop_ratio": options["stop_at_ratio"] = True client.core.set_torrent_options(torrent_ids, options) dialog = OtherDialog(*other_dialog) dialog.run().addCallback(set_value) torrent_ids = component.get("TorrentView").get_selected_torrents() if len(torrent_ids) == 1: core_key_global = core_key d = component.get("SessionProxy").get_torrent_status( torrent_ids[0], [core_key] ) else: d = client.core.get_config_values([core_key_global]) d.addCallback(_on_torrent_status) def on_menuitem_set_automanaged_on(self, widget): client.core.set_torrent_options( component.get("TorrentView").get_selected_torrents(), {"auto_managed": True} ) def on_menuitem_set_automanaged_off(self, widget): client.core.set_torrent_options( component.get("TorrentView").get_selected_torrents(), {"auto_managed": False}, ) def on_menuitem_set_stop_seed_at_ratio_disable(self, widget): client.core.set_torrent_options( component.get("TorrentView").get_selected_torrents(), {"stop_at_ratio": False}, ) def on_menuitem_sidebar_zero_toggled(self, widget): self.config["sidebar_show_zero"] = widget.get_active() component.get("FilterTreeView").update() def on_menuitem_sidebar_trackers_toggled(self, widget): self.config["sidebar_show_trackers"] = widget.get_active() component.get("FilterTreeView").update() def on_menuitem_sidebar_owners_toggled(self, widget): self.config["sidebar_show_owners"] = widget.get_active() component.get("FilterTreeView").update() def _on_known_accounts(self, known_accounts): known_accounts_to_log = [] for account in known_accounts: account_to_log = {} for key, value in account.copy().items(): if key == "password": value = "*" * 10 account_to_log[key] = value known_accounts_to_log.append(account_to_log) log.debug("_on_known_accounts: %s", known_accounts_to_log) if len(known_accounts) <= 1: return self.builder.get_object("menuitem_change_owner").set_visible(True) self.change_owner_submenu = Gtk.Menu() self.change_owner_submenu_items = {} maingroup = Gtk.RadioMenuItem() self.change_owner_submenu_items[None] = Gtk.RadioMenuItem(maingroup) for account in known_accounts: username = account["username"] item = Gtk.RadioMenuItem.new_with_label(maingroup, username) self.change_owner_submenu_items[username] = item self.change_owner_submenu.append(item) item.connect("toggled", self._on_change_owner_toggled, username) self.change_owner_submenu.show_all() self.change_owner_submenu_items[None].set_active(True) self.change_owner_submenu_items[None].hide() self.builder.get_object("menuitem_change_owner").connect( "activate", self._on_change_owner_submenu_active ) self.builder.get_object("menuitem_change_owner").set_submenu( self.change_owner_submenu ) def _on_known_accounts_fail(self, reason): self.builder.get_object("menuitem_change_owner").set_visible(False) def _on_change_owner_submenu_active(self, widget): log.debug("_on_change_owner_submenu_active") selected = component.get("TorrentView").get_selected_torrents() if len(selected) > 1: self.change_owner_submenu_items[None].set_active(True) return torrent_owner = component.get("TorrentView").get_torrent_status(selected[0])[ "owner" ] for username, item in self.change_owner_submenu_items.items(): item.set_active(username == torrent_owner) def _on_change_owner_toggled(self, widget, username): log.debug("_on_change_owner_toggled") update_torrents = [] selected = component.get("TorrentView").get_selected_torrents() for torrent_id in selected: torrent_status = component.get("TorrentView").get_torrent_status(torrent_id) if torrent_status["owner"] != username: update_torrents.append(torrent_id) if update_torrents: log.debug('Setting torrent owner "%s" on %s', username, update_torrents) def failed_change_owner(failure): ErrorDialog( _("Ownership Change Error"), _("There was an error while trying changing ownership."), self.mainwindow.window, details=failure.value.logable(), ).run() client.core.set_torrent_options( update_torrents, {"owner": username} ).addErrback(failed_change_owner)
pytz
tzfile
#!/usr/bin/env python """ $Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $ """ try: from cStringIO import StringIO except ImportError: from io import StringIO from datetime import datetime, timedelta from struct import calcsize, unpack from pytz.tzinfo import ( DstTzInfo, StaticTzInfo, memorized_datetime, memorized_timedelta, memorized_ttinfo, ) def _byte_string(s): """Cast a string or byte string to an ASCII byte string.""" return s.encode("US-ASCII") _NULL = _byte_string("\0") def _std_string(s): """Cast a string or byte string to an ASCII string.""" return str(s.decode("US-ASCII")) def build_tzinfo(zone, fp): head_fmt = ">4s c 15x 6l" head_size = calcsize(head_fmt) ( magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt, ) = unpack(head_fmt, fp.read(head_size)) # Make sure it is a tzfile(5) file assert magic == _byte_string("TZif"), "Got magic %s" % repr(magic) # Read out the transition times, localtime indices and ttinfo structures. data_fmt = ">%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds" % dict( timecnt=timecnt, ttinfo="lBB" * typecnt, charcnt=charcnt ) data_size = calcsize(data_fmt) data = unpack(data_fmt, fp.read(data_size)) # make sure we unpacked the right number of values assert len(data) == 2 * timecnt + 3 * typecnt + 1 transitions = [memorized_datetime(trans) for trans in data[:timecnt]] lindexes = list(data[timecnt : 2 * timecnt]) ttinfo_raw = data[2 * timecnt : -1] tznames_raw = data[-1] del data # Process ttinfo into separate structs ttinfo = [] tznames = {} i = 0 while i < len(ttinfo_raw): # have we looked up this timezone name yet? tzname_offset = ttinfo_raw[i + 2] if tzname_offset not in tznames: nul = tznames_raw.find(_NULL, tzname_offset) if nul < 0: nul = len(tznames_raw) tznames[tzname_offset] = _std_string(tznames_raw[tzname_offset:nul]) ttinfo.append((ttinfo_raw[i], bool(ttinfo_raw[i + 1]), tznames[tzname_offset])) i += 3 # Now build the timezone object if len(transitions) == 0: ttinfo[0][0], ttinfo[0][2] cls = type( zone, (StaticTzInfo,), dict( zone=zone, _utcoffset=memorized_timedelta(ttinfo[0][0]), _tzname=ttinfo[0][2], ), ) else: # Early dates use the first standard time ttinfo i = 0 while ttinfo[i][1]: i += 1 if ttinfo[i] == ttinfo[lindexes[0]]: transitions[0] = datetime.min else: transitions.insert(0, datetime.min) lindexes.insert(0, i) # calculate transition info transition_info = [] for i in range(len(transitions)): inf = ttinfo[lindexes[i]] utcoffset = inf[0] if not inf[1]: dst = 0 else: for j in range(i - 1, -1, -1): prev_inf = ttinfo[lindexes[j]] if not prev_inf[1]: break dst = inf[0] - prev_inf[0] # dst offset # Bad dst? Look further. DST > 24 hours happens when # a timzone has moved across the international dateline. if dst <= 0 or dst > 3600 * 3: for j in range(i + 1, len(transitions)): stdinf = ttinfo[lindexes[j]] if not stdinf[1]: dst = inf[0] - stdinf[0] if dst > 0: break # Found a useful std time. tzname = inf[2] # Round utcoffset and dst to the nearest minute or the # datetime library will complain. Conversions to these timezones # might be up to plus or minus 30 seconds out, but it is # the best we can do. utcoffset = int((utcoffset + 30) // 60) * 60 dst = int((dst + 30) // 60) * 60 transition_info.append(memorized_ttinfo(utcoffset, dst, tzname)) cls = type( zone, (DstTzInfo,), dict( zone=zone, _utc_transition_times=transitions, _transition_info=transition_info, ), ) return cls() if __name__ == "__main__": import os.path from pprint import pprint base = os.path.join(os.path.dirname(__file__), "zoneinfo") tz = build_tzinfo( "Australia/Melbourne", open(os.path.join(base, "Australia", "Melbourne"), "rb") ) tz = build_tzinfo("US/Eastern", open(os.path.join(base, "US", "Eastern"), "rb")) pprint(tz._utc_transition_times) # print tz.asPython(4) # print tz.transitions_mapping
negotiate
server_raw
# neubot/negotiate/server_raw.py # # Copyright (c) 2011-2012 # Nexa Center for Internet & Society, Politecnico di Torino (DAUIN) # and Simone Basso <bassosimone@gmail.com> # # This file is part of Neubot <http://www.neubot.org/>. # # Neubot is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Neubot is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Neubot. If not, see <http://www.gnu.org/licenses/>. # """ Server-side raw-test negotiate and collect. """ # Adapted from neubot/negotiate/server_bittorrent.py # To be renamed neubot/raw_negotiate_srvr.py import hashlib import logging from neubot.backend import BACKEND from neubot.negotiate.server import NegotiateServerModule class NegotiateServerRaw(NegotiateServerModule): """Negotiator for RAW test""" def __init__(self): NegotiateServerModule.__init__(self) self.peers = {} @staticmethod def _stream_to_sha512(stream): """Stream to SHA512 identifier""" sha512 = hashlib.new("sha512") sha512.update(str(hash(stream))) return sha512.digest() def unchoke(self, stream, request_body): """Invoked when we must unchoke a session""" sha512 = self._stream_to_sha512(stream) if sha512 not in self.peers: # Create record for this stream self.peers[sha512] = {} logging.debug("negotiate_server_raw: add sha512: %s", sha512.encode("hex")) stream.atclose(self._update_peers) return {"authorization": sha512.encode("hex"), "port": 12345} else: raise RuntimeError("negotiate_server_raw: multiple unchoke") def collect(self, stream, request_body): """Invoked when we must save the result of a session""" sha512 = self._stream_to_sha512(stream) if sha512 not in self.peers: raise RuntimeError("negotiate_server_raw: not authorized") else: result = self.peers[sha512] # Note: no more than one collect per session del self.peers[sha512] logging.debug( "negotiate_server_raw: del sha512 OK: %s", sha512.encode("hex") ) complete_result = {"client": request_body, "server": result} BACKEND.store_raw(complete_result) return result def _update_peers(self, stream, ignored): """Invoked when a session has been closed""" # Note: if collect is successful self.peers[sha512] doesn't exist sha512 = self._stream_to_sha512(stream) if sha512 in self.peers: logging.warning( "negotiate_server_raw: del sha512 unexpected: %s", sha512.encode("hex") ) del self.peers[sha512] NEGOTIATE_SERVER_RAW = NegotiateServerRaw()
deluge-execute
gtkui
# # Copyright (C) 2009 Damien Churchill <damoxc@gmail.com> # # This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with # the additional special exception to link portions of this program with the OpenSSL library. # See LICENSE for more details. # import logging import gi # isort:skip (Required before Gtk import). gi.require_version("Gtk", "3.0") # isort:imports-thirdparty import deluge.component as component from deluge.plugins.pluginbase import Gtk3PluginBase from deluge.ui.client import client from gi.repository import Gtk # isort:imports-firstparty # isort:imports-localfolder from . import common log = logging.getLogger(__name__) EXECUTE_ID = 0 EXECUTE_EVENT = 1 EXECUTE_COMMAND = 2 EVENT_MAP = { "complete": _("Torrent Complete"), "added": _("Torrent Added"), "removed": _("Torrent Removed"), } EVENTS = ["complete", "added", "removed"] class ExecutePreferences: def __init__(self, plugin): self.plugin = plugin def load(self): log.debug("Adding Execute Preferences page") self.builder = Gtk.Builder() self.builder.add_from_file(common.get_resource("execute_prefs.ui")) self.builder.connect_signals(self) events = self.builder.get_object("event_combobox") store = Gtk.ListStore(str, str) for event in EVENTS: event_label = EVENT_MAP[event] store.append((event_label, event)) events.set_model(store) events.set_active(0) self.plugin.add_preferences_page( _("Execute"), self.builder.get_object("execute_box") ) self.plugin.register_hook("on_show_prefs", self.load_commands) self.plugin.register_hook("on_apply_prefs", self.on_apply_prefs) self.load_commands() client.register_event_handler( "ExecuteCommandAddedEvent", self.on_command_added_event ) client.register_event_handler( "ExecuteCommandRemovedEvent", self.on_command_removed_event ) def unload(self): self.plugin.remove_preferences_page(_("Execute")) self.plugin.deregister_hook("on_apply_prefs", self.on_apply_prefs) self.plugin.deregister_hook("on_show_prefs", self.load_commands) def add_command(self, command_id, event, command): log.debug("Adding command `%s`", command_id) vbox = self.builder.get_object("commands_vbox") hbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, spacing=5) hbox.set_name(command_id + "_" + event) label = Gtk.Label(EVENT_MAP[event]) entry = Gtk.Entry() entry.set_text(command) button = Gtk.Button() button.set_name("remove_%s" % command_id) button.connect("clicked", self.on_remove_button_clicked) img = Gtk.Image() img.set_from_stock(Gtk.STOCK_REMOVE, Gtk.IconSize.BUTTON) button.set_image(img) hbox.pack_start(label, False, False, 0) hbox.pack_start(entry, False, False, 0) hbox.pack_start(button, True, True, 0) hbox.show_all() vbox.pack_start(hbox, True, True, 0) def remove_command(self, command_id): vbox = self.builder.get_object("commands_vbox") children = vbox.get_children() for child in children: if child.get_name().split("_")[0] == command_id: vbox.remove(child) break def clear_commands(self): vbox = self.builder.get_object("commands_vbox") children = vbox.get_children() for child in children: vbox.remove(child) def load_commands(self): def on_get_commands(commands): self.clear_commands() log.debug("on_get_commands: %s", commands) for command in commands: command_id, event, command = command self.add_command(command_id, event, command) client.execute.get_commands().addCallback(on_get_commands) def on_add_button_clicked(self, *args): command = self.builder.get_object("command_entry").get_text() events = self.builder.get_object("event_combobox") event = events.get_model()[events.get_active()][1] client.execute.add_command(event, command) def on_remove_button_clicked(self, widget, *args): command_id = widget.get_name().replace("remove_", "") client.execute.remove_command(command_id) def on_apply_prefs(self): vbox = self.builder.get_object("commands_vbox") children = vbox.get_children() for child in children: command_id, event = child.get_name().split("_") for widget in child.get_children(): if isinstance(widget, Gtk.Entry): command = widget.get_text() client.execute.save_command(command_id, event, command) def on_command_added_event(self, command_id, event, command): log.debug("Adding command %s: %s", event, command) self.add_command(command_id, event, command) def on_command_removed_event(self, command_id): log.debug("Removing command %s", command_id) self.remove_command(command_id) class GtkUI(Gtk3PluginBase): def enable(self): self.plugin = component.get("PluginManager") self.preferences = ExecutePreferences(self.plugin) self.preferences.load() def disable(self): self.preferences.unload()
utilities
notifier
import inspect import logging from asyncio import AbstractEventLoop from collections import defaultdict from threading import Lock from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, cast FuncT = TypeVar("FuncT", bound=Callable[..., Any]) class NotifierError(Exception): pass class Notifier: """ Allows communication between different Tribler modules and components. With Notifier, you can subscribe observer to a topic and receive notifications. The topic is a function, and the observer should be a callable with the same signature. Notifier is statically typed - if an observer has an incorrect signature or notification is called with wrong arguments, you should get a TypeError. PyCharm also should highlight incorrect observer registration and incorrect topic invocation. An example of usage: First, you need to create a Notifier instance. You can pass an event loop if the notifier should be able to process notifications asynchronously. >>> import asyncio >>> notifier = Notifier(loop=asyncio.get_event_loop()) A topic is a function with an arbitrary signature. Usually, it has an empty body (a pass statement) but can include a debug code as well. It is called when notification is sent to observers. >>> def topic(foo: int, bar: str): ... print("Notification is sent!") ... An observer should have the same signature as the topic (the return type is ignored for convenience). It may be a bound method of an object, in that case the `self` argument is also ignored. >>> def observer(foo: int, bar: str): ... print("Observer called with", foo, bar) ... >>> def second_observer(foo: int, bar: str): ... print("Second observer called with", foo, bar) ... To connect an observer to a specific notification, you can use the `add_observer` method. The method checks that the topic and the observer have the same signature. >>> notifier.add_observer(topic, observer) Observers can be registered as synchronous or asynchronous. Synchronous observers are called immediately, and asynchronous observers are called in the subsequent event loop iterations. By default, the observer is asynchronous if the notifier was initialized with an event loop. You can explicitly specify if the observer is synchronous or not: >>> notifier.add_observer(topic, second_observer, synchronous=True) To call observers for a specific topic in a type-safe manner, use square brackets syntax. If you are not aware what arguments should be used for specific topic, in IDE you can click on the topic function name and jump to the function signature. >>> notifier[topic](123, "abc") >>> notifier[topic](foo=123, bar="abc") When you invoke a notifier, all observers for the topic receive notification in the order as they were registered (synchronous observers first, then asynchronous). As an alternative, you can use the `notify` method, but without static type checks: >>> notifier.notify(topic, foo=123, bar="abc") The last way to invoke notification is by a topic function name. It can be useful when writing generic code. To be able to call the topic in this manner, it should have at least one observer: >>> notifier.notify_by_topic_name("topic", foo=123, bar="abc") You can also register a generic observer, receiving notifications for any topic. It will receive the topic as a first argument. When notification is called, generic observers are called before topic-specific observers in the same order as they were registered: >>> def generic_observer(topic, *args, **kwargs): ... print("Generic observer called for", topic.__name__, "with", args, kwargs) ... >>> notifier.add_generic_observer(generic_observer) You can remove an observer or generic observer by calling the corresponding method: >>> notifier.remove_observer(observer) >>> notifier.remove_generic_observer(generic_observer) In Tribler, both Core and GUI have notifiers. Tribler uses generic observer to retranslate a subset of topics from Core to GUI. Core notifier is attached to the event loop and processes most topics asynchronously. GUI does not have an event loop, so GUI notifier processes retranslated topics synchronously. Basically, GUI notifier fires corresponding Qt signal for each topic. EventsEndpoint in Core and EventsRequestManager in GUI implement this logic of retranslation. EventsEndpoint adds a generic observer that listens to all topics, serializes a subset of notification calls to JSON, and sends them to GUI. EventRequestManager receives messages, deserializes arguments and calls `notifier.notify_by_topic_name`. """ def __init__(self, loop: AbstractEventLoop = None): self.lock = Lock() self.logger = logging.getLogger(self.__class__.__name__) self.topics_by_name: Dict[str, Callable] = {} self.unknown_topic_names = set() # We use the dict type for `self.observers` and `set.generic_observers` instead of the set type to provide # the deterministic ordering of callbacks. In Python, dictionaries are ordered while sets aren't. # Therefore, `value: bool` here is unnecessary and is never used. self.topics: Dict[Callable, Dict[Callable, bool]] = defaultdict(dict) self.generic_observers: Dict[Callable, bool] = {} self.interceptors: Dict[Callable, bool] = {} # @ichorid: # We have to store the event loop in constructor. Otherwise, get_event_loop() cannot find # the original event loop when scheduling notifications from the external thread. self.loop = loop def add_observer( self, topic: FuncT, observer: FuncT, synchronous: Optional[bool] = None ): """Add the observer for the topic. Each callback will be added no more than once. Callbacks are called in the same order as they were added. topic: A callable which represents a "topic" to subscribe observer: A callable which will be actually called when notification is sent to the topic synchronous: A strategy of how to call the observer. If True, """ synchronous = self._check_synchronous(synchronous) empty = inspect._empty # pylint: disable=protected-access # ignore types of return values, as during the notification call the return values are ignored topic_signature = inspect.signature(topic).replace(return_annotation=empty) callback_signature = inspect.signature(observer).replace( return_annotation=empty ) if topic_signature != callback_signature: raise TypeError( f'Cannot add observer {observer!r} to topic "{topic.__name__}": ' f"the callback signature {callback_signature} does not match " f"the topic signature {topic_signature}" ) if inspect.iscoroutinefunction(topic): raise TypeError(f"Topic cannot be a coroutine function. Got: {topic!r}") if inspect.iscoroutinefunction(observer): raise TypeError( f"Observer cannot be a coroutine function. Got: {observer!r}" ) if topic is observer: raise TypeError( f"Topic and observer cannot be the same function. Got: {topic!r}" ) self.logger.debug(f"Add observer topic {topic.__name__}") with self.lock: topic_name: str = topic.__name__ prev_topic = self.topics_by_name.setdefault(topic_name, topic) if prev_topic is not topic: raise NotifierError( f"Cannot register topic {topic!r} because topic name {topic_name} is already taken " f"by another topic {prev_topic!r}" ) prev_synchronous = self.topics[topic].setdefault(observer, synchronous) if prev_synchronous != synchronous: raise NotifierError( "Cannot register the same observer with a different value of `synchronous` option" ) def _check_synchronous(self, synchronous: Optional[bool]) -> bool: if not any(synchronous is option for option in (True, False, None)): raise TypeError( f"`synchronous` option may be True, False or None. Got: {synchronous!r}" ) if synchronous is False and self.loop is None: raise TypeError( "synchronous=False option cannot be specified for a notifier without an event loop" ) if synchronous is None: synchronous = not self.loop return synchronous def remove_observer(self, topic: FuncT, observer: FuncT): """Remove the observer from the topic. In the case of a missed callback no error will be raised.""" with self.lock: observers = self.topics[topic] observers.pop(observer, None) comment = ( "" if not observers else f" (it still has {len(observers)} observers)" ) self.logger.debug( f"Remove observer {observer!r} from topic {topic.__name__}" + comment ) def add_generic_observer( self, observer: Callable, synchronous: Optional[bool] = None ): self.logger.debug(f"Add generic observer {observer!r}") with self.lock: self.generic_observers[observer] = self._check_synchronous(synchronous) def remove_generic_observer(self, observer: Callable): with self.lock: self.generic_observers.pop(observer, None) self.logger.debug(f"Remove generic observer {observer!r}") def __getitem__(self, topic: FuncT) -> FuncT: def wrapper(*args, **kwargs): self.notify(topic, *args, **kwargs) return cast(FuncT, wrapper) def notify_by_topic_name(self, topic_name: str, *args, **kwargs): with self.lock: topic = self.topics_by_name.get(topic_name) if topic is None: if topic_name not in self.unknown_topic_names: self.unknown_topic_names.add(topic_name) self.logger.warning(f"Topic with name `{topic_name}` not found") else: self.notify(topic, *args, **kwargs) def notify(self, topic: Callable, *args, **kwargs): """Notify all observers about the topic. Сan be called from any thread. Observers will be called from the reactor thread during the next iteration of the event loop. An exception when an observer is invoked will not affect other observers. """ self.logger.debug(f"Notification for topic {topic.__name__}") topic(*args, **kwargs) with self.lock: generic_observers: List[Tuple[Callable, bool]] = list( self.generic_observers.items() ) observers: List[Tuple[Callable, bool]] = list(self.topics[topic].items()) generic_observer_args = (topic,) + args for observer, synchronous in generic_observers: if synchronous: self._notify(topic, observer, generic_observer_args, kwargs) else: self._notify_threadsafe(topic, observer, generic_observer_args, kwargs) for observer, synchronous in observers: if synchronous: self._notify(topic, observer, args, kwargs) else: self._notify_threadsafe(topic, observer, args, kwargs) def _notify_threadsafe( self, topic: Callable, observer: Callable, args: Tuple, kwargs: Dict[str, Any] ): try: self.loop.call_soon_threadsafe(self._notify, topic, observer, args, kwargs) except RuntimeError as e: # Raises RuntimeError if called on a loop that’s been closed. # This can happen on a secondary thread when the main application is shutting down. # https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.call_soon_threadsafe self.logger.warning(e) def _notify(self, topic: Callable, observer: Callable, args: tuple, kwargs: dict): self.logger.debug(f"Calling observer {observer!r} for topic {topic.__name__}") try: observer(*args, **kwargs) except Exception as e: # pylint: disable=broad-except self.logger.exception(e)
bs4
testing
"""Helper classes for tests.""" # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __license__ = "MIT" import copy import functools import pickle import unittest from unittest import TestCase from bs4 import BeautifulSoup from bs4.builder import HTMLParserTreeBuilder from bs4.element import ( CharsetMetaAttributeValue, Comment, ContentMetaAttributeValue, Doctype, SoupStrainer, ) default_builder = HTMLParserTreeBuilder class SoupTest(unittest.TestCase): @property def default_builder(self): return default_builder() def soup(self, markup, **kwargs): """Build a Beautiful Soup object from markup.""" builder = kwargs.pop("builder", self.default_builder) return BeautifulSoup(markup, builder=builder, **kwargs) def document_for(self, markup): """Turn an HTML fragment into a document. The details depend on the builder. """ return self.default_builder.test_fragment_to_document(markup) def assertSoupEquals(self, to_parse, compare_parsed_to=None): builder = self.default_builder obj = BeautifulSoup(to_parse, builder=builder) if compare_parsed_to is None: compare_parsed_to = to_parse self.assertEqual(obj.decode(), self.document_for(compare_parsed_to)) def assertConnectedness(self, element): """Ensure that next_element and previous_element are properly set for all descendants of the given element. """ earlier = None for e in element.descendants: if earlier: self.assertEqual(e, earlier.next_element) self.assertEqual(earlier, e.previous_element) earlier = e class HTMLTreeBuilderSmokeTest(object): """A basic test of a treebuilder's competence. Any HTML treebuilder, present or future, should be able to pass these tests. With invalid markup, there's room for interpretation, and different parsers can handle it differently. But with the markup in these tests, there's not much room for interpretation. """ def test_pickle_and_unpickle_identity(self): # Pickling a tree, then unpickling it, yields a tree identical # to the original. tree = self.soup("<a><b>foo</a>") dumped = pickle.dumps(tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), tree.decode()) def assertDoctypeHandled(self, doctype_fragment): """Assert that a given doctype string is handled correctly.""" doctype_str, soup = self._document_with_doctype(doctype_fragment) # Make sure a Doctype object was created. doctype = soup.contents[0] self.assertEqual(doctype.__class__, Doctype) self.assertEqual(doctype, doctype_fragment) self.assertEqual(str(soup)[: len(doctype_str)], doctype_str) # Make sure that the doctype was correctly associated with the # parse tree and that the rest of the document parsed. self.assertEqual(soup.p.contents[0], "foo") def _document_with_doctype(self, doctype_fragment): """Generate and parse a document with the given doctype.""" doctype = "<!DOCTYPE %s>" % doctype_fragment markup = doctype + "\n<p>foo</p>" soup = self.soup(markup) return doctype, soup def test_normal_doctypes(self): """Make sure normal, everyday HTML doctypes are handled correctly.""" self.assertDoctypeHandled("html") self.assertDoctypeHandled( 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' ) def test_empty_doctype(self): soup = self.soup("<!DOCTYPE>") doctype = soup.contents[0] self.assertEqual("", doctype.strip()) def test_public_doctype_with_url(self): doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' self.assertDoctypeHandled(doctype) def test_system_doctype(self): self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') def test_namespaced_system_doctype(self): # We can handle a namespaced doctype with a system ID. self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') def test_namespaced_public_doctype(self): # Test a namespaced doctype with a public id. self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') def test_real_xhtml_document(self): """A real XHTML document should come out more or less the same as it went in.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8").replace(b"\n", b""), markup.replace(b"\n", b"") ) def test_processing_instruction(self): # We test both Unicode and bytestring to verify that # process_markup correctly sets processing_instruction_class # even when the markup is already Unicode and there is no # need to process anything. markup = """<?PITarget PIContent?>""" soup = self.soup(markup) self.assertEqual(markup, soup.decode()) markup = b"""<?PITarget PIContent?>""" soup = self.soup(markup) self.assertEqual(markup, soup.encode("utf8")) def test_deepcopy(self): """Make sure you can copy the tree builder. This is important because the builder is part of a BeautifulSoup object, and we want to be able to copy that. """ copy.deepcopy(self.default_builder) def test_p_tag_is_never_empty_element(self): """A <p> tag is never designated as an empty-element tag. Even if the markup shows it as an empty-element tag, it shouldn't be presented that way. """ soup = self.soup("<p/>") self.assertFalse(soup.p.is_empty_element) self.assertEqual(str(soup.p), "<p></p>") def test_unclosed_tags_get_closed(self): """A tag that's not closed by the end of the document should be closed. This applies to all tags except empty-element tags. """ self.assertSoupEquals("<p>", "<p></p>") self.assertSoupEquals("<b>", "<b></b>") self.assertSoupEquals("<br>", "<br/>") def test_br_is_always_empty_element_tag(self): """A <br> tag is designated as an empty-element tag. Some parsers treat <br></br> as one <br/> tag, some parsers as two tags, but it should always be an empty-element tag. """ soup = self.soup("<br></br>") self.assertTrue(soup.br.is_empty_element) self.assertEqual(str(soup.br), "<br/>") def test_nested_formatting_elements(self): self.assertSoupEquals("<em><em></em></em>") def test_double_head(self): html = """<!DOCTYPE html> <html> <head> <title>Ordinary HEAD element test</title> </head> <script type="text/javascript"> alert("Help!"); </script> <body> Hello, world! </body> </html> """ soup = self.soup(html) self.assertEqual("text/javascript", soup.find("script")["type"]) def test_comment(self): # Comments are represented as Comment objects. markup = "<p>foo<!--foobar-->baz</p>" self.assertSoupEquals(markup) soup = self.soup(markup) comment = soup.find(text="foobar") self.assertEqual(comment.__class__, Comment) # The comment is properly integrated into the tree. foo = soup.find(text="foo") self.assertEqual(comment, foo.next_element) baz = soup.find(text="baz") self.assertEqual(comment, baz.previous_element) def test_preserved_whitespace_in_pre_and_textarea(self): """Whitespace must be preserved in <pre> and <textarea> tags, even if that would mean not prettifying the markup. """ pre_markup = "<pre> </pre>" textarea_markup = "<textarea> woo\nwoo </textarea>" self.assertSoupEquals(pre_markup) self.assertSoupEquals(textarea_markup) soup = self.soup(pre_markup) self.assertEqual(soup.pre.prettify(), pre_markup) soup = self.soup(textarea_markup) self.assertEqual(soup.textarea.prettify(), textarea_markup) soup = self.soup("<textarea></textarea>") self.assertEqual(soup.textarea.prettify(), "<textarea></textarea>") def test_nested_inline_elements(self): """Inline elements can be nested indefinitely.""" b_tag = "<b>Inside a B tag</b>" self.assertSoupEquals(b_tag) nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>" self.assertSoupEquals(nested_b_tag) double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>" self.assertSoupEquals(nested_b_tag) def test_nested_block_level_elements(self): """Block elements can be nested.""" soup = self.soup("<blockquote><p><b>Foo</b></p></blockquote>") blockquote = soup.blockquote self.assertEqual(blockquote.p.b.string, "Foo") self.assertEqual(blockquote.b.string, "Foo") def test_correctly_nested_tables(self): """One table can go inside another one.""" markup = ( '<table id="1">' "<tr>" "<td>Here's another table:" '<table id="2">' "<tr><td>foo</td></tr>" "</table></td>" ) self.assertSoupEquals( markup, '<table id="1"><tr><td>Here\'s another table:' '<table id="2"><tr><td>foo</td></tr></table>' "</td></tr></table>", ) self.assertSoupEquals( "<table><thead><tr><td>Foo</td></tr></thead>" "<tbody><tr><td>Bar</td></tr></tbody>" "<tfoot><tr><td>Baz</td></tr></tfoot></table>" ) def test_deeply_nested_multivalued_attribute(self): # html5lib can set the attributes of the same tag many times # as it rearranges the tree. This has caused problems with # multivalued attributes. markup = '<table><div><div class="css"></div></div></table>' soup = self.soup(markup) self.assertEqual(["css"], soup.div.div["class"]) def test_multivalued_attribute_on_html(self): # html5lib uses a different API to set the attributes ot the # <html> tag. This has caused problems with multivalued # attributes. markup = '<html class="a b"></html>' soup = self.soup(markup) self.assertEqual(["a", "b"], soup.html["class"]) def test_angle_brackets_in_attribute_values_are_escaped(self): self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>') def test_entities_in_attributes_converted_to_unicode(self): expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>' self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect) def test_entities_in_text_converted_to_unicode(self): expect = "<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>" self.assertSoupEquals("<p>pi&#241;ata</p>", expect) self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect) def test_quot_entity_converted_to_quotation_mark(self): self.assertSoupEquals( "<p>I said &quot;good day!&quot;</p>", '<p>I said "good day!"</p>' ) def test_out_of_range_entity(self): expect = "\N{REPLACEMENT CHARACTER}" self.assertSoupEquals("&#10000000000000;", expect) self.assertSoupEquals("&#x10000000000000;", expect) self.assertSoupEquals("&#1000000000;", expect) def test_multipart_strings(self): "Mostly to prevent a recurrence of a bug in the html5lib treebuilder." soup = self.soup("<html><h2>\nfoo</h2><p></p></html>") self.assertEqual("p", soup.h2.string.next_element.name) self.assertEqual("p", soup.p.name) self.assertConnectedness(soup) def test_head_tag_between_head_and_body(self): "Prevent recurrence of a bug in the html5lib treebuilder." content = """<html><head></head> <link></link> <body>foo</body> </html> """ soup = self.soup(content) self.assertNotEqual(None, soup.html.body) self.assertConnectedness(soup) def test_multiple_copies_of_a_tag(self): "Prevent recurrence of a bug in the html5lib treebuilder." content = """<!DOCTYPE html> <html> <body> <article id="a" > <div><a href="1"></div> <footer> <a href="2"></a> </footer> </article> </body> </html> """ soup = self.soup(content) self.assertConnectedness(soup.article) def test_basic_namespaces(self): """Parsers don't need to *understand* namespaces, but at the very least they should not choke on namespaces or lose data.""" markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>' soup = self.soup(markup) self.assertEqual(markup, soup.encode()) html = soup.html self.assertEqual("http://www.w3.org/1999/xhtml", soup.html["xmlns"]) self.assertEqual( "http://www.w3.org/1998/Math/MathML", soup.html["xmlns:mathml"] ) self.assertEqual("http://www.w3.org/2000/svg", soup.html["xmlns:svg"]) def test_multivalued_attribute_value_becomes_list(self): markup = b'<a class="foo bar">' soup = self.soup(markup) self.assertEqual(["foo", "bar"], soup.a["class"]) # # Generally speaking, tests below this point are more tests of # Beautiful Soup than tests of the tree builders. But parsers are # weird, so we run these tests separately for every tree builder # to detect any differences between them. # def test_can_parse_unicode_document(self): # A seemingly innocuous document... but it's in Unicode! And # it contains characters that can't be represented in the # encoding found in the declaration! The horror! markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>' soup = self.soup(markup) self.assertEqual("Sacr\xe9 bleu!", soup.body.string) def test_soupstrainer(self): """Parsers should be able to work with SoupStrainers.""" strainer = SoupStrainer("b") soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>", parse_only=strainer) self.assertEqual(soup.decode(), "<b>bold</b>") def test_single_quote_attribute_values_become_double_quotes(self): self.assertSoupEquals("<foo attr='bar'></foo>", '<foo attr="bar"></foo>') def test_attribute_values_with_nested_quotes_are_left_alone(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" self.assertSoupEquals(text) def test_attribute_values_with_double_nested_quotes_get_quoted(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" soup = self.soup(text) soup.foo["attr"] = 'Brawls happen at "Bob\'s Bar"' self.assertSoupEquals( soup.foo.decode(), """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""", ) def test_ampersand_in_attribute_value_gets_escaped(self): self.assertSoupEquals( '<this is="really messed up & stuff"></this>', '<this is="really messed up &amp; stuff"></this>', ) self.assertSoupEquals( '<a href="http://example.org?a=1&b=2;3">foo</a>', '<a href="http://example.org?a=1&amp;b=2;3">foo</a>', ) def test_escaped_ampersand_in_attribute_value_is_left_alone(self): self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>') def test_entities_in_strings_converted_during_parsing(self): # Both XML and HTML entities are converted to Unicode characters # during parsing. text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = ( "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>" ) self.assertSoupEquals(text, expected) def test_smart_quotes_converted_on_the_way_in(self): # Microsoft smart quotes are converted to Unicode characters during # parsing. quote = b"<p>\x91Foo\x92</p>" soup = self.soup(quote) self.assertEqual( soup.p.string, "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}", ) def test_non_breaking_spaces_converted_on_the_way_in(self): soup = self.soup("<a>&nbsp;&nbsp;</a>") self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2) def test_entities_converted_on_the_way_out(self): text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode( "utf-8" ) soup = self.soup(text) self.assertEqual(soup.p.encode("utf-8"), expected) def test_real_iso_latin_document(self): # Smoke test of interrelated functionality, using an # easy-to-understand document. # Here it is in Unicode. Note that it claims to be in ISO-Latin-1. unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>' # That's because we're going to encode it into ISO-Latin-1, and use # that to test. iso_latin_html = unicode_html.encode("iso-8859-1") # Parse the ISO-Latin-1 HTML. soup = self.soup(iso_latin_html) # Encode it to UTF-8. result = soup.encode("utf-8") # What do we expect the result to look like? Well, it would # look like unicode_html, except that the META tag would say # UTF-8 instead of ISO-Latin-1. expected = unicode_html.replace("ISO-Latin-1", "utf-8") # And, of course, it would be in UTF-8, not Unicode. expected = expected.encode("utf-8") # Ta-da! self.assertEqual(result, expected) def test_real_shift_jis_document(self): # Smoke test to make sure the parser can handle a document in # Shift-JIS encoding, without choking. shift_jis_html = ( b"<html><head></head><body><pre>" b"\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f" b"\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c" b"\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B" b"</pre></body></html>" ) unicode_html = shift_jis_html.decode("shift-jis") soup = self.soup(unicode_html) # Make sure the parse tree is correctly encoded to various # encodings. self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8")) self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp")) def test_real_hebrew_document(self): # A real-world test to make sure we can convert ISO-8859-9 (a # Hebrew encoding) to UTF-8. hebrew_document = b"<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>" soup = self.soup(hebrew_document, from_encoding="iso8859-8") # Some tree builders call it iso8859-8, others call it iso-8859-9. # That's not a difference we really care about. assert soup.original_encoding in ("iso8859-8", "iso-8859-8") self.assertEqual( soup.encode("utf-8"), hebrew_document.decode("iso8859-8").encode("utf-8") ) def test_meta_tag_reflects_current_encoding(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = ( '<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>' ) # Here's a document incorporating that meta tag. shift_jis_html = ( "<html><head>\n%s\n" '<meta http-equiv="Content-language" content="ja"/>' "</head><body>Shift-JIS markup goes here." ) % meta_tag soup = self.soup(shift_jis_html) # Parse the document, and the charset is seemingly unaffected. parsed_meta = soup.find("meta", {"http-equiv": "Content-type"}) content = parsed_meta["content"] self.assertEqual("text/html; charset=x-sjis", content) # But that value is actually a ContentMetaAttributeValue object. self.assertTrue(isinstance(content, ContentMetaAttributeValue)) # And it will take on a value that reflects its current # encoding. self.assertEqual("text/html; charset=utf8", content.encode("utf8")) # For the rest of the story, see TestSubstitutions in # test_tree.py. def test_html5_style_meta_tag_reflects_current_encoding(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = '<meta id="encoding" charset="x-sjis" />' # Here's a document incorporating that meta tag. shift_jis_html = ( "<html><head>\n%s\n" '<meta http-equiv="Content-language" content="ja"/>' "</head><body>Shift-JIS markup goes here." ) % meta_tag soup = self.soup(shift_jis_html) # Parse the document, and the charset is seemingly unaffected. parsed_meta = soup.find("meta", id="encoding") charset = parsed_meta["charset"] self.assertEqual("x-sjis", charset) # But that value is actually a CharsetMetaAttributeValue object. self.assertTrue(isinstance(charset, CharsetMetaAttributeValue)) # And it will take on a value that reflects its current # encoding. self.assertEqual("utf8", charset.encode("utf8")) def test_tag_with_no_attributes_can_have_attributes_added(self): data = self.soup("<a>text</a>") data.a["foo"] = "bar" self.assertEqual('<a foo="bar">text</a>', data.a.decode()) class XMLTreeBuilderSmokeTest(object): def test_pickle_and_unpickle_identity(self): # Pickling a tree, then unpickling it, yields a tree identical # to the original. tree = self.soup("<a><b>foo</a>") dumped = pickle.dumps(tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), tree.decode()) def test_docstring_generated(self): soup = self.soup("<root/>") self.assertEqual( soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>' ) def test_xml_declaration(self): markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>""" soup = self.soup(markup) self.assertEqual(markup, soup.encode("utf8")) def test_processing_instruction(self): markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>""" soup = self.soup(markup) self.assertEqual(markup, soup.encode("utf8")) def test_real_xhtml_document(self): """A real XHTML document should come out *exactly* the same as it went in.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual(soup.encode("utf-8"), markup) def test_formatter_processes_script_tag_for_xml_documents(self): doc = """ <script type="text/javascript"> </script> """ soup = BeautifulSoup(doc, "lxml-xml") # lxml would have stripped this while parsing, but we can add # it later. soup.script.string = 'console.log("< < hey > > ");' encoded = soup.encode() self.assertTrue(b"&lt; &lt; hey &gt; &gt;" in encoded) def test_can_parse_unicode_document(self): markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>' soup = self.soup(markup) self.assertEqual("Sacr\xe9 bleu!", soup.root.string) def test_popping_namespaced_tag(self): markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights></rss>' soup = self.soup(markup) self.assertEqual(unicode(soup.rss), markup) def test_docstring_includes_correct_encoding(self): soup = self.soup("<root/>") self.assertEqual( soup.encode("latin1"), b'<?xml version="1.0" encoding="latin1"?>\n<root/>' ) def test_large_xml_document(self): """A large XML document should come out the same as it went in.""" markup = ( b'<?xml version="1.0" encoding="utf-8"?>\n<root>' + b"0" * (2**12) + b"</root>" ) soup = self.soup(markup) self.assertEqual(soup.encode("utf-8"), markup) def test_tags_are_empty_element_if_and_only_if_they_are_empty(self): self.assertSoupEquals("<p>", "<p/>") self.assertSoupEquals("<p>foo</p>") def test_namespaces_are_preserved(self): markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>' soup = self.soup(markup) root = soup.root self.assertEqual("http://example.com/", root["xmlns:a"]) self.assertEqual("http://example.net/", root["xmlns:b"]) def test_closing_namespaced_tag(self): markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>' soup = self.soup(markup) self.assertEqual(unicode(soup.p), markup) def test_namespaced_attributes(self): markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>' soup = self.soup(markup) self.assertEqual(unicode(soup.foo), markup) def test_namespaced_attributes_xml_namespace(self): markup = '<foo xml:lang="fr">bar</foo>' soup = self.soup(markup) self.assertEqual(unicode(soup.foo), markup) class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): """Smoke test for a tree builder that supports HTML5.""" def test_real_xhtml_document(self): # Since XHTML is not HTML5, HTML5 parsers are not tested to handle # XHTML documents in any particular way. pass def test_html_tags_have_namespace(self): markup = "<a>" soup = self.soup(markup) self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace) def test_svg_tags_have_namespace(self): markup = "<svg><circle/></svg>" soup = self.soup(markup) namespace = "http://www.w3.org/2000/svg" self.assertEqual(namespace, soup.svg.namespace) self.assertEqual(namespace, soup.circle.namespace) def test_mathml_tags_have_namespace(self): markup = "<math><msqrt>5</msqrt></math>" soup = self.soup(markup) namespace = "http://www.w3.org/1998/Math/MathML" self.assertEqual(namespace, soup.math.namespace) self.assertEqual(namespace, soup.msqrt.namespace) def test_xml_declaration_becomes_comment(self): markup = '<?xml version="1.0" encoding="utf-8"?><html></html>' soup = self.soup(markup) self.assertTrue(isinstance(soup.contents[0], Comment)) self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?') self.assertEqual("html", soup.contents[0].next_element.name) def skipIf(condition, reason): def nothing(test, *args, **kwargs): return None def decorator(test_item): if condition: return nothing else: return test_item return decorator
downloaders
PornhostCom
# -*- coding: utf-8 -*- import re from ..base.downloader import BaseDownloader class PornhostCom(BaseDownloader): __name__ = "PornhostCom" __type__ = "downloader" __version__ = "0.27" __status__ = "testing" __pattern__ = r"https?://(?:www\.)?pornhost\.com/\d+" __config__ = [("enabled", "bool", "Activated", True)] __description__ = """Pornhost.com downloader plugin""" __license__ = "GPLv3" __authors__ = [ ("jeix", "jeix@hasnomail.de"), ("GammaC0de", "nitzo2001[AT}yahoo[DOT]com"), ] NAME_PATTERN = r'class="video-title">(.+?)<' LINK_PATTERN = ( r'<source src="(https://cdn\d+-dl.pornhost.com/.+)" type="video/mp4">' ) OFFLINE_PATTERN = r">Gallery not found<" def process(self, pyfile): self.data = self.load(pyfile.url) if re.search(self.OFFLINE_PATTERN, self.data) is not None: self.offline() m = re.search(self.NAME_PATTERN, self.data) if m is None: self.error(self._("name pattern not found")) pyfile.name = m.group(1) + ".mp4" m = re.search(self.LINK_PATTERN, self.data) if m is None: self.error(self._("link pattern not found")) self.download(m.group(1))
Translate
Translate
import html import inspect import json import logging import os import re import string from Config import config translates = [] class EscapeProxy(dict): # Automatically escape the accessed string values def __getitem__(self, key): val = dict.__getitem__(self, key) if type(val) in (str, str): return html.escape(val) elif type(val) is dict: return EscapeProxy(val) elif type(val) is list: return EscapeProxy(enumerate(val)) # Convert lists to dict else: return val class Translate(dict): def __init__(self, lang_dir=None, lang=None): if not lang_dir: lang_dir = os.path.dirname(__file__) + "/languages/" if not lang: lang = config.language self.lang = lang self.lang_dir = lang_dir self.setLanguage(lang) self.formatter = string.Formatter() if config.debug: # Auto reload FileRequest on change from Debug import DebugReloader DebugReloader.watcher.addCallback(self.load) translates.append(self) def setLanguage(self, lang): self.lang = re.sub("[^a-z-]", "", lang) self.lang_file = self.lang_dir + "%s.json" % lang self.load() def __repr__(self): return "<translate %s>" % self.lang def load(self): if self.lang == "en": data = {} dict.__init__(self, data) self.clear() elif os.path.isfile(self.lang_file): try: data = json.load(open(self.lang_file, encoding="utf8")) logging.debug( "Loaded translate file: %s (%s entries)" % (self.lang_file, len(data)) ) except Exception as err: logging.error( "Error loading translate file %s: %s" % (self.lang_file, err) ) data = {} dict.__init__(self, data) else: data = {} dict.__init__(self, data) self.clear() logging.debug("Translate file not exists: %s" % self.lang_file) def format(self, s, kwargs, nested=False): kwargs["_"] = self if nested: back = self.formatter.vformat( s, [], kwargs ) # PY3 TODO: Change to format_map return self.formatter.vformat(back, [], kwargs) else: return self.formatter.vformat(s, [], kwargs) def formatLocals(self, s, nested=False): kwargs = inspect.currentframe().f_back.f_locals return self.format(s, kwargs, nested=nested) def __call__(self, s, kwargs=None, nested=False, escape=True): if not kwargs: kwargs = inspect.currentframe().f_back.f_locals if escape: kwargs = EscapeProxy(kwargs) return self.format(s, kwargs, nested=nested) def __missing__(self, key): return key def pluralize(self, value, single, multi): if value > 1: return self[multi].format(value) else: return self[single].format(value) def translateData(self, data, translate_table=None, mode="js"): if not translate_table: translate_table = self patterns = [] for key, val in list(translate_table.items()): if key.startswith( "_(" ): # Problematic string: only match if called between _(" ") function key = key.replace("_(", "").replace(")", "").replace(", ", '", "') translate_table[key] = "|" + val patterns.append(re.escape(key)) def replacer(match): target = translate_table[match.group(1)] if mode == "js": if target and target[0] == "|": # Strict string match if ( match.string[match.start() - 2] == "_" ): # Only if the match if called between _(" ") function return '"' + target[1:] + '"' else: return '"' + match.group(1) + '"' return '"' + target + '"' else: return match.group(0)[0] + target + match.group(0)[-1] if mode == "html": pattern = '[">](' + "|".join(patterns) + ')["<]' else: pattern = '"(' + "|".join(patterns) + ')"' data = re.sub(pattern, replacer, data) if mode == "html": data = data.replace( "lang={lang}", "lang=%s" % self.lang ) # lang get parameter to .js file to avoid cache return data translate = Translate()
styling
compiler
"""Compiler for CSS selectors. Based on cssselect2.compiler, written by Simon Sapin and Guillaume Ayoub. """ import re from functools import singledispatch from typing import Callable, Dict, Iterator, Literal, Tuple, Union import tinycss2 from gaphor.core.styling import selectors from gaphor.core.styling.declarations import parse_declarations # http://dev.w3.org/csswg/selectors/#whitespace split_whitespace = re.compile("[^ \t\r\n\f]+").findall Rule = Union[ Tuple[Tuple[Callable[[object], bool], Tuple[int, int, int]], Dict[str, object]], Tuple[Literal["error"], Union[tinycss2.ast.ParseError, selectors.SelectorError]], ] def compile_style_sheet(*css: str) -> Iterator[Rule]: for sheet in css: if sheet: rules = tinycss2.parse_stylesheet( sheet, skip_comments=True, skip_whitespace=True ) yield from compile_rules(rules) def compile_rules(rules): for rule in rules: if rule.type == "error": yield "error", rule continue if rule.type == "at-rule" and rule.content: at_rules = tinycss2.parser.parse_rule_list( rule.content, skip_comments=True, skip_whitespace=True ) media_selector = selectors.media_query_selector(rule.prelude) if not media_selector: continue media_query = compile_node(media_selector) yield from ( ((_combine(media_query, selspec[0]), selspec[1]), declaration) for selspec, declaration in compile_rules(at_rules) if selspec != "error" ) if rule.type != "qualified-rule": continue try: selector_list = compile_selector_list(rule.prelude) except selectors.SelectorError as e: yield "error", e continue declaration = { prop: value for prop, value in parse_declarations(rule.content) if prop != "error" and value is not None } yield from ((selector, declaration) for selector in selector_list) def _combine(a, b): return lambda el: a(el) and b(el) def compile_selector_list(input): """Compile a (comma-separated) list of selectors. Based on cssselect2.compiler.compile_selector_list(). Returns a list of compiled selectors. """ return [ (compile_node(selector), selector.specificity) for selector in selectors.selectors(input) ] @singledispatch def compile_node(selector): """Dynamic dispatch selector nodes. Default behavior is to deny (no match). """ raise selectors.SelectorError("Unknown selector", selector) @compile_node.register def compile_media_selector(selector: selectors.MediaSelector): query = selector.query if len(query) == 1: mode = query[0].lower() elif ( len(query) == 3 and query[0].lower() == "prefers-color-scheme" and query[1] == "=" ): mode = query[2].lower() else: mode = None if mode in ("dark", "dark-mode"): return lambda el: el.dark_mode is True elif mode in ("light", "light-mode"): return lambda el: el.dark_mode is False return lambda el: False @compile_node.register def compile_compound_selector(selector: selectors.CompoundSelector): sub_expressions = [compile_node(sel) for sel in selector.simple_selectors] return lambda el: all(expr(el) for expr in sub_expressions) @compile_node.register def compile_name_selector(selector: selectors.LocalNameSelector): return lambda el: el.name() == selector.lower_local_name def ancestors(el): if p := el.parent(): yield p yield from ancestors(p) def descendants(el): for c in el.children(): yield c yield from descendants(c) @compile_node.register def compile_combined_selector(selector: selectors.CombinedSelector): left_inside = compile_node(selector.left) if selector.combinator == " ": def left(el): return any(left_inside(e) for e in ancestors(el)) elif selector.combinator == ">": def left(el): p = el.parent() return p is not None and left_inside(p) else: raise selectors.SelectorError("Unknown combinator", selector.combinator) right = compile_node(selector.right) return lambda el: right(el) and left(el) @compile_node.register def compile_attribute_selector(selector: selectors.AttributeSelector): name = selector.lower_name operator = selector.operator value = selector.value and selector.value.lower() if operator is None: return lambda el: bool(el.attribute(name)) elif operator == "=": return lambda el: el.attribute(name) == value elif operator == "~=": return lambda el: value in split_whitespace(el.attribute(name)) elif operator == "^=": return lambda el: value and el.attribute(name).startswith(value) elif operator == "$=": return lambda el: value and el.attribute(name).endswith(value) elif operator == "*=": return lambda el: value and value in el.attribute(name) elif operator == "|=": def pipe_equal_matcher(el): v = el.attribute(name) return v == value or v and v.startswith(f"{value}-") return pipe_equal_matcher else: raise selectors.SelectorError("Unknown attribute operator", operator) @compile_node.register def compile_pseudo_class_selector(selector: selectors.PseudoClassSelector): name = selector.name if name == "empty": return lambda el: not next(el.children(), 0) elif name in ("root", "hover", "focus", "active", "drop", "disabled"): return lambda el: name in el.state() else: raise selectors.SelectorError("Unknown pseudo-class", name) @compile_node.register def compile_functional_pseudo_class_selector( selector: selectors.FunctionalPseudoClassSelector, ): name = selector.name if name not in ("has", "is", "not"): raise selectors.SelectorError("Unknown pseudo-class", name) sub_selectors = compile_selector_list(selector.arguments) selector.specificity = max(spec for _, spec in sub_selectors) if name == "has": return lambda el: any( any(sel(c) for sel, _ in sub_selectors) for c in descendants(el) ) elif name == "is": return lambda el: any(sel(el) for sel, _ in sub_selectors) elif name == "not": return lambda el: not any(sel(el) for sel, _ in sub_selectors)
qutebrowser
setup
#!/usr/bin/env python3 # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # SPDX-License-Identifier: GPL-3.0-or-later """setuptools installer script for qutebrowser.""" import ast import os import os.path import re import setuptools from scripts import setupcommon as common try: BASEDIR = os.path.dirname(os.path.realpath(__file__)) except NameError: BASEDIR = None def read_file(name): """Get the string contained in the file named name.""" with common.open_file(name, "r", encoding="utf-8") as f: return f.read() def _get_constant(name): """Read a __magic__ constant from qutebrowser/__init__.py. We don't import qutebrowser here because it can go wrong for multiple reasons. Instead we use re/ast to get the value directly from the source file. Args: name: The name of the argument to get. Return: The value of the argument. """ field_re = re.compile(r"__{}__\s+=\s+(.*)".format(re.escape(name))) path = os.path.join(BASEDIR, "qutebrowser", "__init__.py") line = field_re.search(read_file(path)).group(1) value = ast.literal_eval(line) return value try: common.write_git_file() setuptools.setup( packages=setuptools.find_namespace_packages( include=["qutebrowser", "qutebrowser.*"] ), include_package_data=True, entry_points={"gui_scripts": ["qutebrowser = qutebrowser.qutebrowser:main"]}, zip_safe=True, install_requires=[ "jinja2", "PyYAML", 'importlib_resources>=1.1.0; python_version < "3.9"', ], python_requires=">=3.8", name="qutebrowser", version=_get_constant("version"), description=_get_constant("description"), long_description=read_file("README.asciidoc"), long_description_content_type="text/plain", url="https://www.qutebrowser.org/", author=_get_constant("author"), author_email=_get_constant("email"), license=_get_constant("license"), classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: X11 Applications :: Qt", "Intended Audience :: End Users/Desktop", "License :: OSI Approved :: GNU General Public License v3 or later " "(GPLv3+)", "Natural Language :: English", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX :: Linux", "Operating System :: MacOS", "Operating System :: POSIX :: BSD", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Browsers", ], keywords="pyqt browser web qt webkit qtwebkit qtwebengine", ) finally: if BASEDIR is not None: path = os.path.join(BASEDIR, "qutebrowser", "git-commit-id") if os.path.exists(path): os.remove(path)
util
Pooled
import gevent.pool class Pooled(object): def __init__(self, size=100): self.pool = gevent.pool.Pool(size) self.pooler_running = False self.queue = [] self.func = None def waiter(self, evt, args, kwargs): res = self.func(*args, **kwargs) if type(res) == gevent.event.AsyncResult: evt.set(res.get()) else: evt.set(res) def pooler(self): while self.queue: evt, args, kwargs = self.queue.pop(0) self.pool.spawn(self.waiter, evt, args, kwargs) self.pooler_running = False def __call__(self, func): def wrapper(*args, **kwargs): evt = gevent.event.AsyncResult() self.queue.append((evt, args, kwargs)) if not self.pooler_running: self.pooler_running = True gevent.spawn(self.pooler) return evt wrapper.__name__ = func.__name__ self.func = func return wrapper if __name__ == "__main__": import time import gevent import gevent.event import gevent.monkey import gevent.pool import gevent.queue gevent.monkey.patch_all() def addTask(inner_path): evt = gevent.event.AsyncResult() gevent.spawn_later(1, lambda: evt.set(True)) return evt def needFile(inner_path): return addTask(inner_path) @Pooled(10) def pooledNeedFile(inner_path): return needFile(inner_path) threads = [] for i in range(100): threads.append(pooledNeedFile(i)) s = time.time() gevent.joinall(threads) # Should take 10 second print(time.time() - s)
archiver
rcreate_cmd
import argparse from ..cache import Cache from ..constants import * # NOQA from ..crypto.key import key_argument_names, key_creator from ..helpers import EXIT_WARNING, Location, location_validator, parse_storage_quota from ..logger import create_logger from ..manifest import Manifest from ._common import Highlander, with_other_repository, with_repository logger = create_logger() class RCreateMixIn: @with_repository(create=True, exclusive=True, manifest=False) @with_other_repository(manifest=True, compatibility=(Manifest.Operation.READ,)) def do_rcreate(self, args, repository, *, other_repository=None, other_manifest=None): """Create a new, empty repository""" other_key = other_manifest.key if other_manifest is not None else None path = args.location.canonical_path() logger.info('Initializing repository at "%s"' % path) if other_key is not None: other_key.copy_crypt_key = args.copy_crypt_key try: key = key_creator(repository, args, other_key=other_key) except (EOFError, KeyboardInterrupt): repository.destroy() return EXIT_WARNING manifest = Manifest(key, repository) manifest.key = key manifest.write() repository.commit(compact=False) with Cache(repository, manifest, warn_if_unencrypted=False): pass if key.NAME != "plaintext": logger.warning( "\n" "IMPORTANT: you will need both KEY AND PASSPHRASE to access this repo!\n" "If you used a repokey mode, the key is stored in the repo, but you should back it up separately.\n" 'Use "borg key export" to export the key, optionally in printable format.\n' "Write down the passphrase. Store both at safe place(s).\n" ) return self.exit_code def build_parser_rcreate(self, subparsers, common_parser, mid_common_parser): from ._common import process_epilog rcreate_epilog = process_epilog( """ This command creates a new, empty repository. A repository is a filesystem directory containing the deduplicated data from zero or more archives. Encryption mode TLDR ++++++++++++++++++++ The encryption mode can only be configured when creating a new repository - you can neither configure it on a per-archive basis nor change the mode of an existing repository. This example will likely NOT give optimum performance on your machine (performance tips will come below): :: borg rcreate --encryption repokey-aes-ocb Borg will: 1. Ask you to come up with a passphrase. 2. Create a borg key (which contains some random secrets. See :ref:`key_files`). 3. Derive a "key encryption key" from your passphrase 4. Encrypt and sign the key with the key encryption key 5. Store the encrypted borg key inside the repository directory (in the repo config). This is why it is essential to use a secure passphrase. 6. Encrypt and sign your backups to prevent anyone from reading or forging them unless they have the key and know the passphrase. Make sure to keep a backup of your key **outside** the repository - do not lock yourself out by "leaving your keys inside your car" (see :ref:`borg_key_export`). The encryption is done locally - if you use a remote repository, the remote machine never sees your passphrase, your unencrypted key or your unencrypted files. Chunking and id generation are also based on your key to improve your privacy. 7. Use the key when extracting files to decrypt them and to verify that the contents of the backups have not been accidentally or maliciously altered. Picking a passphrase ++++++++++++++++++++ Make sure you use a good passphrase. Not too short, not too simple. The real encryption / decryption key is encrypted with / locked by your passphrase. If an attacker gets your key, he can't unlock and use it without knowing the passphrase. Be careful with special or non-ascii characters in your passphrase: - Borg processes the passphrase as unicode (and encodes it as utf-8), so it does not have problems dealing with even the strangest characters. - BUT: that does not necessarily apply to your OS / VM / keyboard configuration. So better use a long passphrase made from simple ascii chars than one that includes non-ascii stuff or characters that are hard/impossible to enter on a different keyboard layout. You can change your passphrase for existing repos at any time, it won't affect the encryption/decryption key or other secrets. Choosing an encryption mode +++++++++++++++++++++++++++ Depending on your hardware, hashing and crypto performance may vary widely. The easiest way to find out about what's fastest is to run ``borg benchmark cpu``. `repokey` modes: if you want ease-of-use and "passphrase" security is good enough - the key will be stored in the repository (in ``repo_dir/config``). `keyfile` modes: if you want "passphrase and having-the-key" security - the key will be stored in your home directory (in ``~/.config/borg/keys``). The following table is roughly sorted in order of preference, the better ones are in the upper part of the table, in the lower part is the old and/or unsafe(r) stuff: .. nanorst: inline-fill +-----------------------------------+--------------+----------------+--------------------+ | Mode (K = keyfile or repokey) | ID-Hash | Encryption | Authentication | +-----------------------------------+--------------+----------------+--------------------+ | K-blake2-chacha20-poly1305 | BLAKE2b | CHACHA20 | POLY1305 | +-----------------------------------+--------------+----------------+--------------------+ | K-chacha20-poly1305 | HMAC-SHA-256 | CHACHA20 | POLY1305 | +-----------------------------------+--------------+----------------+--------------------+ | K-blake2-aes-ocb | BLAKE2b | AES256-OCB | AES256-OCB | +-----------------------------------+--------------+----------------+--------------------+ | K-aes-ocb | HMAC-SHA-256 | AES256-OCB | AES256-OCB | +-----------------------------------+--------------+----------------+--------------------+ | authenticated-blake2 | BLAKE2b | none | BLAKE2b | +-----------------------------------+--------------+----------------+--------------------+ | authenticated | HMAC-SHA-256 | none | HMAC-SHA256 | +-----------------------------------+--------------+----------------+--------------------+ | none | SHA-256 | none | none | +-----------------------------------+--------------+----------------+--------------------+ .. nanorst: inline-replace `none` mode uses no encryption and no authentication. You're advised NOT to use this mode as it would expose you to all sorts of issues (DoS, confidentiality, tampering, ...) in case of malicious activity in the repository. If you do **not** want to encrypt the contents of your backups, but still want to detect malicious tampering use an `authenticated` mode. It's like `repokey` minus encryption. To normally work with ``authenticated`` repos, you will need the passphrase, but there is an emergency workaround, see ``BORG_WORKAROUNDS=authenticated_no_key`` docs. Creating a related repository +++++++++++++++++++++++++++++ You can use ``borg rcreate --other-repo ORIG_REPO ...`` to create a related repository that uses the same secret key material as the given other/original repository. By default, only the ID key and chunker secret will be the same (these are important for deduplication) and the AE crypto keys will be newly generated random keys. Optionally, if you use ``--copy-crypt-key`` you can also keep the same crypt_key (used for authenticated encryption). Might be desired e.g. if you want to have less keys to manage. Creating related repositories is useful e.g. if you want to use ``borg transfer`` later. """ ) subparser = subparsers.add_parser( "rcreate", parents=[common_parser], add_help=False, description=self.do_rcreate.__doc__, epilog=rcreate_epilog, formatter_class=argparse.RawDescriptionHelpFormatter, help="create a new, empty repository", ) subparser.set_defaults(func=self.do_rcreate) subparser.add_argument( "--other-repo", metavar="SRC_REPOSITORY", dest="other_location", type=location_validator(other=True), default=Location(other=True), action=Highlander, help="reuse the key material from the other repository", ) subparser.add_argument( "-e", "--encryption", metavar="MODE", dest="encryption", required=True, choices=key_argument_names(), action=Highlander, help="select encryption key mode **(required)**", ) subparser.add_argument( "--append-only", dest="append_only", action="store_true", help="create an append-only mode repository. Note that this only affects " "the low level structure of the repository, and running `delete` " "or `prune` will still be allowed. See :ref:`append_only_mode` in " "Additional Notes for more details.", ) subparser.add_argument( "--storage-quota", metavar="QUOTA", dest="storage_quota", default=None, type=parse_storage_quota, action=Highlander, help="Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota.", ) subparser.add_argument( "--make-parent-dirs", dest="make_parent_dirs", action="store_true", help="create the parent directories of the repository directory, if they are missing.", ) subparser.add_argument( "--copy-crypt-key", dest="copy_crypt_key", action="store_true", help="copy the crypt_key (used for authenticated encryption) from the key of the other repo " "(default: new random key).", )
dialogs
palcoldlg
# -*- coding: utf-8 -*- # # Copyright (C) 2015 by Ihor E. Novikov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import logging import os import urllib2 import wal from cStringIO import StringIO from sk1 import _, config from sk1.pwidgets import PaletteViewer from uc2.formats.skp import skp_loader URL = 'https://sk1project.net' USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' \ '(KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36' PALETTE_LIST = [] LOG = logging.getLogger(__name__) def request_server(url): response = '' # noinspection PyBroadException try: req = urllib2.Request(url, {}, {'User-Agent': USER_AGENT}) response = urllib2.urlopen(req).read() except Exception: LOG.exception('Cannot read server response') return response def init_palette_list(): data = [] # noinspection PyBroadException try: txt = request_server('%s/palettes.php?action=get_list' % URL) code = compile('data=' + txt, '<string>', 'exec') exec code except Exception: LOG.exception('Cannot deserialize server response') if data: PALETTE_LIST.extend(data) class PaletteHandler: def __init__(self): self.palettes = {} def get(self, palette_name): if palette_name not in self.palettes: index = PALETTE_LIST.index(palette_name) + 1 pid = '0' * (4 - len(str(index))) + str(index) url = '%s/palettes.php?action=get_palette&id=%s' % (URL, pid) self.palettes[palette_name] = request_server(request_server(url)) return self.palettes.get(palette_name, '') PALETTES = PaletteHandler() class PaletteCollectionDialog(wal.OkCancelDialog): data = [] palette = None viewer = None stub = None def __init__(self, app, parent): self.app = app size = config.palcol_dlg_size title = _('Palette Collection') wal.OkCancelDialog.__init__(self, parent, title, size, resizable=True, action_button=wal.BUTTON_SAVE, on_load=self.on_load) self.set_minsize(config.palcol_dlg_minsize) def build(self): self.viewer = DataViewer(self) self.pack(self.viewer, expand=True, fill=True) self.viewer.hide() self.stub = DataStub(self) self.pack(self.stub, expand=True, fill=True) self.stub.show() def show(self): self.ok_btn.set_enable(False) return wal.OkCancelDialog.show(self) def on_load(self, *_args): self._timer.Stop() self.stub.init_palette_list() if PALETTE_LIST: self.stub.hide() self.viewer.show() else: msg = _('Cannot connect to server!') msg += '\n' + _('Please check Internet connection') msg += '\n' + _('and access to https://sk1project.net') wal.error_dialog(self, self.app.appdata.app_name, msg) self.on_cancel() def get_result(self): return self.palette class DataStub(wal.HPanel): def __init__(self, parent): self.parent = parent wal.HPanel.__init__(self, parent) int_panel = wal.VPanel(self) self.pack(int_panel, expand=True) int_panel.pack(wal.Label(int_panel, _('Loading data...'))) path = os.path.join(config.resource_dir, 'icons', 'generic') filepath = os.path.join(path, 'progress.gif') self.gif = wal.AnimatedGif(int_panel, filepath) int_panel.pack(self.gif, padding_all=10) if not PALETTE_LIST: self.gif.play() def init_palette_list(self): if not PALETTE_LIST: init_palette_list() self.gif.stop() class DataViewer(wal.HPanel): def __init__(self, parent): self.app = parent.app self.parent = parent wal.HPanel.__init__(self, parent) vp = wal.VPanel(self) vp.set_bg(wal.UI_COLORS['border']) self.pal_list = wal.SimpleList(vp, PALETTE_LIST, on_select=self.change_palette) vp.pack(self.pal_list, expand=True, fill=True, padding_all=1) self.pack(vp, expand=True, fill=True, padding_all=5) self.preview = PreViewer(self) self.pack(self.preview, fill=True, padding_all=5) def show(self, _update=False): wal.HPanel.show(self) self.pal_list.update(PALETTE_LIST) def change_palette(self, name=''): palette = None # noinspection PyBroadException try: pal_txt = PALETTES.get(name) pal_stream = StringIO(pal_txt) pal_stream.seek(0) palette = skp_loader(self.app.appdata, None, pal_stream, False) self.parent.ok_btn.set_enable(True) except Exception: LOG.exception('Cannot switch palette', name) self.parent.ok_btn.set_enable(False) self.parent.palette = palette self.preview.viewer.draw_palette(palette) class PreViewer(wal.VPanel): def __init__(self, parent): self.parent = parent self.app = parent.parent.app wal.VPanel.__init__(self, parent) self.viewer = PaletteViewer(self, self.app.default_cms) self.pack(self.viewer, expand=True, fill=True) def palette_collection_dlg(app, parent): ret = PaletteCollectionDialog(app, parent).show() return ret
validator
validator
# The contents of this file are subject to the Common Public Attribution # License Version 1.0. (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # http://code.reddit.com/LICENSE. The License is based on the Mozilla Public # License Version 1.1, but Sections 14 and 15 have been added to cover use of # software over a computer network and provide for limited attribution for the # Original Developer. In addition, Exhibit A has been modified to be consistent # with Exhibit B. # # Software distributed under the License is distributed on an "AS IS" basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for # the specific language governing rights and limitations under the License. # # The Original Code is reddit. # # The Original Developer is the Initial Developer. The Initial Developer of # the Original Code is reddit Inc. # # All portions of the code written by reddit are Copyright (c) 2006-2015 reddit # Inc. All Rights Reserved. ############################################################################### import cgi import inspect import json import re from collections import OrderedDict from copy import copy from curses.ascii import isprint from datetime import datetime, timedelta from decimal import Decimal from functools import wraps from itertools import chain from pylons import app_globals as g from pylons import request, response from pylons import tmpl_context as c from pylons.controllers.util import abort from pylons.i18n import _ from r2.config import feature from r2.config.extensions import api_type, is_api from r2.lib import captcha, promote, ratelimit, signing, totp, utils from r2.lib.authorize import Address, CreditCard from r2.lib.db import tdb_cassandra from r2.lib.db.operators import asc, desc from r2.lib.errors import (RedditError, UserRequiredException, VerifiedUserRequiredException, errors) from r2.lib.filters import (_force_unicode, _force_utf8, markdown_souptest, unkeep_space, websafe) from r2.lib.jsonresponse import JQueryResponse, JsonResponse from r2.lib.permissions import ModeratorPermissionSet from r2.lib.require import RequirementException, require, require_split from r2.lib.souptest import (SoupDetectedCrasherError, SoupError, SoupUnsupportedEntityError) from r2.lib.template_helpers import add_sr from r2.lib.utils import constant_time_compare from r2.models import * from r2.models.promo import Location from r2.models.rules import MAX_RULES_PER_SUBREDDIT def can_view_link_comments(article): return (article.subreddit_slow.can_view(c.user) and article.can_view_promo(c.user)) class Validator(object): notes = None default_param = None def __init__(self, param=None, default=None, post=True, get=True, url=True, get_multiple=False, body=False, docs=None): if param: self.param = param else: self.param = self.default_param self.default = default self.post, self.get, self.url, self.docs = post, get, url, docs self.get_multiple = get and get_multiple self.body = body self.has_errors = False def set_error(self, error, msg_params={}, field=False, code=None): """ Adds the provided error to c.errors and flags that it is come from the validator's param """ if field is False: field = self.param c.errors.add(error, msg_params=msg_params, field=field, code=code) self.has_errors = True def param_docs(self): param_info = {} for param in filter(None, tup(self.param)): param_info[param] = None return param_info def __call__(self, url): self.has_errors = False a = [] if self.param: for p in utils.tup(self.param): # cgi.FieldStorage is falsy even if it has a filled value # property. :( post_val = request.POST.get(p) if self.post and (post_val or isinstance(post_val, cgi.FieldStorage)): val = request.POST[p] elif ((self.get_multiple and (self.get_multiple == True or p in self.get_multiple)) and request.GET.getall(p)): val = request.GET.getall(p) elif self.get and request.GET.get(p): val = request.GET[p] elif self.url and url.get(p): val = url[p] elif self.body: val = request.body else: val = self.default a.append(val) try: return self.run(*a) except TypeError, e: if str(e).startswith('run() takes'): # Prepend our class name so we know *which* run() raise TypeError('%s.%s' % (type(self).__name__, str(e))) else: raise def build_arg_list(fn, env): """given a fn and and environment the builds a keyword argument list for fn""" kw = {} argspec = inspect.getargspec(fn) # if there is a **kw argument in the fn definition, # just pass along the environment if argspec[2]: kw = env #else for each entry in the arglist set the value from the environment else: #skip self argnames = argspec[0][1:] for name in argnames: if name in env: kw[name] = env[name] return kw def _make_validated_kw(fn, simple_vals, param_vals, env): for validator in simple_vals: validator(env) kw = build_arg_list(fn, env) for var, validator in param_vals.iteritems(): kw[var] = validator(env) return kw def set_api_docs(fn, simple_vals, param_vals, extra_vals=None): doc = fn._api_doc = getattr(fn, '_api_doc', {}) param_info = doc.get('parameters', {}) notes = doc.get('notes', []) for validator in chain(simple_vals, param_vals.itervalues()): param_docs = validator.param_docs() if validator.docs: param_docs.update(validator.docs) param_info.update(param_docs) if validator.notes: notes.append(validator.notes) if extra_vals: param_info.update(extra_vals) doc['parameters'] = param_info doc['notes'] = notes def _validators_handle_csrf(simple_vals, param_vals): for validator in chain(simple_vals, param_vals.itervalues()): if getattr(validator, 'handles_csrf', False): return True return False def validate(*simple_vals, **param_vals): """Validation decorator that delegates error handling to the controller. Runs the validators specified and calls self.on_validation_error to process each error. This allows controllers to define their own fatal error processing logic. """ def val(fn): @wraps(fn) def newfn(self, *a, **env): try: kw = _make_validated_kw(fn, simple_vals, param_vals, env) except RedditError as err: self.on_validation_error(err) for err in c.errors: self.on_validation_error(c.errors[err]) try: return fn(self, *a, **kw) except RedditError as err: self.on_validation_error(err) set_api_docs(newfn, simple_vals, param_vals) newfn.handles_csrf = _validators_handle_csrf(simple_vals, param_vals) return newfn return val def api_validate(response_type=None, add_api_type_doc=False): """ Factory for making validators for API calls, since API calls come in two flavors: responsive and unresponsive. The machinary associated with both is similar, and the error handling identical, so this function abstracts away the kw validation and creation of a Json-y responder object. """ def wrap(response_function): def _api_validate(*simple_vals, **param_vals): def val(fn): @wraps(fn) def newfn(self, *a, **env): renderstyle = request.params.get("renderstyle") if renderstyle: c.render_style = api_type(renderstyle) elif not c.extension: # if the request URL included an extension, don't # touch the render_style, since it was already set by # set_extension. if no extension was provided, default # to response_type. c.render_style = api_type(response_type) # generate a response object if response_type == "html" and not request.params.get('api_type') == "json": responder = JQueryResponse() else: responder = JsonResponse() response.content_type = responder.content_type try: kw = _make_validated_kw(fn, simple_vals, param_vals, env) return response_function(self, fn, responder, simple_vals, param_vals, *a, **kw) except UserRequiredException: responder.send_failure(errors.USER_REQUIRED) return self.api_wrapper(responder.make_response()) except VerifiedUserRequiredException: responder.send_failure(errors.VERIFIED_USER_REQUIRED) return self.api_wrapper(responder.make_response()) extra_param_vals = {} if add_api_type_doc: extra_param_vals = { "api_type": "the string `json`", } set_api_docs(newfn, simple_vals, param_vals, extra_param_vals) newfn.handles_csrf = _validators_handle_csrf(simple_vals, param_vals) return newfn return val return _api_validate return wrap @api_validate("html") def noresponse(self, self_method, responder, simple_vals, param_vals, *a, **kw): self_method(self, *a, **kw) return self.api_wrapper({}) @api_validate("html") def textresponse(self, self_method, responder, simple_vals, param_vals, *a, **kw): return self_method(self, *a, **kw) @api_validate() def json_validate(self, self_method, responder, simple_vals, param_vals, *a, **kw): if c.extension != 'json': abort(404) val = self_method(self, responder, *a, **kw) if val is None: val = responder.make_response() return self.api_wrapper(val) def _validatedForm(self, self_method, responder, simple_vals, param_vals, *a, **kw): # generate a form object form = responder(request.POST.get('id', "body")) # clear out the status line as a courtesy form.set_text(".status", "") # do the actual work val = self_method(self, form, responder, *a, **kw) # add data to the output on some errors for validator in chain(simple_vals, param_vals.values()): if (isinstance(validator, VCaptcha) and (form.has_errors('captcha', errors.BAD_CAPTCHA) or (form.has_error() and c.user.needs_captcha()))): form.new_captcha() elif (isinstance(validator, (VRatelimit, VThrottledLogin)) and form.has_errors('ratelimit', errors.RATELIMIT)): form.ratelimit(validator.seconds) if val: return val else: return self.api_wrapper(responder.make_response()) @api_validate("html", add_api_type_doc=True) def validatedForm(self, self_method, responder, simple_vals, param_vals, *a, **kw): return _validatedForm(self, self_method, responder, simple_vals, param_vals, *a, **kw) @api_validate("html", add_api_type_doc=True) def validatedMultipartForm(self, self_method, responder, simple_vals, param_vals, *a, **kw): def wrapped_self_method(*a, **kw): val = self_method(*a, **kw) if val: return val else: data = json.dumps(responder.make_response()) response.content_type = "text/html" return ('<html><head><script type="text/javascript">\n' 'parent.$.handleResponse()(%s)\n' '</script></head></html>') % filters.websafe_json(data) return _validatedForm(self, wrapped_self_method, responder, simple_vals, param_vals, *a, **kw) jsonp_callback_rx = re.compile("\\A[\\w$\\.\"'[\\]]+\\Z") def valid_jsonp_callback(callback): return jsonp_callback_rx.match(callback) #### validators #### class nop(Validator): def run(self, x): return x class VLang(Validator): @staticmethod def validate_lang(lang, strict=False): if lang in g.all_languages: return lang else: if not strict: return g.lang else: raise ValueError("invalid language %r" % lang) def run(self, lang): return VLang.validate_lang(lang) def param_docs(self): return { self.param: "a valid IETF language tag (underscore separated)", } class VRequired(Validator): def __init__(self, param, error, *a, **kw): Validator.__init__(self, param, *a, **kw) self._error = error def error(self, e = None): if not e: e = self._error if e: self.set_error(e) def run(self, item): if not item: self.error() else: return item class VThing(Validator): def __init__(self, param, thingclass, redirect = True, *a, **kw): Validator.__init__(self, param, *a, **kw) self.thingclass = thingclass self.redirect = redirect def run(self, thing_id): if thing_id: try: tid = int(thing_id, 36) thing = self.thingclass._byID(tid, True) if thing.__class__ != self.thingclass: raise TypeError("Expected %s, got %s" % (self.thingclass, thing.__class__)) return thing except (NotFound, ValueError): if self.redirect: abort(404, 'page not found') else: return None def param_docs(self): return { self.param: "The base 36 ID of a " + self.thingclass.__name__ } class VLink(VThing): def __init__(self, param, redirect = True, *a, **kw): VThing.__init__(self, param, Link, redirect=redirect, *a, **kw) class VPromoCampaign(VThing): def __init__(self, param, redirect = True, *a, **kw): VThing.__init__(self, param, PromoCampaign, *a, **kw) class VCommentByID(VThing): def __init__(self, param, redirect = True, *a, **kw): VThing.__init__(self, param, Comment, redirect=redirect, *a, **kw) class VAward(VThing): def __init__(self, param, redirect = True, *a, **kw): VThing.__init__(self, param, Award, redirect=redirect, *a, **kw) class VAwardByCodename(Validator): def run(self, codename, required_fullname=None): if not codename: return self.set_error(errors.NO_TEXT) try: a = Award._by_codename(codename) except NotFound: a = None if a and required_fullname and a._fullname != required_fullname: return self.set_error(errors.INVALID_OPTION) else: return a class VTrophy(VThing): def __init__(self, param, redirect = True, *a, **kw): VThing.__init__(self, param, Trophy, redirect=redirect, *a, **kw) class VMessage(Validator): def run(self, message_id): if message_id: try: aid = int(message_id, 36) return Message._byID(aid, True) except (NotFound, ValueError): abort(404, 'page not found') class VCommentID(Validator): def run(self, cid): if cid: try: cid = int(cid, 36) return Comment._byID(cid, True) except (NotFound, ValueError): pass class VMessageID(Validator): def run(self, cid): if cid: try: cid = int(cid, 36) m = Message._byID(cid, True) if not m.can_view_slow(): abort(403, 'forbidden') return m except (NotFound, ValueError): pass class VCount(Validator): def run(self, count): if count is None: count = 0 try: return max(int(count), 0) except ValueError: return 0 def param_docs(self): return { self.param: "a positive integer (default: 0)", } class VLimit(Validator): def __init__(self, param, default=25, max_limit=100, **kw): self.default_limit = default self.max_limit = max_limit Validator.__init__(self, param, **kw) def run(self, limit): default = c.user.pref_numsites if not default or c.render_style in ("compact", api_type("compact")): default = self.default_limit # TODO: ini param? if limit is None: return default try: i = int(limit) except ValueError: return default return min(max(i, 1), self.max_limit) def param_docs(self): return { self.param: "the maximum number of items desired " "(default: %d, maximum: %d)" % (self.default_limit, self.max_limit), } class VCssMeasure(Validator): measure = re.compile(r"\A\s*[\d\.]+\w{0,3}\s*\Z") def run(self, value): return value if value and self.measure.match(value) else '' class VLength(Validator): only_whitespace = re.compile(r"\A\s*\Z", re.UNICODE) def __init__(self, param, max_length, min_length=0, empty_error = errors.NO_TEXT, length_error = errors.TOO_LONG, short_error=errors.TOO_SHORT, **kw): Validator.__init__(self, param, **kw) self.max_length = max_length self.min_length = min_length self.length_error = length_error self.short_error = short_error self.empty_error = empty_error def run(self, text, text2 = ''): text = text or text2 if self.empty_error and (not text or self.only_whitespace.match(text)): self.set_error(self.empty_error, code=400) elif len(text) > self.max_length: self.set_error(self.length_error, {'max_length': self.max_length}, code=400) elif len(text) < self.min_length: self.set_error(self.short_error, {'min_length': self.min_length}, code=400) else: return text def param_docs(self): return { self.param: "a string no longer than %d characters" % self.max_length, } class VUploadLength(VLength): def run(self, upload, text2=''): # upload is expected to be a FieldStorage object if isinstance(upload, cgi.FieldStorage): return VLength.run(self, upload.value, text2) else: self.set_error(self.empty_error, code=400) def param_docs(self): kibibytes = self.max_length / 1024 return { self.param: "file upload with maximum size of %d KiB" % kibibytes, } class VPrintable(VLength): def run(self, text, text2 = ''): text = VLength.run(self, text, text2) if text is None: return None try: if all(isprint(str(x)) for x in text): return str(text) except UnicodeEncodeError: pass self.set_error(errors.BAD_STRING, code=400) return None def param_docs(self): return { self.param: "a string up to %d characters long," " consisting of printable characters." % self.max_length, } class VTitle(VLength): def __init__(self, param, max_length = 300, **kw): VLength.__init__(self, param, max_length, **kw) def param_docs(self): return { self.param: "title of the submission. " "up to %d characters long" % self.max_length, } class VMarkdown(Validator): def __init__(self, param, renderer='reddit'): Validator.__init__(self, param) self.renderer = renderer def run(self, text, text2=''): text = text or text2 try: markdown_souptest(text, renderer=self.renderer) return text except SoupError as e: # Could happen if someone does `&#00;`. It's not a security issue, # it's just unacceptable. # TODO: give a better indication to the user of what happened if isinstance(e, SoupUnsupportedEntityError): abort(400) return import sys user = "???" if c.user_is_loggedin: user = c.user.name # work around CRBUG-464270 if isinstance(e, SoupDetectedCrasherError): # We want a general idea of how often this is triggered, and # by what g.log.warning("CHROME HAX by %s: %s" % (user, text)) abort(400) return g.log.error("HAX by %s: %s" % (user, text)) s = sys.exc_info() # reraise the original error with the original stack trace raise s[1], None, s[2] def param_docs(self): return { tup(self.param)[0]: "raw markdown text", } class VMarkdownLength(VMarkdown): def __init__(self, param, renderer='reddit', max_length=10000, empty_error=errors.NO_TEXT, length_error=errors.TOO_LONG): VMarkdown.__init__(self, param, renderer) self.max_length = max_length self.empty_error = empty_error self.length_error = length_error def run(self, text, text2=''): text = text or text2 text = VLength(self.param, self.max_length, empty_error=self.empty_error, length_error=self.length_error).run(text) if text: return VMarkdown.run(self, text) else: return '' class VSavedCategory(Validator): savedcategory_rx = re.compile(r"\A[a-z0-9 _]{1,20}\Z") def run(self, name): if not name: return name = name.lower() valid = self.savedcategory_rx.match(name) if not valid: self.set_error('BAD_SAVE_CATEGORY') return return name def param_docs(self): return { self.param: "a category name", } class VSubredditName(VRequired): def __init__(self, item, allow_language_srs=False, *a, **kw): VRequired.__init__(self, item, errors.BAD_SR_NAME, *a, **kw) self.allow_language_srs = allow_language_srs def run(self, name): if name: name = sr_path_rx.sub('\g<name>', name.strip()) valid_name = Subreddit.is_valid_name( name, allow_language_srs=self.allow_language_srs) if not valid_name: self.set_error(self._error, code=400) return return str(name) def param_docs(self): return { self.param: "subreddit name", } class VAvailableSubredditName(VSubredditName): def run(self, name): name = VSubredditName.run(self, name) if name: try: a = Subreddit._by_name(name) return self.error(errors.SUBREDDIT_EXISTS) except NotFound: return name class VSRByName(Validator): def __init__(self, sr_name, required=True, return_srname=False): self.required = required self.return_srname = return_srname Validator.__init__(self, sr_name) def run(self, sr_name): if not sr_name: if self.required: self.set_error(errors.BAD_SR_NAME, code=400) else: sr_name = sr_path_rx.sub('\g<name>', sr_name.strip()) try: sr = Subreddit._by_name(sr_name) if self.return_srname: return sr.name else: return sr except NotFound: self.set_error(errors.SUBREDDIT_NOEXIST, code=400) def param_docs(self): return { self.param: "subreddit name", } class VSRByNames(Validator): """Returns a dict mapping subreddit names to subreddit objects. sr_names_csv - a comma delimited string of subreddit names required - if true (default) an empty subreddit name list is an error """ def __init__(self, sr_names_csv, required=True): self.required = required Validator.__init__(self, sr_names_csv) def run(self, sr_names_csv): if sr_names_csv: sr_names = [sr_path_rx.sub('\g<name>', s.strip()) for s in sr_names_csv.split(',')] return Subreddit._by_name(sr_names) elif self.required: self.set_error(errors.BAD_SR_NAME, code=400) return {} def param_docs(self): return { self.param: "comma-delimited list of subreddit names", } class VSubredditTitle(Validator): def run(self, title): if not title: self.set_error(errors.NO_TITLE) elif len(title) > 100: self.set_error(errors.TITLE_TOO_LONG) else: return title class VSubredditDesc(Validator): def run(self, description): if description and len(description) > 500: self.set_error(errors.DESC_TOO_LONG) return unkeep_space(description or '') class VAvailableSubredditRuleName(Validator): def __init__(self, short_name, updating=False): Validator.__init__(self, short_name) self.updating = updating def run(self, short_name): short_name = VLength( self.param, max_length=50, min_length=1, ).run(short_name.strip()) if not short_name: return None if SubredditRules.get_rule(c.site, short_name): self.set_error(errors.SR_RULE_EXISTS) elif not self.updating: number_rules = len(SubredditRules.get_rules(c.site)) if number_rules >= MAX_RULES_PER_SUBREDDIT: self.set_error(errors.SR_RULE_TOO_MANY) return None return short_name class VSubredditRule(Validator): def run(self, short_name): short_name = VLength( self.param, max_length=50, min_length=1, ).run(short_name.strip()) if not short_name: self.set_error(errors.SR_RULE_DOESNT_EXIST) return None rule = SubredditRules.get_rule(c.site, short_name) if not rule: self.set_error(errors.SR_RULE_DOESNT_EXIST) else: return rule class VAccountByName(VRequired): def __init__(self, param, error = errors.USER_DOESNT_EXIST, *a, **kw): VRequired.__init__(self, param, error, *a, **kw) def run(self, name): if name: try: return Account._by_name(name) except NotFound: pass return self.error() def param_docs(self): return {self.param: "A valid, existing reddit username"} class VFriendOfMine(VAccountByName): def run(self, name): # Must be logged in VUser().run() maybe_friend = VAccountByName.run(self, name) if maybe_friend: friend_rel = Account.get_friend(c.user, maybe_friend) if friend_rel: return friend_rel else: self.error(errors.NOT_FRIEND) return None def fullname_regex(thing_cls = None, multiple = False): pattern = "[%s%s]" % (Relation._type_prefix, Thing._type_prefix) if thing_cls: pattern += utils.to36(thing_cls._type_id) else: pattern += r"[0-9a-z]+" pattern += r"_[0-9a-z]+" if multiple: pattern = r"(%s *,? *)+" % pattern return re.compile(r"\A" + pattern + r"\Z") class VByName(Validator): # Lookup tdb_sql.Thing or tdb_cassandra.Thing objects by fullname. splitter = re.compile('[ ,]+') def __init__(self, param, thing_cls=None, multiple=False, limit=None, error=errors.NO_THING_ID, ignore_missing=False, backend='sql', **kw): # Limit param only applies when multiple is True if not multiple and limit is not None: raise TypeError('multiple must be True when limit is set') self.thing_cls = thing_cls self.re = fullname_regex(thing_cls) self.multiple = multiple self.limit = limit self._error = error self.ignore_missing = ignore_missing self.backend = backend Validator.__init__(self, param, **kw) def run(self, items): if self.backend == 'cassandra': # tdb_cassandra.Thing objects can't use the regex if items and self.multiple: items = [item for item in self.splitter.split(items)] if self.limit and len(items) > self.limit: return self.set_error(errors.TOO_MANY_THING_IDS) if items: try: return tdb_cassandra.Thing._by_fullname( items, ignore_missing=self.ignore_missing, return_dict=False, ) except tdb_cassandra.NotFound: pass else: if items and self.multiple: items = [item for item in self.splitter.split(items) if item and self.re.match(item)] if self.limit and len(items) > self.limit: return self.set_error(errors.TOO_MANY_THING_IDS) if items and (self.multiple or self.re.match(items)): try: return Thing._by_fullname( items, return_dict=False, ignore_missing=self.ignore_missing, data=True, ) except NotFound: pass return self.set_error(self._error) def param_docs(self): thingtype = (self.thing_cls or Thing).__name__.lower() if self.multiple: return { self.param: ("A comma-separated list of %s [fullnames]" "(#fullnames)" % thingtype) } else: return { self.param: "[fullname](#fullnames) of a %s" % thingtype, } class VByNameIfAuthor(VByName): def run(self, fullname): thing = VByName.run(self, fullname) if thing: if c.user_is_loggedin and thing.author_id == c.user._id: return thing return self.set_error(errors.NOT_AUTHOR) def param_docs(self): return { self.param: "[fullname](#fullnames) of a thing created by the user", } class VCaptcha(Validator): default_param = ('iden', 'captcha') def run(self, iden, solution): if c.user.needs_captcha(): valid_captcha = captcha.valid_solution(iden, solution) if not valid_captcha: self.set_error(errors.BAD_CAPTCHA) g.stats.action_event_count("captcha", valid_captcha) def param_docs(self): return { self.param[0]: "the identifier of the CAPTCHA challenge", self.param[1]: "the user's response to the CAPTCHA challenge", } class VUser(Validator): def run(self): if not c.user_is_loggedin: raise UserRequiredException class VNotInTimeout(Validator): def run(self, target_fullname=None, fatal=True, action_name=None, details_text="", target=None, subreddit=None): if c.user_is_loggedin and c.user.in_timeout: g.events.timeout_forbidden_event( action_name, details_text=details_text, target=target, target_fullname=target_fullname, subreddit=subreddit, request=request, context=c, ) if fatal: request.environ['REDDIT_ERROR_NAME'] = 'IN_TIMEOUT' abort(403, errors.IN_TIMEOUT) return False class VVerifyPassword(Validator): def __init__(self, param, fatal=True, *a, **kw): Validator.__init__(self, param, *a, **kw) self.fatal = fatal def run(self, password): VUser().run() if not valid_password(c.user, password): if self.fatal: abort(403) self.set_error(errors.WRONG_PASSWORD) return None # bcrypt wants a bytestring return _force_utf8(password) def param_docs(self): return { self.param: "the current user's password", } class VModhash(Validator): handles_csrf = True default_param = 'uh' def __init__(self, param=None, fatal=True, *a, **kw): Validator.__init__(self, param, *a, **kw) self.fatal = fatal def run(self, modhash): # OAuth authenticated requests do not require CSRF protection. if c.oauth_user: return VUser().run() if modhash is None: modhash = request.headers.get('X-Modhash') hook = hooks.get_hook("modhash.validate") result = hook.call_until_return(modhash=modhash) # if no plugins validate the hash, just check if it's the user name if result is None: result = (modhash == c.user.name) if not result: g.stats.simple_event("event.modhash.invalid") if self.fatal: abort(403) self.set_error('INVALID_MODHASH') def param_docs(self): return { '%s / X-Modhash header' % self.param: 'a [modhash](#modhashes)', } class VModhashIfLoggedIn(Validator): handles_csrf = True default_param = 'uh' def __init__(self, param=None, fatal=True, *a, **kw): Validator.__init__(self, param, *a, **kw) self.fatal = fatal def run(self, modhash): if c.user_is_loggedin: VModhash(fatal=self.fatal).run(modhash) def param_docs(self): return { '%s / X-Modhash header' % self.param: 'a [modhash](#modhashes)', } class VAdmin(Validator): def run(self): if not c.user_is_admin: abort(404, "page not found") def make_or_admin_secret_cls(base_cls): class VOrAdminSecret(base_cls): handles_csrf = True def run(self, secret=None): '''If validation succeeds, return True if the secret was used, False otherwise''' if secret and constant_time_compare(secret, g.secrets["ADMINSECRET"]): return True super(VOrAdminSecret, self).run() if request.method.upper() != "GET": VModhash(fatal=True).run(request.POST.get("uh")) return False return VOrAdminSecret VAdminOrAdminSecret = make_or_admin_secret_cls(VAdmin) class VVerifiedUser(VUser): def run(self): VUser.run(self) if not c.user.email_verified: raise VerifiedUserRequiredException class VGold(VUser): notes = "*Requires a subscription to [reddit gold](/gold/about)*" def run(self): VUser.run(self) if not c.user.gold: abort(403, 'forbidden') class VSponsorAdmin(VVerifiedUser): """ Validator which checks c.user_is_sponsor """ def user_test(self, thing): return (thing.author_id == c.user._id) def run(self, link_id = None): VVerifiedUser.run(self) if c.user_is_sponsor: return abort(403, 'forbidden') VSponsorAdminOrAdminSecret = make_or_admin_secret_cls(VSponsorAdmin) class VSponsor(VUser): """ Not intended to be used as a check for c.user_is_sponsor, but rather is the user allowed to use the sponsored link system. If a link or campaign is passed in, it also checks whether the user is allowed to edit that particular sponsored link. """ def user_test(self, thing): return (thing.author_id == c.user._id) def run(self, link_id=None, campaign_id=None): assert not (link_id and campaign_id), 'Pass link or campaign, not both' VUser.run(self) if c.user_is_sponsor: return elif campaign_id: pc = None try: if '_' in campaign_id: pc = PromoCampaign._by_fullname(campaign_id, data=True) else: pc = PromoCampaign._byID36(campaign_id, data=True) except (NotFound, ValueError): pass if pc: link_id = pc.link_id if link_id: try: if '_' in link_id: t = Link._by_fullname(link_id, True) else: aid = int(link_id, 36) t = Link._byID(aid, True) if self.user_test(t): return except (NotFound, ValueError): pass abort(403, 'forbidden') class VVerifiedSponsor(VSponsor): def run(self, *args, **kwargs): VVerifiedUser().run() return super(VVerifiedSponsor, self).run(*args, **kwargs) class VEmployee(VVerifiedUser): """Validate that user is an employee.""" def run(self): if not c.user.employee: abort(403, 'forbidden') VVerifiedUser.run(self) class VSrModerator(Validator): def __init__(self, fatal=True, perms=(), *a, **kw): # If True, abort rather than setting an error self.fatal = fatal self.perms = utils.tup(perms) super(VSrModerator, self).__init__(*a, **kw) def run(self): if not (c.user_is_loggedin and c.site.is_moderator_with_perms(c.user, *self.perms) or c.user_is_admin): if self.fatal: abort(403, "forbidden") return self.set_error('MOD_REQUIRED', code=403) class VCanDistinguish(VByName): def run(self, thing_name, how): if c.user_is_loggedin: can_distinguish = False item = VByName.run(self, thing_name) if not item: abort(404) if item.author_id == c.user._id: if isinstance(item, Message) and c.user.employee: return True subreddit = item.subreddit_slow if (how in ("yes", "no") and subreddit.can_distinguish(c.user)): can_distinguish = True elif (how in ("special", "no") and c.user_special_distinguish): can_distinguish = True elif (how in ("admin", "no") and c.user.employee): can_distinguish = True if can_distinguish: # Don't allow distinguishing for users in timeout VNotInTimeout().run(target=item, subreddit=subreddit) return can_distinguish abort(403,'forbidden') def param_docs(self): return {} class VSrCanAlter(VByName): def run(self, thing_name): if c.user_is_admin: return True elif c.user_is_loggedin: can_alter = False subreddit = None item = VByName.run(self, thing_name) if item.author_id == c.user._id: can_alter = True elif item.promoted and c.user_is_sponsor: can_alter = True else: # will throw a legitimate 500 if this isn't a link or # comment, because this should only be used on links and # comments subreddit = item.subreddit_slow if subreddit.can_distinguish(c.user): can_alter = True if can_alter: # Don't allow mod actions for users who are in timeout VNotInTimeout().run(target=item, subreddit=subreddit) return can_alter abort(403,'forbidden') class VSrCanBan(VByName): def run(self, thing_name): if c.user_is_admin: return True elif c.user_is_loggedin: item = VByName.run(self, thing_name) if isinstance(item, (Link, Comment)): sr = item.subreddit_slow if sr.is_moderator_with_perms(c.user, 'posts'): return True elif isinstance(item, Message): sr = item.subreddit_slow if sr and sr.is_moderator_with_perms(c.user, 'mail'): return True abort(403,'forbidden') class VSrSpecial(VByName): def run(self, thing_name): if c.user_is_admin: return True elif c.user_is_loggedin: item = VByName.run(self, thing_name) # will throw a legitimate 500 if this isn't a link or # comment, because this should only be used on links and # comments subreddit = item.subreddit_slow if subreddit.is_special(c.user): return True abort(403,'forbidden') class VSubmitParent(VByName): def run(self, fullname, fullname2): # for backwards compatibility (with iphone app) fullname = fullname or fullname2 parent = VByName.run(self, fullname) if fullname else None if not parent: # for backwards compatibility (normally 404) abort(403, "forbidden") if not isinstance(parent, (Comment, Link, Message)): # for backwards compatibility (normally 400) abort(403, "forbidden") if not c.user_is_loggedin: # in practice this is handled by VUser abort(403, "forbidden") if parent.author_id in c.user.enemies: self.set_error(errors.USER_BLOCKED) if isinstance(parent, Message): return parent elif isinstance(parent, Link): sr = parent.subreddit_slow if parent.is_archived(sr): self.set_error(errors.TOO_OLD) elif parent.locked and not sr.can_distinguish(c.user): self.set_error(errors.THREAD_LOCKED) if self.has_errors or parent.can_comment_slow(c.user): return parent elif isinstance(parent, Comment): sr = parent.subreddit_slow if parent._deleted: self.set_error(errors.DELETED_COMMENT) elif parent._spam: # Only author, mod or admin can reply to removed comments can_reply = (c.user_is_loggedin and (parent.author_id == c.user._id or c.user_is_admin or sr.is_moderator(c.user))) if not can_reply: self.set_error(errors.DELETED_COMMENT) link = Link._byID(parent.link_id, data=True) if link.is_archived(sr): self.set_error(errors.TOO_OLD) elif link.locked and not sr.can_distinguish(c.user): self.set_error(errors.THREAD_LOCKED) if self.has_errors or link.can_comment_slow(c.user): return parent abort(403, "forbidden") def param_docs(self): return { self.param[0]: "[fullname](#fullnames) of parent thing", } class VSubmitSR(Validator): def __init__(self, srname_param, linktype_param=None, promotion=False): self.require_linktype = False self.promotion = promotion if linktype_param: self.require_linktype = True Validator.__init__(self, (srname_param, linktype_param)) else: Validator.__init__(self, srname_param) def run(self, sr_name, link_type = None): if not sr_name: self.set_error(errors.SUBREDDIT_REQUIRED) return None try: sr_name = sr_path_rx.sub('\g<name>', str(sr_name).strip()) sr = Subreddit._by_name(sr_name) except (NotFound, AttributeError, UnicodeEncodeError): self.set_error(errors.SUBREDDIT_NOEXIST) return if not c.user_is_loggedin or not sr.can_submit(c.user, self.promotion): self.set_error(errors.SUBREDDIT_NOTALLOWED) return if not sr.allow_ads and self.promotion: self.set_error(errors.SUBREDDIT_DISABLED_ADS) return if self.require_linktype: if link_type not in ('link', 'self'): self.set_error(errors.INVALID_OPTION) return elif link_type == "link" and not sr.can_submit_link(c.user): self.set_error(errors.NO_LINKS) return elif link_type == "self" and not sr.can_submit_text(c.user): self.set_error(errors.NO_SELFS) return return sr def param_docs(self): return { self.param[0]: "name of a subreddit", } class VSubscribeSR(VByName): def __init__(self, srid_param, srname_param): VByName.__init__(self, (srid_param, srname_param)) def run(self, sr_id, sr_name): if sr_id: return VByName.run(self, sr_id) elif not sr_name: return try: sr = Subreddit._by_name(str(sr_name).strip()) except (NotFound, AttributeError, UnicodeEncodeError): self.set_error(errors.SUBREDDIT_NOEXIST) return return sr def param_docs(self): return { self.param[0]: "the name of a subreddit", } RE_GTM_ID = re.compile(r"^GTM-[A-Z0-9]+$") class VGTMContainerId(Validator): def run(self, value): if not value: return g.googletagmanager if RE_GTM_ID.match(value): return value else: abort(404) class VCollection(Validator): def run(self, name): collection = Collection.by_name(name) if collection: return collection self.set_error(errors.COLLECTION_NOEXIST) class VPromoTarget(Validator): default_param = ("targeting", "sr", "collection") def run(self, targeting, sr_name, collection_name): if targeting == "collection" and collection_name == "none": return Target(Frontpage.name) elif targeting == "none": return Target(Frontpage.name) elif targeting == "collection": collection = VCollection("collection").run(collection_name) if collection: return Target(collection) else: # VCollection added errors so no need to do anything return elif targeting == "one": sr = VSubmitSR("sr", promotion=True).run(sr_name) if sr: return Target(sr.name) else: # VSubmitSR added errors so no need to do anything return else: self.set_error(errors.INVALID_TARGET, field="targeting") class VOSVersion(Validator): def __init__(self, param, os, *a, **kw): Validator.__init__(self, param, *a, **kw) self.os = os def assign_error(self): self.set_error(errors.INVALID_OS_VERSION, field="os_version") def run(self, version_range): if not version_range: return # check that string conforms to `min,max` format try: min, max = version_range.split(',') except ValueError: self.assign_error() return # check for type errors # (max can be empty string, otherwise both float) type_errors = False if max == '': # check that min is a float try: min = float(min) except ValueError: type_errors = True else: # check that min and max are both floats try: min, max = float(min), float(max) # ensure than min is less-or-equal-to max if min > max: type_errors = True except ValueError: type_errors = True if type_errors == True: self.assign_error() return for endpoint in (min, max): if endpoint != '': # check that the version is in the global config if endpoint not in getattr(g, '%s_versions' % self.os): self.assign_error() return return [str(min), str(max)] MIN_PASSWORD_LENGTH = 6 class VPassword(Validator): def run(self, password): if not (password and len(password) >= MIN_PASSWORD_LENGTH): self.set_error(errors.SHORT_PASSWORD, {"chars": MIN_PASSWORD_LENGTH}) self.set_error(errors.BAD_PASSWORD) else: return password.encode('utf8') def param_docs(self): return { self.param[0]: "the password" } class VPasswordChange(VPassword): def run(self, password, verify): base = super(VPasswordChange, self).run(password) if self.has_errors: return base if (verify != password): self.set_error(errors.BAD_PASSWORD_MATCH) else: return base def param_docs(self): return { self.param[0]: "the new password", self.param[1]: "the password again (for verification)", } MIN_USERNAME_LENGTH = 3 MAX_USERNAME_LENGTH = 20 user_rx = re.compile(r"\A[\w-]+\Z", re.UNICODE) def chkuser(x): if x is None: return None try: if any(ch.isspace() for ch in x): return None return str(x) if user_rx.match(x) else None except TypeError: return None except UnicodeEncodeError: return None class VUname(VRequired): def __init__(self, item, *a, **kw): VRequired.__init__(self, item, errors.BAD_USERNAME, *a, **kw) def run(self, user_name): length = 0 if not user_name else len(user_name) if (length < MIN_USERNAME_LENGTH or length > MAX_USERNAME_LENGTH): msg_params = { 'min': MIN_USERNAME_LENGTH, 'max': MAX_USERNAME_LENGTH, } self.set_error(errors.USERNAME_TOO_SHORT, msg_params=msg_params) self.set_error(errors.BAD_USERNAME) return user_name = chkuser(user_name) if not user_name: self.set_error(errors.USERNAME_INVALID_CHARACTERS) self.set_error(errors.BAD_USERNAME) return else: try: a = Account._by_name(user_name, True) if a._deleted: return self.set_error(errors.USERNAME_TAKEN_DEL) else: return self.set_error(errors.USERNAME_TAKEN) except NotFound: return user_name def param_docs(self): return { self.param[0]: "a valid, unused, username", } class VLoggedOut(Validator): def run(self): if c.user_is_loggedin: self.set_error(errors.LOGGED_IN) class AuthenticationFailed(Exception): pass class LoginRatelimit(object): def __init__(self, category, key): self.category = category self.key = key def __str__(self): return "login-%s-%s" % (self.category, self.key) def __hash__(self): return hash(str(self)) class VThrottledLogin(VRequired): def __init__(self, params): VRequired.__init__(self, params, error=errors.WRONG_PASSWORD) self.vlength = VLength("user", max_length=100) self.seconds = None def get_ratelimits(self, account): is_previously_seen_ip = request.ip in [ j for i in IPsByAccount.get(account._id, column_count=1000) for j in i.itervalues() ] # We want to maintain different rate-limit buckets depending on whether # we have seen the IP logging in before. If someone is trying to brute # force an account from an unfamiliar location, we will rate limit # *all* requests from unfamiliar locations that try to access the # account, while still maintaining a separate rate-limit for IP # addresses we have seen use the account before. # # Finally, we also rate limit IPs themselves that appear to be trying # to log into accounts they have never logged into before. This goes # into a separately maintained bucket. if is_previously_seen_ip: ratelimits = { LoginRatelimit("familiar", account._id): g.RL_LOGIN_MAX_REQS, } else: ratelimits = { LoginRatelimit("unfamiliar", account._id): g.RL_LOGIN_MAX_REQS, LoginRatelimit("ip", request.ip): g.RL_LOGIN_IP_MAX_REQS, } hooks.get_hook("login.ratelimits").call( ratelimits=ratelimits, familiar=is_previously_seen_ip, ) return ratelimits def run(self, username, password): ratelimits = {} try: if username: username = username.strip() username = self.vlength.run(username) username = chkuser(username) if not username: raise AuthenticationFailed try: account = Account._by_name(username) except NotFound: raise AuthenticationFailed hooks.get_hook("account.spotcheck").call(account=account) if account._banned: raise AuthenticationFailed # if already logged in, you're exempt from your own ratelimit # (e.g. to allow account deletion regardless of DoS) ratelimit_exempt = (account == c.user) if not ratelimit_exempt: time_slice = ratelimit.get_timeslice(g.RL_RESET_SECONDS) ratelimits = self.get_ratelimits(account) now = int(time.time()) for rl, max_requests in ratelimits.iteritems(): try: failed_logins = ratelimit.get_usage(str(rl), time_slice) if failed_logins >= max_requests: self.seconds = time_slice.end - now period_end = datetime.utcfromtimestamp( time_slice.end).replace(tzinfo=pytz.UTC) remaining_text = utils.timeuntil(period_end) self.set_error( errors.RATELIMIT, {'time': remaining_text}, field='ratelimit', code=429) g.stats.event_count('login.throttle', rl.category) return False except ratelimit.RatelimitError as e: g.log.info("ratelimitcache error (login): %s", e) try: str(password) except UnicodeEncodeError: password = password.encode("utf8") if not valid_password(account, password): raise AuthenticationFailed g.stats.event_count('login', 'success') return account except AuthenticationFailed: g.stats.event_count('login', 'failure') if ratelimits: for rl in ratelimits: try: ratelimit.record_usage(str(rl), time_slice) except ratelimit.RatelimitError as e: g.log.info("ratelimitcache error (login): %s", e) self.error() return False def param_docs(self): return { self.param[0]: "a username", self.param[1]: "the user's password", } class VSanitizedUrl(Validator): def run(self, url): return utils.sanitize_url(url) def param_docs(self): return {self.param: "a valid URL"} class VUrl(VRequired): def __init__(self, item, allow_self=True, require_scheme=False, valid_schemes=utils.VALID_SCHEMES, *a, **kw): self.allow_self = allow_self self.require_scheme = require_scheme self.valid_schemes = valid_schemes VRequired.__init__(self, item, errors.NO_URL, *a, **kw) def run(self, url): if not url: return self.error(errors.NO_URL) url = utils.sanitize_url(url, require_scheme=self.require_scheme, valid_schemes=self.valid_schemes) if not url: return self.error(errors.BAD_URL) try: url.encode('utf-8') except UnicodeDecodeError: return self.error(errors.BAD_URL) if url == 'self': if self.allow_self: return url else: self.error(errors.BAD_URL) else: return url def param_docs(self): return {self.param: "a valid URL"} class VRedirectUri(VUrl): def __init__(self, item, valid_schemes=None, *a, **kw): VUrl.__init__(self, item, allow_self=False, require_scheme=True, valid_schemes=valid_schemes, *a, **kw) def param_docs(self): doc = "a valid URI" if self.valid_schemes: doc += " with one of the following schemes: " doc += ", ".join(self.valid_schemes) return {self.param: doc} class VShamedDomain(Validator): def run(self, url): if not url: return is_shamed, domain, reason = is_shamed_domain(url) if is_shamed: self.set_error(errors.DOMAIN_BANNED, dict(domain=domain, reason=reason)) class VExistingUname(VRequired): def __init__(self, item, allow_deleted=False, *a, **kw): self.allow_deleted = allow_deleted VRequired.__init__(self, item, errors.NO_USER, *a, **kw) def run(self, name): if name: name = name.strip() if name and name.startswith('~') and c.user_is_admin: try: user_id = int(name[1:]) return Account._byID(user_id, True) except (NotFound, ValueError): self.error(errors.USER_DOESNT_EXIST) # make sure the name satisfies our user name regexp before # bothering to look it up. name = chkuser(name) if name: try: return Account._by_name(name) except NotFound: if self.allow_deleted: try: return Account._by_name(name, allow_deleted=True) except NotFound: pass self.error(errors.USER_DOESNT_EXIST) else: self.error() def param_docs(self): return { self.param: 'the name of an existing user' } class VMessageRecipient(VExistingUname): def run(self, name): if not name: return self.error() is_subreddit = False if name.startswith('/r/'): name = name[3:] is_subreddit = True elif name.startswith('#'): name = name[1:] is_subreddit = True # A user in timeout should only be able to message us, the admins. if (c.user.in_timeout and not (is_subreddit and '/r/%s' % name == g.admin_message_acct)): abort(403, 'forbidden') if is_subreddit: try: s = Subreddit._by_name(name) if isinstance(s, FakeSubreddit): raise NotFound, "fake subreddit" if s._spam: raise NotFound, "banned subreddit" if s.is_muted(c.user) and not c.user_is_admin: self.set_error(errors.USER_MUTED) return s except NotFound: self.set_error(errors.SUBREDDIT_NOEXIST) else: account = VExistingUname.run(self, name) if account and account._id in c.user.enemies: self.set_error(errors.USER_BLOCKED) else: return account class VUserWithEmail(VExistingUname): def run(self, name): user = VExistingUname.run(self, name) if not user or not hasattr(user, 'email') or not user.email: return self.error(errors.NO_EMAIL_FOR_USER) return user class VBoolean(Validator): def run(self, val): if val is True or val is False: # val is already a bool object, no processing needed return val lv = str(val).lower() if lv == 'off' or lv == '' or lv[0] in ("f", "n"): return False return bool(val) def param_docs(self): return { self.param: 'boolean value', } class VNumber(Validator): def __init__(self, param, min=None, max=None, coerce = True, error=errors.BAD_NUMBER, num_default=None, *a, **kw): self.min = self.cast(min) if min is not None else None self.max = self.cast(max) if max is not None else None self.coerce = coerce self.error = error self.num_default = num_default Validator.__init__(self, param, *a, **kw) def cast(self, val): raise NotImplementedError def _set_error(self): if self.max is None and self.min is None: range = "" elif self.max is None: range = _("%(min)d to any") % dict(min=self.min) elif self.min is None: range = _("any to %(max)d") % dict(max=self.max) else: range = _("%(min)d to %(max)d") % dict(min=self.min, max=self.max) self.set_error(self.error, msg_params=dict(range=range)) def run(self, val): if not val: return self.num_default try: val = self.cast(val) if self.min is not None and val < self.min: if self.coerce: val = self.min else: raise ValueError, "" elif self.max is not None and val > self.max: if self.coerce: val = self.max else: raise ValueError, "" return val except ValueError: self._set_error() class VInt(VNumber): def cast(self, val): return int(val) def param_docs(self): if self.min is not None and self.max is not None: description = "an integer between %d and %d" % (self.min, self.max) elif self.min is not None: description = "an integer greater than %d" % self.min elif self.max is not None: description = "an integer less than %d" % self.max else: description = "an integer" if self.num_default is not None: description += " (default: %d)" % self.num_default return {self.param: description} class VFloat(VNumber): def cast(self, val): return float(val) class VDecimal(VNumber): def cast(self, val): return Decimal(val) class VCssName(Validator): """ returns a name iff it consists of alphanumeric characters and possibly "-", and is below the length limit. """ r_css_name = re.compile(r"\A[a-zA-Z0-9\-]{1,100}\Z") def run(self, name): if name: if self.r_css_name.match(name): return name else: self.set_error(errors.BAD_CSS_NAME) return '' def param_docs(self): return { self.param: "a valid subreddit image name", } class VColor(Validator): """Validate a string as being a 6 digit hex color starting with #""" color = re.compile(r"\A#[a-f0-9]{6}\Z", re.IGNORECASE) def run(self, color): if color: if self.color.match(color): return color.lower() else: self.set_error(errors.BAD_COLOR) return '' def param_docs(self): return { self.param: "a 6-digit rgb hex color, e.g. `#AABBCC`", } class VMenu(Validator): def __init__(self, param, menu_cls, remember = True, **kw): self.nav = menu_cls self.remember = remember param = (menu_cls.name, param) Validator.__init__(self, param, **kw) def run(self, sort, where): if self.remember: pref = "%s_%s" % (where, self.nav.name) user_prefs = copy(c.user.sort_options) if c.user else {} user_pref = user_prefs.get(pref) # check to see if a default param has been set if not sort: sort = user_pref # validate the sort if sort not in self.nav._options: sort = self.nav._default # commit the sort if changed and if this is a POST request if (self.remember and c.user_is_loggedin and sort != user_pref and request.method.upper() == 'POST'): user_prefs[pref] = sort c.user.sort_options = user_prefs user = c.user user._commit() return sort def param_docs(self): return { self.param[0]: 'one of (%s)' % ', '.join("`%s`" % s for s in self.nav._options), } class VRatelimit(Validator): def __init__(self, rate_user=False, rate_ip=False, prefix='rate_', error=errors.RATELIMIT, fatal=False, *a, **kw): self.rate_user = rate_user self.rate_ip = rate_ip self.name = prefix self.cache_prefix = "rl:%s" % self.name self.error = error self.fatal = fatal self.seconds = None Validator.__init__(self, *a, **kw) def run(self): if g.disable_ratelimit: return if c.user_is_loggedin: hook = hooks.get_hook("account.is_ratelimit_exempt") ratelimit_exempt = hook.call_until_return(account=c.user) if ratelimit_exempt: self._record_event(self.name, 'exempted') return to_check = [] if self.rate_user and c.user_is_loggedin: to_check.append('user' + str(c.user._id36)) self._record_event(self.name, 'check_user') if self.rate_ip: to_check.append('ip' + str(request.ip)) self._record_event(self.name, 'check_ip') r = g.ratelimitcache.get_multi(to_check, prefix=self.cache_prefix) if r: expire_time = max(r.values()) time = utils.timeuntil(expire_time) g.log.debug("rate-limiting %s from %s" % (self.name, r.keys())) for key in r.keys(): if key.startswith('user'): self._record_event(self.name, 'user_limit_hit') elif key.startswith('ip'): self._record_event(self.name, 'ip_limit_hit') # when errors have associated field parameters, we'll need # to add that here if self.error == errors.RATELIMIT: from datetime import datetime delta = expire_time - datetime.now(g.tz) self.seconds = delta.total_seconds() if self.seconds < 3: # Don't ratelimit within three seconds return if self.fatal: abort(429) self.set_error(errors.RATELIMIT, {'time': time}, field='ratelimit', code=429) else: if self.fatal: abort(429) self.set_error(self.error) @classmethod def ratelimit(cls, rate_user=False, rate_ip=False, prefix="rate_", seconds=None): name = prefix cache_prefix = "rl:%s" % name if seconds is None: seconds = g.RL_RESET_SECONDS expire_time = datetime.now(g.tz) + timedelta(seconds=seconds) to_set = {} if rate_user and c.user_is_loggedin: to_set['user' + str(c.user._id36)] = expire_time cls._record_event(name, 'set_user_limit') if rate_ip: to_set['ip' + str(request.ip)] = expire_time cls._record_event(name, 'set_ip_limit') g.ratelimitcache.set_multi(to_set, prefix=cache_prefix, time=seconds) @classmethod def _record_event(cls, name, event): g.stats.event_count('VRatelimit.%s' % name, event, sample_rate=0.1) class VRatelimitImproved(Validator): """Enforce ratelimits on a function. This is a newer version of VRatelimit that uses the ratelimit lib. """ class RateLimit(ratelimit.RateLimit): """A RateLimit with defaults specialized for VRatelimitImproved. Arguments: event_action: The type of the action the user took, for logging. event_type: Part of the key in the rate limit cache. limit: The RateLimit.limit value. Allowed hits per batch of seconds. seconds: The RateLimit.seconds value. How may seconds per batch. event_id_fn: Nullary function that derives an id from the current context. """ sample_rate = 0.1 def __init__(self, event_action, event_type, limit, seconds, event_id_fn): ratelimit.RateLimit.__init__(self) self.event_name = 'VRatelimitImproved.' + event_action self.event_type = event_type self.event_id_fn = event_id_fn self.limit = limit self.seconds = seconds @property def key(self): return 'ratelimit-%s-%s' % (self.event_type, self.event_id_fn()) def __init__(self, user_limit=None, ip_limit=None, error=errors.RATELIMIT, *a, **kw): """ At least one of user_limit and ip_limit should be set for this function to have any effect. Arguments: user_limit: RateLimit -- The per-user rate limit. ip_limit: RateLimit -- The per-IP rate limit. error -- the error message to use when the limit is exceeded. """ self.user_limit = user_limit self.ip_limit = ip_limit self.error = error # _validatedForm passes self.seconds to the current form's javascript. self.seconds = None Validator.__init__(self, *a, **kw) def run(self): if g.disable_ratelimit: return if c.user_is_loggedin: hook = hooks.get_hook("account.is_ratelimit_exempt") ratelimit_exempt = hook.call_until_return(account=c.user) if ratelimit_exempt: return if self.user_limit and c.user_is_loggedin: self._check_usage(self.user_limit) if self.ip_limit: self._check_usage(self.ip_limit) def _check_usage(self, rate_limit): """Check ratelimit usage and set an error if necessary.""" if rate_limit.check(): # Not rate limited. return g.log.debug('rate-limiting %s with %s used', rate_limit.key, rate_limit.get_usage()) # When errors have associated field parameters, we'll need # to add that here. if self.error == errors.RATELIMIT: period_end = datetime.utcfromtimestamp( rate_limit.timeslice.end).replace(tzinfo=pytz.UTC) time = utils.timeuntil(period_end) self.set_error(errors.RATELIMIT, {'time': time}, field='ratelimit', code=429) else: self.set_error(self.error) @classmethod def ratelimit(cls, user_limit=None, ip_limit=None): """Record usage of a resource.""" if user_limit and c.user_is_loggedin: user_limit.record_usage() if ip_limit: ip_limit.record_usage() class VShareRatelimit(VRatelimitImproved): USER_LIMIT = VRatelimitImproved.RateLimit( 'share', 'user', limit=g.RL_SHARE_MAX_REQS, seconds=g.RL_RESET_SECONDS, event_id_fn=lambda: c.user._id36) IP_LIMIT = VRatelimitImproved.RateLimit( 'share', 'ip', limit=g.RL_SHARE_MAX_REQS, seconds=g.RL_RESET_SECONDS, event_id_fn=lambda: request.ip) def __init__(self): super(VShareRatelimit, self).__init__( user_limit=self.USER_LIMIT, ip_limit=self.IP_LIMIT) @classmethod def ratelimit(cls): super(VShareRatelimit, cls).ratelimit( user_limit=cls.USER_LIMIT, ip_limit=cls.IP_LIMIT) class VCommentIDs(Validator): def run(self, id_str): if id_str: try: cids = [int(i, 36) for i in id_str.split(',')] return cids except ValueError: abort(400) return [] def param_docs(self): return { self.param: "a comma-delimited list of comment ID36s", } class VOneTimeToken(Validator): def __init__(self, model, param, *args, **kwargs): self.model = model Validator.__init__(self, param, *args, **kwargs) def run(self, key): token = self.model.get_token(key) if token: return token else: self.set_error(errors.EXPIRED) return None class VOneOf(Validator): def __init__(self, param, options = (), *a, **kw): Validator.__init__(self, param, *a, **kw) self.options = options def run(self, val): if self.options and val not in self.options: self.set_error(errors.INVALID_OPTION, code=400) return self.default else: return val def param_docs(self): return { self.param: 'one of (%s)' % ', '.join("`%s`" % s for s in self.options), } class VList(Validator): def __init__(self, param, separator=",", choices=None, error=errors.INVALID_OPTION, *a, **kw): Validator.__init__(self, param, *a, **kw) self.separator = separator self.choices = choices self.error = error def run(self, items): if not items: return None all_values = items.split(self.separator) if self.choices is None: return all_values values = [] for val in all_values: if val in self.choices: values.append(val) else: msg_params = {"choice": val} self.set_error(self.error, msg_params=msg_params, code=400) return values # Not i18n'able, but param_docs are not currently i18n'ed NICE_SEP = {",": "comma"} def param_docs(self): if self.choices: msg = ("A %(separator)s-separated list of items from " "this set:\n\n%(choices)s") choices = "`" + "` \n`".join(self.choices) + "`" else: msg = "A %(separator)s-separated list of items" choices = None sep = self.NICE_SEP.get(self.separator, self.separator) docs = msg % {"separator": sep, "choices": choices} return {self.param: docs} class VFrequencyCap(Validator): def run(self, frequency_capped='false', frequency_cap=None): if frequency_capped == 'true': if frequency_cap and int(frequency_cap) >= g.frequency_cap_min: try: return frequency_cap except (ValueError, TypeError): self.set_error(errors.INVALID_FREQUENCY_CAP, code=400) else: self.set_error( errors.FREQUENCY_CAP_TOO_LOW, {'min': g.frequency_cap_min}, code=400 ) else: return None class VPriority(Validator): def run(self, val): if c.user_is_sponsor: return (PROMOTE_PRIORITIES.get(val, PROMOTE_DEFAULT_PRIORITY(context=c))) elif feature.is_enabled('ads_auction'): return PROMOTE_DEFAULT_PRIORITY(context=c) else: return PROMOTE_PRIORITIES['standard'] class VLocation(Validator): default_param = ("country", "region", "metro") def run(self, country, region, metro): # some browsers are sending "null" rather than omitting the input when # the select is disabled country, region, metro = map(lambda val: None if val == "null" else val, [country, region, metro]) if not (country or region or metro): return None # Sponsors should only be creating fixed-CPM campaigns, which we # cannot calculate region specific inventory for if c.user_is_sponsor and region and not (region and metro): invalid_region = True else: invalid_region = False # Non-sponsors can only create auctions (non-inventory), so they # can target country, country/region, and country/region/metro if not (country and not (region or metro) or (country and region and not metro) or (country and region and metro)): invalid_geotargets = True else: invalid_geotargets = False if (country not in g.locations or region and region not in g.locations[country]['regions'] or metro and metro not in g.locations[country]['regions'][region]['metros']): nonexistent_geotarget = True else: nonexistent_geotarget = False if invalid_region or invalid_geotargets or nonexistent_geotarget: self.set_error(errors.INVALID_LOCATION, code=400, field='location') else: return Location(country, region, metro) class VImageType(Validator): def run(self, img_type): if not img_type in ('png', 'jpg'): return 'png' return img_type def param_docs(self): return { self.param: "one of `png` or `jpg` (default: `png`)", } class ValidEmail(Validator): """Validates a single email. Returns the email on success.""" def run(self, email): # Strip out leading/trailing whitespace, since the inclusion of that is # a common and easily-fixable user error. if email is not None: email = email.strip() if not email: self.set_error(errors.NO_EMAIL) elif not ValidEmails.email_re.match(email): self.set_error(errors.BAD_EMAIL) else: return email class ValidEmails(Validator): """Validates a list of email addresses passed in as a string and delineated by whitespace, ',' or ';'. Also validates quantity of provided emails. Returns a list of valid email addresses on success""" separator = re.compile(r'[^\s,;]+') email_re = re.compile(r'\A[^\s@]+@[^\s@]+\.[^\s@]+\Z') def __init__(self, param, num = 20, **kw): self.num = num Validator.__init__(self, param = param, **kw) def run(self, emails0): emails = set(self.separator.findall(emails0) if emails0 else []) failures = set(e for e in emails if not self.email_re.match(e)) emails = emails - failures # make sure the number of addresses does not exceed the max if self.num > 0 and len(emails) + len(failures) > self.num: # special case for 1: there should be no delineators at all, so # send back original string to the user if self.num == 1: self.set_error(errors.BAD_EMAILS, {'emails': '"%s"' % emails0}) # else report the number expected else: self.set_error(errors.TOO_MANY_EMAILS, {'num': self.num}) # correct number, but invalid formatting elif failures: self.set_error(errors.BAD_EMAILS, {'emails': ', '.join(failures)}) # no emails elif not emails: self.set_error(errors.NO_EMAILS) else: # return single email if one is expected, list otherwise return list(emails)[0] if self.num == 1 else emails class ValidEmailsOrExistingUnames(Validator): """Validates a list of mixed email addresses and usernames passed in as a string, delineated by whitespace, ',' or ';'. Validates total quantity too while we're at it. Returns a tuple of the form (e-mail addresses, user account objects)""" def __init__(self, param, num=20, **kw): self.num = num Validator.__init__(self, param=param, **kw) def run(self, items): # Use ValidEmails separator to break the list up everything = set(ValidEmails.separator.findall(items) if items else []) # Use ValidEmails regex to divide the list into e-mail and other emails = set(e for e in everything if ValidEmails.email_re.match(e)) failures = everything - emails # Run the rest of the validator against the e-mails list ve = ValidEmails(self.param, self.num) if len(emails) > 0: ve.run(", ".join(emails)) # ValidEmails will add to c.errors for us, so do nothing if that fails # Elsewise, on with the users if not ve.has_errors: users = set() # set of accounts validusers = set() # set of usernames to subtract from failures # Now steal from VExistingUname: for uname in failures: check = uname if re.match('/u/', uname): check = check[3:] veu = VExistingUname(check) account = veu.run(check) if account: validusers.add(uname) users.add(account) # We're fine if all our failures turned out to be valid users if len(users) == len(failures): # ValidEmails checked to see if there were too many addresses, # check to see if there's enough left-over space for users remaining = self.num - len(emails) if len(users) > remaining: if self.num == 1: # We only wanted one, and we got it as an e-mail, # so complain. self.set_error(errors.BAD_EMAILS, {"emails": '"%s"' % items}) else: # Too many total self.set_error(errors.TOO_MANY_EMAILS, {"num": self.num}) elif len(users) + len(emails) == 0: self.set_error(errors.NO_EMAILS) else: # It's all good! return (emails, users) else: failures = failures - validusers self.set_error(errors.BAD_EMAILS, {'emails': ', '.join(failures)}) class VCnameDomain(Validator): domain_re = re.compile(r'\A([\w\-_]+\.)+[\w]+\Z') def run(self, domain): if (domain and (not self.domain_re.match(domain) or domain.endswith('.' + g.domain) or domain.endswith('.' + g.media_domain) or len(domain) > 300)): self.set_error(errors.BAD_CNAME) elif domain: try: return str(domain).lower() except UnicodeEncodeError: self.set_error(errors.BAD_CNAME) def param_docs(self): # cnames are dead; don't advertise this. return {} class VDate(Validator): """ Date checker that accepts string inputs. Error conditions: * BAD_DATE on mal-formed date strings (strptime parse failure) """ def __init__(self, param, format="%m/%d/%Y", required=True): self.format = format self.required = required Validator.__init__(self, param) def run(self, datestr): if not datestr and not self.required: return None try: dt = datetime.strptime(datestr, self.format) return dt.replace(tzinfo=g.tz) except (ValueError, TypeError): self.set_error(errors.BAD_DATE) class VDestination(Validator): def __init__(self, param = 'dest', default = "", **kw): Validator.__init__(self, param, default, **kw) def run(self, dest): if not dest: dest = self.default or add_sr("/") ld = dest.lower() if ld.startswith(('/', 'http://', 'https://')): u = UrlParser(dest) if u.is_reddit_url(c.site) and u.is_web_safe_url(): return dest return "/" def param_docs(self): return { self.param: 'destination url (must be same-domain)', } class ValidAddress(Validator): def set_error(self, msg, field): Validator.set_error(self, errors.BAD_ADDRESS, dict(message=msg), field = field) def run(self, firstName, lastName, company, address, city, state, zipCode, country, phoneNumber): if not firstName: self.set_error(_("please provide a first name"), "firstName") elif not lastName: self.set_error(_("please provide a last name"), "lastName") elif not address: self.set_error(_("please provide an address"), "address") elif not city: self.set_error(_("please provide your city"), "city") elif not state: self.set_error(_("please provide your state"), "state") elif not zipCode: self.set_error(_("please provide your zip or post code"), "zip") elif not country: self.set_error(_("please provide your country"), "country") # Make sure values don't exceed max length defined in the authorize.net # xml schema: https://api.authorize.net/xml/v1/schema/AnetApiSchema.xsd max_lengths = [ (firstName, 50, 'firstName'), # (argument, max len, form field name) (lastName, 50, 'lastName'), (company, 50, 'company'), (address, 60, 'address'), (city, 40, 'city'), (state, 40, 'state'), (zipCode, 20, 'zip'), (country, 60, 'country'), (phoneNumber, 255, 'phoneNumber') ] for (arg, max_length, form_field_name) in max_lengths: if arg and len(arg) > max_length: self.set_error(_("max length %d characters" % max_length), form_field_name) if not self.has_errors: return Address(firstName = firstName, lastName = lastName, company = company or "", address = address, city = city, state = state, zip = zipCode, country = country, phoneNumber = phoneNumber or "") class ValidCard(Validator): valid_date = re.compile(r"\d\d\d\d-\d\d") def set_error(self, msg, field): Validator.set_error(self, errors.BAD_CARD, dict(message=msg), field = field) def run(self, cardNumber, expirationDate, cardCode): has_errors = False cardNumber = cardNumber or "" if not (cardNumber.isdigit() and 13 <= len(cardNumber) <= 16): self.set_error(_("credit card numbers should be 13 to 16 digits"), "cardNumber") has_errors = True if not self.valid_date.match(expirationDate or ""): self.set_error(_("dates should be YYYY-MM"), "expirationDate") has_errors = True else: now = datetime.now(g.tz) yyyy, mm = expirationDate.split("-") year = int(yyyy) month = int(mm) if month < 1 or month > 12: self.set_error(_("month must be in the range 01..12"), "expirationDate") has_errors = True elif datetime(year, month, 1) < datetime(now.year, now.month, 1): self.set_error(_("expiration date must be in the future"), "expirationDate") has_errors = True cardCode = cardCode or "" if not (cardCode.isdigit() and 3 <= len(cardCode) <= 4): self.set_error(_("card verification codes should be 3 or 4 digits"), "cardCode") has_errors = True if not has_errors: return CreditCard(cardNumber = cardNumber, expirationDate = expirationDate, cardCode = cardCode) class VTarget(Validator): target_re = re.compile("\A[\w_-]{3,20}\Z") def run(self, name): if name and self.target_re.match(name): return name def param_docs(self): # this is just for htmllite and of no interest to api consumers return {} class VFlairAccount(VRequired): def __init__(self, item, *a, **kw): VRequired.__init__(self, item, errors.BAD_FLAIR_TARGET, *a, **kw) def _lookup(self, name, allow_deleted): try: return Account._by_name(name, allow_deleted=allow_deleted) except NotFound: return None def run(self, name): if not name: return self.error() return ( self._lookup(name, False) or self._lookup(name, True) or self.error()) def param_docs(self): return {self.param: _("a user by name")} class VFlairLink(VRequired): def __init__(self, item, *a, **kw): VRequired.__init__(self, item, errors.BAD_FLAIR_TARGET, *a, **kw) def run(self, name): if not name: return self.error() try: return Link._by_fullname(name, data=True) except NotFound: return self.error() def param_docs(self): return {self.param: _("a [fullname](#fullname) of a link")} class VFlairCss(VCssName): def __init__(self, param, max_css_classes=10, **kw): self.max_css_classes = max_css_classes VCssName.__init__(self, param, **kw) def run(self, css): if not css: return css names = css.split() if len(names) > self.max_css_classes: self.set_error(errors.TOO_MUCH_FLAIR_CSS) return '' for name in names: if not self.r_css_name.match(name): self.set_error(errors.BAD_CSS_NAME) return '' return css class VFlairText(VLength): def __init__(self, param, max_length=64, **kw): VLength.__init__(self, param, max_length, **kw) class VFlairTemplateByID(VRequired): def __init__(self, param, **kw): VRequired.__init__(self, param, None, **kw) def run(self, flair_template_id): try: return FlairTemplateBySubredditIndex.get_template( c.site._id, flair_template_id) except tdb_cassandra.NotFound: return None class VOneTimePassword(Validator): allowed_skew = [-1, 0, 1] # allow a period of skew on either side of now ratelimit = 3 # maximum number of tries per period def __init__(self, param, required): self.required = required Validator.__init__(self, param) @classmethod def validate_otp(cls, secret, password): # is the password a valid format and has it been used? try: key = "otp:used_%s_%d" % (c.user._id36, int(password)) except (TypeError, ValueError): valid_and_unused = False else: # leave this key around for one more time period than the maximum # number of time periods we'll check for valid passwords key_ttl = totp.PERIOD * (len(cls.allowed_skew) + 1) valid_and_unused = g.gencache.add(key, True, time=key_ttl) # check the password (allowing for some clock-skew as 2FA-users # frequently travel at relativistic velocities) if valid_and_unused: for skew in cls.allowed_skew: expected_otp = totp.make_totp(secret, skew=skew) if constant_time_compare(password, expected_otp): return True return False def run(self, password): # does the user have 2FA configured? secret = c.user.otp_secret if not secret: if self.required: self.set_error(errors.NO_OTP_SECRET) return # do they have the otp cookie instead? if c.otp_cached: return # make sure they're not trying this too much if not g.disable_ratelimit: current_password = totp.make_totp(secret) otp_ratelimit = ratelimit.SimpleRateLimit( name="otp_tries_%s_%s" % (c.user._id36, current_password), seconds=600, limit=self.ratelimit, ) if not otp_ratelimit.record_and_check(): self.set_error(errors.RATELIMIT, dict(time="30 seconds")) return # check the password if self.validate_otp(secret, password): return # if we got this far, their password was wrong, invalid or already used self.set_error(errors.WRONG_PASSWORD) class VOAuth2ClientID(VRequired): default_param = "client_id" default_param_doc = _("an app") def __init__(self, param=None, *a, **kw): VRequired.__init__(self, param, errors.OAUTH2_INVALID_CLIENT, *a, **kw) def run(self, client_id): client_id = VRequired.run(self, client_id) if client_id: client = OAuth2Client.get_token(client_id) if client and not client.deleted: return client else: self.error() def param_docs(self): return {self.default_param: self.default_param_doc} class VOAuth2ClientDeveloper(VOAuth2ClientID): default_param_doc = _("an app developed by the user") def run(self, client_id): client = super(VOAuth2ClientDeveloper, self).run(client_id) if not client or not client.has_developer(c.user): return self.error() return client class VOAuth2Scope(VRequired): default_param = "scope" def __init__(self, param=None, *a, **kw): VRequired.__init__(self, param, errors.OAUTH2_INVALID_SCOPE, *a, **kw) def run(self, scope): scope = VRequired.run(self, scope) if scope: parsed_scope = OAuth2Scope(scope) if parsed_scope.is_valid(): return parsed_scope else: self.error() class VOAuth2RefreshToken(Validator): def __init__(self, param, *a, **kw): Validator.__init__(self, param, None, *a, **kw) def run(self, refresh_token_id): if refresh_token_id: try: token = OAuth2RefreshToken._byID(refresh_token_id) except tdb_cassandra.NotFound: self.set_error(errors.OAUTH2_INVALID_REFRESH_TOKEN) return None if not token.check_valid(): self.set_error(errors.OAUTH2_INVALID_REFRESH_TOKEN) return None return token else: return None class VPermissions(Validator): types = dict( moderator=ModeratorPermissionSet, moderator_invite=ModeratorPermissionSet, ) def __init__(self, type_param, permissions_param, *a, **kw): Validator.__init__(self, (type_param, permissions_param), *a, **kw) def run(self, type, permissions): permission_class = self.types.get(type) if not permission_class: self.set_error(errors.INVALID_PERMISSION_TYPE, field=self.param[0]) return (None, None) try: perm_set = permission_class.loads(permissions, validate=True) except ValueError: self.set_error(errors.INVALID_PERMISSIONS, field=self.param[1]) return (None, None) return type, perm_set class VJSON(Validator): def run(self, json_str): if not json_str: return self.set_error('JSON_PARSE_ERROR', code=400) else: try: return json.loads(json_str) except ValueError: return self.set_error('JSON_PARSE_ERROR', code=400) def param_docs(self): return { self.param: "JSON data", } class VValidatedJSON(VJSON): """Apply validators to the values of JSON formatted data.""" class ArrayOf(object): """A JSON array of objects with the specified schema.""" def __init__(self, spec): self.spec = spec def run(self, data): if not isinstance(data, list): raise RedditError('JSON_INVALID', code=400) validated_data = [] for item in data: validated_data.append(self.spec.run(item)) return validated_data def spec_docs(self): spec_lines = [] spec_lines.append('[') if hasattr(self.spec, 'spec_docs'): array_docs = self.spec.spec_docs() else: array_docs = self.spec.param_docs()[self.spec.param] for line in array_docs.split('\n'): spec_lines.append(' ' + line) spec_lines[-1] += ',' spec_lines.append(' ...') spec_lines.append(']') return '\n'.join(spec_lines) class Object(object): """A JSON object with validators for specified fields.""" def __init__(self, spec): self.spec = spec def run(self, data, ignore_missing=False): if not isinstance(data, dict): raise RedditError('JSON_INVALID', code=400) validated_data = {} for key, validator in self.spec.iteritems(): try: validated_data[key] = validator.run(data[key]) except KeyError: if ignore_missing: continue raise RedditError('JSON_MISSING_KEY', code=400, msg_params={'key': key}) return validated_data def spec_docs(self): spec_docs = {} for key, validator in self.spec.iteritems(): if hasattr(validator, 'spec_docs'): spec_docs[key] = validator.spec_docs() elif hasattr(validator, 'param_docs'): spec_docs.update(validator.param_docs()) if validator.docs: spec_docs.update(validator.docs) # generate markdown json schema docs spec_lines = [] spec_lines.append('{') for key in sorted(spec_docs.keys()): key_docs = spec_docs[key] # indent any new lines key_docs = key_docs.replace('\n', '\n ') spec_lines.append(' "%s": %s,' % (key, key_docs)) spec_lines.append('}') return '\n'.join(spec_lines) class PartialObject(Object): def run(self, data): super_ = super(VValidatedJSON.PartialObject, self) return super_.run(data, ignore_missing=True) def __init__(self, param, spec, **kw): VJSON.__init__(self, param, **kw) self.spec = spec def run(self, json_str): data = VJSON.run(self, json_str) if self.has_errors: return # Note: this relies on the fact that all validator errors are dumped # into a global (c.errors) and then checked by @validate. return self.spec.run(data) def docs_model(self): spec_md = self.spec.spec_docs() # indent for code formatting spec_md = '\n'.join( ' ' + line for line in spec_md.split('\n') ) return spec_md def param_docs(self): return { self.param: 'json data:\n\n' + self.docs_model(), } multi_name_rx = re.compile(r"\A[A-Za-z0-9][A-Za-z0-9_]{1,20}\Z") multi_name_chars_rx = re.compile(r"[^A-Za-z0-9_]") class VMultiPath(Validator): """Validates a multireddit path. Returns a path info dictionary. """ def __init__(self, param, kinds=None, required=True, **kw): Validator.__init__(self, param, **kw) self.required = required self.kinds = tup(kinds or ('f', 'm')) @classmethod def normalize(self, path): if path[0] != '/': path = '/' + path path = path.lower().rstrip('/') return path def run(self, path): if not path and not self.required: return None try: require(path) path = self.normalize(path) require(path.startswith('/user/')) prefix, owner, kind, name = require_split(path, 5, sep='/')[1:] require(kind in self.kinds) owner = chkuser(owner) require(owner) except RequirementException: self.set_error('BAD_MULTI_PATH', code=400) return try: require(multi_name_rx.match(name)) except RequirementException: invalid_char = multi_name_chars_rx.search(name) if invalid_char: char = invalid_char.group() if char == ' ': reason = _('no spaces allowed') else: reason = _("invalid character: '%s'") % char elif name[0] == '_': reason = _("can't start with a '_'") elif len(name) < 2: reason = _('that name is too short') elif len(name) > 21: reason = _('that name is too long') else: reason = _("that name isn't going to work") self.set_error('BAD_MULTI_NAME', {'reason': reason}, code=400) return return {'path': path, 'prefix': prefix, 'owner': owner, 'name': name} def param_docs(self): return { self.param: "multireddit url path", } class VMultiByPath(Validator): """Validates a multireddit path. Returns a LabeledMulti. """ def __init__(self, param, require_view=True, require_edit=False, kinds=None): Validator.__init__(self, param) self.require_view = require_view self.require_edit = require_edit self.kinds = tup(kinds or ('f', 'm')) def run(self, path): path = VMultiPath.normalize(path) if not path.startswith('/user/'): return self.set_error('MULTI_NOT_FOUND', code=404) name = path.split('/')[-1] if not multi_name_rx.match(name): return self.set_error('MULTI_NOT_FOUND', code=404) try: multi = LabeledMulti._byID(path) except tdb_cassandra.NotFound: return self.set_error('MULTI_NOT_FOUND', code=404) if not multi or multi.kind not in self.kinds: return self.set_error('MULTI_NOT_FOUND', code=404) if not multi or (self.require_view and not multi.can_view(c.user)): return self.set_error('MULTI_NOT_FOUND', code=404) if self.require_edit and not multi.can_edit(c.user): return self.set_error('MULTI_CANNOT_EDIT', code=403) return multi def param_docs(self): return { self.param: "multireddit url path", } sr_path_rx = re.compile(r"\A(/?r/)?(?P<name>.*?)/?\Z") class VSubredditList(Validator): def __init__(self, param, limit=20, allow_language_srs=True): Validator.__init__(self, param) self.limit = limit self.allow_language_srs = allow_language_srs def run(self, subreddits): if not subreddits: return [] # extract subreddit name if path provided subreddits = [sr_path_rx.sub('\g<name>', sr.strip()) for sr in subreddits.lower().strip().splitlines() if sr] for name in subreddits: valid_name = Subreddit.is_valid_name( name, allow_language_srs=self.allow_language_srs) if not valid_name: return self.set_error(errors.BAD_SR_NAME, code=400) unique_srs = set(subreddits) if subreddits: valid_srs = set(Subreddit._by_name(subreddits).keys()) if unique_srs - valid_srs: return self.set_error(errors.SUBREDDIT_NOEXIST, code=400) if len(unique_srs) > self.limit: return self.set_error( errors.TOO_MANY_SUBREDDITS, {'max': self.limit}, code=400) # return list of subreddit names as entered return subreddits def param_docs(self): return { self.param: 'a list of subreddit names, line break delimited', } class VResultTypes(Validator): """ Validates a list of search result types, provided either as multiple GET parameters or as a comma separated list. Returns a set. """ def __init__(self, param): Validator.__init__(self, param, get_multiple=True) self.default = [] self.options = {'link', 'sr'} def run(self, result_types): if result_types and ',' in result_types[0]: result_types = result_types[0].strip(',').split(',') # invalid values are ignored result_types = set(result_types) & self.options # for backwards compatibility, api and legacy default to link results if is_api(): result_types = result_types or {'link'} elif feature.is_enabled('legacy_search') or c.user.pref_legacy_search: result_types = {'link'} else: result_types = result_types or {'link', 'sr'} return result_types def param_docs(self): return { self.param: ( '(optional) comma-delimited list of result types ' '(`%s`)' % '`, `'.join(self.options) ), } class VSigned(Validator): """Validate if the request is properly signed. Checks the headers (mostly the User-Agent) are signed with :py:function:`~r2.lib.signing.valid_ua_signature` and in the case of POST and PUT ensure that any request.body included is also signed via :py:function:`~r2.lib.signing.valid_body_signature`. In :py:method:`run`, the signatures are combined as needed to generate a final signature that is generally the combination of the two. """ def run(self): signature = signing.valid_ua_signature(request) # only check the request body when there should be one if request.method.upper() in ("POST", "PUT"): signature.update(signing.valid_post_signature(request)) # add a simple event for each error as it appears (independent of # whether we're going to ignore them). for code, field in signature.errors: g.stats.simple_event( "signing.%s.invalid.%s" % (field, code.lower()) ) # persistent skew problems on android suggest something deeper is # wrong in v1. Disable the expiration check for now! if signature.platform == "android" and signature.version == 1: signature.add_ignore(signing.ERRORS.EXPIRED_TOKEN) return signature def need_provider_captcha(): return False
migrations
0158_auto_20220919_1634
# Generated by Django 3.2.15 on 2022-09-19 16:34 import bookwyrm.models.fields import django.utils.timezone from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("bookwyrm", "0157_auto_20220909_2338"), ] operations = [ migrations.AddField( model_name="automod", name="created_date", field=models.DateTimeField( auto_now_add=True, default=django.utils.timezone.now ), preserve_default=False, ), migrations.AddField( model_name="automod", name="remote_id", field=bookwyrm.models.fields.RemoteIdField( max_length=255, null=True, validators=[bookwyrm.models.fields.validate_remote_id], ), ), migrations.AddField( model_name="automod", name="updated_date", field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name="emailblocklist", name="remote_id", field=bookwyrm.models.fields.RemoteIdField( max_length=255, null=True, validators=[bookwyrm.models.fields.validate_remote_id], ), ), migrations.AddField( model_name="emailblocklist", name="updated_date", field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name="ipblocklist", name="remote_id", field=bookwyrm.models.fields.RemoteIdField( max_length=255, null=True, validators=[bookwyrm.models.fields.validate_remote_id], ), ), migrations.AddField( model_name="ipblocklist", name="updated_date", field=models.DateTimeField(auto_now=True), ), ]
lib
dicts
# This file is part of Archivematica. # # Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com> # # Archivematica is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Archivematica is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Archivematica. If not, see <http://www.gnu.org/licenses/>. # @package Archivematica # @subpackage MCPServer # @author Joseph Perry <joseph@artefactual.com> import ast import os import re from main import models config = {} def setup(shared_directory, processing_directory, watch_directory, rejected_directory): config["shared_directory"] = shared_directory config["processing_directory"] = processing_directory config["watch_directory"] = watch_directory config["rejected_directory"] = rejected_directory def replace_string_values(string, **kwargs): """ Replace standard Archivematica variables in a string given data from the database to use to populate them. This function is just a wrapper around ReplacementDict.frommodel().replace(string)[0]. The keyword arguments to this function are identical to the keyword arguments to ReplacementDict.frommodel. """ rd = ReplacementDict.frommodel(**kwargs) return rd.replace(string)[0] class ReplacementDict(dict): @staticmethod def fromstring(s): """ Create a new ReplacementDict given a string representing a serialized Python dict. This is commonly used within the MCPServer, where unit variables are frequently dicts stored in the database. """ return ReplacementDict(ast.literal_eval(s)) @staticmethod def frommodel(type_="file", sip=None, file_=None, expand_path=True): """ Creates a new ReplacementDict option with the standard variables populated based on values taken from the models passed in. SIP and File instances can be passed as arguments, using the sip and file_ keyword arguments respectively. sip accepts both SIP and Transfer objects. By default, path strings returned via this constructor are not absolute, but include the %sharedPath% variable in place of the actual path to the Archivematica shared path. This matches the behaviour previously used by the ReplacementDict construction code in the Unit classes, and is suitable for passing paths to the MCPClient. If the expand_path keyword argument is set to True, then true absolute paths will be returned instead. This is useful when creating ReplacementDicts within MCPClient scripts. If both sip and file_ are passed in, values from both will be included. Since there is some overlap in variable naming, the type_ keyword argument must be used to indicate the context in which the dict is being created. Supported values are 'file', 'sip', and 'transfer'. The default is 'file'. """ # Currently, MCPServer does not use the Django ORM. # In order to make this code accessible to MCPServer, # we need to support passing in UUID strings instead # of models. if isinstance(file_, str): file_ = models.File.objects.get(uuid=file_) if isinstance(sip, str): # sip can be a SIP or Transfer try: sip = models.SIP.objects.get(uuid=sip) except: sip = models.Transfer.objects.get(uuid=sip) shared_path = config["shared_directory"] # We still want to set SIP variables, even if no SIP or Transfer # was passed in, so try to fetch it from the file if file_ and not sip: try: sip = file_.sip except: sip = file_.transfer rd = ReplacementDict() sipdir = None if sip: if isinstance(sip, models.Transfer): relative_location = sip.currentlocation else: relative_location = sip.currentpath if expand_path: sipdir = relative_location.replace("%sharedPath%", shared_path) else: sipdir = relative_location rd["%SIPUUID%"] = sip.uuid sip_name = os.path.basename(sipdir.rstrip("/")).replace("-" + sip.uuid, "") rd["%SIPName%"] = sip_name rd["%currentPath%"] = sipdir rd["%SIPDirectory%"] = sipdir rd["%SIPDirectoryBasename%"] = os.path.basename(os.path.abspath(sipdir)) rd["%SIPLogsDirectory%"] = os.path.join(sipdir, "logs", "") rd["%SIPObjectsDirectory%"] = os.path.join(sipdir, "objects", "") if type_ == "sip": rd["%relativeLocation%"] = relative_location elif type_ == "transfer": rd["%transferDirectory%"] = sipdir rd["%relativeLocation%"] = relative_location if file_: rd["%fileUUID%"] = file_.uuid try: base_location = file_.sip.currentpath except: base_location = file_.transfer.currentlocation if expand_path and sipdir is not None: base_location = base_location.replace("%sharedPath%", shared_path) origin = file_.originallocation.replace( "%transferDirectory%", base_location ) current_location = file_.currentlocation.replace( "%transferDirectory%", base_location ) current_location = current_location.replace("%SIPDirectory%", sipdir) else: origin = file_.originallocation current_location = file_.currentlocation rd["%originalLocation%"] = origin rd["%currentLocation%"] = current_location rd["%fileDirectory%"] = os.path.dirname(current_location) rd["%fileGrpUse%"] = file_.filegrpuse if type_ == "file": rd["%relativeLocation%"] = current_location # These synonyms were originally defined by the Normalize microservice rd["%inputFile%"] = current_location rd["%fileFullName%"] = current_location name, ext = os.path.splitext(current_location) rd["%fileName%"] = os.path.basename(name) rd["%fileExtension%"] = ext[1:] rd["%fileExtensionWithDot%"] = ext rd["%tmpDirectory%"] = os.path.join(config["shared_directory"], "tmp", "") rd["%processingDirectory%"] = config["processing_directory"] rd["%watchDirectoryPath%"] = config["watch_directory"] rd["%rejectedDirectory%"] = config["rejected_directory"] return rd def replace(self, *strings): """ Iterates over a set of strings. Any keys in self found within the string will be replaced with their respective values. Returns an array of strings, regardless of the number of parameters pased in. For example: >>> rd = ReplacementDict({"$foo": "bar"}) >>> rd.replace('The value of the foo variable is: $foo') ['The value of the foo variable is: bar'] IMPORTANT NOTE: Any unicode strings present as dictionary values will be converted into bytestrings. All returned strings will also be bytestrings, regardless of the type of the original strings. Returned strings may or may not be valid Unicode, depending on the contents of data fetched from the database. (%originalLocation%, for instance, may contain arbitrary non-Unicode characters of nonspecific encoding.) Note that, within, Archivematica, the only value that typically contains Unicode characters is "%originalLocation%", and Archivematica does not use this variable in any place where precise fidelity of the original string is required. """ ret = [] for orig in strings: if orig is not None: for key, value in self.items(): orig = orig.replace(key, value) ret.append(orig) return ret def to_gnu_options(self): """ Returns the replacement dict's values as an array of GNU-style long options. This is primarily useful for passing options to FPR commands. For example: >>> rd = ReplacementDict({'%foo%': 'bar'}) >>> rd.to_gnu_options() ['--foo=bar'] """ args = [] for key, value in self.items(): optname = re.sub(r"([A-Z]+)", r"-\1", key[1:-1]).lower() opt = f"--{optname}={value}" args.append(opt) return args class ChoicesDict(ReplacementDict): @staticmethod def fromstring(s): """ See ReplacementDict.fromstring. """ return ChoicesDict(ast.literal_eval(s))
extractor
camdemy
# coding: utf-8 from __future__ import unicode_literals import re from ..compat import compat_urllib_parse_urlencode, compat_urlparse from ..utils import clean_html, parse_duration, str_to_int, unified_strdate from .common import InfoExtractor class CamdemyIE(InfoExtractor): _VALID_URL = r"https?://(?:www\.)?camdemy\.com/media/(?P<id>\d+)" _TESTS = [ { # single file "url": "http://www.camdemy.com/media/5181/", "md5": "5a5562b6a98b37873119102e052e311b", "info_dict": { "id": "5181", "ext": "mp4", "title": "Ch1-1 Introduction, Signals (02-23-2012)", "thumbnail": r"re:^https?://.*\.jpg$", "creator": "ss11spring", "duration": 1591, "upload_date": "20130114", "view_count": int, }, }, { # With non-empty description # webpage returns "No permission or not login" "url": "http://www.camdemy.com/media/13885", "md5": "4576a3bb2581f86c61044822adbd1249", "info_dict": { "id": "13885", "ext": "mp4", "title": "EverCam + Camdemy QuickStart", "thumbnail": r"re:^https?://.*\.jpg$", "description": "md5:2a9f989c2b153a2342acee579c6e7db6", "creator": "evercam", "duration": 318, }, }, { # External source (YouTube) "url": "http://www.camdemy.com/media/14842", "info_dict": { "id": "2vsYQzNIsJo", "ext": "mp4", "title": "Excel 2013 Tutorial - How to add Password Protection", "description": "Excel 2013 Tutorial for Beginners - How to add Password Protection", "upload_date": "20130211", "uploader": "Hun Kim", "uploader_id": "hunkimtutorials", }, "params": { "skip_download": True, }, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) src_from = self._html_search_regex( r"class=['\"]srcFrom['\"][^>]*>Sources?(?:\s+from)?\s*:\s*<a[^>]+(?:href|title)=(['\"])(?P<url>(?:(?!\1).)+)\1", webpage, "external source", default=None, group="url", ) if src_from: return self.url_result(src_from) oembed_obj = self._download_json( "http://www.camdemy.com/oembed/?format=json&url=" + url, video_id ) title = oembed_obj["title"] thumb_url = oembed_obj["thumbnail_url"] video_folder = compat_urlparse.urljoin(thumb_url, "video/") file_list_doc = self._download_xml( compat_urlparse.urljoin(video_folder, "fileList.xml"), video_id, "Downloading filelist XML", ) file_name = file_list_doc.find("./video/item/fileName").text video_url = compat_urlparse.urljoin(video_folder, file_name) # Some URLs return "No permission or not login" in a webpage despite being # freely available via oembed JSON URL (e.g. http://www.camdemy.com/media/13885) upload_date = unified_strdate( self._search_regex( r">published on ([^<]+)<", webpage, "upload date", default=None ) ) view_count = str_to_int( self._search_regex( r'role=["\']viewCnt["\'][^>]*>([\d,.]+) views', webpage, "view count", default=None, ) ) description = self._html_search_meta( "description", webpage, default=None ) or clean_html(oembed_obj.get("description")) return { "id": video_id, "url": video_url, "title": title, "thumbnail": thumb_url, "description": description, "creator": oembed_obj.get("author_name"), "duration": parse_duration(oembed_obj.get("duration")), "upload_date": upload_date, "view_count": view_count, } class CamdemyFolderIE(InfoExtractor): _VALID_URL = r"https?://(?:www\.)?camdemy\.com/folder/(?P<id>\d+)" _TESTS = [ { # links with trailing slash "url": "http://www.camdemy.com/folder/450", "info_dict": { "id": "450", "title": "信號與系統 2012 & 2011 (Signals and Systems)", }, "playlist_mincount": 145, }, { # links without trailing slash # and multi-page "url": "http://www.camdemy.com/folder/853", "info_dict": {"id": "853", "title": "科學計算 - 使用 Matlab"}, "playlist_mincount": 20, }, { # with displayMode parameter. For testing the codes to add parameters "url": "http://www.camdemy.com/folder/853/?displayMode=defaultOrderByOrg", "info_dict": {"id": "853", "title": "科學計算 - 使用 Matlab"}, "playlist_mincount": 20, }, ] def _real_extract(self, url): folder_id = self._match_id(url) # Add displayMode=list so that all links are displayed in a single page parsed_url = list(compat_urlparse.urlparse(url)) query = dict(compat_urlparse.parse_qsl(parsed_url[4])) query.update({"displayMode": "list"}) parsed_url[4] = compat_urllib_parse_urlencode(query) final_url = compat_urlparse.urlunparse(parsed_url) page = self._download_webpage(final_url, folder_id) matches = re.findall(r"href='(/media/\d+/?)'", page) entries = [ self.url_result("http://www.camdemy.com" + media_path) for media_path in matches ] folder_title = self._html_search_meta("keywords", page) return self.playlist_result(entries, folder_id, folder_title)
config
feature
__license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html" __copyright__ = "Copyright (C) 2022 The OctoPrint Project - Released under terms of the AGPLv3 License" from typing import List from octoprint.schema import BaseModel from octoprint.vendor.with_attrs_docs import with_attrs_docs @with_attrs_docs class FeatureConfig(BaseModel): temperatureGraph: bool = True """Whether to enable the temperature graph in the UI or not.""" sdSupport: bool = True """Specifies whether support for SD printing and file management should be enabled.""" keyboardControl: bool = True """Whether to enable the keyboard control feature in the control tab.""" pollWatched: bool = False """Whether to actively poll the watched folder (true) or to rely on the OS's file system notifications instead (false).""" modelSizeDetection: bool = True """Whether to enable model size detection and warning (true) or not (false).""" rememberFileFolder: bool = False """Whether to remember the selected folder on the file manager.""" printStartConfirmation: bool = False """Whether to show a confirmation on print start (true) or not (false)""" printCancelConfirmation: bool = True """Whether to show a confirmation on print cancelling (true) or not (false)""" uploadOverwriteConfirmation: bool = True autoUppercaseBlacklist: List[str] = ["M117", "M118"] """Commands that should never be auto-uppercased when sent to the printer through the Terminal tab.""" g90InfluencesExtruder: bool = False """Whether `G90`/`G91` also influence absolute/relative mode of extruders.""" enforceReallyUniversalFilenames: bool = False """Replace all special characters and spaces with text equivalent to make them universally compatible. Most OS filesystems work fine with unicode characters, but just in case you can revert to the older behaviour by setting this to true."""
backends
amidi
"""Mido amidi backend Very experimental backend using amidi to access the ALSA rawmidi interface. TODO: * use parser instead of from_hex()? * default port name * do sysex messages work? * starting amidi for every message sent is costly """ import os import select import subprocess import threading from ..messages import Message from ._common import InputMethods, OutputMethods, PortMethods """ Dir Device Name IO hw:1,0,0 UM-1 MIDI 1 IO hw:2,0,0 nanoKONTROL2 MIDI 1 IO hw:2,0,0 MPK mini MIDI 1 """ def get_devices(): devices = [] lines = os.popen("amidi -l").read().splitlines() for line in lines[1:]: mode, device, name = line.strip().split(None, 2) devices.append( { "name": name.strip(), "device": device, "is_input": "I" in mode, "is_output": "O" in mode, } ) return devices def _get_device(name, mode): for dev in get_devices(): if name == dev["name"] and dev[mode]: return dev else: raise IOError("unknown port {!r}".format(name)) class Input(PortMethods, InputMethods): def __init__(self, name=None, **kwargs): self.name = name self.closed = False self._proc = None self._poller = select.poll() self._lock = threading.RLock() dev = _get_device(self.name, "is_input") self._proc = subprocess.Popen( ["amidi", "-d", "-p", dev["device"]], stdout=subprocess.PIPE ) self._poller.register(self._proc.stdout, select.POLLIN) def _read_message(self): line = self._proc.stdout.readline().strip().decode("ascii") if line: return Message.from_hex(line) else: # The first line is sometimes blank. return None def receive(self, block=True): if not block: return self.poll() while True: msg = self.poll() if msg: return msg # Wait for message. self._poller.poll() def poll(self): with self._lock: while self._poller.poll(0): msg = self._read_message() if msg is not None: return msg def close(self): if not self.closed: if self._proc: self._proc.kill() self._proc = None self.closed = True class Output(PortMethods, OutputMethods): def __init__(self, name=None, autoreset=False, **kwargs): self.name = name self.autoreset = autoreset self.closed = False self._dev = _get_device(self.name, "is_output") def send(self, msg): proc = subprocess.Popen( ["amidi", "--send-hex", msg.hex(), "-p", self._dev["device"]] ) proc.wait() def close(self): if not self.closed: if self.autoreset: self.reset() self.closed = True
engines
springer
# SPDX-License-Identifier: AGPL-3.0-or-later """Springer Nature (science) """ # pylint: disable=missing-function-docstring from datetime import datetime from json import loads from urllib.parse import urlencode from searx import logger from searx.exceptions import SearxEngineAPIException logger = logger.getChild("Springer Nature engine") about = { "website": "https://www.springernature.com/", "wikidata_id": "Q21096327", "official_api_documentation": "https://dev.springernature.com/", "use_official_api": True, "require_api_key": True, "results": "JSON", } categories = ["science"] paging = True nb_per_page = 10 api_key = "unset" base_url = "https://api.springernature.com/metadata/json?" def request(query, params): if api_key == "unset": raise SearxEngineAPIException("missing Springer-Nature API key") args = urlencode( { "q": query, "s": nb_per_page * (params["pageno"] - 1), "p": nb_per_page, "api_key": api_key, } ) params["url"] = base_url + args logger.debug("query_url --> %s", params["url"]) return params def response(resp): results = [] json_data = loads(resp.text) for record in json_data["records"]: content = record["abstract"][0:500] if len(record["abstract"]) > len(content): content += "..." published = datetime.strptime(record["publicationDate"], "%Y-%m-%d") metadata = [ record[x] for x in [ "publicationName", "identifier", "contentType", ] if record.get(x) is not None ] metadata = " / ".join(metadata) if record.get("startingPage") and record.get("endingPage") is not None: metadata += " (%(startingPage)s-%(endingPage)s)" % record results.append( { "title": record["title"], "url": record["url"][0]["value"].replace("http://", "https://", 1), "content": content, "publishedDate": published, "metadata": metadata, } ) return results
utils
definitions
from pyparsing import * IdentifierStart = oneOf(["$", "_"] + list(alphas)) Identifier = Combine(IdentifierStart + Optional(Word(alphas + nums + "$_"))) _keywords = [ "break", "do", "instanceof", "typeof", "case", "else", "new", "var", "catch", "finally", "return", "void", "continue", "for", "switch", "while", "debugger", "function", "this", "with", "default", "if", "throw", "delete", "in", "try", ] Keyword = oneOf(_keywords) # Literals # Bool BooleanLiteral = oneOf(("true", "false")) # Null NullLiteral = Literal("null") # Undefined UndefinedLiteral = Literal("undefined") # NaN NaNLiteral = Literal("NaN") # Number NonZeroDigit = oneOf(["1", "2", "3", "4", "5", "6", "7", "8", "9"]) DecimalDigit = oneOf(["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]) HexDigit = oneOf(list("0123456789abcdefABCDEF")) DecimalDigits = Word(nums) DecimalIntegerLiteral = Combine(NonZeroDigit + Optional(DecimalDigits)) | "0" SignedInteger = ( Combine("-" + DecimalDigits) | Combine("+" + DecimalDigits) | DecimalDigits ) ExponentPart = Combine(oneOf("e", "E") + SignedInteger) _DecimalLiteral = ( Combine( DecimalIntegerLiteral("int") + "." + Optional(DecimalDigits("float")) + Optional(ExponentPart("exp")) ) | Combine("." + DecimalDigits("float") + Optional(ExponentPart("exp"))) | DecimalIntegerLiteral("int") + Optional(ExponentPart("exp")) ) DecimalLiteral = Combine(_DecimalLiteral + NotAny(IdentifierStart)) HexIntegerLiteral = Combine(oneOf(("0x", "0X")) + Word("0123456789abcdefABCDEF")("hex")) NumericLiteral = Group(DecimalLiteral)("decimal") ^ Group(HexIntegerLiteral)("hex") def js_num(num): res = NumericLiteral.parseString(num) if res.decimal: res = res.decimal cand = int(res.int if res.int else 0) + float( "0." + res.float if res.float else 0 ) if res.exp: cand *= 10 ** int(res.exp) return cand elif res.hex: return int(res.hex.hex, 16) # String LineTerminator = White("\n", 1, 1, 1) | White("\r", 1, 1, 1) LineTerminatorSequence = ( Combine(White("\r", 1, 1, 1) + White("\n", 1, 1, 1)) | White("\n", 1, 1, 1) | White("\r", 1, 1, 1) ) LineContinuation = Combine("\\" + LineTerminatorSequence) UnicodeEscapeSequence = Combine("u" + HexDigit + HexDigit + HexDigit + HexDigit) HexEscapeSequence = Combine("x" + HexDigit + HexDigit) SingleEscapeCharacter = oneOf(["'", '"', "\\", "b", "f", "n", "r", "t", "v"]) EscapeCharacter = ( SingleEscapeCharacter | "0" | "x" | "u" ) # Changed DecimalDigit to 0 since it would match for example "\3" To verify.. NonEscapeCharacter = CharsNotIn([EscapeCharacter | LineTerminator]) CharacterEscapeSequence = SingleEscapeCharacter | NonEscapeCharacter EscapeSequence = ( CharacterEscapeSequence | Combine("0" + NotAny(DecimalDigit)) | HexEscapeSequence | UnicodeEscapeSequence ) SingleStringCharacter = ( CharsNotIn([LineTerminator | "\\" | "'"]) | Combine("\\" + EscapeSequence) | LineContinuation ) DoubleStringCharacter = ( CharsNotIn([LineTerminator | "\\" | '"']) | Combine("\\" + EscapeSequence) | LineContinuation ) StringLiteral = Combine('"' + ZeroOrMore(DoubleStringCharacter) + '"') ^ Combine( "'" + ZeroOrMore(SingleStringCharacter) + "'" ) # Array # Dict
PyObjCTest
helper_bridgesupport
import sys sys.path.insert(0, sys.argv[1]) import objc if not objc.__file__.startswith(sys.argv[1]): print("Loaded objc from unexpected path") sys.exit(1) try: unicode except NameError: unicode = str passed = True g = {} objc.initFrameworkWrapper( "AddressBook", "/System/Library/Frameworks/AddressBook.framework", "com.apple.AddressBook.framework", g, scan_classes=False, ) if "ABAddPropertiesAndTypes" not in g: print("Cannot find 'ABAddPropertiesAndTypes'") passed = False else: func = g["ABAddPropertiesAndTypes"] if not isinstance(func, objc.function): print("'ABAddPropertiesAndTypes' not an objc.function") passed = False else: if func.__metadata__() != { "retval": { "already_retained": False, "already_cfretained": False, "type": "q" if sys.maxint > 2**32 else "l", }, "arguments": ( { "null_accepted": True, "already_retained": False, "already_cfretained": False, "type": "^{__ABAddressBookRef=}", }, { "null_accepted": True, "already_retained": False, "already_cfretained": False, "type": "^{__CFString=}", }, { "null_accepted": True, "already_retained": False, "already_cfretained": False, "type": "^{__CFDictionary=}", }, ), "variadic": False, }: print("Unexpected metadata for 'ABAddPropertiesAndTypes'") passed = False if "ABAddressBookErrorDomain" not in g: print("'ABAddressBookErrorDomain' not found") passed = False elif not isinstance(g["ABAddressBookErrorDomain"], unicode): print("'ABAddressBookErrorDomain' not a string") passed = False if "ABAddRecordsError" not in g: print("'ABAddRecordsError' not found") passed = False elif g["ABAddRecordsError"] != 1001: print("'ABAddRecordsError' has wrong value") passed = False if "NSObject" in g: print("No scan_classes, but 'NSObject' found") passed = False if "ABAddressBook" in g: print("No scan_classes, but 'ABAddressBook' found") passed = False g = {} objc.initFrameworkWrapper( "AddressBook", "/System/Library/Frameworks/AddressBook.framework", "com.apple.AddressBook.framework", g, scan_classes=True, ) if "ABAddressBook" not in g: print("'ABAddressBook' not found") passed = False elif not isinstance(g["ABAddressBook"], objc.objc_class): print("'ABAddressBook' not a class") passed = False else: m = g["ABAddressBook"].addRecord_.__metadata__() if m["retval"]["type"] != objc._C_NSBOOL: print("'ABAddressBook' -addRecord: metadata not processed") passed = False if passed: sys.exit(0) else: sys.exit(1)
Scripts
Spring
#! python # -*- coding: utf-8 -*- # (c) 2011 Adrian Przekwas LGPL from __future__ import division # allows floating point division from integers import FreeCAD import Part from FreeCAD import Base class MySpring: def __init__(self, obj): """Add the properties: Pitch, Diameter, Height, BarDiameter""" obj.addProperty( "App::PropertyLength", "Pitch", "MySpring", "Pitch of the helix" ).Pitch = 5.0 obj.addProperty( "App::PropertyLength", "Diameter", "MySpring", "Diameter of the helix" ).Diameter = 6.0 obj.addProperty( "App::PropertyLength", "Height", "MySpring", "Height of the helix" ).Height = 30.0 obj.addProperty( "App::PropertyLength", "BarDiameter", "MySpring", "Diameter of the bar" ).BarDiameter = 3.0 obj.Proxy = self def onChanged(self, fp, prop): if ( prop == "Pitch" or prop == "Diameter" or prop == "Height" or prop == "BarDiameter" ): self.execute(fp) def execute(self, fp): pitch = fp.Pitch radius = fp.Diameter / 2 height = fp.Height barradius = fp.BarDiameter / 2 myhelix = Part.makeHelix(pitch, height, radius) g = myhelix.Edges[0].Curve c = Part.Circle() c.Center = g.value(0) # start point of the helix c.Axis = (0, 1, 0) c.Radius = barradius p = c.toShape() section = Part.Wire([p]) makeSolid = True isFrenet = True myspring = Part.Wire(myhelix).makePipeShell([section], makeSolid, isFrenet) fp.Shape = myspring def makeMySpring(): doc = FreeCAD.activeDocument() if doc is None: doc = FreeCAD.newDocument() spring = doc.addObject("Part::FeaturePython", "My_Spring") spring.Label = "My Spring" MySpring(spring) spring.ViewObject.Proxy = 0 doc.recompute() if __name__ == "__main__": makeMySpring()
controllers
paint_ctrl
# -*- coding: utf-8 -*- # # Copyright (C) 2013 by Ihor E. Novikov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from creators import AbstractCreator from sk1 import config, modes from uc2 import sk2const from uc2.libgeom import ( apply_trafo_to_paths, bezier_base_point, contra_point, is_point_in_rect2, midpoint, round_angle_point, ) class PolyLineCreator(AbstractCreator): mode = modes.LINE_MODE # drawing data paths = [] path = [[], [], sk2const.CURVE_OPENED] points = [] cursor = [] obj = None # Actual event point point = [] doc_point = [] ctrl_mask = False alt_mask = False shift_mask = False # Drawing timer to avoid repainting overhead timer = None timer_callback = None # Flags draw = False # entering into drawing mode create = False # entering into continuous drawing mode def __init__(self, canvas, presenter): AbstractCreator.__init__(self, canvas, presenter) def escape_pressed(self): if self.draw: self.mouse_double_click(None) else: self.canvas.set_mode() def start_(self): self.snap = self.presenter.snap self.init_flags() self.init_data() self.init_timer() self.update_from_selection() self.presenter.selection.clear() self.on_timer() def stop_(self): if self.obj: self.presenter.selection.set([self.obj]) self.init_flags() self.init_data() self.init_timer() self.canvas.renderer.paint_curve([]) self.on_timer() def standby(self): self.init_timer() self.cursor = [] self.on_timer() def restore(self): if self.path: point = self.points[-1] if self.points else self.path[0] self.point = self.canvas.point_doc_to_win(point) self.on_timer() def mouse_down(self, event): if not self.draw: self.draw = True self.clear_data() self.set_key_mask(event) self.point, self.doc_point = self._calc_points(event) self.add_point(self.point, self.doc_point) self.create = True self.init_timer() def mouse_up(self, event): if self.draw: self.set_key_mask(event) self.create = False self.cursor = self._calc_points(event)[0] self.on_timer() def mouse_double_click(self, event): if self.ctrl_mask: self.draw = False self.release_curve(False) else: self.release_curve() def mouse_move(self, event): if self.draw: self.set_key_mask(event) self.cursor = self._calc_points(event)[0] if self.create: self.set_drawing_timer() else: self.set_repaint_timer() else: self.init_timer() self.counter += 1 if self.counter > 5: self.counter = 0 point = event.get_point() dpoint = self.canvas.win_to_doc(point) if self.selection.is_point_over_marker(dpoint): mark = self.selection.is_point_over_marker(dpoint)[0] self.canvas.resize_marker = mark self.cursor = [] self.canvas.set_temp_mode(modes.RESIZE_MODE) def repaint(self): if self.timer_callback is not None: self.timer_callback() def repaint_draw(self): if self.path[0] or self.paths: paths = self.canvas.paths_doc_to_win(self.paths) cursor = self.cursor self.canvas.renderer.paint_curve(paths, cursor) return True def continuous_draw(self): if self.create and self.cursor: self.point, self.doc_point = self._snap(self.cursor) self.add_point(self.point, self.doc_point) return self.repaint_draw() def init_timer(self): self.timer.stop() self.timer_callback = self.repaint_draw def on_timer(self): self.canvas.selection_redraw() def set_repaint_timer(self): if not self.timer.is_running(): self.timer_callback = self.repaint_draw self.timer.start() def set_drawing_timer(self): if not self.timer.is_running(): self.timer_callback = self.continuous_draw self.timer.start() def init_data(self): self.paths = [] self.path = [[], [], sk2const.CURVE_OPENED] self.points = [] self.cursor = [] self.obj = None self.point = [] self.doc_point = [] self.timer_callback = None def clear_data(self): self.path = [[], [], sk2const.CURVE_OPENED] self.points = [] self.cursor = [] self.point = [] self.doc_point = [] def init_flags(self): self.create = False self.draw = False def set_key_mask(self, event): self.ctrl_mask = event.is_ctrl() self.alt_mask = event.is_alt() self.shift_mask = event.is_shift() def update_from_selection(self): sel_objs = self.selection.objs if len(sel_objs) == 1 and sel_objs[0].is_curve and self.obj is None: self.update_from_obj(sel_objs[0]) def update_from_obj(self, obj): self.obj = obj self.paths = apply_trafo_to_paths(self.obj.paths, self.obj.trafo) path = self.paths[-1] if path[-1] == sk2const.CURVE_OPENED: self.path = path self.points = self.path[1] last = bezier_base_point(self.points[-1]) self.doc_point = [] + last self.point = [] + self.canvas.point_doc_to_win(last) paths = self.canvas.paths_doc_to_win(self.paths) self.canvas.renderer.paint_curve(paths) else: paths = self.canvas.paths_doc_to_win(self.paths) self.canvas.renderer.paint_curve(paths) self.draw = True def add_point(self, point, doc_point): subpoint = bezier_base_point(point) if self.path[0]: w = h = config.curve_point_sensitivity_size start = self.canvas.point_doc_to_win(self.path[0]) if self.points: p = self.canvas.point_doc_to_win(self.points[-1]) last = bezier_base_point(p) if is_point_in_rect2(subpoint, start, w, h) and len(self.points) > 1: self.path[2] = sk2const.CURVE_CLOSED if len(point) == 2: self.points.append([] + self.path[0]) else: p = doc_point self.points.append([p[0], p[1], [] + self.path[0], p[3]]) if not self.ctrl_mask: self.release_curve() else: self.draw = False self.release_curve(False) self.on_timer() elif not is_point_in_rect2(subpoint, last, w, h): self.points.append(doc_point) self.path[1] = self.points else: if not is_point_in_rect2(subpoint, start, w, h): self.points.append(doc_point) self.path[1] = self.points else: self.path[0] = doc_point self.paths.append(self.path) def release_curve(self, stop=True): if self.points: self.cursor = [] flag = config.curve_autoclose_flag if flag and self.path[2] == sk2const.CURVE_OPENED: self.path[2] = sk2const.CURVE_CLOSED self.points.append([] + self.path[0]) paths = self.paths obj = self.obj if stop: self.stop_() if obj is None: obj = self.api.create_curve(paths) else: self.api.update_curve(obj, paths) if not stop: self.obj = obj def _calc_points(self, event): start = self.point cursor = event.get_point() ctrl = event.is_ctrl() shift = event.is_shift() if not shift and start and cursor: if ctrl: # restrict movement to horizontal or vertical fixed_angle = config.curve_fixed_angle cursor = round_angle_point(start, cursor, fixed_angle) return self._snap(cursor) def _snap(self, point): if self.check_snap and not self.shift_mask: snapped = self.snap.snap_point(point)[1:] else: snapped = [point, self.canvas.win_to_doc(point)] return snapped class PathsCreator(PolyLineCreator): mode = modes.CURVE_MODE # Actual curve event point curve_point = [] control_point0 = [] control_point1 = [] control_point2 = [] curve_point_doc = [] control_point0_doc = [] control_point1_doc = [] control_point2_doc = [] def __init__(self, canvas, presenter): PolyLineCreator.__init__(self, canvas, presenter) def standby(self): self.init_timer() self.cursor = [] self.on_timer() def restore(self): self.point = self.canvas.point_doc_to_win(self.doc_point) self.curve_point = self.canvas.point_doc_to_win(self.curve_point_doc) self.control_point0 = self.canvas.point_doc_to_win(self.control_point0_doc) self.control_point1 = self.canvas.point_doc_to_win(self.control_point1_doc) self.control_point2 = self.canvas.point_doc_to_win(self.control_point2_doc) self.on_timer() def update_from_obj(self, obj): self.obj = obj self.paths = apply_trafo_to_paths(self.obj.paths, self.obj.trafo) path = self.paths[-1] if path[-1] == sk2const.CURVE_OPENED: self.path = path self.points = self.path[1] paths = self.canvas.paths_doc_to_win(self.paths) self.canvas.renderer.paint_curve(paths) last = bezier_base_point(self.points[-1]) self.control_point0 = self.canvas.point_doc_to_win(last) self.control_point0_doc = [] + last self.point = [] + self.control_point0 self.doc_point = [] + last self.control_point2 = [] + self.control_point0 self.control_point2_doc = [] + last self.curve_point = [] + self.control_point0 self.curve_point_doc = [] + last else: paths = self.canvas.paths_doc_to_win(self.paths) self.canvas.renderer.paint_curve(paths) self.draw = True def mouse_down(self, event): if not self.draw: self.draw = True self.clear_data() self.curve_point, self.curve_point_doc = self._calc_points(event) self.control_point2 = [] + self.curve_point self.control_point2_doc = [] + self.curve_point_doc self.create = True self.init_timer() def mouse_up(self, event): if not self.draw: return self.create = False self.set_key_mask(event) self.control_point2, self.control_point2_doc = self._calc_points(event) self.cursor = [] + self.control_point2 if self.path[0]: if self.alt_mask: self.point, self.doc_point = self._calc_points(event) self.add_point([] + self.point, [] + self.doc_point) self.control_point0 = [] + self.point self.cursor = event.get_point() self.curve_point = [] + self.point elif self.control_point2: self.point = [] + self.curve_point self.doc_point = [] + self.curve_point_doc self.control_point1 = contra_point( self.control_point2, self.curve_point ) self.control_point1_doc = contra_point( self.control_point2_doc, self.curve_point_doc ) node_type = sk2const.NODE_SYMMETRICAL if len(self.points): bp_doc = bezier_base_point(self.points[-1]) else: bp_doc = self.path[0] if ( self.control_point0_doc == bp_doc and self.control_point1_doc == self.curve_point_doc ): node_type = sk2const.NODE_CUSP p0d = midpoint(bp_doc, self.curve_point_doc, 1.0 / 3.0) self.control_point0_doc = p0d p1d = midpoint(bp_doc, self.curve_point_doc, 2.0 / 3.0) self.control_point1_doc = p1d self.control_point0 = self.canvas.doc_to_win(p0d) self.control_point1 = self.canvas.doc_to_win(p1d) self.add_point( [ self.control_point0, self.control_point1, self.curve_point, node_type, ], [ self.control_point0_doc, self.control_point1_doc, self.curve_point_doc, node_type, ], ) self.control_point0 = [] + self.control_point2 self.control_point0_doc = [] + self.control_point2_doc snapped = self._calc_points(event) self.cursor = [] + snapped[0] self.curve_point, self.curve_point_doc = snapped else: self.point, self.doc_point = self._calc_points(event) self.add_point(self.point, self.doc_point) self.control_point0 = [] + self.point self.control_point0_doc = [] + self.doc_point self.on_timer() def mouse_move(self, event): self.set_key_mask(event) if self.draw: snapped = self._calc_points(event) self.cursor = [] + snapped[0] self.control_point2, self.control_point2_doc = snapped if not self.create: self.curve_point = [] + self.control_point2 self.curve_point_doc = [] + self.control_point2_doc self.set_repaint_timer() else: self.init_timer() self.counter += 1 if self.counter > 5: self.counter = 0 point = event.get_point() dpoint = self.canvas.win_to_doc(point) if self.selection.is_point_over_marker(dpoint): mark = self.selection.is_point_over_marker(dpoint)[0] self.canvas.resize_marker = mark self.cursor = [] self.canvas.set_temp_mode(modes.RESIZE_MODE) def repaint_draw(self): if self.path[0] or self.paths: paths = self.canvas.paths_doc_to_win(self.paths) cursor = self.cursor if not self.path[0]: cursor = [] elif cursor and not self.create: snapped = self.snap.snap_point(cursor)[1:] self.curve_point, self.curve_point_doc = snapped path = [] if self.control_point0 and not self.alt_mask: if not self.control_point2_doc: return True self.control_point1_doc = contra_point( self.control_point2_doc, self.curve_point_doc ) path = [ self.doc_point, [ self.control_point0_doc, self.control_point1_doc, self.curve_point_doc, ], sk2const.CURVE_OPENED, ] path = self.canvas.paths_doc_to_win( [ path, ] )[0] cpoint = [] if self.create: cpoint = self.canvas.doc_to_win(self.control_point2_doc) self.canvas.renderer.paint_curve(paths, cursor, path, cpoint) return True def init_data(self): PolyLineCreator.init_data(self) self.curve_point = [] self.control_point0 = [] self.control_point1 = [] self.control_point2 = [] self.curve_point_doc = [] self.control_point0_doc = [] self.control_point1_doc = [] self.control_point2_doc = [] def _calc_points(self, event): if self.curve_point != self.control_point2: start = self.curve_point else: start = self.point cursor = event.get_point() ctrl = event.is_ctrl() shift = event.is_shift() if not shift and start and cursor: if ctrl: # restrict movement to horizontal or vertical fixed_angle = config.curve_fixed_angle cursor = round_angle_point(start, cursor, fixed_angle) return self._snap(cursor)
migrations
0036_session_recording_events_materialized_columns
from infi.clickhouse_orm import migrations from posthog.client import sync_execute from posthog.session_recordings.sql.session_recording_event_sql import ( MATERIALIZED_COLUMNS, ) from posthog.settings import CLICKHOUSE_CLUSTER def create_events_summary_mat_columns(database): columns_to_add = [ "events_summary", "click_count", "keypress_count", "timestamps_summary", "first_event_timestamp", "last_event_timestamp", "urls", ] for column in columns_to_add: data = MATERIALIZED_COLUMNS[column] sync_execute( f""" ALTER TABLE sharded_session_recording_events ON CLUSTER '{CLICKHOUSE_CLUSTER}' ADD COLUMN IF NOT EXISTS {column} {data["schema"]} {data["materializer"]} """ ) sync_execute( f""" ALTER TABLE session_recording_events ON CLUSTER '{CLICKHOUSE_CLUSTER}' ADD COLUMN IF NOT EXISTS {column} {data["schema"]} """ ) sync_execute( f""" ALTER TABLE session_recording_events ON CLUSTER '{CLICKHOUSE_CLUSTER}' COMMENT COLUMN {column} 'column_materializer::{column}' """ ) operations = [migrations.RunPython(create_events_summary_mat_columns)]
lib
archivematicaFunctions
# This file is part of Archivematica. # # Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com> # # Archivematica is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Archivematica is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Archivematica. If not, see <http://www.gnu.org/licenses/>. # @package Archivematica # @subpackage archivematicaCommon # @author Joseph Perry <joseph@artefactual.com> """archivematicaFunctions provides various helper functions across the different Archivematica modules. """ import base64 import collections import errno import glob import hashlib import locale import os import pprint import re from itertools import zip_longest from pathlib import Path from uuid import uuid4 from amclient import AMClient from django.apps import apps from lxml import etree from namespaces import NSMAP, xml_find_premis REQUIRED_DIRECTORIES = ( "logs", "logs/fileMeta", "metadata", "metadata/submissionDocumentation", "objects", ) OPTIONAL_FILES = ("processingMCP.xml", "README.html") MANUAL_NORMALIZATION_DIRECTORIES = [ "objects/manualNormalization/access", "objects/manualNormalization/preservation", ] AMCLIENT_ERROR_CODES = (1, 2, 3, 4, -1) # Package UUID suffix is a single dash followed by a UUID v4 with hyphens. PACKAGE_UUID_SUFFIX_LENGTH = 37 # Package extension constants here are copied from Storage Service's # storage_service.common.utils module. COMPRESS_EXTENSION_7Z = ".7z" COMPRESS_EXTENSION_BZIP2 = ".bz2" COMPRESS_EXTENSION_GZIP = ".gz" COMPRESS_EXTENSIONS = ( COMPRESS_EXTENSION_7Z, COMPRESS_EXTENSION_BZIP2, COMPRESS_EXTENSION_GZIP, ) PACKAGE_EXTENSIONS = (".tar",) + COMPRESS_EXTENSIONS def get_setting(setting, default=""): """Get Dashboard setting from database model.""" DashboardSetting = apps.get_model(app_label="main", model_name="DashboardSetting") try: return DashboardSetting.objects.get(name=setting).value except DashboardSetting.DoesNotExist: return default def get_dashboard_uuid(): """Get Dashboard uuid via the Dashboard database mode.""" return get_setting("dashboard_uuid", default=None) def setup_amclient(): """Initialize and return an AMClient instance.""" client = AMClient( ss_api_key=get_setting("storage_service_apikey", ""), ss_user_name=get_setting("storage_service_user", ""), ss_url=get_setting("storage_service_url", "").rstrip("/"), ) return client class OrderedListsDict(collections.OrderedDict): """ OrderedDict where all keys are lists, and elements are appended automatically. """ def __setitem__(self, key, value): # When inserting, insert into a list of items with the same key try: self[key] except KeyError: super().__setitem__(key, []) self[key].append(value) def b64encode_string(data): return base64.b64encode(data.encode("utf8")).decode("utf8") def b64decode_string(data): return base64.b64decode(data.encode("utf8")).decode("utf8") def get_locale_encoding(): """Return the default locale of the machine calling this function.""" default = "UTF-8" try: return locale.getdefaultlocale()[1] or default except IndexError: return default def cmd_line_arg_to_unicode(cmd_line_arg): """Decode a command-line argument (bytestring, type ``str``) to Unicode (type ``unicode``) by decoding it using the default system encoding (if retrievable) or UTF-8 otherwise. """ try: return cmd_line_arg.decode(get_locale_encoding()) except (LookupError, UnicodeDecodeError): return cmd_line_arg def getTagged(root, tag): """Return the XML elements with the given tag argument.""" ret = [] for element in root: if element.tag == tag: ret.append(element) return ret def escapeForCommand(string): """Escape special characters in a given string.""" ret = string if isinstance(ret, str): ret = ret.replace("\\", "\\\\") ret = ret.replace('"', '\\"') ret = ret.replace("`", r"\`") # ret = ret.replace("'", "\\'") # ret = ret.replace("$", "\\$") return ret def escape(string): """Replace non-unicode characters with a replacement character. Use this primarily for arbitrary strings (e.g. filenames, paths) that might not be valid unicode to begin with. """ if isinstance(string, bytes): string = string.decode("utf-8", errors="replace") return string def normalizeNonDcElementName(string): """Normalize non-DC CONTENTdm metadata element names to match those used in transfer's metadata.csv files. """ # Convert non-alphanumerics to _, remove extra _ from ends of string. normalized_string = re.sub(r"\W+", "_", string) normalized_string = normalized_string.strip("_") # Lower case string. normalized_string = normalized_string.lower() return normalized_string def get_file_checksum(filename, algorithm="sha256"): """ Perform a checksum on the specified file. This function reads in files incrementally to avoid memory exhaustion. See: https://stackoverflow.com/a/4213255 :param filename: The path to the file we want to check :param algorithm: Which algorithm to use for hashing, e.g. 'md5' :return: Returns a checksum string for the specified file. """ hash_ = hashlib.new(algorithm) with open(filename, "rb") as file_: for chunk in iter(lambda: file_.read(1024 * hash_.block_size), b""): hash_.update(chunk) return hash_.hexdigest() def find_metadata_files(sip_path, filename, only_transfers=False): """ Check the SIP and transfer metadata directories for filename. Helper function to collect all of a particular metadata file (e.g. metadata.csv) in a SIP. SIP-level files will be at the end of the list, if they exist. :param sip_path: Path of the SIP to check :param filename: Name of the metadata file to search for :param only_transfers: True if it should only look at Transfer metadata, False if it should look at SIP metadata too. :return: List of full paths to instances of filename """ paths = [] # Check transfer metadata. transfers_md_path = os.path.join(sip_path, "objects", "metadata", "transfers") try: transfers = os.listdir(transfers_md_path) except OSError: transfers = [] for transfer in transfers: path = os.path.join(transfers_md_path, transfer, filename) if os.path.isfile(path): paths.append(path) # Check the SIP metadata dir. if not only_transfers: path = os.path.join(sip_path, "objects", "metadata", filename) if os.path.isfile(path): paths.append(path) return paths def find_mets_file(unit_path): """Return the location of the original METS in a Archivematica AIP transfer. :returns: Path to original METS file (str) :raises: METSDiscoveryError if no or multiple METS are found. """ src = os.path.join(unit_path, "metadata") mets_paths = glob.glob(os.path.join(src, "METS.*.xml")) if len(mets_paths) == 1: return mets_paths[0] elif len(mets_paths) == 0: raise OSError(errno.EEXIST, f"No METS file found in {src}") else: raise OSError(errno.EEXIST, f"Multiple METS files found in {src}: {mets_paths}") def create_directories(directories, basepath="", printing=False, printfn=print): """Create arbitrary directory structures given an iterable list of directory paths. """ for directory in directories: dir_path = os.path.join(basepath, directory) if not os.path.isdir(dir_path): os.makedirs(dir_path) if printing: printfn("Creating directory", dir_path) def create_structured_directory( basepath, manual_normalization=False, printing=False, printfn=print ): """Wrapper for create_directories for various structures required by Archivematica. """ create_directories( REQUIRED_DIRECTORIES, basepath=basepath, printing=printing, printfn=printfn ) if manual_normalization: create_directories( MANUAL_NORMALIZATION_DIRECTORIES, basepath=basepath, printing=printing, printfn=printfn, ) def get_dir_uuids(dir_paths, logger=None, printfn=print): """Return a generator of dict instances, each containing one of the directory paths in ``dir_paths`` and its newly minted UUID. Used by multiple client scripts. """ for dir_path in dir_paths: dir_uuid = str(uuid4()) msg = f"Assigning UUID {dir_uuid} to directory path {dir_path}" printfn(msg) if logger: logger.info(msg) yield {"currentLocation": dir_path, "uuid": dir_uuid} def format_subdir_path(dir_path, path_prefix_to_repl): """Add "/" to end of ``dir_path`` and replace actual root directory ``path_prefix_to_repl`` with a placeholder. Used when creating ``originallocation`` attributes for ``Directory`` models. """ return os.path.join(dir_path, "").replace( path_prefix_to_repl, "%transferDirectory%", 1 ) def walk_dir(dir_path): """Calculate directory size by recursively walking files. :param dir_path: absolute path to directory :return: size in bytes (int) """ size = 0 for dirpath, _, filenames in os.walk(dir_path): for filename in filenames: file_path = os.path.join(dirpath, filename) size += os.path.getsize(file_path) return size def get_bag_size(bag, path): """Return size of BagIt Bag, using Payload-Oxum if present. Payload-Oxum, like other Bag Metadata elements, is optional per the BagIt specification: https://tools.ietf.org/html/rfc8493#section-2.2.2 If the Bag does not have a Payload-Oxum, calculate size by recursively walking files. :param transfer_path: Bag object :param path: path to Bag directory :return: size in bytes (int) """ oxum = bag.info.get("Payload-Oxum") if oxum is not None: return int(oxum.split(".")[0]) return walk_dir(path) def str2bool(val): """'True' is ``True``; aught else is ``False.""" if val == "True": return True return False NORMATIVE_STRUCTMAP_LABEL = "Normative Directory Structure" def div_el_to_dir_paths(div_el, parent="", include=True): """Recursively extract the list of filesystem directory paths encoded in <mets:div> element ``div_el``. """ paths = [] path = parent dir_name = div_el.get("LABEL") if parent == "" and dir_name in ("metadata", "submissionDocumentation"): return [] if include: path = os.path.join(parent, dir_name) paths.append(path) for sub_div_el in div_el.findall('mets:div[@TYPE="Directory"]', NSMAP): paths += div_el_to_dir_paths(sub_div_el, parent=path) return paths def reconstruct_empty_directories(mets_file_path, objects_path, logger=None): """Reconstruct in objects/ path ``objects_path`` the empty directories documented in METS file ``mets_file_path``. :param str mets_file_path: absolute path to an AIP/SIP's METS file. :param str objects_path: absolute path to an AIP/SIP's objects/ directory on disk. :returns None: """ if not os.path.isfile(mets_file_path) or not os.path.isdir(objects_path): if logger: logger.info( "Unable to construct empty directories, either because" " there is no METS file at {} or because there is no" " objects/ directory at {}".format(mets_file_path, objects_path) ) return doc = etree.parse(mets_file_path, etree.XMLParser(remove_blank_text=True)) logical_struct_map_el = doc.find( 'mets:structMap[@TYPE="logical"][@LABEL="{}"]'.format( NORMATIVE_STRUCTMAP_LABEL ), NSMAP, ) if logical_struct_map_el is None: if logger: logger.info( "Unable to locate a logical structMap labelled {}." " Aborting attempt to reconstruct empty" " directories.".format(NORMATIVE_STRUCTMAP_LABEL) ) return root_div_el = logical_struct_map_el.find( 'mets:div/mets:div[@LABEL="objects"]', NSMAP ) if root_div_el is None: if logger: logger.info( "Unable to locate a logical structMap labelled {}." " Aborting attempt to reconstruct empty" " directories.".format(NORMATIVE_STRUCTMAP_LABEL) ) return paths = div_el_to_dir_paths(root_div_el, include=False) if logger: logger.info("paths extracted from METS file:") logger.info(pprint.pformat(paths)) for path in paths: path = os.path.join(objects_path, path) if not os.path.isdir(path): os.makedirs(path) def find_transfer_path_from_ingest(transfer_path, shared_path): """Find path of a transfer arranged or coming straight from processing. In Ingest, access to the original transfers is needed in order to copy submission docs, metadata and logs. Transfers can be found under ``currentlyProcessing`` unless they come from SIP Arrangement, in which case they're found under the temporary shared directory. TODO: use ``Transfer.currentlocation`` or a model method? """ transfer_uuid = transfer_path.rstrip("/")[-36:] path = transfer_path.replace("%sharedPath%", shared_path, 1) if os.path.isdir(path): return path path = os.path.join(shared_path, "tmp", f"transfer-{transfer_uuid}") if os.path.isdir(path): return path raise Exception("Transfer directory not physically found") def find_aic_mets_filename(mets_root): """Find name of AIC METS file within AIP METS document. :param mets_root: AIP METS document root. :returns: AIC METS filename or None. """ return xml_find_premis( mets_root, "mets:fileSec/mets:fileGrp[@USE='metadata']/mets:file/mets:FLocat" ).get("{" + NSMAP["xlink"] + "}href") def find_aip_dirname(mets_root): """Find name of AIP directory within AIP METS document. :param mets_root: AIP METS document root. :returns: AIP dirname or None. """ return xml_find_premis(mets_root, "mets:structMap/mets:div").get("LABEL") def find_aips_in_aic(aic_root): """Find extent of AIPs in AIC within AIC METS document. :param aic_root" AIC METS document root. :returns: Count of AIPs in AIC or None. """ extent = xml_find_premis( aic_root, "mets:dmdSec/mets:mdWrap/mets:xmlData/dcterms:dublincore/dcterms:extent", ) try: return re.search(r"\d+", extent.text).group() except AttributeError: return None def package_name_from_path(current_path, remove_uuid_suffix=False): """Return name of package without file extensions from current path. This helper works for all package types (e.g. transfer, AIP, AIC). :param current_path: Current path to package. :param remove_uuid_suffix: Optional boolean to additionally remove UUID suffix. :returns: Package name minus any file extensions. """ path = Path(current_path) name, chars_to_remove = path.name, 0 if remove_uuid_suffix is True: chars_to_remove = PACKAGE_UUID_SUFFIX_LENGTH for suffix in reversed(path.suffixes): if suffix not in PACKAGE_EXTENSIONS: break chars_to_remove += len(suffix) # Check if we have characters to remove to avoid accidentally # returning an empty string with name[:-0]. if not chars_to_remove: return name return name[:-chars_to_remove] def relative_path_to_aip_mets_file(uuid, current_path): """Return relative path to AIP METS file. :param uuid: AIP UUID. :param current_path: Current path to AIP. :returns: Relative path to AIP METS file. """ package_name_without_extensions = package_name_from_path(current_path) mets_name = f"METS.{uuid}.xml" mets_path = f"{package_name_without_extensions}/data/{mets_name}" return mets_path def chunk_iterable(iterable, chunk_size=10, fillvalue=None): """Collect data into fixed-length chunks or blocks. >>> list(chunk_iterable('ABCDEFG', 3, 'x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] """ args = [iter(iterable)] * chunk_size return zip_longest(fillvalue=fillvalue, *args)