section stringlengths 2 30 | filename stringlengths 1 82 | text stringlengths 783 28M |
|---|---|---|
transports | scgi | #!/usr/bin/python
# rtorrent_xmlrpc
# (c) 2011 Roger Que <alerante@bellsouth.net>
#
# Modified portions:
# (c) 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Python module for interacting with rtorrent's XML-RPC interface
# directly over SCGI, instead of through an HTTP server intermediary.
# Inspired by Glenn Washburn's xmlrpc2scgi.py [1], but subclasses the
# built-in xmlrpclib classes so that it is compatible with features
# such as MultiCall objects.
#
# [1] <http://libtorrent.rakshasa.no/wiki/UtilsXmlrpc2scgi>
#
# Usage: server = SCGIServerProxy('scgi://localhost:7000/')
# server = SCGIServerProxy('scgi:///path/to/scgi.sock')
# print server.system.listMethods()
# mc = xmlrpclib.MultiCall(server)
# mc.get_up_rate()
# mc.get_down_rate()
# print mc()
#
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
#
# You must obey the GNU General Public License in all respects for
# all of the code used other than OpenSSL. If you modify file(s)
# with this exception, you may extend this exception to your version
# of the file(s), but you are not obligated to do so. If you do not
# wish to do so, delete this exception statement from your version.
# If you delete this exception statement from all source files in the
# program, then also delete it here.
#
#
#
# Portions based on Python's xmlrpclib:
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
import errno
import re
import socket
import urllib
import httplib
import xmlrpclib
class SCGITransport(xmlrpclib.Transport):
    """xmlrpclib transport that talks SCGI directly to rtorrent.

    Frames each XML-RPC request as an SCGI request (netstring-encoded
    headers followed by the body) and sends it over either a TCP socket
    (for ``scgi://host:port/`` URLs) or a UNIX-domain socket (for
    ``scgi:///path/to/scgi.sock`` URLs). Python 2 only.
    """

    # Added request() from Python 2.7 xmlrpclib here to backport to Python 2.6
    def request(self, host, handler, request_body, verbose=0):
        # Retry the request once if the cached connection has gone cold.
        for i in (0, 1):
            try:
                return self.single_request(host, handler, request_body, verbose)
            except socket.error, e:
                # Retry only on the first attempt (i == 0), and only for
                # errors that indicate the peer dropped the connection.
                if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE):
                    raise
            except httplib.BadStatusLine:  # close after we sent request
                # First failure: fall through and retry once; second
                # failure: propagate.
                if i:
                    raise

    def single_request(self, host, handler, request_body, verbose=0):
        """Send one SCGI-framed XML-RPC request and return the parsed reply."""
        # Add SCGI headers to the request. CONTENT_LENGTH and SCGI=1 are
        # the two headers the SCGI protocol requires.
        headers = {'CONTENT_LENGTH': str(len(request_body)), 'SCGI': '1'}
        # Headers are NUL-separated key/value pairs wrapped in a netstring:
        # "<length>:<headers>," with the request body appended after the comma.
        header = '\x00'.join(('%s\x00%s' % item for item in headers.iteritems())) + '\x00'
        header = '%d:%s' % (len(header), header)
        request_body = '%s,%s' % (header, request_body)

        sock = None
        try:
            if host:
                # TCP transport: host carries "host:port" from the URL netloc.
                host, port = urllib.splitport(host)
                addrinfo = socket.getaddrinfo(host, int(port), socket.AF_INET,
                                              socket.SOCK_STREAM)
                sock = socket.socket(*addrinfo[0][:3])
                sock.connect(addrinfo[0][4])
            else:
                # No host part: the handler is a UNIX-domain socket path.
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(handler)

            self.verbose = verbose

            # NOTE(review): sock.send() may write fewer bytes than requested
            # for large payloads; sendall() looks safer here -- confirm.
            sock.send(request_body)
            return self.parse_response(sock.makefile())
        finally:
            # Always close the socket, even if parsing raised.
            if sock:
                sock.close()

    def parse_response(self, response):
        """Read the full SCGI response, strip its headers, and feed the
        XML payload to the xmlrpclib response parser."""
        p, u = self.getparser()

        # Read until EOF; rtorrent closes the connection after replying.
        response_body = ''
        while True:
            data = response.read(1024)
            if not data:
                break
            response_body += data

        # Remove SCGI headers from the response: everything up to the
        # first blank line is header material.
        response_header, response_body = re.split(r'\n\s*?\n', response_body,
                                                  maxsplit=1)

        if self.verbose:
            print 'body:', repr(response_body)

        p.feed(response_body)
        p.close()

        return u.close()
|
aliceVision | DepthMap | __version__ = "4.0"
from meshroom.core import desc
class DepthMap(desc.AVCommandLineNode):
    """Meshroom node wrapping aliceVision_depthMapEstimation.

    Computes one depth map (and similarity map) per calibrated camera on
    the GPU, in two passes: a coarse SGM pass followed by an optional
    sub-pixel Refine pass. Fix in this revision: corrected the
    "epilolar" -> "epipolar" typo in the sgmStepZ description.
    """

    commandLine = "aliceVision_depthMapEstimation {allParams}"
    gpu = desc.Level.INTENSIVE
    size = desc.DynamicNodeSize("input")
    parallelization = desc.Parallelization(blockSize=3)
    commandLineRange = "--rangeStart {rangeStart} --rangeSize {rangeBlockSize}"

    category = "Dense Reconstruction"
    documentation = """
Estimate a depth map for each calibrated camera using Plane Sweeping, a multi-view stereo algorithm notable for its efficiency on modern graphics hardware (GPU).
Adjust the downscale factor to compute depth maps at a higher/lower resolution.
Use a downscale factor of one (full-resolution) only if the quality of the input images is really high (camera on a tripod with high-quality optics).
## Online
[https://alicevision.org/#photogrammetry/depth_maps_estimation](https://alicevision.org/#photogrammetry/depth_maps_estimation)
"""

    inputs = [
        desc.File(
            name="input",
            label="SfMData",
            description="Input SfMData file.",
            value="",
            uid=[0],
        ),
        desc.File(
            name="imagesFolder",
            label="Images Folder",
            description="Use images from a specific folder instead of those specified in the SfMData file.\n"
                        "Filename should be the image UID.",
            value="",
            uid=[0],
        ),
        desc.ChoiceParam(
            name="downscale",
            label="Downscale",
            description="Downscale the input images to compute the depth map.\n"
                        "Full resolution (downscale = 1) gives the best result,\n"
                        "but using a larger downscale will reduce computation time at the expense of quality.\n"
                        "If the images are noisy, blurry or if the surfaces are challenging (weakly-textured or with specularities), a larger downscale may improve.",
            value=2,
            values=[1, 2, 4, 8, 16],
            exclusive=True,
            uid=[0],
        ),
        desc.FloatParam(
            name="minViewAngle",
            label="Min View Angle",
            description="Minimum angle between two views (select the neighbouring cameras, select depth planes from epipolar segment point).",
            value=2.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name="maxViewAngle",
            label="Max View Angle",
            description="Maximum angle between two views (select the neighbouring cameras, select depth planes from epipolar segment point).",
            value=70.0,
            range=(10.0, 120.0, 1.0),
            uid=[0],
            advanced=True,
        ),
        desc.GroupAttribute(
            name="tiling",
            label="Tiling",
            description="Tiles are used to split the computation into fixed buffers to fit the GPU best.",
            group=None,
            groupDesc=[
                desc.IntParam(
                    name="tileBufferWidth",
                    label="Buffer Width",
                    description="Maximum tile buffer width.",
                    value=1024,
                    range=(-1, 2000, 10),
                    uid=[0],
                ),
                desc.IntParam(
                    name="tileBufferHeight",
                    label="Buffer Height",
                    description="Maximum tile buffer height.",
                    value=1024,
                    range=(-1, 2000, 10),
                    uid=[0],
                ),
                desc.IntParam(
                    name="tilePadding",
                    label="Padding",
                    description="Buffer padding for overlapping tiles.",
                    value=64,
                    range=(0, 500, 1),
                    uid=[0],
                ),
                desc.BoolParam(
                    name="autoAdjustSmallImage",
                    label="Auto Adjust Small Image",
                    description="Automatically adjust depth map parameters if images are smaller than one tile\n"
                                "(maxTCamsPerTile = maxTCams, adjust step if needed).",
                    value=True,
                    uid=[0],
                    advanced=True,
                ),
            ],
        ),
        desc.BoolParam(
            name="chooseTCamsPerTile",
            label="Choose Neighbour Cameras Per Tile",
            description="Choose neighbour cameras per tile or globally to the image.",
            value=True,
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="maxTCams",
            label="Max Nb Neighbour Cameras",
            description="Maximum number of neighbour cameras per image.",
            value=10,
            range=(1, 20, 1),
            uid=[0],
        ),
        desc.GroupAttribute(
            name="sgm",
            label="SGM",
            description="The Semi-Global Matching (SGM) step computes a similarity volume and extracts the initial low-resolution depth map.\n"
                        "This method is highly robust but has limited depth precision (banding artifacts due to a limited list of depth planes).",
            group=None,
            groupDesc=[
                desc.IntParam(
                    name="sgmScale",
                    label="Downscale Factor",
                    description="Downscale factor applied on source images for the SGM step (in addition to the global downscale).",
                    value=2,
                    range=(-1, 10, 1),
                    uid=[0],
                ),
                desc.IntParam(
                    name="sgmStepXY",
                    label="Step XY",
                    description="The step is used to compute the similarity volume for one pixel over N (in the XY image plane).",
                    value=2,
                    range=(-1, 10, 1),
                    uid=[0],
                ),
                desc.IntParam(
                    name="sgmStepZ",
                    label="Step Z",
                    # Typo fix: "epilolar" -> "epipolar".
                    description="Initial step used to compute the similarity volume on Z axis (every N pixels on the epipolar line).\n"
                                "-1 means automatic estimation.\n"
                                "This value will be adjusted in all case to fit in the max memory (sgmMaxDepths).",
                    value=-1,
                    range=(-1, 10, 1),
                    uid=[0],
                ),
                desc.IntParam(
                    name="sgmMaxTCamsPerTile",
                    label="Max Nb Neighbour Cameras Per Tile",
                    description="Maximum number of neighbour cameras used per tile.",
                    value=4,
                    range=(1, 20, 1),
                    uid=[0],
                ),
                desc.IntParam(
                    name="sgmWSH",
                    label="WSH",
                    description="Half-size of the patch used to compute the similarity. Patch width is wsh*2+1.",
                    value=4,
                    range=(1, 20, 1),
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="sgmUseSfmSeeds",
                    label="Use SfM Landmarks",
                    description="Use landmarks from Structure-from-Motion as input seeds to define min/max depth ranges.",
                    value=True,
                    uid=[0],
                    advanced=True,
                ),
                desc.FloatParam(
                    name="sgmSeedsRangeInflate",
                    label="Seeds Range Inflate",
                    description="Inflate factor to add margins around SfM seeds.",
                    value=0.2,
                    range=(0.0, 2.0, 0.1),
                    uid=[0],
                    advanced=True,
                ),
                desc.FloatParam(
                    name="sgmDepthThicknessInflate",
                    label="Thickness Inflate",
                    description="Inflate factor to add margins to the depth thickness.",
                    value=0.0,
                    range=(0.0, 2.0, 0.1),
                    uid=[0],
                    advanced=True,
                ),
                desc.FloatParam(
                    name="sgmMaxSimilarity",
                    label="Max Similarity",
                    description="Maximum similarity threshold (between 0 and 1) used to filter out poorly supported depth values.",
                    value=1.0,
                    range=(0.0, 1.0, 0.01),
                    uid=[0],
                    advanced=True,
                ),
                desc.FloatParam(
                    name="sgmGammaC",
                    label="GammaC",
                    description="GammaC threshold used for similarity computation.",
                    value=5.5,
                    range=(0.0, 30.0, 0.5),
                    uid=[0],
                    advanced=True,
                ),
                desc.FloatParam(
                    name="sgmGammaP",
                    label="GammaP",
                    description="GammaP threshold used for similarity computation.",
                    value=8.0,
                    range=(0.0, 30.0, 0.5),
                    uid=[0],
                    advanced=True,
                ),
                desc.FloatParam(
                    name="sgmP1",
                    label="P1",
                    description="P1 parameter for SGM filtering.",
                    value=10.0,
                    range=(0.0, 255.0, 0.5),
                    uid=[0],
                    advanced=True,
                ),
                desc.FloatParam(
                    name="sgmP2Weighting",
                    label="P2 Weighting",
                    description="P2 weighting parameter for SGM filtering.",
                    value=100.0,
                    range=(-255.0, 255.0, 0.5),
                    uid=[0],
                    advanced=True,
                ),
                desc.IntParam(
                    name="sgmMaxDepths",
                    label="Max Depths",
                    description="Maximum number of depths in the similarity volume.",
                    value=1500,
                    range=(1, 5000, 1),
                    uid=[0],
                    advanced=True,
                ),
                desc.StringParam(
                    name="sgmFilteringAxes",
                    label="Filtering Axes",
                    description="Define axes for the filtering of the similarity volume.",
                    value="YX",
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="sgmDepthListPerTile",
                    label="Depth List Per Tile",
                    description="Select the list of depth planes per tile or globally to the image.",
                    value=True,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="sgmUseConsistentScale",
                    label="Consistent Scale",
                    description="Compare patch with consistent scale for similarity volume computation.",
                    value=False,
                    uid=[0],
                ),
            ],
        ),
        desc.GroupAttribute(
            name="refine",
            label="Refine",
            description="The refine step computes a similarity volume in higher resolution but with a small depth range around the SGM depth map.\n"
                        "This allows to compute a depth map with sub-pixel accuracy.",
            group=None,
            groupDesc=[
                desc.BoolParam(
                    name="refineEnabled",
                    label="Enable",
                    description="Enable depth/similarity map refinement process.",
                    value=True,
                    uid=[0],
                ),
                desc.IntParam(
                    name="refineScale",
                    label="Downscale Factor",
                    description="Downscale factor applied on source images for the Refine step (in addition to the global downscale).",
                    value=1,
                    range=(-1, 10, 1),
                    uid=[0],
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.IntParam(
                    name="refineStepXY",
                    label="Step XY",
                    description="The step is used to compute the refine volume for one pixel over N (in the XY image plane).",
                    value=1,
                    range=(-1, 10, 1),
                    uid=[0],
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.IntParam(
                    name="refineMaxTCamsPerTile",
                    label="Max Nb Neighbour Cameras Per Tile",
                    description="Maximum number of neighbour cameras used per tile.",
                    value=4,
                    range=(1, 20, 1),
                    uid=[0],
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.IntParam(
                    name="refineSubsampling",
                    label="Number Of Subsamples",
                    description="The number of subsamples used to extract the best depth from the refine volume (sliding gaussian window precision).",
                    value=10,
                    range=(1, 30, 1),
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.IntParam(
                    name="refineHalfNbDepths",
                    label="Half Number Of Depths",
                    description="The thickness of the refine area around the initial depth map.\n"
                                "This parameter defines the number of depths in front of and behind the initial value\n"
                                "for which we evaluate the similarity with a finer z sampling.",
                    value=15,
                    range=(1, 50, 1),
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.IntParam(
                    name="refineWSH",
                    label="WSH",
                    description="Half-size of the patch used to compute the similarity. Patch width is wsh*2+1.",
                    value=3,
                    range=(1, 20, 1),
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.FloatParam(
                    name="refineSigma",
                    label="Sigma",
                    description="Sigma (2*sigma^2) of the Gaussian filter used to extract the best depth from the refine volume.",
                    value=15.0,
                    range=(0.0, 30.0, 0.5),
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.FloatParam(
                    name="refineGammaC",
                    label="GammaC",
                    description="GammaC threshold used for similarity computation.",
                    value=15.5,
                    range=(0.0, 30.0, 0.5),
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.FloatParam(
                    name="refineGammaP",
                    label="GammaP",
                    description="GammaP threshold used for similarity computation.",
                    value=8.0,
                    range=(0.0, 30.0, 0.5),
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.BoolParam(
                    name="refineInterpolateMiddleDepth",
                    label="Interpolate Middle Depth",
                    description="Enable middle depth bilinear interpolation.",
                    value=False,
                    uid=[0],
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
                desc.BoolParam(
                    name="refineUseConsistentScale",
                    label="Consistent Scale",
                    description="Compare patch with consistent scale for similarity volume computation.",
                    value=False,
                    uid=[0],
                    enabled=lambda node: node.refine.refineEnabled.value,
                ),
            ],
        ),
        desc.GroupAttribute(
            name="colorOptimization",
            label="Color Optimization",
            description="Color optimization post-process parameters.",
            group=None,
            groupDesc=[
                desc.BoolParam(
                    name="colorOptimizationEnabled",
                    label="Enable",
                    description="Enable depth/similarity map post-process color optimization.",
                    value=True,
                    uid=[0],
                ),
                desc.IntParam(
                    name="colorOptimizationNbIterations",
                    label="Number Of Iterations",
                    description="Number of iterations for the optimization.",
                    value=100,
                    range=(1, 500, 10),
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: node.colorOptimization.colorOptimizationEnabled.value,
                ),
            ],
        ),
        desc.GroupAttribute(
            name="customPatchPattern",
            label="Custom Patch Pattern",
            description="User custom patch pattern for similarity comparison.",
            advanced=True,
            group=None,
            groupDesc=[
                desc.BoolParam(
                    name="sgmUseCustomPatchPattern",
                    label="Enable For SGM",
                    description="Enable custom patch pattern for similarity volume computation at the SGM step.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="refineUseCustomPatchPattern",
                    label="Enable For Refine",
                    description="Enable custom patch pattern for similarity volume computation at the Refine step.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.ListAttribute(
                    name="customPatchPatternSubparts",
                    label="Subparts",
                    description="User custom patch pattern subparts for similarity volume computation.",
                    advanced=True,
                    enabled=lambda node: (
                        node.customPatchPattern.sgmUseCustomPatchPattern.value
                        or node.customPatchPattern.refineUseCustomPatchPattern.value
                    ),
                    elementDesc=desc.GroupAttribute(
                        name="customPatchPatternSubpart",
                        label="Patch Pattern Subpart",
                        description="Custom patch pattern subpart configuration for similarity volume computation.",
                        joinChar=":",
                        group=None,
                        groupDesc=[
                            desc.ChoiceParam(
                                name="customPatchPatternSubpartType",
                                label="Type",
                                description="Patch pattern subpart type.",
                                value="full",
                                values=["full", "circle"],
                                exclusive=True,
                                uid=[0],
                            ),
                            desc.FloatParam(
                                name="customPatchPatternSubpartRadius",
                                label="Radius / WSH",
                                description="Patch pattern subpart half-width or circle radius.",
                                value=2.5,
                                range=(0.5, 30.0, 0.1),
                                uid=[0],
                            ),
                            desc.IntParam(
                                name="customPatchPatternSubpartNbCoords",
                                label="Coordinates",
                                description="Patch pattern subpart number of coordinates (for circle or ignore).",
                                value=12,
                                range=(3, 24, 1),
                                uid=[0],
                            ),
                            desc.IntParam(
                                name="customPatchPatternSubpartLevel",
                                label="Level",
                                description="Patch pattern subpart image level.",
                                value=0,
                                range=(0, 2, 1),
                                uid=[0],
                            ),
                            desc.FloatParam(
                                name="customPatchPatternSubpartWeight",
                                label="Weight",
                                description="Patch pattern subpart weight.",
                                value=1.0,
                                range=(0.0, 1.0, 0.1),
                                uid=[0],
                            ),
                        ],
                    ),
                ),
                desc.BoolParam(
                    name="customPatchPatternGroupSubpartsPerLevel",
                    label="Group Subparts Per Level",
                    description="Group all subparts with the same image level.",
                    value=False,
                    uid=[0],
                    advanced=True,
                    enabled=lambda node: (
                        node.customPatchPattern.sgmUseCustomPatchPattern.value
                        or node.customPatchPattern.refineUseCustomPatchPattern.value
                    ),
                ),
            ],
        ),
        desc.GroupAttribute(
            name="intermediateResults",
            label="Intermediate Results",
            description="Intermediate results parameters for debug purposes.\n"
                        "Warning: Dramatically affect performances and use large amount of storage.",
            advanced=True,
            group=None,
            groupDesc=[
                desc.BoolParam(
                    name="exportIntermediateDepthSimMaps",
                    label="Export Depth Maps",
                    description="Export intermediate depth/similarity maps from the SGM and Refine steps.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="exportIntermediateNormalMaps",
                    label="Export Normal Maps",
                    description="Export intermediate normal maps from the SGM and Refine steps.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="exportIntermediateVolumes",
                    label="Export Volumes",
                    description="Export intermediate full similarity volumes from the SGM and Refine steps.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="exportIntermediateCrossVolumes",
                    label="Export Cross Volumes",
                    description="Export intermediate similarity cross volumes from the SGM and Refine steps.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="exportIntermediateTopographicCutVolumes",
                    label="Export Cut Volumes",
                    description="Export intermediate similarity topographic cut volumes from the SGM and Refine steps.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="exportIntermediateVolume9pCsv",
                    label="Export 9 Points",
                    description="Export intermediate volumes 9 points from the SGM and Refine steps in CSV files.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
                desc.BoolParam(
                    name="exportTilePattern",
                    label="Export Tile Pattern",
                    description="Export the bounding boxes of tiles volumes as meshes. This allows to visualize the depth map search areas.",
                    value=False,
                    uid=[0],
                    advanced=True,
                ),
            ],
        ),
        desc.IntParam(
            name="nbGPUs",
            label="Number Of GPUs",
            description="Number of GPUs to use (0 means that all the available GPUs will be used).",
            value=0,
            range=(0, 5, 1),
            uid=[],
            advanced=True,
        ),
        desc.ChoiceParam(
            name="verboseLevel",
            label="Verbose Level",
            description="Verbosity level (fatal, error, warning, info, debug, trace).",
            value="info",
            values=["fatal", "error", "warning", "info", "debug", "trace"],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="output",
            label="Folder",
            description="Output folder for generated depth maps.",
            value=desc.Node.internalFolder,
            uid=[],
        ),
        # these attributes are only here to describe more accurately the output of the node
        # by specifying that it generates 2 sequences of images
        # (see in Viewer2D.qml how these attributes can be used)
        desc.File(
            name="depth",
            label="Depth Maps",
            description="Generated depth maps.",
            semantic="image",
            value=desc.Node.internalFolder + "<VIEW_ID>_depthMap.exr",
            uid=[],
            group="",  # do not export on the command line
        ),
        desc.File(
            name="sim",
            label="Sim Maps",
            description="Generated sim maps.",
            semantic="image",
            value=desc.Node.internalFolder + "<VIEW_ID>_simMap.exr",
            uid=[],
            group="",  # do not export on the command line
        ),
        desc.File(
            name="tilePattern",
            label="Tile Pattern",
            description="Debug: Tile pattern.",
            value=desc.Node.internalFolder + "<VIEW_ID>_tilePattern.obj",
            uid=[],
            group="",  # do not export on the command line
            enabled=lambda node: node.intermediateResults.exportTilePattern.value,
        ),
        desc.File(
            name="depthSgm",
            label="Depth Maps SGM",
            description="Debug: Depth maps SGM",
            semantic="image",
            value=desc.Node.internalFolder + "<VIEW_ID>_depthMap_sgm.exr",
            uid=[],
            group="",  # do not export on the command line
            enabled=lambda node: node.intermediateResults.exportIntermediateDepthSimMaps.value,
        ),
        desc.File(
            name="depthSgmUpscaled",
            label="Depth Maps SGM Upscaled",
            description="Debug: Depth maps SGM upscaled.",
            semantic="image",
            value=desc.Node.internalFolder + "<VIEW_ID>_depthMap_sgmUpscaled.exr",
            uid=[],
            group="",  # do not export on the command line
            enabled=lambda node: node.intermediateResults.exportIntermediateDepthSimMaps.value,
        ),
        desc.File(
            name="depthRefined",
            label="Depth Maps Refined",
            description="Debug: Depth maps after refinement",
            semantic="image",
            value=desc.Node.internalFolder + "<VIEW_ID>_depthMap_refinedFused.exr",
            uid=[],
            group="",  # do not export on the command line
            enabled=lambda node: node.intermediateResults.exportIntermediateDepthSimMaps.value,
        ),
    ]
|
scenarios | slack_usergroup | import typing
from apps.slack.scenarios import scenario_step
from apps.slack.types import EventPayload, EventType, PayloadType, ScenarioRoute
from django.utils import timezone
if typing.TYPE_CHECKING:
from apps.slack.models import SlackTeamIdentity, SlackUserIdentity
class SlackUserGroupEventStep(scenario_step.ScenarioStep):
    def process_scenario(
        self,
        slack_user_identity: "SlackUserIdentity",
        slack_team_identity: "SlackTeamIdentity",
        payload: EventPayload,
    ) -> None:
        """Create or refresh a Slack user group record.

        Triggered by action: creation user groups or changes in user groups except its members.
        """
        # Local import to avoid an import cycle with apps.slack.models.
        from apps.slack.models import SlackUserGroup

        subteam = payload["event"]["subteam"]

        SlackUserGroup.objects.update_or_create(
            slack_id=subteam["id"],
            slack_team_identity=slack_team_identity,
            defaults={
                "name": subteam["name"],
                "handle": subteam["handle"],
                # "users" may be absent when Slack omits the member list.
                "members": subteam.get("users", []),
                # date_delete == 0 means the group has not been deleted.
                "is_active": subteam["date_delete"] == 0,
                "last_populated": timezone.now().date(),
            },
        )
class SlackUserGroupMembersChangedEventStep(scenario_step.ScenarioStep):
    def process_scenario(
        self,
        slack_user_identity: "SlackUserIdentity",
        slack_team_identity: "SlackTeamIdentity",
        payload: EventPayload,
    ) -> None:
        """Sync a user group's member list.

        Triggered by action: changed members in user group.
        """
        # Local import to avoid an import cycle with apps.slack.models.
        from apps.slack.models import SlackUserGroup

        group_slack_id = payload["event"]["subteam_id"]
        try:
            usergroup = slack_team_identity.usergroups.get(slack_id=group_slack_id)
        except SlackUserGroup.DoesNotExist:
            # Unknown group: create and populate it from Slack instead of
            # applying an incremental delta to nothing.
            SlackUserGroup.update_or_create_slack_usergroup_from_slack(
                group_slack_id, slack_team_identity
            )
        else:
            # Known group: apply the membership delta carried by the event.
            updated_members = set(usergroup.members)
            updated_members |= set(payload["event"]["added_users"])
            updated_members -= set(payload["event"]["removed_users"])
            usergroup.members = list(updated_members)
            usergroup.save(update_fields=["members"])
# Routing table mapping incoming Slack event callbacks to the scenario
# step class that handles them.
STEPS_ROUTING: ScenarioRoute.RoutingSteps = [
    # A user group was created: record it.
    {
        "payload_type": PayloadType.EVENT_CALLBACK,
        "event_type": EventType.SUBTEAM_CREATED,
        "step": SlackUserGroupEventStep,
    },
    # A user group's metadata changed (name, handle, deletion, ...).
    {
        "payload_type": PayloadType.EVENT_CALLBACK,
        "event_type": EventType.SUBTEAM_UPDATED,
        "step": SlackUserGroupEventStep,
    },
    # The member list of a user group changed.
    {
        "payload_type": PayloadType.EVENT_CALLBACK,
        "event_type": EventType.SUBTEAM_MEMBERS_CHANGED,
        "step": SlackUserGroupMembersChangedEventStep,
    },
]
|
scripts | check_invalid_imports | import os
import re
import sys
from pathlib import Path
"""
Run this file with the Cura project root as the working directory
Checks for invalid imports. When importing from plugins, there will be no problems when running from source,
but for some build types the plugins dir is not on the path, so relative imports should be used instead. eg:
from ..UltimakerCloudScope import UltimakerCloudScope <-- OK
import plugins.Marketplace.src ... <-- NOT OK
"""
class InvalidImportsChecker:
    """Scans Python sources for absolute imports of plugin code.

    Running from source, ``import plugins.X`` works because the project
    root is on the path, but for some build types the plugins directory
    is not on the path, so plugin code must use relative imports
    instead. This checker flags the absolute form.
    """

    # Matches absolute plugin imports at the start of a (possibly
    # indented) line: "from plugins..." or "import plugins...".
    REGEX = re.compile(r"^\s*(from plugins|import plugins)")

    def check(self) -> bool:
        """Checks the ./cura and ./plugins trees for invalid imports.

        :return: True if checks passed, False when the test fails
        """
        cwd = os.getcwd()
        # Bug fix: call the methods on self rather than through the
        # module-level "checker" global, which only existed when this
        # file was run as a script.
        cura_result = self.check_dir(os.path.join(cwd, "cura"))
        plugins_result = self.check_dir(os.path.join(cwd, "plugins"))
        result = cura_result and plugins_result
        if not result:
            print(
                "error: sources contain invalid imports. Use relative imports when referencing plugin source files"
            )
        return result

    def check_dir(self, root_dir: str) -> bool:
        """Checks every *.py file below root_dir for invalid imports.

        :return: True if checks passed, False when the test fails
        """
        passed = True
        for path_like in Path(root_dir).rglob("*.py"):
            # Keep scanning after a failure so every offender is reported.
            if not self.check_file(str(path_like)):
                passed = False
        return passed

    def check_file(self, file_path: str) -> bool:
        """Checks a single file for invalid imports, printing each offender
        as "path:line:matched-import".

        :return: True if checks passed, False when the test fails
        """
        passed = True
        with open(file_path, "r", encoding="utf-8") as input_file:
            # enumerate from 1 so reported line numbers match editors.
            for line_i, line in enumerate(input_file, 1):
                match = self.REGEX.search(line)
                if match:
                    path = os.path.relpath(file_path)
                    print(
                        "{path}:{line_i}:{match}".format(
                            path=path, line_i=line_i, match=match.group(1)
                        )
                    )
                    passed = False
        return passed
# Script entry point: run the check over ./cura and ./plugins relative to
# the current working directory; exit 0 on success, 1 if invalid imports
# were found (suitable for CI).
if __name__ == "__main__":
    checker = InvalidImportsChecker()
    sys.exit(0 if checker.check() else 1)
|
PyObjCTest | test_ikimagebrowserview | from PyObjCTools.TestSupport import *
from Quartz import *
# Python 2/3 compatibility: on Python 3 the "unicode" builtin no longer
# exists, so alias it to str for the isinstance checks below.
try:
    unicode
except NameError:
    unicode = str
class TestIKImageBrowserViewHelper(NSObject):
    """Dummy NSObject implementing the IKImageBrowserView informal
    protocols, so the test case below can check the Objective-C type
    encodings PyObjC derives for each selector. The concrete return
    values are arbitrary; only the signatures matter.

    Note: method names are Objective-C selectors and must not be renamed.
    """

    # IKImageBrowserDataSource
    def numberOfItemsInImageBrowser_(self, b):
        return 1

    def imageBrowser_itemAtIndex_(self, b, i):
        return None

    def imageBrowser_moveItemsAtIndexes_toIndex_(self, b, st, i):
        return False

    def imageBrowser_writeItemsAtIndexes_toPasteboard_(self, b, st, pb):
        return 44

    def numberOfGroupsInImageBrowser_(self, b):
        return 1

    def imageBrowser_groupAtIndex_(self, b, idx):
        return None

    # IKImageBrowserItem
    def imageVersion(self):
        return 1

    def isSelectable(self):
        return True

    # IKImageBrowserDelegate
    def imageBrowser_cellWasDoubleClickedAtIndex_(self, b, idx):
        pass

    def imageBrowser_cellWasRightClickedAtIndex_withEvent_(self, b, idx, e):
        pass
class TestIKImageBrowserView(TestCase):
@min_os_level("10.5")
def testProtocols(self):
self.assertIsInstance(
protocols.IKImageBrowserDataSource, objc.informal_protocol
)
self.assertResultHasType(
TestIKImageBrowserViewHelper.numberOfItemsInImageBrowser_,
objc._C_NSUInteger,
)
self.assertArgHasType(
TestIKImageBrowserViewHelper.imageBrowser_itemAtIndex_,
1,
objc._C_NSUInteger,
)
self.assertResultIsBOOL(
TestIKImageBrowserViewHelper.imageBrowser_moveItemsAtIndexes_toIndex_
)
self.assertArgHasType(
TestIKImageBrowserViewHelper.imageBrowser_moveItemsAtIndexes_toIndex_,
2,
objc._C_NSUInteger,
)
self.assertResultHasType(
TestIKImageBrowserViewHelper.imageBrowser_writeItemsAtIndexes_toPasteboard_,
objc._C_NSUInteger,
)
self.assertResultHasType(
TestIKImageBrowserViewHelper.numberOfGroupsInImageBrowser_,
objc._C_NSUInteger,
)
self.assertArgHasType(
TestIKImageBrowserViewHelper.imageBrowser_groupAtIndex_,
1,
objc._C_NSUInteger,
)
self.assertIsInstance(protocols.IKImageBrowserItem, objc.informal_protocol)
self.assertResultHasType(
TestIKImageBrowserViewHelper.imageVersion, objc._C_NSUInteger
)
self.assertResultIsBOOL(TestIKImageBrowserViewHelper.isSelectable)
self.assertIsInstance(protocols.IKImageBrowserDelegate, objc.informal_protocol)
self.assertArgHasType(
TestIKImageBrowserViewHelper.imageBrowser_cellWasDoubleClickedAtIndex_,
1,
objc._C_NSUInteger,
)
self.assertArgHasType(
TestIKImageBrowserViewHelper.imageBrowser_cellWasRightClickedAtIndex_withEvent_,
1,
objc._C_NSUInteger,
)
@min_os_level("10.5")
def testMethods(self):
self.assertResultIsBOOL(IKImageBrowserView.constrainsToOriginalSize)
self.assertArgIsBOOL(IKImageBrowserView.setConstrainsToOriginalSize_, 0)
self.assertArgIsBOOL(
IKImageBrowserView.setSelectionIndexes_byExtendingSelection_, 1
)
self.assertResultIsBOOL(IKImageBrowserView.allowsMultipleSelection)
self.assertArgIsBOOL(IKImageBrowserView.setAllowsMultipleSelection_, 0)
self.assertResultIsBOOL(IKImageBrowserView.allowsEmptySelection)
self.assertArgIsBOOL(IKImageBrowserView.setAllowsEmptySelection_, 0)
self.assertResultIsBOOL(IKImageBrowserView.allowsReordering)
self.assertArgIsBOOL(IKImageBrowserView.setAllowsReordering_, 0)
self.assertResultIsBOOL(IKImageBrowserView.animates)
self.assertArgIsBOOL(IKImageBrowserView.setAnimates_, 0)
# Method does not exist?
# view = IKImageBrowserView.alloc().init()
# self.assertResultIsBOOL(view.isGroupExpandedAtIndex_)
@min_os_level("10.5")
def testConstants(self):
    """Validate the 10.5 IKImageBrowser enum values and string constants."""
    for value, expected in (
        (IKCellsStyleNone, 0),
        (IKCellsStyleShadowed, 1),
        (IKCellsStyleOutlined, 2),
        (IKCellsStyleTitled, 4),
        (IKCellsStyleSubtitled, 8),
        (IKGroupBezelStyle, 0),
        (IKGroupDisclosureStyle, 1),
    ):
        self.assertEqual(value, expected)
    # All of these framework constants are exposed as NSString instances.
    string_constants = (
        IKImageBrowserPathRepresentationType,
        IKImageBrowserNSURLRepresentationType,
        IKImageBrowserNSImageRepresentationType,
        IKImageBrowserCGImageRepresentationType,
        IKImageBrowserCGImageSourceRepresentationType,
        IKImageBrowserNSDataRepresentationType,
        IKImageBrowserNSBitmapImageRepresentationType,
        IKImageBrowserQTMovieRepresentationType,
        IKImageBrowserQTMoviePathRepresentationType,
        IKImageBrowserQCCompositionRepresentationType,
        IKImageBrowserQCCompositionPathRepresentationType,
        IKImageBrowserQuickLookPathRepresentationType,
        IKImageBrowserIconRefPathRepresentationType,
        IKImageBrowserIconRefRepresentationType,
        IKImageBrowserBackgroundColorKey,
        IKImageBrowserSelectionColorKey,
        IKImageBrowserCellsOutlineColorKey,
        IKImageBrowserCellsTitleAttributesKey,
        IKImageBrowserCellsHighlightedTitleAttributesKey,
        IKImageBrowserCellsSubtitleAttributesKey,
        IKImageBrowserGroupRangeKey,
        IKImageBrowserGroupBackgroundColorKey,
        IKImageBrowserGroupTitleKey,
        IKImageBrowserGroupStyleKey,
    )
    for constant in string_constants:
        self.assertIsInstance(constant, unicode)
@min_os_level("10.6")
def testConstants10_6(self):
    """Validate the 10.6 drop-operation values and string constants."""
    self.assertEqual(IKImageBrowserDropOn, 0)
    self.assertEqual(IKImageBrowserDropBefore, 1)
    self.assertIsInstance(IKImageBrowserPDFPageRepresentationType, unicode)
    # The original asserted IKImageBrowserGroupHeaderLayer twice in a row
    # (copy/paste slip); the redundant duplicate is removed.
    # NOTE(review): the second check may have been intended for a different
    # 10.6 constant -- confirm against the ImageKit headers.
    self.assertIsInstance(IKImageBrowserGroupHeaderLayer, unicode)
@min_os_level("10.6")
def testMethods10_6(self):
    """Check BOOL metadata on the 10.6 IKImageBrowserView additions."""
    getter_setter_pairs = (
        (IKImageBrowserView.canControlQuickLookPanel,
         IKImageBrowserView.setCanControlQuickLookPanel_),
        (IKImageBrowserView.allowsDroppingOnItems,
         IKImageBrowserView.setAllowsDroppingOnItems_),
    )
    for getter, setter in getter_setter_pairs:
        self.assertResultIsBOOL(getter)
        self.assertArgIsBOOL(setter, 0)
# Run the PyObjC test suite when this file is executed directly.
if __name__ == "__main__":
    main()
|
commands | remove_editions | """ PROCEED WITH CAUTION: this permanently deletes book data """
from bookwyrm import models
from django.core.management.base import BaseCommand
from django.db.models import Count, Q
def remove_editions():
    """combine duplicate editions and update related models"""
    # not in use
    # Only consider editions no other model references.
    related_filters = {}
    for relation in models.Edition._meta.related_objects:
        related_filters["%s__isnull" % relation.name] = True
    # no cover, no identifying fields
    related_filters["cover"] = ""
    identifier_filters = {
        "isbn_10__isnull": True,
        "isbn_13__isnull": True,
        "oclc_number__isnull": True,
    }
    language_q = Q(languages=[]) | Q(languages__contains=["English"])
    candidates = models.Edition.objects.filter(
        language_q,
        **related_filters,
        **identifier_filters,
    )
    candidates = candidates.annotate(Count("parent_work__editions"))
    # mustn't be the only edition for the work
    candidates = candidates.filter(parent_work__editions__count__gt=1)
    print(candidates.count())
    candidates.delete()
class Command(BaseCommand):
    """management command that strips out low-value duplicate editions"""

    help = "merges duplicate book data"

    # pylint: disable=no-self-use,unused-argument
    def handle(self, *args, **options):
        """entry point: delegate to the module-level deduplication routine"""
        remove_editions()
|
browser | downloadview | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""The ListView to display downloads in."""
import functools
from typing import Callable, MutableSequence, Tuple, Union
from qutebrowser.browser import downloads
from qutebrowser.config import stylesheet
from qutebrowser.qt.core import QSize, Qt, pyqtSlot
from qutebrowser.qt.widgets import QListView, QMenu, QSizePolicy, QStyleFactory
from qutebrowser.utils import qtutils, utils
# Shape of the context-menu action list built by DownloadView: each entry is
# either a (label, callback) pair, or (None, None) to request a separator.
_ActionListType = MutableSequence[
    Union[
        Tuple[None, None],  # separator
        Tuple[str, Callable[[], None]],
    ]
]
class DownloadView(QListView):

    """QListView which shows currently running downloads as a bar.

    Attributes:
        _menu: The QMenu which is currently displayed.
    """

    STYLESHEET = """
        QListView {
            background-color: {{ conf.colors.downloads.bar.bg }};
            font: {{ conf.fonts.downloads }};
            border: 0;
        }
        QListView::item {
            padding-right: 2px;
        }
    """

    def __init__(self, model, parent=None):
        """Initialize the view.

        Args:
            model: The DownloadModel whose items this view displays.
            parent: The parent widget, if any.
        """
        super().__init__(parent)
        if not utils.is_mac:
            # NOTE(review): presumably the native macOS style conflicts with
            # the custom stylesheet above -- confirm before changing.
            self.setStyle(QStyleFactory.create("Fusion"))
        stylesheet.set_register(self)
        self.setResizeMode(QListView.ResizeMode.Adjust)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
        self.setSizePolicy(
            QSizePolicy.Policy.MinimumExpanding, QSizePolicy.Policy.Fixed
        )
        self.setFocusPolicy(Qt.FocusPolicy.NoFocus)
        self.setFlow(QListView.Flow.LeftToRight)
        self.setSpacing(1)
        self._menu = None
        # Keep the widget's geometry in sync with model content changes.
        model.rowsInserted.connect(self._update_geometry)
        model.rowsRemoved.connect(self._update_geometry)
        model.dataChanged.connect(self._update_geometry)
        self.setModel(model)
        self.setWrapping(True)
        self.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
        self.customContextMenuRequested.connect(self.show_context_menu)
        self.clicked.connect(self.on_clicked)

    def __repr__(self):
        model = qtutils.add_optional(self.model())
        count: Union[int, str]
        if model is None:
            count = "None"
        else:
            count = model.rowCount()
        return utils.get_repr(self, count=count)

    def _model(self) -> downloads.DownloadModel:
        """Get the current download model.

        Ensures the model is not None.
        """
        model = self.model()
        assert isinstance(model, downloads.DownloadModel), model
        return model

    @pyqtSlot()
    def _update_geometry(self):
        """Wrapper to call updateGeometry.

        For some reason, this is needed so that PyQt disconnects the signals and handles
        arguments correctly. Probably a WORKAROUND for an unknown PyQt bug.
        """
        self.updateGeometry()

    @pyqtSlot(bool)
    def on_fullscreen_requested(self, on):
        """Hide/show the downloadview when entering/leaving fullscreen."""
        if on:
            self.hide()
        else:
            self.show()

    @pyqtSlot("QModelIndex")
    def on_clicked(self, index):
        """Handle clicking of an item.

        Args:
            index: The QModelIndex of the clicked item.
        """
        if not index.isValid():
            return
        item = self._model().data(index, downloads.ModelRole.item)
        # Clicking a finished, successful download opens the file and removes
        # its entry from the bar.
        if item.done and item.successful:
            item.open_file()
            item.remove()

    def _get_menu_actions(
        self, item: downloads.AbstractDownloadItem
    ) -> _ActionListType:
        """Get the available context menu actions for a given DownloadItem.

        Args:
            item: The DownloadItem to get the actions for, or None.

        Return:
            A list of (name, handler) pairs; (None, None) marks a separator.
        """
        model = self._model()
        actions: _ActionListType = []
        if item is None:
            pass
        elif item.done:
            if item.successful:
                actions.append(("Open", item.open_file))
                actions.append(
                    (
                        "Open directory",
                        functools.partial(item.open_file, open_dir=True, cmdline=None),
                    )
                )
            else:
                actions.append(("Retry", item.try_retry))
            actions.append(("Remove", item.remove))
        else:
            # Download still in progress.
            actions.append(("Cancel", item.cancel))
        if item is not None:
            actions.append(
                (
                    "Copy URL",
                    functools.partial(
                        utils.set_clipboard, item.url().toDisplayString()
                    ),
                )
            )
        if model.can_clear():
            actions.append((None, None))
            actions.append(("Remove all finished", model.download_clear))
        return actions

    @pyqtSlot("QPoint")
    def show_context_menu(self, point):
        """Show the context menu."""
        index = self.indexAt(point)
        if index.isValid():
            item = self._model().data(index, downloads.ModelRole.item)
        else:
            item = None
        self._menu = QMenu(self)
        actions = self._get_menu_actions(item)
        for name, handler in actions:
            if name is None and handler is None:
                self._menu.addSeparator()
            else:
                assert name is not None
                assert handler is not None
                action = self._menu.addAction(name)
                assert action is not None
                action.triggered.connect(handler)
        if actions:
            viewport = self.viewport()
            assert viewport is not None
            self._menu.popup(viewport.mapToGlobal(point))

    def minimumSizeHint(self):
        """Override minimumSizeHint so the size is correct in a layout."""
        return self.sizeHint()

    def sizeHint(self):
        """Return sizeHint based on the view contents."""
        idx = self._model().last_index()
        bottom = self.visualRect(idx).bottom()
        if bottom != -1:
            margins = self.contentsMargins()
            height = bottom + margins.top() + margins.bottom() + 2 * self.spacing()
            size = QSize(0, height)
        else:
            # No visible last item -> zero-height hint.
            size = QSize(0, 0)
        qtutils.ensure_valid(size)
        return size
|
propsdlg | pdfpropsdlg | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import wal
from generic import PrnProsDialog
from sk1 import _
from sk1.pwidgets import StaticUnitLabel, StaticUnitSpin
from sk1.resources import get_icon, icons
from uc2 import uc2const
from uc2.formats.pdf import pdfconst
# Colorspace choices offered in the "Color space" combo, in display order.
CS = [uc2const.COLOR_GRAY, uc2const.COLOR_CMYK, uc2const.COLOR_RGB]
class GenModePanel(wal.LabeledPanel):
    """Panel with the PDF generation options: version, colorspace and flags."""

    printer = None

    def __init__(self, parent, printer):
        self.printer = printer
        wal.LabeledPanel.__init__(self, parent, _("Generation options"))
        vpanel = wal.VPanel(self)
        grid = wal.GridPanel(vpanel, 2, 2, 2, 3)
        grid.add_growable_col(1)
        grid.pack(wal.Label(grid, _("PDF version:")))
        self.ver_combo = wal.Combolist(grid, items=pdfconst.PDF_VER_NAMES)
        grid.pack(self.ver_combo, fill=True)
        grid.pack(wal.Label(grid, _("Color space:")))
        self.cs_combo = wal.Combolist(grid, items=CS)
        grid.pack(self.cs_combo, fill=True)
        vpanel.pack(grid, fill=True)
        vpanel.pack((3, 3))
        self.use_spot = wal.Checkbox(vpanel, _("Use SPOT colors"))
        vpanel.pack(self.use_spot, align_center=False)
        self.compressed = wal.Checkbox(vpanel, _("Use compression"))
        vpanel.pack(self.compressed, align_center=False)
        self.pack(vpanel, fill=True, expand=True, padding_all=5)
        # Initialize the widgets from the printer's current settings.
        index = pdfconst.PDF_VERSIONS.index(self.printer.pdf_version)
        self.ver_combo.set_active(index)
        self.cs_combo.set_active(CS.index(self.printer.colorspace))
        self.use_spot.set_value(self.printer.use_spot)
        self.compressed.set_value(self.printer.compressed)

    def save(self):
        """Write the widget state back into the printer object."""
        self.printer.use_spot = self.use_spot.get_value()
        self.printer.compressed = self.compressed.get_value()
        index = self.ver_combo.get_active()
        self.printer.pdf_version = pdfconst.PDF_VERSIONS[index]
        self.printer.colorspace = CS[self.cs_combo.get_active()]
class PagePanel(wal.LabeledPanel):
    """Panel for choosing the document page size and orientation."""

    app = None
    printer = None
    # Format names shown in the size combo (filled from the printer).
    items = []

    def __init__(self, parent, printer, app):
        self.app = app
        self.printer = printer
        wal.LabeledPanel.__init__(self, parent, _("Document page"))
        grid = wal.GridPanel(self, 3, 2, 3, 3)
        grid.add_growable_col(1)
        grid.pack(wal.Label(grid, _("Page size:")))
        self.size_combo = wal.Combolist(grid, onchange=self.on_change)
        grid.pack(self.size_combo, fill=True)
        grid.pack(wal.Label(grid, _("Width:")))
        hpanel = wal.HPanel(grid)
        self.wspin = StaticUnitSpin(self.app, hpanel)
        hpanel.pack(self.wspin)
        hpanel.pack((5, 5))
        hpanel.pack(wal.Label(grid, _("Height:")), padding=5)
        self.hspin = StaticUnitSpin(self.app, hpanel)
        hpanel.pack(self.hspin)
        hpanel.pack(StaticUnitLabel(self.app, hpanel), padding=5)
        grid.pack(hpanel)
        grid.pack(wal.Label(grid, _("Orientation:")))
        hpanel = wal.HPanel(grid)
        self.port_opt = wal.Radiobutton(hpanel, uc2const.ORIENTS_NAMES[0], group=True)
        self.land_opt = wal.Radiobutton(hpanel, uc2const.ORIENTS_NAMES[1])
        hpanel.pack(self.port_opt)
        hpanel.pack((15, 5))
        hpanel.pack(self.land_opt)
        grid.pack(hpanel)
        self.pack(grid, fill=True, expand=True, padding_all=7)
        self.set_data()

    def set_data(self):
        """Populate the widgets from the printer's current page settings."""
        self.land_opt.set_value(True)
        if self.printer.page_orientation == uc2const.PORTRAIT:
            self.port_opt.set_value(True)
        self.items = self.printer.get_format_items()
        self.size_combo.set_items(self.items)
        # NOTE(review): assumes the "Custom" entry is always the last item of
        # get_format_items() -- confirm in the printer implementation.
        if not self.printer.page_format[0] == "Custom":
            index = self.items.index(self.printer.page_format[0])
        else:
            index = len(self.items) - 1
        self.size_combo.set_active(index)
        if self.printer.is_custom_supported():
            minw, minh = self.printer.customs[0]
            maxw, maxh = self.printer.customs[1]
            self.wspin.set_point_range((minw, maxw))
            self.hspin.set_point_range((minh, maxh))
        self.on_change()

    def on_change(self):
        """Enable the width/height spins only for the 'Custom' combo entry."""
        index = self.size_combo.get_active()
        status = False
        if index == len(self.items) - 1:
            if (
                not self.hspin.get_point_value()
                and self.printer.page_format[0] == "Custom"
            ):
                # First activation of "Custom": seed the spins from the
                # printer's stored custom size.
                w, h = self.printer.page_format[1]
                self.wspin.set_point_value(w)
                self.hspin.set_point_value(h)
            status = True
        else:
            # NOTE(review): maps the combo index into PAGE_FORMAT_NAMES;
            # assumes the combo items align with that list -- confirm.
            w, h = uc2const.PAGE_FORMATS[uc2const.PAGE_FORMAT_NAMES[index]]
            self.wspin.set_point_value(w)
            self.hspin.set_point_value(h)
        self.wspin.set_enable(status)
        self.hspin.set_enable(status)

    def save(self):
        """Write page format and orientation back into the printer object."""
        index = self.size_combo.get_active()
        prn = self.printer
        if prn.is_custom_supported() and index == len(self.items) - 1:
            w = self.wspin.get_point_value()
            h = self.hspin.get_point_value()
            prn.page_format = ("Custom", (w, h))
        else:
            media = uc2const.PAGE_FORMAT_NAMES[index]
            w, h = uc2const.PAGE_FORMATS[media]
            prn.page_format = (media, (w, h))
        self.printer.page_orientation = uc2const.LANDSCAPE
        if self.port_opt.get_value():
            self.printer.page_orientation = uc2const.PORTRAIT
class DocInfoPanel(wal.LabeledPanel):
    """Panel editing the PDF metadata fields (title/subject/author/keywords)."""

    printer = None

    def __init__(self, parent, printer, app):
        self.printer = printer
        self.app = app
        wal.LabeledPanel.__init__(self, parent, _("Document metainfo"))
        grid = wal.GridPanel(self, 4, 2, 2, 5)
        grid.add_growable_col(1)
        # One labelled entry per metadata field, stored as an attribute.
        for attr, label, value in (
            ("title", _("Title:"), printer.meta_title),
            ("subject", _("Subject:"), printer.meta_subject),
            ("author", _("Author:"), printer.meta_author),
            ("keywords", _("Keywords:"), printer.meta_keywords),
        ):
            grid.pack(wal.Label(grid, label))
            entry = wal.Entry(grid, value)
            setattr(self, attr, entry)
            grid.pack(entry, fill=True)
        self.pack(grid, fill=True, expand=True, padding_all=7)

    def is_metadata(self):
        """Report whether the current document carries author/keyword metainfo."""
        metainfo = self.app.current_doc.model.metainfo
        return bool(metainfo and (metainfo[0] or metainfo[2]))

    def import_data(self):
        """Copy metainfo from the current document into the entry widgets."""
        metainfo = self.app.current_doc.model.metainfo
        self.title.set_value(self.app.current_doc.doc_name)
        self.author.set_value(metainfo[0])
        self.keywords.set_value(metainfo[2])

    def save(self):
        """Write the entry values back into the printer object."""
        self.printer.meta_title = self.title.get_value()
        self.printer.meta_subject = self.subject.get_value()
        self.printer.meta_author = self.author.get_value()
        self.printer.meta_keywords = self.keywords.get_value()
class MainPanel(wal.VPanel):
    """Container aggregating the three option panels; saves them in one call."""

    app = None
    printer = None
    panels = []

    def __init__(self, parent, printer, app):
        self.app = app
        self.printer = printer
        wal.VPanel.__init__(self, parent)
        # Top row: printer icon next to the generation options.
        top_row = wal.HPanel(self)
        icon = get_icon(icons.PD_PRINTER_PDF, size=wal.DEF_SIZE)
        top_row.pack(wal.Bitmap(top_row, icon), padding=5)
        self.prnmode_panel = GenModePanel(top_row, self.printer)
        top_row.pack(self.prnmode_panel, fill=True, expand=True)
        self.pack(top_row, fill=True)
        # Page geometry and metadata panels below.
        self.page_panel = PagePanel(self, self.printer, self.app)
        self.pack(self.page_panel, fill=True)
        self.doc_info = DocInfoPanel(self, self.printer, self.app)
        self.pack(self.doc_info, fill=True, expand=True)
        self.panels = [self.prnmode_panel, self.page_panel, self.doc_info]

    def save(self):
        """Flush every child panel's widget state into the printer."""
        for panel in self.panels:
            panel.save()
class PDF_PrnPropsDialog(PrnProsDialog):
    """Printer-properties dialog for the PDF printer."""

    def build(self):
        PrnProsDialog.build(self)
        self.panel.pack((5, 5))
        self.main_panel = MainPanel(self.panel, self.printer, self.app)
        self.panel.pack(self.main_panel, fill=True, expand=True)

    def set_dialog_buttons(self):
        """Add the "Set metainfo" button next to the standard dialog buttons."""
        PrnProsDialog.set_dialog_buttons(self)
        self.import_btn = wal.Button(
            self.left_button_box,
            _("Set metainfo"),
            tooltip=_("Set metainfo from current document"),
            onclick=self.main_panel.doc_info.import_data,
        )
        self.left_button_box.pack(self.import_btn)
        # Only enabled when the current document actually has metainfo.
        self.import_btn.set_enable(self.main_panel.doc_info.is_metadata())

    def get_result(self):
        """Persist all panel values into the printer; always accept the dialog."""
        self.main_panel.save()
        return True
|
httplib2 | iri2uri | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to enocde and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Code point ranges ('ucschar' and 'iprivate' from RFC 3987) whose characters
# must be percent-encoded when converting an IRI to a URI. Sorted ascending,
# which encode() relies on for its early break.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]


def encode(c):
    """Return the single character *c* unchanged if it is a plain URI
    character, otherwise the %-encoding of its UTF-8 octets."""
    retval = c
    i = ord(c)
    for low, high in escape_range:
        if i < low:
            # Ranges are sorted ascending, so no later range can match.
            break
        if low <= i <= high:
            # bytearray iteration yields ints on both Python 2 and 3 (the
            # original's ord(o) over bytes breaks on Python 3). %02X instead
            # of %2X so octets below 0x10 are zero-padded, not space-padded;
            # identical output for all octets >= 0x10.
            retval = "".join(["%%%02X" % o for o in bytearray(c.encode("utf-8"))])
            break
    return retval
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    # Python 2 only: `unicode` and the `urlparse` module do not exist on
    # Python 3. Byte strings pass through unchanged.
    if isinstance(uri, unicode):
        (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
        # The host part is IDNA-encoded rather than percent-escaped.
        authority = authority.encode("idna")
        # For each character in 'ucschar' or 'iprivate'
        # 1. encode as utf-8
        # 2. then %-encode each octet of that utf-8
        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
        uri = "".join([encode(c) for c in uri])
    return uri
# Self-test: run this module directly to exercise iri2uri().
if __name__ == "__main__":
    import unittest

    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                "ftp://ftp.is.co.za/rfc/rfc1808.txt",
                "http://www.ietf.org/rfc/rfc2396.txt",
                "ldap://[2001:db8::7]/c=GB?objectClass?one",
                "mailto:John.Doe@example.com",
                "news:comp.infosystems.www.servers.unix",
                "tel:+1-816-555-1212",
                "telnet://192.0.2.16:80/",
                "urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
            ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """Test that the right type of escaping is done for each part of the URI."""
            # NOTE(review): \N{...} escapes only expand in u"" literals on
            # Python 2, but the module imports Python 2's urlparse -- confirm
            # which interpreter these tests are expected to run under.
            self.assertEqual(
                "http://xn--o3h.com/%E2%98%84",
                iri2uri("http://\N{COMET}.com/\N{COMET}"),
            )
            self.assertEqual(
                "http://bitworking.org/?fred=%E2%98%84",
                iri2uri("http://bitworking.org/?fred=\N{COMET}"),
            )
            self.assertEqual(
                "http://bitworking.org/#%E2%98%84",
                iri2uri("http://bitworking.org/#\N{COMET}"),
            )
            self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}"))
            self.assertEqual(
                "/fred?bar=%E2%98%9A#%E2%98%84",
                iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"),
            )
            # Applying iri2uri twice must not double-escape.
            self.assertEqual(
                "/fred?bar=%E2%98%9A#%E2%98%84",
                iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")),
            )
            # Already-encoded byte strings are passed through unchanged.
            self.assertNotEqual(
                "/fred?bar=%E2%98%9A#%E2%98%84",
                iri2uri(
                    "/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8")
                ),
            )

    unittest.main()
|
builtinViewColumns | price | # =============================================================================
# Copyright (C) 2010 Diego Duclos, Lucas Thode
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
from eos.saveddata.cargo import Cargo
from eos.saveddata.drone import Drone
from eos.saveddata.fighter import Fighter
from eos.saveddata.module import Module
from eos.saveddata.price import PriceStatus
from gui.bitmap_loader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
from gui.viewColumn import ViewColumn
from service.price import Price as ServicePrice
def formatPrice(stuff, priceObj):
    """Render a price object as display text.

    Stackable items (drones, fighters, cargo) are multiplied by their amount;
    "(!)" is appended when the last price fetch failed or timed out.
    """
    parts = []
    if priceObj.price:
        if isinstance(stuff, (Drone, Fighter, Cargo)):
            quantity = stuff.amount
        else:
            quantity = 1
        parts.append(formatAmount(priceObj.price * quantity, 3, 3, 9, currency=True))
    fetch_failed = priceObj.status in (PriceStatus.fetchFail, PriceStatus.fetchTimeout)
    if fetch_failed:
        parts.append("(!)")
    return " ".join(parts)
class Price(ViewColumn):
    name = "Price"

    def __init__(self, fittingView, params):
        """Set up an image-only column using the "total price" icon."""
        ViewColumn.__init__(self, fittingView)
        self.mask = wx.LIST_MASK_IMAGE
        self.bitmap = BitmapLoader.getBitmap("totalPrice_small", "gui")
        self.imageId = fittingView.imageList.GetImageIndex("totalPrice_small", "gui")

    def getText(self, stuff):
        """Return the price text for *stuff*, "" when no price applies.

        Returns False (not a string) when the cached price object is invalid.
        NOTE(review): presumably the caller treats a False return as a signal
        to fetch asynchronously via delayedText() -- confirm in ViewColumn.
        """
        if stuff.item is None or stuff.item.group.name == "Ship Modifiers":
            return ""
        if hasattr(stuff, "isEmpty"):
            if stuff.isEmpty:
                return ""
        # Mutated modules show no price.
        if isinstance(stuff, Module) and stuff.isMutated:
            return ""
        priceObj = stuff.item.price
        if not priceObj.isValid():
            return False
        return formatPrice(stuff, priceObj)

    def delayedText(self, mod, display, colItem):
        """Fetch the item price asynchronously and update the list cell."""
        sPrice = ServicePrice.getInstance()

        def callback(item):
            # item is a sequence whose first element is the price object.
            priceObj = item[0]
            colItem.SetText(formatPrice(mod, priceObj))
            display.SetItem(colItem)

        sPrice.getPrices([mod.item], callback, waitforthread=True)

    def getImageId(self, mod):
        # Rows carry no per-item image; only the column header shows the icon.
        return -1

    def getToolTip(self, mod):
        return self.name
# Register this column type with the view-column machinery at import time.
Price.register()
|
aliceVision | MeshResampling | __version__ = "1.0"
from meshroom.core import desc
class MeshResampling(desc.AVCommandLineNode):
    # Meshroom node wrapping the aliceVision_meshResampling executable; all
    # declared parameters are forwarded on the command line via {allParams}.
    commandLine = "aliceVision_meshResampling {allParams}"
    cpu = desc.Level.NORMAL
    ram = desc.Level.NORMAL

    category = "Mesh Post-Processing"
    documentation = """
This node allows to recompute the mesh surface with a new topology and uniform density.
"""

    # Command-line parameters exposed in the node UI.
    inputs = [
        desc.File(
            name="input",
            label="Input Mesh",
            description="Input mesh in the OBJ file format.",
            value="",
            uid=[0],
        ),
        desc.FloatParam(
            name="simplificationFactor",
            label="Simplification Factor",
            description="Simplification factor for the resampling.",
            value=0.5,
            range=(0.0, 1.0, 0.01),
            uid=[0],
        ),
        # NOTE(review): a value of 0 for the vertex-count parameters below
        # presumably means "no constraint" -- confirm in the aliceVision docs.
        desc.IntParam(
            name="nbVertices",
            label="Fixed Number Of Vertices",
            description="Fixed number of output vertices.",
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="minVertices",
            label="Min Vertices",
            description="Minimum number of output vertices.",
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="maxVertices",
            label="Max Vertices",
            description="Maximum number of output vertices.",
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="nbLloydIter",
            label="Number Of Pre-Smoothing Iteration",
            description="Number of iterations for Lloyd pre-smoothing.",
            value=40,
            range=(0, 100, 1),
            uid=[0],
        ),
        desc.BoolParam(
            name="flipNormals",
            label="Flip Normals",
            description="Option to flip face normals.\n"
            "It can be needed as it depends on the vertices order in triangles and the convention changes from one software to another.",
            value=False,
            uid=[0],
        ),
        desc.ChoiceParam(
            name="verboseLevel",
            label="Verbose Level",
            description="Verbosity level (fatal, error, warning, info, debug, trace).",
            value="info",
            values=["fatal", "error", "warning", "info", "debug", "trace"],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="output",
            label="Mesh",
            description="Output mesh in the OBJ file format.",
            value=desc.Node.internalFolder + "mesh.obj",
            uid=[],
        ),
    ]
|
i18n | summary | #!/usr/bin/env python3
# summary.py - Text-based visual translation completeness summary
# Thomas Perl <thp@gpodder.org>, 2009-01-03
#
# Usage: make statistics | python summary.py
#
import glob
import math
import os
import re
import subprocess
# Width (in characters) of the progress bar drawn for each language.
width = 40


class Language(object):
    """Translation statistics for a single language's .po catalog."""

    def __init__(self, language, translated, fuzzy, untranslated):
        self.language = language
        self.translated = int(translated)
        self.fuzzy = int(fuzzy)
        self.untranslated = int(untranslated)

    def _total(self):
        """Total number of messages in the catalog."""
        return self.translated + self.fuzzy + self.untranslated

    def get_translated_ratio(self):
        return self.translated / self._total()

    def get_fuzzy_ratio(self):
        return self.fuzzy / self._total()

    def get_untranslated_ratio(self):
        return self.untranslated / self._total()

    def __lt__(self, other):
        # The original defined the Python 2 __cmp__ hook, which Python 3
        # ignores, so sorted(languages) below raised TypeError. sorted()
        # only needs __lt__: order by translated ratio, ascending.
        return self.get_translated_ratio() < other.get_translated_ratio()
# One Language object per .po catalog found below the po/ folder.
languages = []

# Parses "msgfmt --statistics" output such as
# "7 translated messages, 2 fuzzy translations, 1 untranslated message."
# Every section is optional, hence the nested "?" groups. Raw string so the
# \d escapes are not (invalid) string escapes.
COUNTS_RE = r"((\d+) translated message[s]?)?(, (\d+) fuzzy translation[s]?)?(, (\d+) untranslated message[s]?)?\."

po_folder = os.path.join(os.path.dirname(__file__), "..", "..", "po")
for filename in glob.glob(os.path.join(po_folder, "*.po")):
    language, _ = os.path.splitext(os.path.basename(filename))
    msgfmt = subprocess.Popen(
        ["msgfmt", "--statistics", filename],
        stderr=subprocess.PIPE,
        # Decode stderr to str: on Python 3 re.match() with a str pattern
        # raises TypeError when handed the bytes Popen yields by default.
        universal_newlines=True,
    )
    _, stderr = msgfmt.communicate()
    match = re.match(COUNTS_RE, stderr).groups()
    # Groups 1/3/5 are the numeric counts; missing sections mean zero.
    languages.append(
        Language(language, match[1] or "0", match[3] or "0", match[5] or "0")
    )
# Draw one bar per language, least-translated first ('#' translated,
# '~' fuzzy, ' ' untranslated), then the total catalog count.
print("")
for language in sorted(languages):
    tc = "#" * (int(math.floor(width * language.get_translated_ratio())))
    fc = "~" * (int(math.floor(width * language.get_fuzzy_ratio())))
    uc = " " * (width - len(tc) - len(fc))
    print(
        " %5s [%s%s%s] -- %3.0f %% translated"
        % (language.language, tc, fc, uc, language.get_translated_ratio() * 100)
    )
print(
    """
Total translations: %s
"""
    % (len(languages))
)
|
v1 | submissions | from typing import List
from CTFd.api.v1.helpers.request import validate_args
from CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic
from CTFd.api.v1.schemas import (
APIDetailedSuccessResponse,
PaginatedAPIListSuccessResponse,
)
from CTFd.cache import clear_challenges, clear_standings
from CTFd.constants import RawEnum
from CTFd.models import Solves, Submissions, db
from CTFd.schemas.submissions import SubmissionSchema
from CTFd.utils.decorators import admins_only
from CTFd.utils.helpers.models import build_model_filters
from flask import request
from flask_restx import Namespace, Resource
submissions_namespace = Namespace(
    "submissions", description="Endpoint to retrieve Submission"
)

# Pydantic models generated from the SQLAlchemy Submissions model; the
# "transient" variant omits the id so it can validate POST (create) payloads.
SubmissionModel = sqlalchemy_to_pydantic(Submissions)
TransientSubmissionModel = sqlalchemy_to_pydantic(Submissions, exclude=["id"])


class SubmissionDetailedSuccessResponse(APIDetailedSuccessResponse):
    # Response envelope for a single submission.
    data: SubmissionModel


class SubmissionListSuccessResponse(PaginatedAPIListSuccessResponse):
    # Response envelope for a paginated list of submissions.
    data: List[SubmissionModel]


# Register the response schemas so @doc can reference them by name.
submissions_namespace.schema_model(
    "SubmissionDetailedSuccessResponse", SubmissionDetailedSuccessResponse.apidoc()
)
submissions_namespace.schema_model(
    "SubmissionListSuccessResponse", SubmissionListSuccessResponse.apidoc()
)
@submissions_namespace.route("")
class SubmissionsList(Resource):
    """Admin-only collection endpoint: list and create submissions."""

    @admins_only
    @submissions_namespace.doc(
        description="Endpoint to get submission objects in bulk",
        responses={
            200: ("Success", "SubmissionListSuccessResponse"),
            400: (
                "An error occured processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    @validate_args(
        {
            "challenge_id": (int, None),
            "user_id": (int, None),
            "team_id": (int, None),
            "ip": (str, None),
            "provided": (str, None),
            "type": (str, None),
            "q": (str, None),
            "field": (
                RawEnum(
                    "SubmissionFields",
                    {
                        "challenge_id": "challenge_id",
                        "user_id": "user_id",
                        "team_id": "team_id",
                        "ip": "ip",
                        "provided": "provided",
                        "type": "type",
                    },
                ),
                None,
            ),
        },
        location="query",
    )
    def get(self, query_args):
        """Return a paginated, filtered list of submissions."""
        # q/field implement a free-text search over one selectable column;
        # the remaining query args become exact-match filters.
        q = query_args.pop("q", None)
        field = str(query_args.pop("field", None))
        filters = build_model_filters(model=Submissions, query=q, field=field)
        args = query_args
        schema = SubmissionSchema(many=True)
        submissions = (
            Submissions.query.filter_by(**args)
            .filter(*filters)
            .paginate(max_per_page=100)
        )
        response = schema.dump(submissions.items)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400
        return {
            "meta": {
                "pagination": {
                    "page": submissions.page,
                    "next": submissions.next_num,
                    "prev": submissions.prev_num,
                    "pages": submissions.pages,
                    "per_page": submissions.per_page,
                    "total": submissions.total,
                }
            },
            "success": True,
            "data": response.data,
        }

    @admins_only
    @submissions_namespace.doc(
        description="Endpoint to create a submission object. Users should interact with the attempt endpoint to submit flags.",
        responses={
            200: ("Success", "SubmissionListSuccessResponse"),
            400: (
                "An error occured processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    @validate_args(TransientSubmissionModel, location="json")
    def post(self, json_args):
        """Create a submission of the requested polymorphic type."""
        req = json_args
        # Instantiate the child model matching the "type" payload field.
        Model = Submissions.get_child(type=req.get("type"))
        schema = SubmissionSchema(instance=Model())
        response = schema.load(req)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400
        db.session.add(response.data)
        db.session.commit()
        response = schema.dump(response.data)
        db.session.close()
        # Delete standings cache
        clear_standings()
        # Delete challenges cache
        clear_challenges()
        return {"success": True, "data": response.data}
@submissions_namespace.route("/<submission_id>")
@submissions_namespace.param("submission_id", "A Submission ID")
class Submission(Resource):
    """Admin-only item endpoint: fetch, edit, or delete one submission."""

    @admins_only
    @submissions_namespace.doc(
        description="Endpoint to get a submission object",
        responses={
            200: ("Success", "SubmissionDetailedSuccessResponse"),
            400: (
                "An error occured processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    def get(self, submission_id):
        """Return a single submission by id (404 when missing)."""
        submission = Submissions.query.filter_by(id=submission_id).first_or_404()
        schema = SubmissionSchema()
        response = schema.dump(submission)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400
        return {"success": True, "data": response.data}

    @admins_only
    @submissions_namespace.doc(
        description="Endpoint to edit a submission object",
        responses={
            200: ("Success", "SubmissionDetailedSuccessResponse"),
            400: (
                "An error occured processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    def patch(self, submission_id):
        """Edit a submission.

        Setting type to "correct" promotes it: a Solves row is created from
        the submission's data and the original row is marked "discard".
        """
        submission = Submissions.query.filter_by(id=submission_id).first_or_404()
        req = request.get_json()
        submission_type = req.get("type")
        if submission_type == "correct":
            solve = Solves(
                user_id=submission.user_id,
                challenge_id=submission.challenge_id,
                team_id=submission.team_id,
                ip=submission.ip,
                provided=submission.provided,
                date=submission.date,
            )
            db.session.add(solve)
            submission.type = "discard"
            db.session.commit()
            # Delete standings cache
            clear_standings()
            clear_challenges()
            # Serialize the newly created solve rather than the discard row.
            submission = solve
        schema = SubmissionSchema()
        response = schema.dump(submission)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400
        return {"success": True, "data": response.data}

    @admins_only
    @submissions_namespace.doc(
        description="Endpoint to delete a submission object",
        responses={
            200: ("Success", "APISimpleSuccessResponse"),
            400: (
                "An error occured processing the provided or stored data",
                "APISimpleErrorResponse",
            ),
        },
    )
    def delete(self, submission_id):
        """Delete a submission by id (404 when missing)."""
        submission = Submissions.query.filter_by(id=submission_id).first_or_404()
        db.session.delete(submission)
        db.session.commit()
        db.session.close()
        # Delete standings cache
        clear_standings()
        clear_challenges()
        return {"success": True}
|
pdu | qa_pdu_to_stream | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2021 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government
# retains certain rights in this software.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
import time
import numpy as np
import pmt
from gnuradio import blocks, gr, gr_unittest, pdu
class qa_pdu_to_bursts(gr_unittest.TestCase):
    """QA tests for pdu.pdu_to_stream_c: tag emission and sample output."""

    def setUp(self):
        self.tb = gr.top_block()
        self.p2s = pdu.pdu_to_stream_c(pdu.EARLY_BURST_APPEND, 64)
        self.vs = blocks.vector_sink_c(1)
        self.tag_debug = blocks.tag_debug(gr.sizeof_gr_complex * 1, "", "")
        self.tag_debug.set_display(True)
        self.tb.connect((self.p2s, 0), (self.vs, 0))
        self.tb.connect((self.p2s, 0), (self.tag_debug, 0))

    def tearDown(self):
        self.tb = None

    @staticmethod
    def _tag(offset, key, value):
        """Build an expected gr tag at *offset* with interned *key*."""
        return gr.tag_utils.python_to_tag(
            (offset, pmt.intern(key), value, pmt.PMT_NIL)
        )

    def _run_and_check(self, in_data, in_pdu, expected_tags):
        """Run the flowgraph and validate the produced tags and samples.

        A malformed PDU is posted first and must be dropped without
        affecting the output stream; then in_pdu is posted and the sink's
        tags/data are compared against expectations.
        """
        self.tb.start()
        self.p2s.to_basic_block()._post(pmt.intern("pdus"), pmt.intern("MALFORMED PDU"))
        self.p2s.to_basic_block()._post(pmt.intern("pdus"), in_pdu)
        self.waitFor(
            lambda: len(self.vs.tags()) == len(expected_tags),
            timeout=1.0,
            poll_interval=0.01,
        )
        self.tb.stop()
        self.tb.wait()

        tags = self.vs.tags()
        self.assertEqual(len(tags), len(expected_tags))
        for tag, expected in zip(tags, expected_tags):
            self.assertEqual(tag.offset, expected.offset)
            self.assertTrue(pmt.equal(tag.key, expected.key))
            self.assertTrue(pmt.equal(tag.value, expected.value))
        self.assertTrue((in_data == np.real(self.vs.data())).all())

    def test_001_basic(self):
        # Untimed PDU: expect only start/end-of-burst tags.
        in_data = [0] * 8 + [1] * 8 + [0, 1] * 4
        in_pdu = pmt.cons(pmt.make_dict(), pmt.init_c32vector(len(in_data), in_data))
        expected = [
            self._tag(0, "tx_sob", pmt.PMT_T),
            self._tag(len(in_data) - 1, "tx_eob", pmt.PMT_T),
        ]
        self._run_and_check(in_data, in_pdu, expected)

    def test_002_timed(self):
        # PDU with a tx_time entry: a tx_time tag must accompany tx_sob.
        in_data = [0] * 8 + [1] * 8 + [0, 1] * 4
        tag_time = pmt.make_tuple(pmt.from_uint64(11), pmt.from_double(0.123456))
        in_dict = pmt.dict_add(pmt.make_dict(), pmt.intern("tx_time"), tag_time)
        in_pdu = pmt.cons(in_dict, pmt.init_c32vector(len(in_data), in_data))
        expected = [
            self._tag(0, "tx_sob", pmt.PMT_T),
            self._tag(0, "tx_time", tag_time),
            self._tag(len(in_data) - 1, "tx_eob", pmt.PMT_T),
        ]
        self._run_and_check(in_data, in_pdu, expected)

    def test_003_timed_long(self):
        # Same as test_002 but with a payload much larger than one work call.
        in_data = np.arange(25000).tolist()
        tag_time = pmt.make_tuple(pmt.from_uint64(11), pmt.from_double(0.123456))
        in_dict = pmt.dict_add(pmt.make_dict(), pmt.intern("tx_time"), tag_time)
        in_pdu = pmt.cons(in_dict, pmt.init_c32vector(len(in_data), in_data))
        expected = [
            self._tag(0, "tx_sob", pmt.PMT_T),
            self._tag(0, "tx_time", tag_time),
            self._tag(len(in_data) - 1, "tx_eob", pmt.PMT_T),
        ]
        self._run_and_check(in_data, in_pdu, expected)
# Allow running this QA file directly via the GNU Radio test runner.
if __name__ == "__main__":
    gr_unittest.run(qa_pdu_to_bursts)
|
plugins | vtvgo | """
$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.
$url vtvgo.vn
$type live
"""
import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(
    re.compile(
        r"https?://vtvgo\.vn/xem-truc-tuyen-kenh-",
    )
)
class VTVgo(Plugin):
    """Streamlink plugin for live channels on vtvgo.vn."""

    # AJAX endpoint that exchanges the scraped player params for stream URLs.
    AJAX_URL = "https://vtvgo.vn/ajax-get-stream"

    def _get_streams(self):
        """Scrape the player parameters from the channel page and resolve
        the HLS variant playlist via the site's AJAX endpoint."""
        # get cookies
        self.session.http.get("https://vtvgo.vn/")
        # Headers the AJAX endpoint expects from an in-page XHR request.
        self.session.http.headers.update(
            {
                "Origin": "https://vtvgo.vn",
                "Referer": self.url,
                "Sec-Fetch-Site": "same-origin",
                "X-Requested-With": "XMLHttpRequest",
            }
        )
        # Extract the id/type_id/time/token vars from the setplayer() script;
        # params ends up as (key, value) pairs, with "id" coerced to int.
        params = self.session.http.get(
            self.url,
            schema=validate.Schema(
                validate.parse_html(),
                validate.xml_xpath_string(".//script[contains(text(),'setplayer(')][1]/text()"),
                validate.none_or_all(
                    validate.regex(
                        re.compile(r"""var\s+(?P<key>(?:type_)?id|time|token)\s*=\s*["']?(?P<value>[^"']+)["']?;"""),
                        method="findall",
                    ),
                    [
                        ("id", int),
                        ("type_id", str),
                        ("time", str),
                        ("token", str),
                    ],
                ),
            ),
        )
        if not params:
            # No setplayer() script found on the page: nothing to play.
            return
        log.trace(f"{params!r}")
        # POST the params back; the endpoint answers with a stream_url list.
        hls_url = self.session.http.post(
            self.AJAX_URL,
            data=dict(params),
            schema=validate.Schema(
                validate.parse_json(),
                {"stream_url": [validate.url()]},
                validate.get(("stream_url", 0)),
            ),
        )
        return HLSStream.parse_variant_playlist(self.session, hls_url)


# Entry point looked up by Streamlink's plugin loader.
__plugin__ = VTVgo
|
ui | edittagdialog | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2011-2014 Michael Wiencek
# Copyright (C) 2014 Sophist-UK
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2017 Wieland Hoffmann
# Copyright (C) 2017-2018, 2020-2022 Laurent Monin
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2019-2022 Philipp Wolfer
# Copyright (C) 2023 certuna
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.const import (
RELEASE_FORMATS,
RELEASE_PRIMARY_GROUPS,
RELEASE_SECONDARY_GROUPS,
RELEASE_STATUS,
)
from picard.const.countries import RELEASE_COUNTRIES
from picard.ui import PicardDialog
from picard.ui.ui_edittagdialog import Ui_EditTagDialog
from picard.util.tags import TAG_NAMES
from PyQt5 import QtCore, QtGui, QtWidgets
# Lower-cased release group types: primary groups first, then secondary
# groups, each sub-list alphabetically sorted.
AUTOCOMPLETE_RELEASE_TYPES = [
    s.lower() for s in sorted(RELEASE_PRIMARY_GROUPS) + sorted(RELEASE_SECONDARY_GROUPS)
]
# Case-insensitively sorted word lists used by the tag value completers.
AUTOCOMPLETE_RELEASE_STATUS = sorted(s.lower() for s in RELEASE_STATUS)
AUTOCOMPLETE_RELEASE_COUNTRIES = sorted(RELEASE_COUNTRIES, key=str.casefold)
AUTOCOMPLETE_RELEASE_FORMATS = sorted(RELEASE_FORMATS, key=str.casefold)
class TagEditorDelegate(QtWidgets.QItemDelegate):
    """Item delegate that picks a suitable editor widget per tag."""

    def createEditor(self, parent, option, index):
        """Return a multi-line editor for comment/lyrics tags, otherwise the
        default editor, optionally with a placeholder or a completer."""
        if not index.isValid():
            return None
        tag = self.get_tag_name(index)
        if tag.partition(":")[0] in {"comment", "lyrics"}:
            # Multi-line tags get a plain text edit instead of a line edit.
            editor = QtWidgets.QPlainTextEdit(parent)
            editor.setFrameStyle(
                editor.style().styleHint(
                    QtWidgets.QStyle.StyleHint.SH_ItemView_DrawDelegateFrame,
                    None,
                    editor,
                )
            )
            editor.setMinimumSize(QtCore.QSize(0, 80))
        else:
            editor = super().createEditor(parent, option, index)

        completer = None
        if tag in {"date", "originaldate", "releasedate"}:
            editor.setPlaceholderText(_("YYYY-MM-DD"))
        elif tag == "originalyear":
            editor.setPlaceholderText(_("YYYY"))
        else:
            # Word list per tag, and whether that list is already
            # case-insensitively sorted (enables sorted-model completion).
            wordlists = {
                "releasetype": (AUTOCOMPLETE_RELEASE_TYPES, False),
                "releasestatus": (AUTOCOMPLETE_RELEASE_STATUS, True),
                "releasecountry": (AUTOCOMPLETE_RELEASE_COUNTRIES, True),
                "media": (AUTOCOMPLETE_RELEASE_FORMATS, True),
            }
            if tag in wordlists:
                words, presorted = wordlists[tag]
                completer = QtWidgets.QCompleter(words, editor)
                if presorted:
                    completer.setModelSorting(
                        QtWidgets.QCompleter.ModelSorting.CaseInsensitivelySortedModel
                    )

        if editor and completer:
            completer.setCompletionMode(
                QtWidgets.QCompleter.CompletionMode.UnfilteredPopupCompletion
            )
            completer.setCaseSensitivity(QtCore.Qt.CaseSensitivity.CaseInsensitive)
            editor.setCompleter(completer)
        return editor

    def get_tag_name(self, index):
        """Ask the owning dialog which tag is currently being edited."""
        return self.parent().tag
class EditTagDialog(PicardDialog):
    """Dialog for editing all values of a single metadata tag."""

    def __init__(self, window, tag):
        """Build the dialog for editing *tag* of the current selection."""
        super().__init__(window)
        self.ui = Ui_EditTagDialog()
        self.ui.setupUi(self)
        self.window = window
        self.value_list = self.ui.value_list
        self.metadata_box = window.metadata_box
        self.tag = tag
        # tag name -> list of edited values; applied to objects in accept()
        self.modified_tags = {}
        # True while row 0 shows a "different across files" placeholder
        self.different = False
        self.default_tags = sorted(
            set(list(TAG_NAMES.keys()) + self.metadata_box.tag_diff.tag_names)
        )
        # With a single file selected, only offer tags its format supports.
        if len(self.metadata_box.files) == 1:
            current_file = list(self.metadata_box.files)[0]
            self.default_tags = list(
                filter(current_file.supports_tag, self.default_tags)
            )
        tag_names = self.ui.tag_names
        tag_names.addItem("")
        # Hidden/internal tags (leading "~") are not offered for editing.
        visible_tags = [tn for tn in self.default_tags if not tn.startswith("~")]
        tag_names.addItems(visible_tags)
        self.completer = QtWidgets.QCompleter(visible_tags, tag_names)
        self.completer.setCompletionMode(
            QtWidgets.QCompleter.CompletionMode.PopupCompletion
        )
        tag_names.setCompleter(self.completer)
        self.value_list.model().rowsInserted.connect(self.on_rows_inserted)
        self.value_list.model().rowsRemoved.connect(self.on_rows_removed)
        self.value_list.setItemDelegate(TagEditorDelegate(self))
        self.tag_changed(tag)
        self.value_selection_changed()

    def keyPressEvent(self, event):
        """Keyboard shortcuts: Enter adds/edits, Delete removes, Insert adds."""
        if (
            event.modifiers() == QtCore.Qt.KeyboardModifier.NoModifier
            and event.key() in {QtCore.Qt.Key.Key_Enter, QtCore.Qt.Key.Key_Return}
        ):
            self.add_or_edit_value()
            event.accept()
        elif event.matches(QtGui.QKeySequence.StandardKey.Delete):
            self.remove_value()
        elif event.key() == QtCore.Qt.Key.Key_Insert:
            self.add_value()
        else:
            super().keyPressEvent(event)

    def tag_selected(self, index):
        """Handle activation of a tag name in the combo box."""
        self.add_or_edit_value()

    def edit_value(self):
        """Start inline editing of the currently selected value row."""
        item = self.value_list.currentItem()
        if item:
            # Do not initialize editing if editor is already active. Avoids flickering of the edit field
            # when already in edit mode. `isPersistentEditorOpen` is only supported in Qt 5.10 and later.
            if hasattr(
                self.value_list, "isPersistentEditorOpen"
            ) and self.value_list.isPersistentEditorOpen(item):
                return
            self.value_list.editItem(item)

    def add_value(self):
        """Append a new empty, editable row and start editing it."""
        item = QtWidgets.QListWidgetItem()
        item.setFlags(
            QtCore.Qt.ItemFlag.ItemIsSelectable
            | QtCore.Qt.ItemFlag.ItemIsEnabled
            | QtCore.Qt.ItemFlag.ItemIsEditable
        )
        self.value_list.addItem(item)
        self.value_list.setCurrentItem(item)
        self.value_list.editItem(item)

    def add_or_edit_value(self):
        """Edit the trailing empty row if present, otherwise add a new row."""
        last_item = self.value_list.item(self.value_list.count() - 1)
        # Edit the last item, if it is empty, or add a new empty item
        if last_item and not last_item.text():
            self.value_list.setCurrentItem(last_item)
            self.edit_value()
        else:
            self.add_value()

    def remove_value(self):
        """Remove the currently selected value row."""
        value_list = self.value_list
        row = value_list.currentRow()
        # Removing the placeholder row makes the tag editable again.
        if row == 0 and self.different:
            self.different = False
            self.ui.add_value.setEnabled(True)
        value_list.takeItem(row)

    def on_rows_inserted(self, parent, first, last):
        """Mirror rows newly inserted into the list into modified_tags."""
        for row in range(first, last + 1):
            item = self.value_list.item(row)
            self._modified_tag().insert(row, item.text())

    def on_rows_removed(self, parent, first, last):
        """Mirror row removal from the list into modified_tags."""
        for row in range(first, last + 1):
            del self._modified_tag()[row]

    def move_row_up(self):
        """Move the selected value one position up."""
        row = self.value_list.currentRow()
        if row > 0:
            self._move_row(row, -1)

    def move_row_down(self):
        """Move the selected value one position down."""
        row = self.value_list.currentRow()
        if row + 1 < self.value_list.count():
            self._move_row(row, 1)

    def _move_row(self, row, direction):
        """Move row by direction (-1 = up, +1 = down) and keep it selected."""
        value_list = self.value_list
        item = value_list.takeItem(row)
        new_row = row + direction
        value_list.insertItem(new_row, item)
        value_list.setCurrentRow(new_row)

    def disable_all(self):
        """Clear and disable the value list (no tag selected)."""
        self.value_list.clear()
        self.value_list.setEnabled(False)
        self.ui.add_value.setEnabled(False)

    def enable_all(self):
        """Re-enable the value list and the add button."""
        self.value_list.setEnabled(True)
        self.ui.add_value.setEnabled(True)

    def tag_changed(self, tag):
        """Switch the dialog to editing *tag* and refresh the value list."""
        tag_names = self.ui.tag_names
        # Block re-entrancy while the combo box is updated programmatically.
        tag_names.editTextChanged.disconnect(self.tag_changed)
        line_edit = tag_names.lineEdit()
        cursor_pos = line_edit.cursorPosition()
        flags = (
            QtCore.Qt.MatchFlag.MatchFixedString
            | QtCore.Qt.MatchFlag.MatchCaseSensitive
        )
        # if the previous tag was new and has no value, remove it from the QComboBox.
        # e.g. typing "XYZ" should not leave "X" or "XY" in the QComboBox.
        if (
            self.tag
            and self.tag not in self.default_tags
            and self._modified_tag() == [""]
        ):
            tag_names.removeItem(tag_names.findText(self.tag, flags))
        row = tag_names.findText(tag, flags)
        self.tag = tag
        if row <= 0:
            if tag:
                # add custom tags to the QComboBox immediately
                tag_names.addItem(tag)
                tag_names.model().sort(0)
                row = tag_names.findText(tag, flags)
            else:
                # the QLineEdit is empty, disable everything
                self.disable_all()
                tag_names.setCurrentIndex(0)
                tag_names.editTextChanged.connect(self.tag_changed)
                return
        self.enable_all()
        tag_names.setCurrentIndex(row)
        line_edit.setCursorPosition(cursor_pos)
        self.value_list.clear()
        values = self.modified_tags.get(self.tag, None)
        if values is None:
            new_tags = self.metadata_box.tag_diff.new
            display_value, self.different = new_tags.display_value(self.tag)
            values = [display_value] if self.different else new_tags[self.tag]
            self.ui.add_value.setEnabled(not self.different)
        # Disconnect while filling so the initial values are not echoed
        # back into modified_tags by on_rows_inserted.
        self.value_list.model().rowsInserted.disconnect(self.on_rows_inserted)
        self._add_value_items(values)
        self.value_list.model().rowsInserted.connect(self.on_rows_inserted)
        self.value_list.setCurrentItem(
            self.value_list.item(0),
            QtCore.QItemSelectionModel.SelectionFlag.SelectCurrent,
        )
        tag_names.editTextChanged.connect(self.tag_changed)

    def _add_value_items(self, values):
        """Fill the list with values; italic font marks a placeholder row."""
        values = [v for v in values if v] or [""]
        for value in values:
            item = QtWidgets.QListWidgetItem(value)
            item.setFlags(
                QtCore.Qt.ItemFlag.ItemIsSelectable
                | QtCore.Qt.ItemFlag.ItemIsEnabled
                | QtCore.Qt.ItemFlag.ItemIsEditable
                | QtCore.Qt.ItemFlag.ItemIsDragEnabled
            )
            font = item.font()
            font.setItalic(self.different)
            item.setFont(font)
            self.value_list.addItem(item)

    def value_edited(self, item):
        """Record an edited value; editing the placeholder replaces all values."""
        row = self.value_list.row(item)
        value = item.text()
        if row == 0 and self.different:
            self.modified_tags[self.tag] = [value]
            self.different = False
            font = item.font()
            font.setItalic(False)
            item.setFont(font)
            self.ui.add_value.setEnabled(True)
        else:
            self._modified_tag()[row] = value
        # add tags to the completer model once they get values
        cm = self.completer.model()
        if self.tag not in cm.stringList():
            cm.insertRows(0, 1)
            cm.setData(cm.index(0, 0), self.tag)
            cm.sort(0)

    def value_selection_changed(self):
        """Enable the per-value buttons only while a row is selected."""
        selection = len(self.value_list.selectedItems()) > 0
        self.ui.edit_value.setEnabled(selection)
        self.ui.remove_value.setEnabled(selection)
        self.ui.move_value_up.setEnabled(selection)
        self.ui.move_value_down.setEnabled(selection)

    def _modified_tag(self):
        """Return (creating on first use) the edit buffer for the current tag."""
        return self.modified_tags.setdefault(
            self.tag, list(self.metadata_box.tag_diff.new[self.tag]) or [""]
        )

    def accept(self):
        """Apply all modified tags to the selected objects and close."""
        with self.window.ignore_selection_changes:
            for tag, values in self.modified_tags.items():
                # Drop empty values before applying.
                self.modified_tags[tag] = [v for v in values if v]
            modified_tags = self.modified_tags.items()
            for obj in self.metadata_box.objects:
                for tag, values in modified_tags:
                    obj.metadata[tag] = list(values)
                obj.update()
        super().accept()
|
find-file-extensions | default | from __future__ import absolute_import
import fnmatch
import os
from builtins import map
from gi.repository import Gtk
from sunflower.plugin_base.find_extension import FindExtension
class DefaultFindFiles(FindExtension):
    """Default extension for find files tool.

    Matches file names against one or more fnmatch patterns separated
    by ";".
    """

    def __init__(self, parent):
        FindExtension.__init__(self, parent)
        self.active = True
        # Current pattern text and matcher; the matcher swaps between
        # fnmatch (case-normalizing) and fnmatchcase via the checkbox.
        self._pattern = "*"
        self._compare_method = fnmatch.fnmatch

        # prepare options
        plugin_options = parent._application.plugin_options
        self._options = plugin_options.create_section(self.__class__.__name__)

        # connect notify signal
        parent.connect("notify-start", self.__handle_notify_start)

        # create label showing pattern help
        label_help = Gtk.Label()
        label_help.set_alignment(0, 0)
        label_help.set_use_markup(True)
        label_help.set_markup(
            _(
                "<b>Pattern matching</b>\n"
                "*\t\tEverything\n"
                "?\t\tAny single character\n"
                "[seq]\tAny character in <i>seq</i>\n"
                "[!seq]\tAny character <u>not</u> in <i>seq</i>"
            )
        )

        # create containers
        hbox = Gtk.HBox(True, 15)
        vbox_left = Gtk.VBox(False, 5)
        vbox_right = Gtk.VBox(False, 0)

        # create interface
        vbox_pattern = Gtk.VBox(False, 0)
        label_pattern = Gtk.Label(label=_("Search for:"))
        label_pattern.set_alignment(0, 0.5)
        self._entry_pattern = Gtk.ComboBoxText.new_with_entry()
        self._entry_pattern.connect("changed", self.__handle_pattern_change)
        self._checkbox_case_sensitive = Gtk.CheckButton(_("Case sensitive"))
        self._checkbox_case_sensitive.connect(
            "toggled", self.__handle_case_sensitive_toggle
        )

        # pack interface
        vbox_pattern.pack_start(label_pattern, False, False, 0)
        vbox_pattern.pack_start(self._entry_pattern, False, False, 0)
        vbox_left.pack_start(vbox_pattern, False, False, 0)
        vbox_left.pack_start(self._checkbox_case_sensitive, False, False, 0)
        vbox_right.pack_start(label_help, True, True, 0)
        hbox.pack_start(vbox_left, True, True, 0)
        hbox.pack_start(vbox_right, True, True, 0)
        self.container.pack_start(hbox, True, True, 0)

        # load saved values
        self._load_history()
        self._pattern = self._entry_pattern.get_active_text()
        self._case_sensitive = False

    def __handle_case_sensitive_toggle(self, widget, data=None):
        """Handle toggling case sensitive check box."""
        self._compare_method = (fnmatch.fnmatch, fnmatch.fnmatchcase)[
            widget.get_active()
        ]
        self._case_sensitive = widget.get_active()

    def __handle_pattern_change(self, widget, data=None):
        """Handle changing pattern."""
        self._pattern = widget.get_child().get_text()

    def __handle_notify_start(self, data=None):
        """Handle starting search: persist the pattern into history."""
        entries = self._options.get("patterns") or []

        # insert pattern to search history
        if self._pattern is not None and self._pattern not in entries:
            entries.insert(0, self._pattern)
            # keep only the 20 most recent patterns
            entries = entries[:20]

        # save history
        self._options.set("patterns", entries)

    def _load_history(self):
        """Load previously stored patterns."""
        entries = self._options.get("patterns") or ["*"]
        for entry in entries:
            self._entry_pattern.append_text(entry)

        # select first entry without triggering the change handler
        self._entry_pattern.handler_block_by_func(self.__handle_pattern_change)
        self._entry_pattern.get_child().set_text(entries[0])
        self._entry_pattern.handler_unblock_by_func(self.__handle_pattern_change)

    def get_title(self):
        """Return i18n title for extension."""
        return _("Basic")

    def is_path_ok(self, provider, path):
        """Check if the specified path matches the configured criteria."""
        result = False
        file_name = os.path.basename(path)

        # prepare patterns; ";" separates multiple alternatives
        patterns = (
            (self._pattern,) if ";" not in self._pattern else self._pattern.split(";")
        )
        if not self._case_sensitive:
            file_name = file_name.lower()
            patterns = map(lambda x: x.lower(), patterns)

        # try to match any of the patterns
        for pattern in patterns:
            if self._compare_method(file_name, pattern):
                result = True
                break

        return result
|
nfoview | util | # -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import codecs
import contextlib
import sys
import traceback
import urllib.parse
import webbrowser
import nfoview
from gi.repository import Gdk, Gtk, Pango
def affirm(value):
    """Raise AffirmationError unless value is truthy; return None otherwise."""
    if value:
        return
    raise nfoview.AffirmationError(f"Not True: {value!r}")
def apply_style(widget):
    """Apply the configured color scheme and font to widget via CSS.

    The widget is tagged with the "nfoview-text-view" style class and a
    CSS provider built from the user's preferences is installed for the
    default display.
    """
    name = nfoview.conf.color_scheme
    # Falls back to the scheme named "default" for unknown names.
    scheme = nfoview.schemes.get(name, "default")
    font_desc = Pango.FontDescription(nfoview.conf.font)
    css = """
    .nfoview-text-view, .nfoview-text-view text {{
        background-color: {bg};
        color: {fg};
        font-family: "{family}", monospace;
        font-size: {size}px;
        font-weight: {weight};
    }}""".format(
        bg=scheme.background,
        fg=scheme.foreground,
        family=font_desc.get_family().split(",")[0].strip('"'),
        size=int(round(font_desc.get_size() / Pango.SCALE)),
        # Round weight to hundreds to work around CSS errors
        # with weird weights such as Unscii's 101.
        weight=round(font_desc.get_weight(), -2),
    )
    # Zero size/weight means "unset": drop those declarations entirely.
    css = css.replace("font-size: 0px;", "")
    css = css.replace("font-weight: 0;", "")
    css = "\n".join(filter(lambda x: x.strip(), css.splitlines()))
    provider = Gtk.CssProvider()
    try:
        # The call signature of 'load_from_data' seems to have changed
        # in some GTK version. Also, the whole function is deprecated
        # and since GTK 4.12 we should use 'load_from_string'.
        provider.load_from_data(css, -1)
    except Exception:
        provider.load_from_data(bytes(css.encode()))
    style = widget.get_style_context()
    style.add_class("nfoview-text-view")
    display = Gdk.Display.get_default()
    priority = Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
    style.add_provider_for_display(display, provider, priority)
def connect(observer, observable, signal, *args):
    """Connect a signal of observable to a handler method on observer.

    If observable is a string, it names an attribute of observer that
    holds the actual observable; otherwise observer and observable are
    the same object. The handler name is derived from the signal, e.g.
    "_on_<attr>_<signal>", falling back to the variant without the
    leading underscore when no private handler exists.
    """
    name = signal.replace("-", "_").replace("::", "_")
    if observer is not observable:
        name = "_".join((observable, name))
    name = f"_on_{name}".replace("__", "_")
    if not hasattr(observer, name):
        # No private handler defined; use the public name.
        name = name[1:]
    handler = getattr(observer, name)
    target = observable if observer is observable else getattr(observer, observable)
    return target.connect(signal, handler, *args)
def detect_encoding(path, default="cp437"):
    """Detect the encoding of the file at path from its BOM.

    Only the first line is inspected; when no known, usable BOM is
    found, default is returned.
    """
    with open(path, "rb") as f:
        head = f.readline()
    # Longest BOMs first so UTF-32 is not misdetected as UTF-16.
    candidates = (
        (codecs.BOM_UTF32_BE, "utf_32_be"),
        (codecs.BOM_UTF32_LE, "utf_32_le"),
        (codecs.BOM_UTF8, "utf_8_sig"),
        (codecs.BOM_UTF16_BE, "utf_16_be"),
        (codecs.BOM_UTF16_LE, "utf_16_le"),
    )
    for bom, encoding in candidates:
        if head.startswith(bom) and is_valid_encoding(encoding):
            return encoding
    return default
def get_max_text_view_size():
    """Return the pixel size of the largest text view allowed by preferences."""
    conf = nfoview.conf
    # A block of "x" characters at the configured maximum dimensions.
    sample = "\n".join(["x" * conf.text_view_max_chars] * conf.text_view_max_lines)
    return get_text_view_size(sample)
def get_monitor():
    """Return the first available monitor, or None if there is none."""
    display = Gdk.Display.get_default()
    return next(
        (monitor for monitor in display.get_monitors() if monitor is not None),
        None,
    )
def get_screen_size(monitor=None):
    """Return (width, height) of monitor, defaulting to the first monitor."""
    geometry = (monitor or get_monitor()).get_geometry()
    return geometry.width, geometry.height
def get_text_view_size(text):
    """Return the natural pixel (width, height) needed to display text."""
    # Measure with a styled label so font/scheme match the text view.
    probe = Gtk.Label()
    apply_style(probe)
    probe.set_text(text)
    probe.show()
    horizontal = probe.measure(Gtk.Orientation.HORIZONTAL, -1)
    vertical = probe.measure(Gtk.Orientation.VERTICAL, -1)
    return horizontal.natural, vertical.natural
def hex_to_rgba(string):
    """Parse a color string (e.g. "#rrggbb") into a Gdk.RGBA.

    Raise ValueError if the string cannot be parsed.
    """
    rgba = Gdk.RGBA()
    if not rgba.parse(string):
        raise ValueError(f"Parsing {string!r} failed")
    return rgba
def is_valid_encoding(encoding):
    """Return True if encoding names a codec known to Python.

    codecs.lookup raises LookupError for unknown names. The CodecInfo
    object it returns was previously passed through (truthy); callers
    only test truthiness, so returning a plain bool is backward-
    compatible and matches the predicate-style name.
    """
    try:
        codecs.lookup(encoding)
    except LookupError:
        return False
    return True
def lookup_color(name, fallback):
    """Return the themed color name as a hex string, or fallback when the
    current style does not define it."""
    # Use a throwaway entry widget to access the active style context.
    probe = Gtk.Entry()
    probe.show()
    found, color = probe.get_style_context().lookup_color(name)
    return rgba_to_hex(color) if found else fallback
def rgba_to_hex(color):
    """Format a color with float red/green/blue channels as "#rrggbb"."""
    channels = (color.red, color.green, color.blue)
    return "#" + "".join(f"{int(channel * 255):02x}" for channel in channels)
def show_uri(uri):
    """Open uri in its default application.

    Falls back to Python's webbrowser module for http/https URLs when
    Gtk.show_uri fails; re-raises the original error for other schemes.
    """
    try:
        return Gtk.show_uri(None, uri, Gdk.CURRENT_TIME)
    except Exception:
        # Gtk.show_uri fails on Windows and some misconfigured installations.
        # GError: No application is registered as handling this file
        # Gtk.show_uri: Operation not supported
        if uri.startswith(("http://", "https://")):
            return webbrowser.open(uri)
        raise # Exception
@contextlib.contextmanager
def silent(*exceptions, tb=False):
    """Context manager that swallows the given exception types.

    When tb is true, the suppressed traceback is printed; other
    exception types propagate unchanged.
    """
    try:
        yield
    except exceptions:
        if not tb:
            return
        traceback.print_exc()
def uri_to_path(uri):
    """Convert a file:// URI into a local filesystem path."""
    path = urllib.parse.urlsplit(urllib.parse.unquote(uri)).path
    if sys.platform == "win32":
        # Windows: drop leading slashes and use backslash separators.
        return path.lstrip("/").replace("/", "\\")
    return path
|
models | region | import logging
from django.db import models
logger = logging.getLogger(__name__)
def sync_regions(regions: list[dict]):
    """Reconcile local Region rows with the regions reported upstream.

    Creates missing regions, deletes regions no longer present, and
    updates name/oncall_backend_url for regions that changed.
    """
    # Imported here (not at module level) as in the original code.
    from apps.user_management.models import Region

    desired = {entry["slug"]: entry for entry in regions}
    known_slugs = set(Region.objects.all().values_list("slug", flat=True))

    # Create regions that exist upstream but not locally.
    Region.objects.bulk_create(
        [
            Region(
                name=entry["name"],
                slug=entry["slug"],
                oncall_backend_url=entry["oncallApiUrl"],
            )
            for entry in desired.values()
            if entry["slug"] not in known_slugs
        ],
        batch_size=5000,
    )

    # Delete local regions that upstream no longer reports.
    Region.objects.filter(slug__in=known_slugs - desired.keys()).delete()

    # Update rows whose name or backend URL changed upstream.
    changed_rows = []
    for row in Region.objects.filter(slug__in=known_slugs):
        entry = desired[row.slug]
        if row.name != entry["name"] or row.oncall_backend_url != entry["oncallApiUrl"]:
            row.name = entry["name"]
            row.oncall_backend_url = entry["oncallApiUrl"]
            changed_rows.append(row)
    Region.objects.bulk_update(
        changed_rows, ["name", "oncall_backend_url"], batch_size=5000
    )
class Region(models.Model):
    # Human-readable region name, kept in sync by sync_regions().
    name = models.CharField(max_length=300)
    # Upstream slug used as the synchronization key; unique per region.
    slug = models.CharField(max_length=50, unique=True)
    # Base URL of the OnCall backend for this region; may be unset.
    oncall_backend_url = models.URLField(null=True)
|
PyObjCTest | test_subclass | import sys
import types
import objc
from PyObjCTest.testbndl import PyObjC_TestClass3
from PyObjCTools.TestSupport import *
# Most useful systems will at least have 'NSObject'.
# Resolve the Cocoa classes used throughout these tests from the
# Objective-C runtime.
NSObject = objc.lookUpClass("NSObject")
NSArray = objc.lookUpClass("NSArray")
NSAutoreleasePool = objc.lookUpClass("NSAutoreleasePool")
class TestSubclassing(TestCase):
    """Tests for defining Objective-C subclasses from Python."""

    def testMethodRaise(self):
        # Defining a method whose name is a keyword followed by two underscores
        # should define the method name without underscores in the runtime,
        # and this method should be accessible both with and without the
        # underscores.
        class RaiseClass(NSObject):
            def raise__(self):
                pass

        self.assertNotHasAttr(NSObject, "raise__")
        self.assertNotHasAttr(NSObject, "raise")
        self.assertHasAttr(RaiseClass, "raise__")
        self.assertHasAttr(RaiseClass, "raise")
        self.assertEqual(RaiseClass.raise__.selector, b"raise")
        self.assertEqual(getattr(RaiseClass, "raise").selector, b"raise")

    def testMIObjC(self):
        # Multiple inheritance from two Objective-C classes must fail.
        try:

            class MIClass1(NSObject, NSArray):
                pass

            self.fail("Can multiple inherit from two objc classes")
        except TypeError:
            pass

    def testSubclassOfSubclass(self):
        # Overriding and calling through objc.super() across two levels.
        class Level1Class(NSObject):
            def hello(self):
                return "level1"

        class Level2Class(Level1Class):
            def hello(self):
                return "level2"

            def superHello(self):
                return objc.super(Level2Class, self).hello()

            def description(self):
                return objc.super(Level2Class, self).description()

        obj = Level1Class.alloc().init()
        v = obj.hello()
        self.assertEqual(v, "level1")

        obj = Level2Class.alloc().init()
        v = obj.hello()
        self.assertEqual(v, "level2")

        v = obj.superHello()
        self.assertEqual(v, "level1")

        v = obj.description()
        # this may be a bit hardwired for comfort
        self.assertEqual(v.find("<Level2Class"), 0)

    def testMethodSignature(self):
        # A custom type signature attached via objc.selector must be
        # visible through NSMethodSignature introspection.
        class Signature(NSObject):
            def test_x_(self, arg, x):
                pass

            test_x_ = objc.selector(test_x_, signature=b"v@:@i")

        v = Signature.new()
        self.assertIsInstance(v, Signature)

        self.assertEqual(v.methodSignatureForSelector_("foo:"), None)

        x = v.methodSignatureForSelector_("test:x:")
        self.assertIsNotNone(x)

        self.assertEqual(x.methodReturnType(), b"v")
        self.assertEqual(x.numberOfArguments(), 4)
        self.assertEqual(x.getArgumentTypeAtIndex_(0), b"@")
        self.assertEqual(x.getArgumentTypeAtIndex_(1), b":")
        self.assertEqual(x.getArgumentTypeAtIndex_(2), b"@")
        self.assertEqual(x.getArgumentTypeAtIndex_(3), b"i")
class TestSelectors(TestCase):
    """Checks on the repr() of unbound selector objects."""

    def testSelectorRepr(self):
        class SelectorRepr(NSObject):
            def foo(self):
                pass

        text = repr(SelectorRepr.foo)
        self.assertTrue(text.startswith("<unbound selector foo of SelectorRepr at"))
class TestCopying(TestCase):
    """Tests for NSCopying support and Python mixin classes."""

    def testCopy(self):
        # A Python-implemented copyWithZone_ registered via objc.selector
        # must be recognized by the bridge (instance method, retained retval).
        class MyCopyClass(NSObject):
            def copyWithZone_(self, zone):
                # NSObject doesn't implement the copying protocol
                # o = super(MyCopyClass, self).copyWithZone_(zone)
                o = self.__class__.alloc().init()
                o.foobar = 2
                return o

            copyWithZone_ = objc.selector(
                copyWithZone_,
                signature=NSObject.copyWithZone_.signature,
                isClassMethod=0,
            )

        # Make sure the runtime correctly marked our copyWithZone_
        # implementation.
        o = MyCopyClass.alloc().init()
        self.assertFalse((o.copyWithZone_.__metadata__()["classmethod"]))
        self.assertTrue(o.copyWithZone_.__metadata__()["retval"]["already_retained"])
        # self.assertTrue(o.copyWithZone_.callable == MyCopyClass.__dict__['copyWithZone_'].callable)
        o = MyCopyClass.alloc().init()
        o.foobar = 1
        self.assertEqual(o.foobar, 1)

        # Make a copy from ObjC (see testbundle.m)
        c = PyObjC_TestClass3.copyValue_(o)
        self.assertIsInstance(c, MyCopyClass)
        self.assertEqual(c.foobar, 2)

    def testMultipleInheritance1(self):
        # New-style class mixin
        class MixinClass1(object):
            def mixinMethod(self):
                return "foo"

        class MITestClass1(NSObject, MixinClass1):
            def init(self):
                return NSObject.pyobjc_instanceMethods.init(self)

        self.assertHasAttr(MITestClass1, "mixinMethod")
        o = MITestClass1.alloc().init()
        self.assertEqual(o.mixinMethod(), "foo")

    def testMultipleInheritance2(self):
        # old-style class mixin
        class MixinClass2:
            def mixinMethod(self):
                return "foo"

        class MITestClass2(NSObject, MixinClass2):
            def init(self):
                return NSObject.pyobjc_instanceMethods.init(self)

        self.assertHasAttr(MITestClass2, "mixinMethod")
        o = MITestClass2.alloc().init()
        self.assertEqual(o.mixinMethod(), "foo")
class TestClassMethods(TestCase):
    """Tests for classmethod/staticmethod handling on ObjC subclasses."""

    def testClassMethod(self):
        """check that classmethod()-s are converted to selectors"""

        class ClassMethodTest(NSObject):
            def clsMeth(self):
                return "hello"

            clsMeth = classmethod(clsMeth)

        self.assertIsInstance(ClassMethodTest.clsMeth, objc.selector)
        self.assertTrue(ClassMethodTest.clsMeth.isClassMethod)

    def testStaticMethod(self):
        """check that staticmethod()-s are not converted to selectors"""

        class StaticMethodTest(NSObject):
            def stMeth(self):
                return "hello"

            stMeth = staticmethod(stMeth)

        def func():
            pass

        # A staticmethod stays a plain Python function, not a selector.
        self.assertIsInstance(StaticMethodTest.stMeth, type(func))
class TestOverridingSpecials(TestCase):
    """Overriding alloc/retain/release/dealloc and selector naming from Python."""
    def testOverrideSpecialMethods(self):
        aList = [0]
        class ClassWithAlloc(NSObject):
            # NOTE: the bridge treats 'alloc' as a class method, hence the
            # 'cls' first argument without an explicit @classmethod.
            def alloc(cls):
                aList[0] += 1
                return objc.super(ClassWithAlloc, cls).alloc()
        self.assertEqual(aList[0], 0)
        o = ClassWithAlloc.alloc().init()
        self.assertEqual(aList[0], 1)
        self.assertIsInstance(o, NSObject)
        del o
        class ClassWithRetaining(NSObject):
            def retain(self):
                aList.append("retain")
                v = objc.super(ClassWithRetaining, self).retain()
                return v
            def release(self):
                aList.append("release")
                return objc.super(ClassWithRetaining, self).release()
            def __del__(self):
                aList.append("__del__")
        del aList[:]
        o = ClassWithRetaining.alloc().init()
        v = o.retainCount()
        o.retain()
        self.assertEqual(aList, ["retain"])
        self.assertEqual(o.retainCount(), v + 1)
        o.release()
        self.assertEqual(aList, ["retain", "release"])
        self.assertEqual(o.retainCount(), v)
        del o
        # Dropping the last Python reference releases the ObjC object too,
        # so a second "release" is logged before __del__ runs.
        self.assertEqual(aList, ["retain", "release", "release", "__del__"])
        # Test again, now remove all python references and create one
        # again.
        del aList[:]
        pool = NSAutoreleasePool.alloc().init()
        o = ClassWithRetaining.alloc().init()
        v = NSArray.arrayWithArray_([o])
        del o
        self.assertEqual(aList, ["retain"])
        o = v[0]
        self.assertEqual(aList, ["retain"])
        del v
        del o
        del pool
        self.assertEqual(aList, ["retain", "release", "release", "__del__"])
        class ClassWithRetainCount(NSObject):
            def retainCount(self):
                aList.append("retainCount")
                return objc.super(ClassWithRetainCount, self).retainCount()
        del aList[:]
        o = ClassWithRetainCount.alloc().init()
        self.assertEqual(aList, [])
        v = o.retainCount()
        self.assertIsInstance(v, int)
        self.assertEqual(aList, ["retainCount"])
        del o
    def testOverrideDealloc(self):
        aList = []
        class Dummy:
            def __del__(self):
                aList.append("__del__")
        self.assertEqual(aList, [])
        Dummy()
        self.assertEqual(aList, ["__del__"])
        class ClassWithDealloc(NSObject):
            def init(self):
                self = objc.super(ClassWithDealloc, self).init()
                if self is not None:
                    self.obj = Dummy()
                return self
            def dealloc(self):
                aList.append("dealloc")
                return objc.super(ClassWithDealloc, self).dealloc()
        del aList[:]
        o = ClassWithDealloc.alloc().init()
        self.assertEqual(aList, [])
        del o
        # Expect the ObjC-side dealloc plus the owned Dummy's __del__.
        self.assertEqual(len(aList), 2)
        self.assertIn("dealloc", aList)
        self.assertIn("__del__", aList)
        class SubClassWithDealloc(ClassWithDealloc):
            def dealloc(self):
                aList.append("dealloc.dealloc")
                return objc.super(SubClassWithDealloc, self).dealloc()
        del aList[:]
        o = SubClassWithDealloc.alloc().init()
        self.assertEqual(aList, [])
        del o
        self.assertEqual(len(aList), 3)
        self.assertIn("dealloc.dealloc", aList)
        self.assertIn("dealloc", aList)
        self.assertIn("__del__", aList)
        class ClassWithDeallocAndDel(NSObject):
            def init(self):
                self = objc.super(ClassWithDeallocAndDel, self).init()
                if self is not None:
                    self.obj = Dummy()
                return self
            def dealloc(self):
                aList.append("dealloc")
                return objc.super(ClassWithDeallocAndDel, self).dealloc()
            def __del__(self):
                aList.append("mydel")
        del aList[:]
        o = ClassWithDeallocAndDel.alloc().init()
        self.assertEqual(aList, [])
        del o
        self.assertEqual(len(aList), 3)
        self.assertIn("mydel", aList)
        self.assertIn("dealloc", aList)
        self.assertIn("__del__", aList)
    def testMethodNames(self):
        """Python method names map to ObjC selectors: '_' becomes ':'."""
        class MethodNamesClass(NSObject):
            def someName_andArg_(self, name, arg):
                pass
            def _someName_andArg_(self, name, arg):
                pass
            def raise__(self):
                pass
            def froobnicate__(self, a, b):
                pass
        # XXX: workaround for a 'feature' in class-builder.m, that code
        # ignores methods whose name starts with two underscores. That code
        # is not necessary, or the other ways of adding methods to a class
        # should be changed.
        def __foo_bar__(self, a, b, c):
            pass
        MethodNamesClass.__foo_bar__ = __foo_bar__
        self.assertEqual(
            MethodNamesClass.someName_andArg_.selector, b"someName:andArg:"
        )
        self.assertEqual(
            MethodNamesClass._someName_andArg_.selector, b"_someName:andArg:"
        )
        self.assertEqual(MethodNamesClass.__foo_bar__.selector, b"__foo_bar__")
        self.assertEqual(MethodNamesClass.raise__.selector, b"raise")
        self.assertEqual(MethodNamesClass.froobnicate__.selector, b"froobnicate::")
    def testOverrideRespondsToSelector(self):
        class OC_RespondsClass(NSObject):
            def initWithList_(self, lst):
                objc.super(OC_RespondsClass, self).init()
                self.lst = lst
                return self
            def respondsToSelector_(self, selector):
                self.lst.append(selector)
                return objc.super(OC_RespondsClass, self).respondsToSelector_(selector)
        lst = []
        o = OC_RespondsClass.alloc().initWithList_(lst)
        self.assertEqual(lst, [])
        b = o.respondsToSelector_("init")
        self.assertTrue(b)
        self.assertEqual(lst, ["init"])
        # 'alloc' is a class method, so instances do not respond to it.
        b = o.respondsToSelector_("alloc")
        self.assertFalse(b)
        self.assertEqual(lst, ["init", "alloc"])
    def testOverrideInstancesRespondToSelector(self):
        lst = []
        class OC_InstancesRespondClass(NSObject):
            @classmethod
            def instancesRespondToSelector_(cls, selector):
                lst.append(selector)
                return objc.super(
                    OC_InstancesRespondClass, cls
                ).instancesRespondToSelector_(selector)
        self.assertEqual(lst, [])
        b = OC_InstancesRespondClass.instancesRespondToSelector_("init")
        self.assertTrue(b)
        self.assertEqual(lst, ["init"])
        b = OC_InstancesRespondClass.instancesRespondToSelector_("alloc")
        self.assertFalse(b)
        self.assertEqual(lst, ["init", "alloc"])
    def testImplementingSetValueForKey(self):
        """Overriding KVC's setValue:forKey: must not crash the bridge."""
        values = {}
        class CrashTest(NSObject):
            def setValue_forKey_(self, v, k):
                values[k] = v
        o = CrashTest.alloc().init()
        o.setValue_forKey_(42, "key")
        self.assertEqual(values, {"key": 42})
# Allow running this test module directly.
if __name__ == "__main__":
    main()
|
pwidgets | gradientctrls | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from copy import deepcopy
import wal
from colorbtn import PDColorButton
from colorctrls import SwatchCanvas
from sk1 import _
from sk1.resources import get_icon, icons
from uc2 import sk2const, uc2const
# Preset two-stop gradients per colorspace, used to populate the cells of
# GradientMiniPalette. Each preset is a list of [position, color] stops where
# a color is [colorspace, components, alpha, name].
CMYK_PALETTE = [
    [
        [0.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 1.0], 1.0, "Black"]],
        [1.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 1.0], 0.0, "Black"]],
    ],
    [
        [0.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 1.0], 1.0, "Black"]],
        [1.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 0.0], 1.0, "White"]],
    ],
    [
        [0.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 0.0], 0.0, "White"]],
        [1.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 0.0], 1.0, "White"]],
    ],
]
RGB_PALETTE = [
    [
        [0.0, [uc2const.COLOR_RGB, [0.0, 0.0, 0.0], 1.0, "Black"]],
        [1.0, [uc2const.COLOR_RGB, [0.0, 0.0, 0.0], 0.0, "Black"]],
    ],
    [
        [0.0, [uc2const.COLOR_RGB, [0.0, 0.0, 0.0], 1.0, "Black"]],
        [1.0, [uc2const.COLOR_RGB, [1.0, 1.0, 1.0], 1.0, "White"]],
    ],
    [
        [0.0, [uc2const.COLOR_RGB, [1.0, 1.0, 1.0], 0.0, "White"]],
        [1.0, [uc2const.COLOR_RGB, [1.0, 1.0, 1.0], 1.0, "White"]],
    ],
]
GRAY_PALETTE = [
    [
        [0.0, [uc2const.COLOR_GRAY, [0.0], 1.0, "Black"]],
        [1.0, [uc2const.COLOR_GRAY, [0.0], 0.0, "Black"]],
    ],
    [
        [0.0, [uc2const.COLOR_GRAY, [0.0], 1.0, "Black"]],
        [1.0, [uc2const.COLOR_GRAY, [1.0], 1.0, "White"]],
    ],
    [
        [0.0, [uc2const.COLOR_GRAY, [1.0], 0.0, "White"]],
        [1.0, [uc2const.COLOR_GRAY, [1.0], 1.0, "White"]],
    ],
]
# Lookup from the colorspace constant stored in a stop's color to its presets.
PALETTES = {
    uc2const.COLOR_CMYK: CMYK_PALETTE,
    uc2const.COLOR_RGB: RGB_PALETTE,
    uc2const.COLOR_GRAY: GRAY_PALETTE,
}
# Initial gradient for new swatches: opaque black fading to transparent black.
DEFAULT_STOPS = [
    [0.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 1.0], 1.0, "Black"]],
    [1.0, [uc2const.COLOR_CMYK, [0.0, 0.0, 0.0, 1.0], 0.0, "Black"]],
]
class GradientPaletteSwatch(wal.VPanel, SwatchCanvas):
    """Small clickable swatch rendering a linear gradient from a stop list."""
    callback = None
    stops = None
    def __init__(self, parent, cms, stops, size=(80, 20), onclick=None):
        self.color = None
        self.cms = cms
        wal.VPanel.__init__(self, parent)
        SwatchCanvas.__init__(self)
        self.pack(size)
        if onclick:
            self.callback = onclick
        self.set_stops(stops)
    def set_stops(self, stops):
        """Store the stops, rebuild the gradient fill and repaint."""
        self.stops = stops
        gradient = [sk2const.GRADIENT_LINEAR, [], self.stops]
        self.fill = [0, sk2const.FILL_GRADIENT, gradient]
        self.refresh()
    def mouse_left_up(self, point):
        """Hand a deep copy of the stops to the click callback, if set."""
        if self.callback:
            self.callback(deepcopy(self.stops))
class GradientMiniPalette(wal.VPanel):
    """2x3 mini-palette of preset gradient swatches for the active colorspace.

    The top row shows the colorspace presets; the bottom row shows gradients
    derived from the first/last colors of the current stop list.
    """
    callback = None
    cells = None
    def __init__(self, parent, cms_ref, stops=None, onclick=None):
        stops = stops or []
        wal.VPanel.__init__(self, parent)
        self.set_bg(wal.BLACK)
        # NOTE(review): the grid is parented to 'parent', not self — looks
        # suspicious but kept as-is; confirm against wal layout conventions.
        grid = wal.GridPanel(parent, 2, 3, 1, 1)
        grid.set_bg(wal.BLACK)
        self.cells = []
        for _ in range(6):
            self.cells.append(
                GradientPaletteSwatch(
                    grid, cms_ref, deepcopy(DEFAULT_STOPS), onclick=self.on_click
                )
            )
            grid.pack(self.cells[-1])
        self.pack(grid, padding_all=1)
        if onclick:
            self.callback = onclick
        self.set_stops(stops)
    def set_stops(self, stops=None):
        """Refill the six cells from the presets and the given stop list.

        Uses enumerate() rather than list.index() inside the loops: index()
        is O(n) per item and targets the first *equal* row, which would
        mis-assign cells if two preset rows ever compared equal.
        """
        if stops:
            palette = PALETTES[stops[0][1][0]]
            for i, item in enumerate(palette):
                self.cells[i].set_stops(item)
            # Derived presets: first/last stop colors at full and zero alpha.
            clr1 = deepcopy(stops[0][1])
            clr1[2] = 1.0
            clr1a = deepcopy(stops[0][1])
            clr1a[2] = 0.0
            clr2 = deepcopy(stops[-1][1])
            clr2[2] = 1.0
            clr2a = deepcopy(stops[-1][1])
            clr2a[2] = 0.0
            stops = [
                [[0.0, clr1], [1.0, clr1a]],
                [[0.0, clr1], [1.0, clr2]],
                [[0.0, clr2a], [1.0, clr2]],
            ]
            for i, item in enumerate(stops):
                self.cells[i + 3].set_stops(item)
    def on_click(self, stops):
        """Forward a cell click (its stop list) to the owner callback."""
        if self.callback:
            self.callback(stops)
class StopPanel(wal.LabeledPanel):
    """Editor for a single gradient stop: its color button and position spin."""
    dlg = None
    cms = None
    stops = None
    selected_stop = 0
    pos_callback = None
    color_callback = None
    stop = 0.0
    def __init__(
        self, parent, dlg, cms, stops, sel_stop=0, onposition=None, oncolor=None
    ):
        self.dlg = dlg
        self.cms = cms
        self.stops = stops
        self.selected_stop = sel_stop
        self.pos_callback = onposition
        self.color_callback = oncolor
        wal.LabeledPanel.__init__(self, parent, text=_("Gradient stop"))
        grid = wal.GridPanel(self, cols=3, vgap=5, hgap=5)
        # Row 1: color button for the selected stop.
        grid.pack(wal.Label(grid, _("Color value:")))
        stop_color = self.stops[self.selected_stop][1]
        self.swatch = PDColorButton(
            grid, self.dlg, self.cms, stop_color, _("Change stop color"),
            onchange=self.edit_color
        )
        grid.pack(self.swatch)
        grid.pack((1, 1))
        # Row 2: position of the stop as a percentage.
        grid.pack(wal.Label(grid, _("Position:")))
        self.position = wal.FloatSpin(
            grid,
            range_val=(0.0, 100.0),
            step=1.0,
            onchange=self.pos_changed,
            onenter=self.pos_changed,
        )
        grid.pack(self.position)
        grid.pack(wal.Label(grid, "%"))
        self.pack(grid, align_center=False, padding_all=10)
    def edit_color(self, color):
        """Store the new stop color, refresh widgets and notify the owner."""
        self.stops[self.selected_stop][1] = color
        self.update()
        if self.color_callback:
            self.color_callback()
    def pos_changed(self):
        """Report a position change (percent -> 0..1) when it actually changed."""
        value = self.position.get_value()
        if self.pos_callback and self.stop != value:
            self.stop = value
            self.pos_callback(self.selected_stop, value / 100.0)
    def set_selected_stop(self, index):
        self.selected_stop = index
        self.update()
    def set_stops(self, stops):
        self.stops = stops
        self.set_selected_stop(0)
    def update(self):
        """Sync the widgets with the selected stop; lock the end positions."""
        last_indx = len(self.stops) - 1
        self.swatch.set_color(self.stops[self.selected_stop][1])
        self.position.set_value(self.stops[self.selected_stop][0] * 100.0)
        # First and last stops are pinned to 0% and 100%.
        self.position.set_enable(self.selected_stop not in (0, last_indx))
class StopsEditor(wal.VPanel, wal.SensitiveDrawableWidget):
    """Slider-like strip drawing one handle per gradient stop.

    End stops are fixed bitmaps at both extremes; intermediate stops are
    knobs that can be selected and dragged with the mouse.
    """
    stops = []
    selected_stop = 0
    bmp_knob = None
    bmp_stop = None
    bmp_knob_s = None
    bmp_stop_s = None
    callback = None
    callback2 = None
    pick_flag = False
    def __init__(self, parent, stops, size=(418, 10), onchange=None, onmove=None):
        self.stops = stops
        self.callback = onchange
        self.callback2 = onmove
        wal.VPanel.__init__(self, parent)
        wal.SensitiveDrawableWidget.__init__(self, True)
        self.pack(size)
        # Handle bitmaps: normal and selected variants for ends and knobs.
        self.bmp_knob = get_icon(icons.SLIDER_KNOB, size=wal.DEF_SIZE)
        self.bmp_knob_s = get_icon(icons.SLIDER_KNOB_SEL, size=wal.DEF_SIZE)
        self.bmp_stop = get_icon(icons.SLIDER_STOP, size=wal.DEF_SIZE)
        self.bmp_stop_s = get_icon(icons.SLIDER_STOP_SEL, size=wal.DEF_SIZE)
    def paint(self):
        """Draw every handle, then overdraw the selected one highlighted."""
        self.draw_stops()
        self.draw_selected_stop()
    def draw_stops(self):
        width = self.get_size()[0]
        # Fixed end stops at the strip extremes.
        self.draw_bitmap(self.bmp_stop, 0, 0)
        self.draw_bitmap(self.bmp_stop, width - 5, 0)
        # One knob per intermediate stop (slice is empty for 2-stop lists).
        for stop in self.stops[1:-1]:
            self.draw_bitmap(self.bmp_knob, int(stop[0] * 400 + 6), 0)
    def draw_selected_stop(self):
        width = self.get_size()[0]
        last = len(self.stops) - 1
        if self.selected_stop == 0:
            self.draw_bitmap(self.bmp_stop_s, 0, 0)
        elif self.selected_stop == last:
            self.draw_bitmap(self.bmp_stop_s, width - 5, 0)
        else:
            knob_pos = int(self.stops[self.selected_stop][0] * 400 + 6)
            self.draw_bitmap(self.bmp_knob_s, knob_pos, 0)
    def set_stops(self, stops):
        self.stops = stops
        self.refresh()
    def set_selected_stop(self, index):
        self.selected_stop = index
        self.refresh()
    def change_selected_stop(self, index):
        """Report a new selection to the owner (which calls back into us)."""
        if self.callback:
            self.callback(index)
    def check_stop_point(self, val):
        """Return the index of the intermediate stop whose knob covers val.

        Returns None when no knob is hit; with overlapping knobs the last
        matching stop wins, as before.
        """
        hit = None
        width = self.get_size()[0] - 12
        for index, stop in enumerate(self.stops[1:-1], 1):
            if width * stop[0] - 4 < val < width * stop[0] + 4:
                hit = index
        return hit
    def mouse_move(self, point):
        """While dragging, clamp the pointer to the strip and report 0..1."""
        if self.pick_flag and self.callback2:
            width = self.get_size()[0]
            pos = max(min(point[0] - 9, width - 18), 0)
            pos /= (width - 18) * 1.0
            self.callback2(self.selected_stop, pos)
    def mouse_left_up(self, point):
        self.pick_flag = False
    def mouse_left_down(self, point):
        x = point[0]
        width = self.get_size()[0]
        if x < 9:
            # Left end stop.
            self.change_selected_stop(0)
        elif x > width - 8:
            # Right end stop.
            self.change_selected_stop(len(self.stops) - 1)
        else:
            index = self.check_stop_point(x - 8)
            if index is not None:
                self.change_selected_stop(index)
                # Start dragging the knob.
                self.pick_flag = True
class GradientViewer(wal.VPanel, SwatchCanvas):
    """Wide swatch previewing the gradient; double-click inserts a new stop."""
    color = None
    callback = None
    def __init__(self, parent, cms, stops, size=(402, 40), border="news", onclick=None):
        self.cms = cms
        wal.VPanel.__init__(self, parent)
        SwatchCanvas.__init__(self, border)
        self.pack(size)
        # The stop list lives in slot [2][2] of the fill structure.
        self.fill = [0, sk2const.FILL_GRADIENT, [sk2const.GRADIENT_LINEAR, [], []]]
        self.set_stops(stops)
        if onclick:
            self.callback = onclick
    def mouse_left_dclick(self, point):
        """Map the double-click x coordinate onto a 0.0-1.0 gradient position."""
        x = min(point[0], 400)
        if x:
            x -= 1
        if self.callback:
            self.callback(x / 400.0)
    def set_stops(self, stops):
        self.fill[2][2] = stops
        self.refresh()
class GradientEditor(wal.VPanel):
    """Composite widget for editing a gradient's stop list.

    Combines a GradientViewer preview, a StopsEditor handle strip, a
    StopPanel for the selected stop and navigation/add/delete buttons.
    Works on a private deep copy of the stops; get_stops() returns a copy.
    """
    dlg = None
    stops = None
    callback = None
    selected_stop = 0
    def __init__(self, parent, dlg, cms, stops, onchange=None):
        self.dlg = dlg
        self.cms = cms
        self.stops = deepcopy(stops)
        if onchange:
            self.callback = onchange
        wal.VPanel.__init__(self, parent)
        self.pack((5, 5))
        # Preview strip; a double-click inserts a stop at that position.
        self.viewer = GradientViewer(
            self, self.cms, self.stops, onclick=self.insert_stop
        )
        self.pack(self.viewer)
        # Handle strip for selecting/dragging stops.
        self.stop_editor = StopsEditor(
            self,
            self.stops,
            onchange=self.set_selected_stop,
            onmove=self.change_stop_position,
        )
        self.pack(self.stop_editor)
        self.pack((5, 5))
        hpanel = wal.HPanel(self)
        # Editor for the currently selected stop (color + position).
        self.stop_panel = StopPanel(
            hpanel,
            self.dlg,
            self.cms,
            self.stops,
            onposition=self.change_stop_position,
            oncolor=self.update_colors,
        )
        hpanel.pack((5, 5))
        hpanel.pack(self.stop_panel)
        hpanel.pack((5, 5))
        vpanel = wal.VPanel(hpanel)
        # Row of first/prev/next/last selection buttons.
        selpanel = wal.HPanel(vpanel)
        txt = _("Select first stop")
        self.go_first = wal.ImageButton(
            selpanel,
            icons.PD_GOTO_FIRST,
            wal.SIZE_16,
            tooltip=txt,
            flat=False,
            onclick=self.select_first,
        )
        selpanel.pack(self.go_first, padding=5)
        txt = _("Select previous stop")
        self.go_prev = wal.ImageButton(
            selpanel,
            icons.PD_GO_BACK,
            wal.SIZE_16,
            tooltip=txt,
            flat=False,
            onclick=self.select_prev,
        )
        selpanel.pack(self.go_prev, padding=5)
        txt = _("Select next stop")
        self.go_next = wal.ImageButton(
            selpanel,
            icons.PD_GO_FORWARD,
            wal.SIZE_16,
            tooltip=txt,
            flat=False,
            onclick=self.select_next,
        )
        selpanel.pack(self.go_next, padding=5)
        txt = _("Select last stop")
        self.go_last = wal.ImageButton(
            selpanel,
            icons.PD_GOTO_LAST,
            wal.SIZE_16,
            tooltip=txt,
            flat=False,
            onclick=self.select_last,
        )
        selpanel.pack(self.go_last, padding=5)
        vpanel.pack(selpanel)
        # Row of add/delete/reverse controls.
        bpanel = wal.HPanel(vpanel)
        txt = _("Add stop")
        self.add_button = wal.ImageButton(
            bpanel,
            icons.PD_ADD,
            wal.SIZE_16,
            tooltip=txt,
            flat=False,
            onclick=self.add_stop,
        )
        bpanel.pack(self.add_button, padding=5)
        txt = _("Delete selected stop")
        self.del_button = wal.ImageButton(
            bpanel,
            icons.PD_REMOVE,
            wal.SIZE_16,
            tooltip=txt,
            flat=False,
            onclick=self.delete_selected,
        )
        bpanel.pack(self.del_button)
        txt = _("Reverse gradient")
        bpanel.pack(
            wal.Button(
                vpanel, _("Reverse"), tooltip=txt, onclick=self.reverse_gradient
            ),
            padding=15,
        )
        vpanel.pack(bpanel, padding=5)
        txt = _("Keep gradient vector")
        self.keep_vector = wal.Checkbox(vpanel, txt, True)
        vpanel.pack(self.keep_vector)
        hpanel.pack(vpanel, fill=True, expand=True, padding=5)
        self.pack(hpanel, fill=True)
        self.update()
    def use_vector(self):
        """Return True when the 'Keep gradient vector' checkbox is set."""
        return self.keep_vector.get_value()
    def add_stop(self):
        """Insert a stop halfway between the selected stop and its neighbour."""
        indx = self.selected_stop
        if indx < len(self.stops) - 1:
            start = self.stops[indx][0]
            end = self.stops[indx + 1][0]
        else:
            # Selected stop is the last one: split the previous interval.
            start = self.stops[indx - 1][0]
            end = self.stops[indx][0]
        pos = start + (end - start) / 2.0
        self.insert_stop(pos)
    def delete_selected(self):
        """Remove the selected stop (the UI disables this for the end stops)."""
        indx = self.selected_stop
        self.stops = self.stops[:indx] + self.stops[indx + 1 :]
        if self.callback:
            self.callback()
        self.selected_stop = indx
        self.update()
    def reverse_gradient(self):
        """Mirror all stop positions and keep the same stop selected."""
        self.stops.reverse()
        for item in self.stops:
            item[0] = 1.0 - item[0]
        indx = len(self.stops) - 1 - self.selected_stop
        if self.callback:
            self.callback()
        self.selected_stop = indx
        self.update()
    def select_first(self):
        self.set_selected_stop(0)
    def select_prev(self):
        self.set_selected_stop(self.selected_stop - 1)
    def select_next(self):
        self.set_selected_stop(self.selected_stop + 1)
    def select_last(self):
        self.set_selected_stop(len(self.stops) - 1)
    def set_selected_stop(self, index):
        self.selected_stop = index
        self.update()
    def set_stops(self, stops):
        """Replace the edited stops with a deep copy of the given list."""
        self.stops = deepcopy(stops)
        self.selected_stop = 0
        self.update()
    def get_stops(self):
        """Return a deep copy of the current stops (callers may mutate it)."""
        return deepcopy(self.stops)
    def change_stop_position(self, index, pos):
        """Move the stop at index to pos, re-sorting it among the others."""
        # Remove the stop, then re-insert it after the last stop whose
        # position is <= pos so the list stays ordered.
        stops = self.stops[:index] + self.stops[index + 1 :]
        stop = self.stops[index]
        stop[0] = pos
        before = 0
        after = len(stops) - 1
        if not pos == 1.0:
            index = -1
            for item in stops:
                index += 1
                if item[0] <= pos:
                    before = index
            after = 1 + before
        stops.insert(after, stop)
        self.stops = stops
        if self.callback:
            self.callback()
        self.selected_stop = after
        self.update()
    def insert_stop(self, pos):
        """Insert a new stop at pos with a color interpolated from neighbours."""
        before = 0
        after = len(self.stops) - 1
        if not pos == 1.0:
            index = -1
            for stop in self.stops:
                index += 1
                if stop[0] <= pos:
                    before = index
            after = 1 + before
        else:
            before = after - 1
        c0 = self.stops[before][0]
        c1 = self.stops[after][0]
        if c1 - c0:
            # Mix neighbour colors proportionally to the insert position.
            coef = (pos - c0) / (c1 - c0)
            clr = self.cms.mix_colors(self.stops[before][1], self.stops[after][1], coef)
        else:
            # Zero-width interval: just copy the left neighbour's color.
            clr = deepcopy(self.stops[before][1])
        self.stops.insert(after, [pos, clr])
        if self.callback:
            self.callback()
        self.selected_stop = after
        self.update()
    def update_colors(self):
        """Propagate a stop-color change and refresh all child widgets."""
        if self.callback:
            self.callback()
        self.update()
    def update(self):
        """Refresh all child widgets and button sensitivity for the selection."""
        last_indx = len(self.stops) - 1
        self.viewer.set_stops(self.stops)
        self.stop_editor.set_stops(self.stops)
        self.stop_editor.set_selected_stop(self.selected_stop)
        self.stop_panel.set_stops(self.stops)
        self.stop_panel.set_selected_stop(self.selected_stop)
        self.go_first.set_enable(self.selected_stop > 0)
        self.go_prev.set_enable(self.selected_stop > 0)
        self.go_next.set_enable(self.selected_stop < last_indx)
        self.go_last.set_enable(self.selected_stop < last_indx)
        # End stops can never be deleted.
        self.del_button.set_enable(self.selected_stop not in (0, last_indx))
|
PyObjCTest | test_cftree | from CoreFoundation import *
from PyObjCTools.TestSupport import *
# Python 2/3 compatibility: 'long' only exists on Python 2; alias it to int
# so isinstance checks below work on both.
try:
    long
except NameError:
    long = int
import sys
if sys.version_info[0] == 3:
    def cmp(a, b):
        """Python 3 replacement for the removed builtin cmp().

        Returns -1/0/1; for uncomparable mixed types it falls back to
        comparing the type names, mirroring Python 2's ordering behaviour.
        """
        try:
            return (a > b) - (a < b)
        except TypeError:
            return cmp(type(a).__name__, type(b).__name__)
class TestCFTree(TestCase):
    """Exercise the CFTree wrappers exposed by the CoreFoundation bindings."""
    def testTypes(self):
        self.assertIsCFType(CFTreeRef)
    def testCreation(self):
        # The tree context may be an arbitrary Python object.
        context = object()
        tree = CFTreeCreate(None, context)
        self.assertIsInstance(tree, CFTreeRef)
        self.assertTrue(CFTreeGetContext(tree, None) is context)
        CFTreeSetContext(tree, 42)
        self.assertEqual(CFTreeGetContext(tree, None), 42)
    def testCreateTree(self):
        root = CFTreeCreate(None, "root")
        for child in range(10):
            CFTreeAppendChild(root, CFTreeCreate(None, child))
        self.assertEqual(CFTreeGetContext(CFTreeGetFirstChild(root), None), 0)
        def compare(l, r, context):
            # Negated cmp: sort children in descending context order.
            return -cmp(CFTreeGetContext(l, None), CFTreeGetContext(r, None))
        CFTreeSortChildren(root, compare, None)
        self.assertEqual(CFTreeGetContext(CFTreeGetFirstChild(root), None), 9)
    def testTypeID(self):
        v = CFTreeGetTypeID()
        self.assertIsInstance(v, (int, long))
    def testQuerying(self):
        """Parent/child/sibling navigation and the root finder."""
        root = CFTreeCreate(None, "root")
        for child in range(2):
            CFTreeAppendChild(root, CFTreeCreate(None, child))
        p = CFTreeGetParent(root)
        self.assertIs(p, None)
        c = CFTreeGetFirstChild(root)
        self.assertIsInstance(c, CFTreeRef)
        c2 = CFTreeGetChildAtIndex(root, 0)
        self.assertIs(c, c2)
        p = CFTreeGetParent(c)
        self.assertIs(p, root)
        s = CFTreeGetNextSibling(c)
        self.assertIsInstance(s, CFTreeRef)
        p = CFTreeGetParent(s)
        self.assertIs(p, root)
        s2 = CFTreeGetChildAtIndex(root, 1)
        self.assertIs(s, s2)
        s = CFTreeGetNextSibling(s)
        self.assertIs(s, None)
        cnt = CFTreeGetChildCount(root)
        self.assertEqual(cnt, 2)
        cnt = CFTreeGetChildCount(c)
        self.assertEqual(cnt, 0)
        children = CFTreeGetChildren(root, None)
        self.assertIsInstance(children, (list, tuple))
        self.assertEqual(len(children), 2)
        r = CFTreeFindRoot(s2)
        self.assertIs(r, root)
    def testModification(self):
        """Insert, remove, apply-to-children and sorting."""
        root = CFTreeCreate(None, "root")
        for child in range(2):
            CFTreeAppendChild(root, CFTreeCreate(None, child))
        def applyFunc(node, context):
            context.append(CFTreeGetContext(node, None))
        l = []
        CFTreeApplyFunctionToChildren(root, applyFunc, l)
        self.assertEqual(len(l), 2)
        preChild = CFTreeCreate(None, "before")
        postChild = CFTreeCreate(None, "after")
        CFTreePrependChild(root, preChild)
        CFTreeAppendChild(root, postChild)
        self.assertEqual(CFTreeGetChildCount(root), 4)
        n = CFTreeGetChildAtIndex(root, 0)
        self.assertIs(n, preChild)
        n = CFTreeGetChildAtIndex(root, 3)
        self.assertIs(n, postChild)
        s = CFTreeCreate(None, "sibling")
        CFTreeInsertSibling(preChild, s)
        n = CFTreeGetChildAtIndex(root, 1)
        self.assertIs(n, s)
        self.assertEqual(CFTreeGetChildCount(root), 5)
        CFTreeRemove(s)
        self.assertEqual(CFTreeGetChildCount(root), 4)
        def compare(left, right, context):
            left = CFTreeGetContext(left, None)
            right = CFTreeGetContext(right, None)
            return -cmp(left, right)
        before = []
        after = []
        CFTreeApplyFunctionToChildren(root, applyFunc, before)
        CFTreeSortChildren(root, compare, None)
        CFTreeApplyFunctionToChildren(root, applyFunc, after)
        # Sorting must permute the children, not add or drop any.
        self.assertItemsEqual(before, after)
        CFTreeRemoveAllChildren(root)
        self.assertEqual(CFTreeGetChildCount(root), 0)
# Allow running this test module directly.
if __name__ == "__main__":
    main()
|
scripts | asciidoc2html | #!/usr/bin/env python3
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Generate the html documentation based on the asciidoc files."""
import argparse
import io
import os
import pathlib
import re
import shutil
import subprocess
import sys
import tempfile
from typing import Optional
REPO_ROOT = pathlib.Path(__file__).resolve().parents[1]
DOC_DIR = REPO_ROOT / "qutebrowser" / "html" / "doc"
sys.path.insert(0, str(REPO_ROOT))
from scripts import utils
class AsciiDoc:
    """Abstraction of an asciidoc subprocess."""
    # Top-level .asciidoc files (under doc/) rendered for the in-app help.
    FILES = [
        "faq",
        "changelog",
        "contributing",
        "quickstart",
        "userscripts",
        "install",
        "stacktrace",
    ]
    def __init__(self, website: Optional[str]) -> None:
        # website: output directory for the website build, or None to build
        # the in-app documentation instead.
        self._website = website
        self._homedir: Optional[pathlib.Path] = None
        self._themedir: Optional[pathlib.Path] = None
        self._tempdir: Optional[pathlib.Path] = None
        self._failed = False
    def prepare(self) -> None:
        """Get the asciidoc command and create the homedir to use."""
        # A throwaway HOME keeps asciidoc's theme lookup isolated; it is
        # removed in cleanup() unless a build step failed.
        self._homedir = pathlib.Path(tempfile.mkdtemp())
        self._themedir = self._homedir / ".asciidoc" / "themes" / "qute"
        self._tempdir = self._homedir / "tmp"
        self._tempdir.mkdir(parents=True)
        self._themedir.mkdir(parents=True)
    def cleanup(self) -> None:
        """Clean up the temporary home directory for asciidoc."""
        # Kept on failure so the modified sources can be inspected.
        if self._homedir is not None and not self._failed:
            shutil.rmtree(self._homedir)
    def build(self) -> None:
        """Build either the website or the docs."""
        if self._website:
            self._build_website()
        else:
            self._build_docs()
            self._copy_images()
    def _build_docs(self) -> None:
        """Render .asciidoc files to .html sites."""
        files = [
            ((REPO_ROOT / "doc" / "{}.asciidoc".format(f)), DOC_DIR / (f + ".html"))
            for f in self.FILES
        ]
        for src in (REPO_ROOT / "doc" / "help").glob("*.asciidoc"):
            dst = DOC_DIR / (src.stem + ".html")
            files.append((src, dst))
        replacements = [
            # patch image links to use local copy
            (
                "https://raw.githubusercontent.com/qutebrowser/qutebrowser/main/doc/img/cheatsheet-big.png",
                "qute://help/img/cheatsheet-big.png",
            ),
            (
                "https://raw.githubusercontent.com/qutebrowser/qutebrowser/main/doc/img/cheatsheet-small.png",
                "qute://help/img/cheatsheet-small.png",
            ),
            # patch relative links to work with qute://help flat structure
            ("link:../", "link:"),
        ]
        asciidoc_args = ["-a", "source-highlighter=pygments"]
        for src, dst in files:
            assert self._tempdir is not None  # for mypy
            # Rewrite each source into the tempdir with the replacements
            # applied, then render the rewritten copy.
            modified_src = self._tempdir / src.name
            with modified_src.open("w", encoding="utf-8") as moded_f, src.open(
                "r", encoding="utf-8"
            ) as f:
                for line in f:
                    for orig, repl in replacements:
                        line = line.replace(orig, repl)
                    moded_f.write(line)
            self.call(modified_src, dst, *asciidoc_args)
    def _copy_images(self) -> None:
        """Copy image files to qutebrowser/html/doc."""
        print("Copying files...")
        dst_path = DOC_DIR / "img"
        dst_path.mkdir(exist_ok=True)
        for filename in ["cheatsheet-big.png", "cheatsheet-small.png"]:
            src = REPO_ROOT / "doc" / "img" / filename
            dst = dst_path / filename
            shutil.copy(src, dst)
    def _build_website_file(self, root: pathlib.Path, filename: str) -> None:
        """Build a single website file."""
        src = root / filename
        assert self._website is not None  # for mypy
        dst = pathlib.Path(self._website)
        dst = dst / src.parent.relative_to(REPO_ROOT) / (src.stem + ".html")
        dst.parent.mkdir(exist_ok=True)
        assert self._tempdir is not None  # for mypy
        # The modified source starts as a copy of the shared website header.
        modified_src = self._tempdir / src.name
        shutil.copy(REPO_ROOT / "www" / "header.asciidoc", modified_src)
        outfp = io.StringIO()
        header = modified_src.read_text(encoding="utf-8")
        header += "\n\n"
        with src.open("r", encoding="utf-8") as infp:
            outfp.write("\n\n")
            hidden = False
            found_title = False
            title = ""
            last_line = ""
            for line in infp:
                line = line.rstrip()
                # QUTE_WEB_HIDE markers bracket content omitted on the site.
                if line == "// QUTE_WEB_HIDE":
                    assert not hidden
                    hidden = True
                elif line == "// QUTE_WEB_HIDE_END":
                    assert hidden
                    hidden = False
                elif line == "The Compiler <mail@qutebrowser.org>":
                    continue
                elif re.fullmatch(r":\w+:.*", line):
                    # asciidoc field
                    continue
                if not found_title:
                    # Demote the document title (either '===' underline or
                    # '= Title' style) so the generated "<name> | qutebrowser"
                    # title becomes the top-level heading.
                    if re.fullmatch(r"=+", line):
                        line = line.replace("=", "-")
                        found_title = True
                        title = last_line + " | qutebrowser\n"
                        title += "=" * (len(title) - 1)
                    elif re.fullmatch(r"= .+", line):
                        line = "==" + line[1:]
                        found_title = True
                        title = last_line + " | qutebrowser\n"
                        title += "=" * (len(title) - 1)
                if not hidden:
                    # Cross-document links point at rendered .html files.
                    outfp.write(line.replace(".asciidoc[", ".html[") + "\n")
                last_line = line
        current_lines = outfp.getvalue()
        outfp.close()
        modified_str = title + "\n\n" + header + current_lines
        modified_src.write_text(modified_str, encoding="utf-8")
        # NOTE(review): "-a toc" is passed as a single argv element (option
        # and value in one token); looks unusual but kept as-is — confirm
        # the asciidoc CLI accepts this form.
        asciidoc_args = [
            "--theme=qute",
            "-a toc",
            "-a toc-placement=manual",
            "-a",
            "source-highlighter=pygments",
        ]
        self.call(modified_src, dst, *asciidoc_args)
    def _build_website(self) -> None:
        """Prepare and build the website."""
        theme_file = REPO_ROOT / "www" / "qute.css"
        assert self._themedir is not None  # for mypy
        shutil.copy(theme_file, self._themedir)
        assert self._website is not None  # for mypy
        outdir = pathlib.Path(self._website)
        for item_path in pathlib.Path(REPO_ROOT).rglob("*.asciidoc"):
            # Skip includes that are not standalone pages.
            if item_path.stem in ["header", "OpenSans-License"]:
                continue
            self._build_website_file(item_path.parent, item_path.name)
        # Static assets copied verbatim into the website tree.
        copy = {
            "qutebrowser/icons": "icons",
            "doc/img": "doc/img",
            "www/media": "media/",
        }
        for src, dest in copy.items():
            full_src = REPO_ROOT / src
            full_dest = outdir / dest
            try:
                shutil.rmtree(full_dest)
            except FileNotFoundError:
                pass
            shutil.copytree(full_src, full_dest)
        for dst, link_name in [
            ("README.html", "index.html"),
            ((pathlib.Path("doc") / "quickstart.html"), "quickstart.html"),
        ]:
            assert isinstance(dst, (str, pathlib.Path))  # for mypy
            try:
                (outdir / link_name).symlink_to(dst)
            except FileExistsError:
                pass
    def call(self, src: pathlib.Path, dst: pathlib.Path, *args):
        """Call asciidoc for the given files.
        Args:
            src: The source .asciidoc file.
            dst: The destination .html file, or None to auto-guess.
            *args: Additional arguments passed to asciidoc.
        """
        print("Calling asciidoc for {}...".format(src.name))
        cmdline = [sys.executable, "-m", "asciidoc"]
        if dst is not None:
            cmdline += ["--out-file", str(dst)]
        cmdline += args
        cmdline.append(str(src))
        # So the virtualenv's Pygments is found
        bin_path = pathlib.Path(sys.executable).parent
        try:
            env = os.environ.copy()
            # HOME points at our tempdir so asciidoc picks up the qute theme.
            env["HOME"] = str(self._homedir)
            env["PATH"] = str(bin_path) + os.pathsep + env["PATH"]
            subprocess.run(cmdline, check=True, env=env)
        except (subprocess.CalledProcessError, OSError) as e:
            # Keep the tempdir (see cleanup()) and abort the whole build.
            self._failed = True
            utils.print_error(str(e))
            print(
                "Keeping modified sources in {}.".format(self._homedir), file=sys.stderr
            )
            sys.exit(1)
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments.

    Returns a namespace with a single attribute, 'website' (a directory
    name, or None for a normal docs build).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--website", help="Build website into a given directory."
    )
    return arg_parser.parse_args()
def run(**kwargs) -> None:
    """Regenerate documentation.

    Keyword args are forwarded to AsciiDoc (currently just 'website').
    Exits the process when asciidoc is not installed.
    """
    DOC_DIR.mkdir(exist_ok=True)
    builder = AsciiDoc(**kwargs)
    try:
        builder.prepare()
    except FileNotFoundError:
        utils.print_error(
            "Could not find asciidoc! Please install it, e.g. via "
            "pip install -r misc/requirements/requirements-docs.txt"
        )
        sys.exit(1)
    # Always clean the temporary homedir, even on build failure.
    try:
        builder.build()
    finally:
        builder.cleanup()
def main(colors: bool = False) -> None:
    """Generate html files for the online documentation.

    Args:
        colors: Whether to use colored terminal output.
    """
    utils.change_cwd()
    utils.use_color = colors
    run(website=parse_args().website)
# Script entry point: enable colored output when run directly.
if __name__ == "__main__":
    main(colors=True)
|
objc | _descriptors | """
Python <-> Objective-C bridge (PyObjC)
This module defines the core interfaces of the Python<->Objective-C bridge.
"""
__all__ = [
"IBOutlet",
"IBAction",
"accessor",
"Accessor",
"typedAccessor",
"callbackFor",
"selectorFor",
"synthesize",
"namedselector",
"typedSelector",
"namedSelector",
"instancemethod",
"signature",
]
import sys
import textwrap
import warnings
from inspect import getargspec
from objc._objc import (
_C_ID,
_C_NSBOOL,
_C_SEL,
_C_NSUInteger,
_makeClosure,
ivar,
selector,
)
_C_NSRange = [b"{_NSRange=II}", b"{_NSRange=QQ}"][sys.maxsize > 2**32]
#
# Interface builder support.
#
def IBOutlet(name=None):
    """
    Create an instance variable that can be used as an outlet in
    Interface Builder.
    """
    # An anonymous ivar picks up the attribute name it is assigned to;
    # an explicit name overrides that.
    return ivar(isOutlet=1) if name is None else ivar(name, isOutlet=1)
def IBAction(func):
    """
    Return an Objective-C method object that can be used as an action
    in Interface Builder.
    """
    # NOTE(review): only None is rejected here despite the message saying
    # "callable"; other non-callables fall through to selector() as before.
    if func is None:
        raise TypeError("IBAction argument must be a callable")
    # Actions have the fixed ObjC signature 'void action(id sender)'.
    return selector(func, signature=b"v@:@")
def instancemethod(func):
    """Wrap func as an Objective-C instance method (never a class method)."""
    if func is None:
        raise TypeError("instancemethod argument must be a callable")
    return selector(func, isClassMethod=False)
def accessor(func, typeSignature=b"@"):
    """
    Return an Objective-C method object that is conformant with key-value coding
    and key-value observing.

    The selector signature is derived from the Cocoa accessor naming pattern
    in ``func.__name__`` and the number of arguments the function accepts.
    Raises TypeError when the name/arity combination is not recognized.
    """
    # BUG FIX: inspect.getargspec() was removed in Python 3.11.
    # getfullargspec() provides the same four fields we need.
    from inspect import getfullargspec

    spec = getfullargspec(func)
    args, varargs, varkw, defaults = (
        spec.args,
        spec.varargs,
        spec.varkw,
        spec.defaults,
    )
    funcName = func.__name__
    maxArgs = len(args)
    minArgs = maxArgs - len(defaults or ())
    # implicit self
    selArgs = 1 + funcName.count("_")
    if varargs is not None or varkw is not None:
        raise TypeError(
            "%s can not be an accessor because it accepts varargs or varkw"
            % (funcName,)
        )
    if not (minArgs <= selArgs <= maxArgs):
        if minArgs == maxArgs:
            raise TypeError(
                "%s expected to take %d args, but must accept %d from Objective-C (implicit self plus count of underscores)"
                % (funcName, maxArgs, selArgs)
            )
        else:
            raise TypeError(
                "%s expected to take between %d and %d args, but must accept %d from Objective-C (implicit self plus count of underscores)"
                % (funcName, minArgs, maxArgs, selArgs)
            )
    if selArgs == 3:
        # Indexed / validated accessors taking two extra arguments.
        if funcName.startswith("validate") and funcName.endswith("_error_"):
            return selector(func, signature=_C_NSBOOL + b"@:N^@o^@")
        if funcName.startswith("insertObject_in") and funcName.endswith("AtIndex_"):
            return selector(func, signature=b"v@:" + typeSignature + _C_NSUInteger)
        elif funcName.startswith("replaceObjectIn") and funcName.endswith(
            "AtIndex_withObject_"
        ):
            return selector(func, signature=b"v@:" + _C_NSUInteger + typeSignature)
        elif funcName.startswith("get") and funcName.endswith("_range_"):
            return selector(func, signature=b"v@:o^@" + _C_NSRange)
        elif funcName.startswith("insert") and funcName.endswith("_atIndexes_"):
            return selector(func, signature=b"v@:@@")
        elif funcName.startswith("replace") and "AtIndexes_with" in funcName:
            return selector(func, signature=b"v@:@@")
        # pass through to "too many arguments"
    elif selArgs == 2:
        # Single-argument accessors (setters, indexed getters, set membership).
        if funcName.startswith("objectIn") and funcName.endswith("AtIndex_"):
            return selector(func, signature=typeSignature + b"@:" + _C_NSUInteger)
        elif funcName.startswith("removeObjectFrom") and funcName.endswith("AtIndex_"):
            return selector(func, signature=b"v@:" + _C_NSUInteger)
        elif funcName.startswith("remove") and funcName.endswith("AtIndexes_"):
            return selector(func, signature=b"v@:@")
        elif funcName.endswith("AtIndexes_"):
            return selector(func, signature=b"@@:@")
        elif funcName.startswith("memberOf"):
            return selector(func, signature=_C_NSBOOL + b"@:" + typeSignature)
        elif funcName.startswith("add") and funcName.endswith("Object_"):
            return selector(func, signature=b"v@:" + typeSignature)
        elif funcName.startswith("add"):
            return selector(func, signature=b"v@:@")
        elif funcName.startswith("intersect"):
            return selector(func, signature=b"v@:@")
        return selector(func, signature=b"v@:" + typeSignature)
    elif selArgs == 1:
        # Zero-argument getters; countOf/enumerator force the return type.
        if funcName.startswith("countOf"):
            typeSignature = _C_NSUInteger
        elif funcName.startswith("enumerator"):
            typeSignature = b"@"
        return selector(func, signature=typeSignature + b"@:")
    raise TypeError("%s not recognized as an accessor" % (funcName,))
def typedSelector(signature):
    """Decorator factory attaching an explicit Objective-C type *signature*."""

    def _decorator(func):
        if func is None:
            raise TypeError("typedSelector() function argument must be a callable")
        return selector(func, signature=signature)

    return _decorator
def namedSelector(name, signature=None):
    """
    Decorator for overriding the Objective-C SEL for a method, usage:

        @namedSelector("foo:bar:")
        def foobar(self, foo, bar):
            return foo + bar
    """

    def _namedselector(func):
        if func is None:
            # BUG FIX: the original message mistakenly referred to IBAction.
            raise TypeError("namedSelector argument must be a callable")
        if signature is not None:
            return selector(func, selector=name, signature=signature)
        return selector(func, selector=name)

    return _namedselector
def namedselector(name, signature=None):
    """Deprecated alias for :func:`namedSelector`."""
    warnings.warn(
        "use objc.namedSelector instead of objc.namedselector",
        DeprecationWarning,
        stacklevel=2,
    )
    return namedSelector(name, signature)
def typedAccessor(typeSignature):
    """
    Decorator for creating a typed accessor, usage:

        @typedAccessor('i')
        def someIntegerAccessor(self):
            return self.someInteger

        @typedAccessor('i')
        def setSomeIntegerAccessor_(self, anInteger):
            self.someInteger = anInteger
    """

    def _decorator(func):
        return accessor(func, typeSignature)

    return _decorator
def Accessor(func):
    """Deprecated alias for :func:`accessor`."""
    warnings.warn("Use objc.accessor instead of objc.Accessor", DeprecationWarning)
    return accessor(func)
#
# Callback support
#
def callbackFor(callable, argIndex=-1):
    """
    Decorator for converting a function into an object that can be used
    as a callback function for (Objective-)C API's that take such a beast
    as one of their arguments.

    Note that using this decorator for methods is unsupported and that this
    decorator is optional when the callback isn't stored by the called function

    Usage::

        @objc.callbackFor(NSArray.sortedArrayUsingFunction_context_)
        def compare(left, right, context):
            return 1
    """

    def _decorator(function):
        # Attach the C closure so the function can be passed where a C
        # function pointer is expected.
        function.pyobjc_closure = _makeClosure(function, callable, argIndex)
        return function

    return _decorator
def selectorFor(callable, argIndex=-1):
    """
    Decorator that makes sure that the method has the right signature to be
    used as the selector argument to the specified method.

    Usage::

        @objc.selectorFor(NSApplication.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_)
        def sheetDidEnd_returnCode_contextInfo_(self, sheet, returnCode, info):
            pass
    """
    if argIndex == -1:
        # Auto-detect: pick the first SEL argument carrying selector type info.
        for arg in callable.__metadata__()["arguments"]:
            if arg["type"] == _C_SEL and "sel_of_type" in arg:
                signature = arg["sel_of_type"]
                break
        else:
            raise ValueError("No selector argument with type information")
    else:
        try:
            signature = callable.__metadata__()["arguments"][argIndex]["sel_of_type"]
        except (IndexError, KeyError):
            raise ValueError("Not a selector argument with type information")

    def _decorator(function):
        return selector(function, signature=signature)

    return _decorator
def synthesize(name, copy=False, readwrite=True, type=_C_ID, ivarName=None):
    """
    Use this in a class dictionary to synthesize simple getter/setter methods.

    Note: this is only necessary to get proper behaviour when Key-Value coding
    is used and special features (like copying) are needed

    usage::

        class MyClass (NSObject):
            objc.synthesize('someTitle', copy=True)
    """
    if not name:
        raise ValueError("Empty property name")
    if ivarName is None:
        ivarName = "_" + name
    # The methods are injected directly into the class body being executed:
    # frame 1 is the caller, i.e. the class statement's local namespace.
    classDict = sys._getframe(1).f_locals
    # e.g. name "someTitle" -> setter selector "setSomeTitle_"
    setterName = "set%s%s_" % (name[0].upper(), name[1:])
    if copy:
        # Setter stores a copy of the value (KVC "copy" semantics).
        setter = textwrap.dedent(
            """
            def %(name)s(self, value):
                self.%(ivar)s = value.copy()
            """
            % dict(name=setterName, ivar=ivarName)
        )
    else:
        setter = textwrap.dedent(
            """
            def %(name)s(self, value):
                self.%(ivar)s = value
            """
            % dict(name=setterName, ivar=ivarName)
        )
    getter = textwrap.dedent(
        """
        def %(name)s(self):
            return self.%(ivar)s
        """
        % dict(name=name, ivar=ivarName)
    )
    # exec() defines the generated functions straight into the class body.
    if readwrite:
        exec(setter, globals(), classDict)
    exec(getter, globals(), classDict)
    # Back the property with an Objective-C instance variable of *type*.
    classDict[ivarName] = ivar(type=type)
def signature(signature, **kw):
    """
    A Python method decorator that allows easy specification
    of Objective-C selectors.

    Deprecated: use :func:`typedSelector` instead.

    Usage::

        @objc.signature('i@:if')
        def methodWithX_andY_(self, x, y):
            return 0
    """
    warnings.warn(
        "Usage objc.typedSelector instead of objc.signature", DeprecationWarning
    )
    kw["signature"] = signature

    def _decorate(func):
        return selector(func, **kw)

    return _decorate
|
ipc | stargate | import os
from sglib import constants
from sglib.lib import util
from sglib.lib.util import get_wait_file_path, wait_for_finished_file
from sglib.log import LOG
from .abstract import AbstractIPC
class StargateIPC(AbstractIPC):
    """IPC client for the Stargate engine.

    Each method serializes its arguments into the engine's pipe-delimited
    wire format and ships them through AbstractIPC.send_configure().
    """

    def __init__(
        self,
        transport,
        with_audio,
        path="/stargate/main",
    ):
        AbstractIPC.__init__(
            self,
            transport,
            with_audio,
            path,
        )

    def stop_server(self):
        """Ask the engine to exit and disable further IPC sends."""
        LOG.info("stop_server called")
        if self.with_audio:
            self.send_configure("exit", "")
        constants.IPC_ENABLED = False

    def kill_engine(self):
        """Tell the engine to abort immediately."""
        self.send_configure("abort", "")

    def main_vol(self, a_vol):
        """Set the main output volume."""
        self.send_configure("mvol", str(round(a_vol, 8)))

    def update_plugin_control(self, a_plugin_uid, a_port, a_val):
        """Set a single plugin control port to a new value."""
        self.send_configure(
            "pc",
            "|".join(map(str, (a_plugin_uid, a_port, a_val))),
        )

    def configure_plugin(self, a_plugin_uid, a_key, a_message):
        """Send an arbitrary key/message configure pair to a plugin."""
        self.send_configure(
            "co",
            "|".join(map(str, (a_plugin_uid, a_key, a_message))),
        )

    def midi_learn(self):
        """Enter MIDI-learn mode."""
        self.send_configure("ml", "")

    def load_cc_map(self, a_plugin_uid, a_str):
        """Load a MIDI CC map for a plugin."""
        self.send_configure("cm", "|".join(map(str, (a_plugin_uid, a_str))))

    def add_to_audio_pool(
        self,
        a_file: str,
        a_uid: int,
        vol: float = 0.0,
    ):
        """Load a new file into the audio pool

        @a_file: The path to an audio file
        @a_uid: The audio pool uid of the file
        """
        path = os.path.join(
            constants.PROJECT.samplegraph_folder,
            str(a_uid),
        )
        f_wait_file = get_wait_file_path(path)
        a_file = util.pi_path(a_file)
        self.send_configure(
            "wp",
            "|".join(map(str, (a_uid, vol, a_file))),
        )
        # Block until the engine signals the file has been loaded.
        wait_for_finished_file(f_wait_file)

    def audio_pool_entry_volume(self, uid, vol):
        """Update the volume of a single audio pool entry

        @uid: The uid of the audio pool entry
        @vol: The dB amplitude of the audio pool entry
        """
        self.send_configure("apv", f"{uid}|{vol}")

    def rate_env(self, a_in_file, a_out_file, a_start, a_end):
        """Render a rate envelope and block until the engine finishes."""
        f_wait_file = get_wait_file_path(a_out_file)
        self.send_configure(
            "renv",
            f"{a_in_file}\n{a_out_file}\n{a_start}|{a_end}",
        )
        wait_for_finished_file(f_wait_file)

    def pitch_env(self, a_in_file, a_out_file, a_start, a_end):
        """Render a pitch envelope and block until the engine finishes."""
        f_wait_file = get_wait_file_path(a_out_file)
        self.send_configure(
            "penv",
            f"{a_in_file}\n{a_out_file}\n{a_start}|{a_end}",
        )
        wait_for_finished_file(f_wait_file)

    def preview_audio(self, a_file):
        """Start previewing an audio file."""
        self.send_configure("preview", util.pi_path(a_file))

    def stop_preview(self):
        """Stop the current audio preview."""
        self.send_configure("spr", "")

    def set_host(self, a_index):
        """Switch the active host by index."""
        self.send_configure("abs", str(a_index))

    def reload_audio_pool_item(self, a_uid):
        """Reload a single audio pool entry from disk."""
        self.send_configure("wr", str(a_uid))

    def audio_input_volume(self, a_index, a_vol):
        """Set the volume of an audio input channel."""
        self.send_configure(
            "aiv",
            "|".join(map(str, (a_index, a_vol))),
        )

    def pause_engine(self):
        """Pause audio engine processing."""
        self.send_configure("engine", "1")

    def resume_engine(self):
        """Resume audio engine processing."""
        self.send_configure("engine", "0")

    def clean_audio_pool(self, a_msg):
        """Remove unused audio pool entries described by *a_msg*."""
        self.send_configure("cwp", a_msg)
|
radio-crawler | clean_uris | #!/usr/bin/env python2
# Copyright 2011,2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from __future__ import print_function
import multiprocessing
import re
import socket
import time
import urlparse
PROCESSES = 300
TIMEOUT = 30
URIS_IN = "uris.txt"
URIS_OUT = "uris_clean.txt"
def fix_uri(uri):
    """Normalize a stream URI by stripping known admin-page suffixes."""
    # Sub-pages of the shoutcast admin panel: keep only the directory part.
    for marker in (".html", ".php", ".cgi"):
        if marker in uri:
            return uri[: uri.rfind("/") + 1]
    # Strip percent-encoded "?sid=1" leftovers from the end.
    for tail in ("%3Fsid%3D1",):
        if uri.endswith(tail):
            return uri[: -len(tail)]
    return uri
def filter_uri(uri):
    """Return True when *uri* is non-empty and not on the blacklist."""
    if not uri:
        return False
    blacklist = (
        "streamfinder.com",
        ".shoutcast.com",
        ".html",
        ".php",
        "www.facebook.com",
        ".pdf",
        "plus.google.com",
    )
    return not any(entry in uri for entry in blacklist)
def reverse_lookup(uri, timeout=TIMEOUT):
    """Replace an IP-address hostname in *uri* with its reverse-DNS name.

    Returns ``(old_uri, new_uri)``; ``new_uri`` equals ``old_uri`` when the
    lookup failed.  Returns bare ``None`` on KeyboardInterrupt so pool
    workers shut down quietly.
    """
    # NOTE: this sets the process-wide default socket timeout.
    socket.setdefaulttimeout(timeout)
    old_uri = uri
    try:
        p = urlparse.urlsplit(uri)
        port, hostname = p.port, p.hostname
    except ValueError:
        return old_uri, uri
    c = 0
    while c < 50:
        try:
            rev = socket.gethostbyaddr(hostname)[0]
        except KeyboardInterrupt:
            return
        except socket.timeout:
            # Transient timeout: back off briefly and retry (up to 50 times).
            print("timeout")
            time.sleep(0.5)
            c += 1
            continue
        except Exception:
            # BUG FIX: the original did "pass" here, re-entering the loop
            # without incrementing the retry counter, so a persistent
            # resolver error (e.g. socket.herror) spun forever.  Give up
            # instead, matching lookup()'s behaviour.
            break
        else:
            rev = rev.rstrip(".")
            if not rev:
                break
            l = list(p)
            # Rebuild the netloc, keeping an explicit port when present.
            l[1] = (port is not None and ":".join([rev, str(port)])) or rev
            uri = urlparse.urlunsplit(l)
            break
    return old_uri, uri
def lookup(uri, timeout=TIMEOUT):
    """Resolve the hostname of *uri* to its list of IPv4 addresses.

    Returns ``(uri, addrs)`` where *addrs* is a list of IP strings, or
    ``None`` when resolution failed.  Returns bare ``None`` on
    KeyboardInterrupt so pool workers shut down quietly.
    """
    # NOTE: this sets the process-wide default socket timeout.
    socket.setdefaulttimeout(timeout)
    try:
        hostname = urlparse.urlsplit(uri).hostname
    except ValueError:
        # Unparsable URI: report no addresses.
        return uri, None
    addrs = None
    c = 0
    while c < 50:
        try:
            addrs = socket.gethostbyname_ex(hostname)[2]
        except KeyboardInterrupt:
            return
        except socket.timeout:
            # Transient timeout: back off briefly and retry (up to 50 times).
            print("timeout")
            time.sleep(0.5)
            c += 1
            continue
        except Exception:
            # Any other resolver error: give up, addrs stays None.
            pass
        break
    return uri, addrs
def filter_ip(uri):
    """Return True when the URI's hostname is a dotted-quad IP address."""
    host = urlparse.urlsplit(uri).hostname
    return re.match(r"\b(?:\d{1,3}\.){3}\d{1,3}\b$", host) is not None
def validate_uri(uri):
    """Return False for URIs that urlparse cannot split at all."""
    try:
        urlparse.urlsplit(uri)
    except ValueError:
        return False
    else:
        return True
def uri_has_num(uri):
    """Return True when the URI's hostname contains any decimal digit."""
    hostname = urlparse.urlsplit(uri).hostname
    return any(digit in hostname for digit in "1234567890")
def main(in_path, out_path, num_processes):
    """Read URIs from *in_path*, normalize IP/hostname forms, write *out_path*.

    Python 2 only: relies on ``filter`` returning a list and the module-level
    ``urlparse`` import.
    """
    with open(in_path, "rb") as h:
        # Deduplicate and drop empty lines.
        uris = filter(None, set(h.read().splitlines()))
    uris = filter(validate_uri, uris)
    clean = []
    ips = []
    # get all uris with an IP addr as hostname
    for uri in uris:
        if filter_ip(uri):
            ips.append(uri)
        else:
            clean.append(uri)
    print("ips: ", len(ips), " nonip: ", len(clean))
    # Look up the IPs of hostnames that look like the IP is encoded in
    # them somehow; if that is the case for any of the returned IPs, use
    # the first returned IP
    # get all uris that have a number in them
    check_ip = filter(uri_has_num, clean)
    pool = multiprocessing.Pool(num_processes)
    try:
        pfunc = lookup
        for i, (uri, addrs) in enumerate(pool.imap_unordered(pfunc, check_ip)):
            print("%d/%d " % (i + 1, len(check_ip)))
            if not addrs:
                continue
            # Count addresses whose dotted parts are NOT all present in the
            # URI; if none of the addresses matches, keep the hostname form.
            not_found = 0
            for addr in addrs:
                for num in addr.split("."):
                    if num not in uri:
                        not_found += 1
                        break
            if not_found == len(addrs):
                continue
            p = urlparse.urlsplit(uri)
            port, hostname = p.port, p.hostname
            l = list(p)
            # Replace the hostname with the first resolved IP, keeping the port.
            l[1] = addrs[0] + ((port is not None and (":" + str(port))) or "")
            new_uri = urlparse.urlunsplit(l)
            print(uri, " -> ", new_uri)
            clean.remove(uri)
            clean.append(new_uri)
    finally:
        pool.terminate()
        pool.join()
    # Reverse lookup, if the hostname doesn't include the IP,
    # use it instead of the IP
    pool = multiprocessing.Pool(num_processes)
    try:
        pfunc = reverse_lookup
        for i, (ip_uri, uri) in enumerate(pool.imap_unordered(pfunc, ips)):
            print("%d/%d " % (i + 1, len(ips)))
            if uri == ip_uri:
                # Reverse lookup failed: keep the IP form.
                clean.append(uri)
                continue
            hostname = urlparse.urlsplit(ip_uri).hostname
            for num in hostname.split("."):
                if num not in uri:
                    print(ip_uri + " -> " + uri)
                    clean.append(uri)
                    break
            else:
                # all ip parts are in the uri, better use the IP only
                # example: http://127.0.0.1.someserver.com/
                try:
                    clean.remove(uri)
                except ValueError:
                    pass
                clean.append(ip_uri)
    finally:
        pool.terminate()
        pool.join()
    # NOTE(review): message prints the module constant URIS_OUT while the
    # file is actually written to *out_path* — they only agree for the
    # default invocation; confirm intent.
    print("write %s" % URIS_OUT)
    with open(out_path, "wb") as f:
        lines = sorted(filter(filter_uri, map(fix_uri, set(clean))))
        f.write("\n".join(lines))
# Script entry point: clean the crawled URI list with the default settings.
if __name__ == "__main__":
    main(URIS_IN, URIS_OUT, PROCESSES)
|
Cloud | AbstractCloudOutputDevice | from time import time
from typing import Callable, List, Optional
from cura.CuraApplication import CuraApplication
from cura.PrinterOutput.NetworkedPrinterOutputDevice import AuthState
from cura.PrinterOutput.PrinterOutputDevice import ConnectionType
from PyQt6.QtCore import QObject, pyqtSlot
from PyQt6.QtNetwork import QNetworkReply
from UM import i18nCatalog
from UM.FileHandler.FileHandler import FileHandler
from UM.Logger import Logger
from UM.Resources import Resources
from UM.Scene.SceneNode import SceneNode
from ..Models.Http.CloudClusterWithConfigResponse import CloudClusterWithConfigResponse
from ..UltimakerNetworkedPrinterOutputDevice import (
UltimakerNetworkedPrinterOutputDevice,
)
from .CloudApiClient import CloudApiClient
I18N_CATALOG = i18nCatalog("cura")
class AbstractCloudOutputDevice(UltimakerNetworkedPrinterOutputDevice):
    """Cloud output device that aggregates all cloud printers of one type.

    Rather than representing a single physical printer, it fetches the
    clusters for ``printer_type`` from the cloud API and lets the user pick
    a concrete printer in a QML dialog when printing.
    """

    # Minimum interval between cloud API polls, in seconds.
    API_CHECK_INTERVAL = 10.0  # seconds

    def __init__(
        self,
        api_client: CloudApiClient,
        printer_type: str,
        request_write_callback: Callable,
        refresh_callback: Callable,
        parent: Optional[QObject] = None,
    ) -> None:
        """Create the abstract device.

        :param api_client: client used to query the cloud API.
        :param printer_type: machine type whose clusters this device covers.
        :param request_write_callback: called with (unique_id, nodes) once the
            user has picked a concrete printer.
        :param refresh_callback: called when the user requests a refresh.
        """
        self._api = api_client
        properties = {b"printer_type": printer_type.encode()}
        super().__init__(
            device_id=f"ABSTRACT_{printer_type}",
            address="",
            connection_type=ConnectionType.CloudConnection,
            properties=properties,
            parent=parent,
        )
        # Lazily created "choose printer" dialog.
        self._on_print_dialog: Optional[QObject] = None
        # Nodes pending a write; set in requestWrite, consumed in printerSelected.
        self._nodes: Optional[List[SceneNode]] = None
        self._request_write_callback = request_write_callback
        self._refresh_callback = refresh_callback
        self._setInterfaceElements()

    def connect(self) -> None:
        """Connects this device."""
        if self.isConnected():
            return
        Logger.log("i", "Attempting to connect AbstractCloudOutputDevice %s", self.key)
        super().connect()
        self._update()

    def disconnect(self) -> None:
        """Disconnects the device"""
        if not self.isConnected():
            return
        super().disconnect()

    def _update(self) -> None:
        """Called when the network data should be updated."""
        super()._update()
        if time() - self._time_of_last_request < self.API_CHECK_INTERVAL:
            return  # avoid calling the cloud too often
        self._time_of_last_request = time()
        if self._api.account.isLoggedIn:
            self.setAuthenticationState(AuthState.Authenticated)
            # NOTE(review): _last_request_time is set here although
            # _time_of_last_request was already refreshed above — looks
            # redundant or a leftover; confirm against the base class.
            self._last_request_time = time()
            self._api.getClustersByMachineType(
                self.printerType, self._onCompleted, self._onError
            )
        else:
            self.setAuthenticationState(AuthState.NotAuthenticated)

    def _setInterfaceElements(self) -> None:
        """Set all the interface elements and texts for this output device."""
        self.setPriority(
            2
        )  # Make sure we end up below the local networking and above 'save to file'.
        self.setShortDescription(
            I18N_CATALOG.i18nc("@action:button", "Print via cloud")
        )
        self.setDescription(
            I18N_CATALOG.i18nc("@properties:tooltip", "Print via cloud")
        )
        self.setConnectionText(
            I18N_CATALOG.i18nc("@info:status", "Connected via cloud")
        )

    def _onCompleted(self, clusters: List[CloudClusterWithConfigResponse]) -> None:
        """Handle a successful cluster query: refresh the printer list."""
        self._responseReceived()
        all_configurations = []
        for resp in clusters:
            if resp.configuration is not None:
                # Usually when the printer is offline, it doesn't have a configuration...
                all_configurations.append(resp.configuration)
        self._updatePrinters(all_configurations)

    def _onError(self, reply: QNetworkReply, error: QNetworkReply.NetworkError) -> None:
        """Log a failed cluster query; no state change."""
        Logger.log("w", f"Failed to get clusters by machine type: {str(error)}.")

    @pyqtSlot(str)
    def printerSelected(self, unique_id: str):
        """QML callback: the user picked a printer in the dialog."""
        # The device that it defers the actual write to isn't hooked up correctly. So we should emit the write signal
        # here.
        self.writeStarted.emit(self)
        self._request_write_callback(unique_id, self._nodes)
        if self._on_print_dialog:
            self._on_print_dialog.close()

    @pyqtSlot()
    def refresh(self):
        """QML callback: refresh the printer list and poll the API."""
        self._refresh_callback()
        self._update()

    def _openChoosePrinterDialog(self) -> None:
        """Create (once) and show the "choose printer" QML dialog."""
        if self._on_print_dialog is None:
            qml_path = Resources.getPath(
                CuraApplication.ResourceTypes.QmlFiles,
                "Dialogs",
                "ChoosePrinterDialog.qml",
            )
            self._on_print_dialog = CuraApplication.getInstance().createQmlComponent(
                qml_path, {}
            )
        if self._on_print_dialog is None:  # Failed to load QML file.
            return
        self._on_print_dialog.setProperty("manager", self)
        self._on_print_dialog.show()

    def requestWrite(
        self,
        nodes: List[SceneNode],
        file_name: Optional[str] = None,
        limit_mimetypes: bool = False,
        file_handler: Optional[FileHandler] = None,
        **kwargs,
    ) -> None:
        """Stash *nodes* and ask the user which cloud printer to use."""
        if not nodes or len(nodes) < 1:
            Logger.log("w", "Nothing to print.")
            return
        self._nodes = nodes
        self._openChoosePrinterDialog()
|
extractor | xuite | # coding: utf-8
from __future__ import unicode_literals
from ..utils import (
ExtractorError,
float_or_none,
get_element_by_attribute,
parse_iso8601,
remove_end,
)
from .common import InfoExtractor
class XuiteIE(InfoExtractor):
    """Extractor for vlog.xuite.net audio/video pages."""

    IE_DESC = "隨意窩Xuite影音"
    _REGEX_BASE64 = r"(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?"
    _VALID_URL = r"https?://vlog\.xuite\.net/(?:play|embed)/(?P<id>%s)" % _REGEX_BASE64
    _TESTS = [
        {
            # Audio
            "url": "http://vlog.xuite.net/play/RGkzc1ZULTM4NjA5MTQuZmx2",
            "md5": "e79284c87b371424885448d11f6398c8",
            "info_dict": {
                "id": "3860914",
                "ext": "mp3",
                "title": "孤單南半球-歐德陽",
                "description": "孤單南半球-歐德陽",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 247.246,
                "timestamp": 1314932940,
                "upload_date": "20110902",
                "uploader": "阿能",
                "uploader_id": "15973816",
                "categories": ["個人短片"],
            },
        },
        {
            # Video with only one format
            "url": "http://vlog.xuite.net/play/WUxxR2xCLTI1OTI1MDk5LmZsdg==",
            "md5": "21f7b39c009b5a4615b4463df6eb7a46",
            "info_dict": {
                "id": "25925099",
                "ext": "mp4",
                "title": "BigBuckBunny_320x180",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 596.458,
                "timestamp": 1454242500,
                "upload_date": "20160131",
                "uploader": "屁姥",
                "uploader_id": "12158353",
                "categories": ["個人短片"],
                "description": "http://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4",
            },
        },
        {
            # Video with two formats
            "url": "http://vlog.xuite.net/play/bWo1N1pLLTIxMzAxMTcwLmZsdg==",
            "md5": "1166e0f461efe55b62e26a2d2a68e6de",
            "info_dict": {
                "id": "21301170",
                "ext": "mp4",
                "title": "暗殺教室 02",
                "description": "字幕:【極影字幕社】",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 1384.907,
                "timestamp": 1421481240,
                "upload_date": "20150117",
                "uploader": "我只是想認真點",
                "uploader_id": "242127761",
                "categories": ["電玩動漫"],
            },
            "skip": "Video removed",
        },
        {
            # Video with encoded media id
            # from http://forgetfulbc.blogspot.com/2016/06/date.html
            "url": "http://vlog.xuite.net/embed/cE1xbENoLTI3NDQ3MzM2LmZsdg==?ar=0&as=0",
            "info_dict": {
                "id": "27447336",
                "ext": "mp4",
                "title": "男女平權只是口號?專家解釋約會時男生是否該幫女生付錢 (中字)",
                "description": "md5:1223810fa123b179083a3aed53574706",
                "timestamp": 1466160960,
                "upload_date": "20160617",
                "uploader": "B.C. & Lowy",
                "uploader_id": "232279340",
            },
        },
        {
            "url": "http://vlog.xuite.net/play/S1dDUjdyLTMyOTc3NjcuZmx2/%E5%AD%AB%E7%87%95%E5%A7%BF-%E7%9C%BC%E6%B7%9A%E6%88%90%E8%A9%A9",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        """Extract the media info dict from a play/embed page."""
        # /play/ URLs provide embedded video URL and more metadata
        url = url.replace("/embed/", "/play/")
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        error_msg = self._search_regex(
            r'<div id="error-message-content">([^<]+)',
            webpage,
            "error message",
            default=None,
        )
        if error_msg:
            raise ExtractorError(
                "%s returned error: %s" % (self.IE_NAME, error_msg), expected=True
            )
        media_info = self._parse_json(
            self._search_regex(r"var\s+mediaInfo\s*=\s*({.*});", webpage, "media info"),
            video_id,
        )
        video_id = media_info["MEDIA_ID"]
        formats = []
        for key in ("html5Url", "html5HQUrl"):
            video_url = media_info.get(key)
            if not video_url:
                continue
            format_id = self._search_regex(
                r"\bq=(.+?)\b", video_url, "format id", default=None
            )
            # BUG FIX: format_id is None when the URL carries no "q="
            # parameter; the original called format_id.isnumeric()
            # unconditionally and crashed with AttributeError.
            if format_id and format_id.isnumeric():
                ext = "mp4"
                height = int(format_id)
            else:
                ext = format_id or "mp4"
                height = None
            formats.append(
                {
                    "url": video_url,
                    "ext": ext,
                    "format_id": format_id,
                    "height": height,
                }
            )
        self._sort_formats(formats)
        timestamp = media_info.get("PUBLISH_DATETIME")
        if timestamp:
            # Timestamps on the page are in Taiwan local time (UTC+8).
            timestamp = parse_iso8601(timestamp + " +0800", " ")
        category = media_info.get("catName")
        categories = [category] if category else []
        uploader = media_info.get("NICKNAME")
        uploader_url = None
        author_div = get_element_by_attribute("itemprop", "author", webpage)
        if author_div:
            uploader = uploader or self._html_search_meta("name", author_div)
            uploader_url = self._html_search_regex(
                r'<link[^>]+itemprop="url"[^>]+href="([^"]+)"',
                author_div,
                "uploader URL",
                fatal=False,
            )
        return {
            "id": video_id,
            "title": media_info["TITLE"],
            "description": remove_end(media_info.get("metaDesc"), " (Xuite 影音)"),
            "thumbnail": media_info.get("ogImageUrl"),
            "timestamp": timestamp,
            "uploader": uploader,
            "uploader_id": media_info.get("MEMBER_ID"),
            "uploader_url": uploader_url,
            "duration": float_or_none(media_info.get("MEDIA_DURATION"), 1000000),
            "categories": categories,
            "formats": formats,
        }
|
model | meta | # encoding: utf-8
"""SQLAlchemy Metadata and Session object"""
from typing import Any, Optional
import sqlalchemy.orm as orm
from ckan.types import AlchemySession
from sqlalchemy import MetaData, event
from sqlalchemy.engine import Engine
__all__ = ["Session"]
# SQLAlchemy database engine. Updated by model.init_model()
engine: Optional[Engine] = None
Session: AlchemySession = orm.scoped_session(
orm.sessionmaker(
autoflush=False,
autocommit=False,
expire_on_commit=False,
)
)
create_local_session = orm.sessionmaker(
autoflush=False,
autocommit=False,
expire_on_commit=False,
)
@event.listens_for(create_local_session, "before_flush")
@event.listens_for(Session, "before_flush")
def ckan_before_flush(session: Any, flush_context: Any, instances: Any):
    """Create a new _object_cache in the Session object.

    _object_cache is used in DomainObjectModificationExtension to trigger
    notifications on changes. e.g: re-indexing a package in solr upon update.
    """
    cache = getattr(session, "_object_cache", None)
    if cache is None:
        cache = {"new": set(), "deleted": set(), "changed": set()}
        session._object_cache = cache
    # Only count dirty objects whose scalar attributes actually changed.
    modified = [
        obj
        for obj in session.dirty
        if session.is_modified(obj, include_collections=False)
    ]
    cache["new"].update(session.new)
    cache["deleted"].update(session.deleted)
    cache["changed"].update(modified)
@event.listens_for(create_local_session, "after_commit")
@event.listens_for(Session, "after_commit")
def ckan_after_commit(session: Any):
    """Cleans our custom _object_cache attribute after committing."""
    try:
        del session._object_cache
    except AttributeError:
        pass
@event.listens_for(create_local_session, "before_commit")
@event.listens_for(Session, "before_commit")
def ckan_before_commit(session: Any):
    """Calls all extensions implementing IDomainObjectModification interface."""
    # Local import — presumably avoids a circular import at module load;
    # confirm before moving to the top of the file.
    from ckan.model.modification import DomainObjectModificationExtension

    DomainObjectModificationExtension().before_commit(session)
@event.listens_for(create_local_session, "after_rollback")
@event.listens_for(Session, "after_rollback")
def ckan_after_rollback(session: Any):
    """Cleans our custom _object_cache attribute after rollback."""
    try:
        del session._object_cache
    except AttributeError:
        pass
# mapper = Session.mapper
mapper = orm.mapper
# Global metadata. If you have multiple databases with overlapping table
# names, you'll need a metadata for each database
metadata = MetaData()
|
scripts | centroid_post | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2015 Dan Falck <ddfalck@gmail.com> *
# * Copyright (c) 2020 Schildkroet *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import datetime
import os
import FreeCAD
import Path
import Path.Post.Utils as PostUtils
import PathScripts.PathUtils as PathUtils
from FreeCAD import Units
TOOLTIP = """
This is a postprocessor file for the Path workbench. It is used to
take a pseudo-G-code fragment outputted by a Path object, and output
real G-code suitable for a centroid 3 axis mill. This postprocessor, once placed
in the appropriate Path/Tool folder, can be used directly from inside
FreeCAD, via the GUI importer or via python scripts with:
import centroid_post
centroid_post.export(object,"/path/to/file.ncc","")
"""
TOOLTIP_ARGS = """
Arguments for centroid:
--header,--no-header ... output headers (--header)
--comments,--no-comments ... output comments (--comments)
--line-numbers,--no-line-numbers ... prefix with line numbers (--no-lin-numbers)
--show-editor, --no-show-editor ... pop up editor before writing output(--show-editor)
--feed-precision=1 ... number of digits of precision for feed rate. Default=1
--axis-precision=4 ... number of digits of precision for axis moves. Default=4
--inches ... Convert output for US imperial mode (G20)
"""
# These globals set common customization preferences
OUTPUT_COMMENTS = True
OUTPUT_HEADER = True
OUTPUT_LINE_NUMBERS = False
if FreeCAD.GuiUp:
SHOW_EDITOR = True
else:
SHOW_EDITOR = False
MODAL = False # if true commands are suppressed if the same as previous line.
COMMAND_SPACE = " "
LINENR = 100 # line number starting value
# These globals will be reflected in the Machine configuration of the project
UNITS = "G21" # G21 for metric, G20 for us standard
UNIT_FORMAT = "mm"
UNIT_SPEED_FORMAT = "mm/min"
MACHINE_NAME = "Centroid"
CORNER_MIN = {"x": -609.6, "y": -152.4, "z": 0} # use metric for internal units
CORNER_MAX = {"x": 609.6, "y": 152.4, "z": 304.8} # use metric for internal units
AXIS_PRECISION = 4
FEED_PRECISION = 1
SPINDLE_DECIMALS = 0
COMMENT = ";"
# gCode header with information about CAD-software, post-processor
# and date/time
if FreeCAD.ActiveDocument:
cam_file = os.path.basename(FreeCAD.ActiveDocument.FileName)
else:
cam_file = "<None>"
HEADER = """;Exported by FreeCAD
;Post Processor: {}
;CAM file: {}
;Output Time: {}
""".format(__name__, cam_file, str(datetime.datetime.now()))
# Preamble text will appear at the beginning of the GCODE output file.
PREAMBLE = """G53 G00 G17
"""
# Postamble text will appear following the last operation.
POSTAMBLE = """M99
"""
TOOLRETURN = """M5
M25
G49 H0
""" # spindle off,height offset canceled,spindle retracted (M25 is a centroid command to retract spindle)
ZAXISRETURN = """G91 G28 X0 Z0
G90
"""
SAFETYBLOCK = """G90 G80 G40 G49
"""
# Pre operation text will be inserted before every operation
PRE_OPERATION = """"""
# Post operation text will be inserted after every operation
POST_OPERATION = """"""
# Tool Change commands will be inserted before a tool change
TOOL_CHANGE = """"""
# to distinguish python built-in open function from the one declared below
if open.__module__ in ["__builtin__", "io"]:
pythonopen = open
def processArguments(argstring):
    """Apply the post-processor's command-line arguments to module globals."""
    global UNITS
    global UNIT_SPEED_FORMAT
    global UNIT_FORMAT
    # Flags that simply set one boolean global.
    simple_flags = {
        "--header": ("OUTPUT_HEADER", True),
        "--no-header": ("OUTPUT_HEADER", False),
        "--comments": ("OUTPUT_COMMENTS", True),
        "--no-comments": ("OUTPUT_COMMENTS", False),
        "--line-numbers": ("OUTPUT_LINE_NUMBERS", True),
        "--no-line-numbers": ("OUTPUT_LINE_NUMBERS", False),
        "--show-editor": ("SHOW_EDITOR", True),
        "--no-show-editor": ("SHOW_EDITOR", False),
    }
    for arg in argstring.split():
        if arg in simple_flags:
            name, value = simple_flags[arg]
            globals()[name] = value
        elif arg.split("=")[0] == "--axis-precision":
            globals()["AXIS_PRECISION"] = arg.split("=")[1]
        elif arg.split("=")[0] == "--feed-precision":
            globals()["FEED_PRECISION"] = arg.split("=")[1]
        elif arg == "--inches":
            UNITS = "G20"
            UNIT_SPEED_FORMAT = "in/min"
            UNIT_FORMAT = "in"
def export(objectslist, filename, argstring):
    """Generate Centroid G-code for *objectslist* and write it to *filename*.

    :param objectslist: FreeCAD Path objects (and tool controllers) to post.
    :param filename: output path, or "-" to skip writing to disk.
    :param argstring: post-processor options (see TOOLTIP_ARGS).
    :return: the final G-code string.
    """
    processArguments(argstring)
    for i in objectslist:
        print(i.Name)
    global UNITS
    global UNIT_FORMAT
    global UNIT_SPEED_FORMAT
    print("postprocessing...")
    gcode = ""
    # write header
    if OUTPUT_HEADER:
        gcode += HEADER
    gcode += SAFETYBLOCK
    # Write the preamble
    if OUTPUT_COMMENTS:
        # List each tool controller as a ";T<n>=<name>" comment.
        for item in objectslist:
            if hasattr(item, "Proxy") and isinstance(
                item.Proxy, Path.Tool.Controller.ToolController
            ):
                gcode += ";T{}={}\n".format(item.ToolNumber, item.Name)
        gcode += linenumber() + ";begin preamble\n"
    for line in PREAMBLE.splitlines(True):
        gcode += linenumber() + line
    gcode += linenumber() + UNITS + "\n"
    for obj in objectslist:
        # do the pre_op
        if OUTPUT_COMMENTS:
            gcode += linenumber() + ";begin operation\n"
        for line in PRE_OPERATION.splitlines(True):
            gcode += linenumber() + line
        gcode += parse(obj)
        # do the post_op
        if OUTPUT_COMMENTS:
            gcode += linenumber() + ";end operation: %s\n" % obj.Label
        for line in POST_OPERATION.splitlines(True):
            gcode += linenumber() + line
    # do the post_amble
    if OUTPUT_COMMENTS:
        gcode += ";begin postamble\n"
    for line in TOOLRETURN.splitlines(True):
        gcode += linenumber() + line
    for line in SAFETYBLOCK.splitlines(True):
        gcode += linenumber() + line
    for line in POSTAMBLE.splitlines(True):
        gcode += linenumber() + line
    if SHOW_EDITOR:
        # Let the user review/edit the G-code before it is written out.
        dia = PostUtils.GCodeEditorDialog()
        dia.editor.setText(gcode)
        result = dia.exec_()
        if result:
            final = dia.editor.toPlainText()
        else:
            final = gcode
    else:
        final = gcode
    print("done postprocessing.")
    if not filename == "-":
        # BUG FIX: use a context manager so the file is closed even when
        # the write fails (the original left the handle open on error).
        with pythonopen(filename, "w") as gfile:
            gfile.write(final)
    return final
def linenumber():
    """Return the next line-number prefix ("N<number> "), or "" when
    line numbering is disabled."""
    global LINENR
    if OUTPUT_LINE_NUMBERS is True:
        # Centroid convention: advance line numbers in steps of ten.
        LINENR += 10
        return f"N{LINENR} "
    return ""
def parse(pathobj):
    """Convert a FreeCAD Path object (or compound of them) to a G-code string.

    Compounds/projects (anything with a ``Group``) are parsed recursively.
    Simple path objects have each command formatted with the module's
    precision, unit and modal settings; group members without a ``Path``
    attribute (e.g. stock) contribute nothing.
    """
    out = ""
    lastcommand = None
    axis_precision_string = "." + str(AXIS_PRECISION) + "f"
    feed_precision_string = "." + str(FEED_PRECISION) + "f"
    # This list controls the order of parameters.
    # centroid doesn't want K properties on XY plane; Arcs need work.
    params = ["X", "Y", "Z", "A", "B", "I", "J", "F", "S", "T", "Q", "R", "L", "H"]
    if hasattr(pathobj, "Group"):  # We have a compound or project.
        for p in pathobj.Group:
            out += parse(p)
        return out
    else:  # parsing simple path
        # groups might contain non-path things like stock.
        if not hasattr(pathobj, "Path"):
            return out
        for c in PathUtils.getPathWithPlacement(pathobj).Commands:
            commandlist = []  # list of elements in the command, code and params.
            command = c.Name  # command M or G code or comment string
            # startswith() is safe for an empty command name.
            if command.startswith("("):
                command = PostUtils.fcoms(command, COMMENT)
            commandlist.append(command)
            # if modal: only print the command if it is not the same as the
            # last one
            if MODAL is True:
                if command == lastcommand:
                    commandlist.pop(0)
            # Now add the remaining parameters in order
            for param in params:
                if param in c.Parameters:
                    if param == "F":
                        if c.Name not in [
                            "G0",
                            "G00",
                        ]:  # centroid doesn't use rapid speeds
                            speed = Units.Quantity(
                                c.Parameters["F"], FreeCAD.Units.Velocity
                            )
                            commandlist.append(
                                param
                                + format(
                                    float(speed.getValueAs(UNIT_SPEED_FORMAT)),
                                    feed_precision_string,
                                )
                            )
                    elif param == "H":
                        commandlist.append(param + str(int(c.Parameters["H"])))
                    elif param == "S":
                        commandlist.append(
                            param
                            + PostUtils.fmt(c.Parameters["S"], SPINDLE_DECIMALS, "G21")
                        )
                    elif param == "T":
                        commandlist.append(param + str(int(c.Parameters["T"])))
                    else:
                        pos = Units.Quantity(c.Parameters[param], FreeCAD.Units.Length)
                        commandlist.append(
                            param
                            + format(
                                float(pos.getValueAs(UNIT_FORMAT)),
                                axis_precision_string,
                            )
                        )
            # NOTE: the original built an "outstr" string from commandlist here
            # and never used it; that dead code has been removed.
            # store the latest command
            lastcommand = command
            # Check for Tool Change:
            if command == "M6":
                for line in TOOL_CHANGE.splitlines(True):
                    out += linenumber() + line
            # prepend a line number and append a newline
            if len(commandlist) >= 1:
                if OUTPUT_LINE_NUMBERS:
                    commandlist.insert(0, (linenumber()))
                # append the line to the final output
                for w in commandlist:
                    out += w + COMMAND_SPACE
                out = out.strip() + "\n"
    return out
|
versions | 402ce1583e3f_initial_config_model | """Initial config model
Revision ID: 402ce1583e3f
Revises:
Create Date: 2015-12-23 15:05:36.841153
"""
# revision identifiers, used by Alembic.
revision = "402ce1583e3f"
down_revision = None
branch_labels = None
depends_on = None
import sqlalchemy as sa
from alembic import op
def upgrade():
    """Create the initial schema: config_value, game_version and game_build."""
    op.create_table(
        "config_value",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("name", sa.String(32), index=True, nullable=False),
        sa.Column("value", sa.Text(), nullable=False),
        sa.Column("created_on", sa.DateTime, nullable=False),
    )
    op.create_table(
        "game_version",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("sha256", sa.String(64), index=True, nullable=False),
        sa.Column("version", sa.String(32), nullable=False),
        sa.Column("discovered_on", sa.DateTime, nullable=False),
    )
    op.create_table(
        "game_build",
        sa.Column("id", sa.Integer, primary_key=True),
        # unique FK: at most one build row per game_version row
        sa.Column(
            "version",
            sa.Integer,
            sa.ForeignKey("game_version.id"),
            index=True,
            unique=True,
            nullable=False,
        ),
        sa.Column("build", sa.String(16), nullable=False),
        sa.Column("released_on", sa.DateTime, nullable=False),
        sa.Column("discovered_on", sa.DateTime, nullable=False),
    )
def downgrade():
    """Drop the tables created by upgrade(), children first.

    Fix: Alembic's Operations API has no ``op.drop``; the correct call is
    ``op.drop_table`` — the original would raise AttributeError at runtime.
    """
    op.drop_table("game_build")
    op.drop_table("game_version")
    op.drop_table("config_value")
|
mobile-app | fcm_relay | import logging
from apps.auth_token.auth import ApiTokenAuthentication
from apps.mobile_app.models import FCMDevice
from celery.utils.log import get_task_logger
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
from django.conf import settings
from firebase_admin.exceptions import FirebaseError
from firebase_admin.messaging import (
AndroidConfig,
APNSConfig,
APNSPayload,
Aps,
ApsAlert,
CriticalSound,
Message,
)
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
from rest_framework.views import APIView
# Celery task logger for this module; DEBUG level so FCM responses emitted by
# fcm_relay_async are visible in the task logs.
task_logger = get_task_logger(__name__)
task_logger.setLevel(logging.DEBUG)
class FCMRelayThrottler(UserRateThrottle):
    """Per-user rate limit for the FCM relay endpoint: 300 requests/minute."""

    scope = "fcm_relay"
    rate = "300/m"
class FCMRelayView(APIView):
    """
    This view accepts push notifications from OSS instances and forwards these requests to FCM.
    Requests to this endpoint come from OSS instances: apps.mobile_app.tasks.send_push_notification_to_fcm_relay.
    The view uses public API authentication, so an OSS instance must be connected to cloud to use FCM relay.
    """

    authentication_classes = [ApiTokenAuthentication]
    permission_classes = [IsAuthenticated]
    throttle_classes = [FCMRelayThrottler]

    def post(self, request):
        """Validate the relayed payload and queue it for async delivery."""
        if not settings.FCM_RELAY_ENABLED:
            return Response(status=status.HTTP_404_NOT_FOUND)
        try:
            # "token", "data" and "apns" are required; "android" is optional.
            payload = {
                "token": request.data["token"],
                "data": request.data["data"],
                "apns": request.data["apns"],
                "android": request.data.get("android"),
            }
        except KeyError:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        fcm_relay_async.delay(**payload)
        return Response(status=status.HTTP_200_OK)
@shared_dedicated_queue_retry_task(
    autoretry_for=(Exception,),
    retry_backoff=True,
    max_retries=1 if settings.DEBUG else 5,
)
def fcm_relay_async(token, data, apns, android=None):
    """Send one relayed push notification to FCM.

    Raises the FirebaseError returned by send_message() so Celery's
    autoretry kicks in.
    """
    fcm_message = _get_message_from_request_data(token, data, apns, android)
    # https://firebase.google.com/docs/cloud-messaging/http-server-ref#interpret-downstream
    result = FCMDevice(registration_id=token).send_message(fcm_message)
    task_logger.debug(f"FCM response: {result}")
    # send_message() returns the error object instead of raising it.
    if isinstance(result, FirebaseError):
        raise result
def _get_message_from_request_data(token, data, apns, android):
    """Build a firebase Message from the JSON payload sent by an OSS instance."""
    android_config = AndroidConfig(**android) if android else None
    return Message(
        token=token,
        data=data,
        apns=_deserialize_apns(apns),
        android=android_config,
    )
def _deserialize_apns(apns):
    """Create an APNSConfig object from the JSON payload of an OSS instance.

    Returns None when the payload has no "aps" dict.  The well-known keys
    (thread-id, badge, alert, sound) become Aps fields; everything left
    over is passed through as custom_data.
    """
    aps = apns.get("payload", {}).get("aps", {})
    if not aps:
        return None
    # Fix: work on a shallow copy so the caller's request payload is not
    # mutated by the pops below.
    aps = dict(aps)
    thread_id = aps.pop("thread-id", None)
    badge = aps.pop("badge", None)
    alert = aps.pop("alert", None)
    if isinstance(alert, dict):
        alert = ApsAlert(**alert)
    sound = aps.pop("sound", None)
    if isinstance(sound, dict):
        sound = CriticalSound(**sound)
    # All remaining keys are forwarded as custom_data.
    return APNSConfig(
        payload=APNSPayload(
            aps=Aps(
                thread_id=thread_id,
                badge=badge,
                alert=alert,
                sound=sound,
                custom_data=aps,
            )
        ),
        headers=apns.get("headers"),
    )
|
utils | qtlog | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Loggers and utilities related to Qt logging."""
import argparse
import contextlib
import faulthandler
import logging
import sys
import traceback
from typing import Callable, Iterator, Optional, cast
from qutebrowser.qt import core as qtcore
from qutebrowser.qt import machinery
from qutebrowser.utils import log
# argparse.Namespace stored by init(); read by qt_message_handler to decide
# whether to attach a Python stack trace (--debug).
_args = None
def init(args: argparse.Namespace) -> None:
    """Install Qt message handler based on the argparse namespace passed."""
    global _args
    # Keep the namespace around so qt_message_handler can check args.debug.
    _args = args
    qtcore.qInstallMessageHandler(qt_message_handler)
@qtcore.pyqtSlot()
def shutdown_log() -> None:
    """Uninstall the Qt message handler (restores Qt's default handling)."""
    qtcore.qInstallMessageHandler(None)
@contextlib.contextmanager
def disable_qt_msghandler() -> Iterator[None]:
    """Contextmanager which temporarily disables the Qt message handler."""
    previous = qtcore.qInstallMessageHandler(None)
    if machinery.IS_QT6:
        # cast str to Optional[str] to be compatible with PyQt6 type hints for
        # qInstallMessageHandler
        previous = cast(
            Optional[
                Callable[
                    [qtcore.QtMsgType, qtcore.QMessageLogContext, Optional[str]], None
                ]
            ],
            previous,
        )
    try:
        yield
    finally:
        # Restore whatever handler was active before entering the context.
        qtcore.qInstallMessageHandler(previous)
def qt_message_handler(
    msg_type: qtcore.QtMsgType, context: qtcore.QMessageLogContext, msg: Optional[str]
) -> None:
    """Qt message handler to redirect qWarning etc. to the logging system.

    Args:
        msg_type: The level of the message.
        context: The source code location of the message.
        msg: The message text.
    """
    # Mapping from Qt logging levels to the matching logging module levels.
    # Note we map critical to ERROR as it's actually "just" an error, and fatal
    # to critical.
    qt_to_logging = {
        qtcore.QtMsgType.QtDebugMsg: logging.DEBUG,
        qtcore.QtMsgType.QtWarningMsg: logging.WARNING,
        qtcore.QtMsgType.QtCriticalMsg: logging.ERROR,
        qtcore.QtMsgType.QtFatalMsg: logging.CRITICAL,
        qtcore.QtMsgType.QtInfoMsg: logging.INFO,
    }
    # Change levels of some well-known messages to debug so they don't get
    # shown to the user.
    #
    # If a message starts with any text in suppressed_msgs, it's not logged as
    # error.
    suppressed_msgs = [
        # PNGs in Qt with broken color profile
        # https://bugreports.qt.io/browse/QTBUG-39788
        (
            "libpng warning: iCCP: Not recognizing known sRGB profile that has "
            "been edited"
        ),
        "libpng warning: iCCP: known incorrect sRGB profile",
        # Hopefully harmless warning
        "OpenType support missing for script ",
        # Error if a QNetworkReply gets two different errors set. Harmless Qt
        # bug on some pages.
        # https://bugreports.qt.io/browse/QTBUG-30298
        (
            "QNetworkReplyImplPrivate::error: Internal problem, this method must "
            "only be called once."
        ),
        # Sometimes indicates missing text, but most of the time harmless
        "load glyph failed ",
        # Harmless, see https://bugreports.qt.io/browse/QTBUG-42479
        (
            "content-type missing in HTTP POST, defaulting to "
            "application/x-www-form-urlencoded. "
            "Use QNetworkRequest::setHeader() to fix this problem."
        ),
        # https://bugreports.qt.io/browse/QTBUG-43118
        "Using blocking call!",
        # Hopefully harmless
        (
            '"Method "GetAll" with signature "s" on interface '
            '"org.freedesktop.DBus.Properties" doesn\'t exist'
        ),
        (
            '"Method \\"GetAll\\" with signature \\"s\\" on interface '
            '\\"org.freedesktop.DBus.Properties\\" doesn\'t exist\\n"'
        ),
        "WOFF support requires QtWebKit to be built with zlib support.",
        # Weird Enlightment/GTK X extensions
        'QXcbWindow: Unhandled client message: "_E_',
        'QXcbWindow: Unhandled client message: "_ECORE_',
        'QXcbWindow: Unhandled client message: "_GTK_',
        # Happens on AppVeyor CI
        "SetProcessDpiAwareness failed:",
        # https://bugreports.qt.io/browse/QTBUG-49174
        (
            "QObject::connect: Cannot connect (null)::stateChanged("
            "QNetworkSession::State) to "
            "QNetworkReplyHttpImpl::_q_networkSessionStateChanged("
            "QNetworkSession::State)"
        ),
        # https://bugreports.qt.io/browse/QTBUG-53989
        (
            "Image of format '' blocked because it is not considered safe. If "
            "you are sure it is safe to do so, you can white-list the format by "
            "setting the environment variable QTWEBKIT_IMAGEFORMAT_WHITELIST="
        ),
        # Installing Qt from the installer may cause it looking for SSL3 or
        # OpenSSL 1.0 which may not be available on the system
        "QSslSocket: cannot resolve ",
        "QSslSocket: cannot call unresolved function ",
        # When enabling debugging with QtWebEngine
        (
            "Remote debugging server started successfully. Try pointing a "
            "Chromium-based browser to "
        ),
        # https://github.com/qutebrowser/qutebrowser/issues/1287
        "QXcbClipboard: SelectionRequest too old",
        # https://github.com/qutebrowser/qutebrowser/issues/2071
        'QXcbWindow: Unhandled client message: ""',
        # https://codereview.qt-project.org/176831
        "QObject::disconnect: Unexpected null parameter",
        # https://bugreports.qt.io/browse/QTBUG-76391
        "Attribute Qt::AA_ShareOpenGLContexts must be set before "
        "QCoreApplication is created.",
        # Qt 6.4 beta 1: https://bugreports.qt.io/browse/QTBUG-104741
        "GL format 0 is not supported",
    ]
    # not using utils.is_mac here, because we can't be sure we can successfully
    # import the utils module here.
    if sys.platform == "darwin":
        suppressed_msgs += [
            # https://bugreports.qt.io/browse/QTBUG-47154
            (
                "virtual void QSslSocketBackendPrivate::transmit() SSLRead "
                "failed with: -9805"
            ),
        ]
    if not msg:
        msg = "Logged empty message!"
    # Pick the logging level: suppressed/known-noisy messages get DEBUG,
    # everything else maps through qt_to_logging.
    if any(msg.strip().startswith(pattern) for pattern in suppressed_msgs):
        level = logging.DEBUG
    elif context.category == "qt.webenginecontext" and (
        msg.strip().startswith("GL Type: ")  # Qt 6.3
        or msg.strip().startswith("GLImplementation:")  # Qt 6.2
    ):
        level = logging.DEBUG
    else:
        level = qt_to_logging[msg_type]
    if context.line is None:
        lineno = -1  # type: ignore[unreachable]
    else:
        lineno = context.line
    if context.function is None:
        func = "none"  # type: ignore[unreachable]
    elif ":" in context.function:
        func = '"{}"'.format(context.function)
    else:
        func = context.function
    # Logger name: plain "qt" for the default category, "qt-<category>" else.
    if context.category is None or context.category == "default":
        name = "qt"
    else:
        name = "qt-" + context.category
    if msg.splitlines()[0] == (
        "This application failed to start because it "
        "could not find or load the Qt platform plugin "
        '"xcb".'
    ):
        # Handle this message specially.
        msg += (
            "\n\nOn Archlinux, this should fix the problem:\n"
            "    pacman -S libxkbcommon-x11"
        )
        faulthandler.disable()
    assert _args is not None
    if _args.debug:
        stack: Optional[str] = "".join(traceback.format_stack())
    else:
        stack = None
    record = log.qt.makeRecord(
        name=name,
        level=level,
        fn=context.file,
        lno=lineno,
        msg=msg,
        args=(),
        exc_info=None,
        func=func,
        sinfo=stack,
    )
    log.qt.handle(record)
class QtWarningFilter(logging.Filter):

    """Logging filter that hides Qt warnings starting with a given pattern.

    Attributes:
        _pattern: The start of the message to suppress.
    """

    def __init__(self, pattern: str) -> None:
        super().__init__()
        self._pattern = pattern

    def filter(self, record: logging.LogRecord) -> bool:
        """Return True (i.e. log the record) unless it matches the pattern."""
        return not record.msg.strip().startswith(self._pattern)
@contextlib.contextmanager
def hide_qt_warning(pattern: str, logger: str = "qt") -> Iterator[None]:
    """Hide Qt warnings matching the given regex."""
    target_logger = logging.getLogger(logger)
    warning_filter = QtWarningFilter(pattern)
    target_logger.addFilter(warning_filter)
    try:
        yield
    finally:
        # Always detach the filter, even if the body raised.
        target_logger.removeFilter(warning_filter)
|
photos | views | from pathlib import Path
from django.conf import settings
from django.http import (
HttpResponse,
HttpResponseNotFound,
HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import get_object_or_404
from photonix.photos.models import Library
from photonix.photos.utils.thumbnails import get_thumbnail
def thumbnailer(request, type, id, width, height, crop, quality):
    """Redirect to a generated thumbnail URL for a photo or photo file.

    Only (width, height, crop, quality) combinations whitelisted in
    settings.THUMBNAIL_SIZES are served; anything else returns a 404.
    """
    width, height, quality = int(width), int(height), int(quality)
    # Find the first configured size matching all four request parameters.
    matched_size = next(
        (
            size
            for size in settings.THUMBNAIL_SIZES
            if (width, height, crop, quality) == (size[0], size[1], size[2], size[3])
        ),
        None,
    )
    if matched_size is None:
        return HttpResponseNotFound("No photo thumbnail with these parameters")
    force_accurate = matched_size[5]
    photo_id = id if type == "photo" else None
    photo_file_id = id if type == "photofile" else None
    path = get_thumbnail(
        photo_file=photo_file_id,
        photo=photo_id,
        width=width,
        height=height,
        crop=crop,
        quality=quality,
        return_type="url",
        force_accurate=force_accurate,
    )
    return HttpResponseRedirect(path)
def upload(request):
    """Receive uploaded files and write them into the user's library path.

    Requires ``library_id`` as a GET parameter (404 if the library does not
    belong to the requesting user).  Each file in request.FILES is streamed
    to disk inside the library's first path.
    """
    if "library_id" not in request.GET:
        return JsonResponse(
            {"ok": False, "message": "library_id must be supplied as GET parameter"},
            status=400,
        )
    user = request.user
    lib = get_object_or_404(Library, id=request.GET["library_id"], user=user)
    libpath = lib.paths.all()[0]
    for fn, file in request.FILES.items():
        # Security fix: the field name comes straight from the client; keep
        # only its basename so a crafted name like "../../x" cannot escape
        # the library directory (path traversal).
        dest = Path(libpath.path) / Path(fn).name
        with open(dest, "wb+") as destination:
            print(f"Writing to {dest}")
            for chunk in file.chunks():
                destination.write(chunk)
    return JsonResponse({"ok": True})
def dummy_thumbnail_response(request, path):
    # Only used during testing to return thumbnail images. Everywhere else,
    # Nginx handles these requests.
    full_path = Path(settings.THUMBNAIL_ROOT) / path
    with open(full_path, "rb") as f:
        return HttpResponse(f.read(), content_type="image/jpeg")
|
builtinMarketBrowser | pfSearchBox | # noinspection PyPackageRequirements
import gui.utils.color as colorUtils
import gui.utils.draw as drawUtils
import wx
from gui.utils.helpers_wxPython import HandleCtrlBackspace
# Custom events posted by PFSearchBox: button clicks and text activity.
# NOTE(review): only "import wx" is visible above, but wx.lib.newevent is a
# submodule that normally needs its own import — confirm it is imported
# elsewhere before module load, otherwise this raises AttributeError.
SearchButton, EVT_SEARCH_BTN = wx.lib.newevent.NewEvent()
CancelButton, EVT_CANCEL_BTN = wx.lib.newevent.NewEvent()
TextEnter, EVT_TEXT_ENTER = wx.lib.newevent.NewEvent()
TextTyped, EVT_TEXT = wx.lib.newevent.NewEvent()
class PFSearchBox(wx.Window):
    """Owner-drawn search box: a text control flanked by optional search and
    cancel bitmap buttons, painted over a cached gradient background.

    Posts SearchButton / CancelButton when the buttons are clicked, and
    TextTyped / TextEnter as the user edits or confirms the text.
    """

    def __init__(
        self,
        parent,
        id=wx.ID_ANY,
        value="",
        pos=wx.DefaultPosition,
        size=wx.Size(-1, 24),
        style=0,
    ):
        # NOTE(review): the "value" parameter is accepted but never applied to
        # the edit box — confirm whether any caller relies on it.
        wx.Window.__init__(self, parent, id, pos, size, style=style)
        self.isSearchButtonVisible = False
        self.isCancelButtonVisible = False
        self.descriptiveText = "Search"
        self.searchBitmap = None
        self.cancelBitmap = None
        self.bkBitmap = None
        # Forces background and layout recalculation on the next paint.
        self.resized = True
        self.searchButtonX = 0
        self.searchButtonY = 0
        self.searchButtonPressed = False
        self.cancelButtonX = 0
        self.cancelButtonY = 0
        self.cancelButtonPressed = False
        self.editX = 0
        self.editY = 0
        self.padding = 4
        self._hl = False
        w, h = size
        self.EditBox = wx.TextCtrl(
            self,
            wx.ID_ANY,
            "",
            wx.DefaultPosition,
            (-1, h - 2 if "wxGTK" in wx.PlatformInfo else -1),
            wx.TE_PROCESS_ENTER | (wx.BORDER_NONE if "wxGTK" in wx.PlatformInfo else 0),
        )
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBk)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        # self.EditBox.ChangeValue(self.descriptiveText)
        self.EditBox.Bind(wx.EVT_SET_FOCUS, self.OnEditSetFocus)
        self.EditBox.Bind(wx.EVT_KILL_FOCUS, self.OnEditKillFocus)
        self.EditBox.Bind(wx.EVT_KEY_DOWN, self.OnKeyPress)
        self.EditBox.Bind(wx.EVT_TEXT, self.OnText)
        self.EditBox.Bind(wx.EVT_TEXT_ENTER, self.OnTextEnter)
        self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
        self.SetMinSize(size)

    def OnText(self, event):
        # Re-broadcast the edit box's change as this widget's own event.
        wx.PostEvent(self, TextTyped())
        event.Skip()

    def OnTextEnter(self, event):
        wx.PostEvent(self, TextEnter())
        event.Skip()

    @staticmethod
    def OnEditSetFocus(event):
        # value = self.EditBox.GetValue()
        # if value == self.descriptiveText:
        # self.EditBox.ChangeValue("")
        event.Skip()

    def OnEditKillFocus(self, event):
        # Reset to the cleared state when focus leaves an empty box.
        if self.EditBox.GetValue() == "":
            self.Clear()
        event.Skip()

    def OnKeyPress(self, event):
        # Ctrl+Backspace deletes the previous word.
        if event.RawControlDown() and event.GetKeyCode() == wx.WXK_BACK:
            HandleCtrlBackspace(self.EditBox)
        else:
            event.Skip()

    def Clear(self):
        self.EditBox.Clear()
        # self.EditBox.ChangeValue(self.descriptiveText)

    def Focus(self):
        self.EditBox.SetFocus()

    def SetValue(self, value):
        self.EditBox.SetValue(value)

    def ChangeValue(self, value):
        # ChangeValue does not generate a text event (unlike SetValue).
        self.EditBox.ChangeValue(value)

    def GetValue(self):
        return self.EditBox.GetValue()

    def GetLineText(self, lineno):
        return self.EditBox.GetLineText(lineno)

    def HitTest(self, target, position, area):
        """Return True if *position* lies strictly inside the rectangle at
        *target* with size *area*."""
        x, y = target
        px, py = position
        aX, aY = area
        if (x < px < x + aX) and (y < py < y + aY):
            return True
        return False

    def GetButtonsPos(self):
        # [search, cancel] top-left positions, matching GetButtonsSize order.
        btnpos = [
            (self.searchButtonX, self.searchButtonY),
            (self.cancelButtonX, self.cancelButtonY),
        ]
        return btnpos

    def GetButtonsSize(self):
        # [search, cancel] sizes; (0, 0) when the bitmap is not set.
        btnsize = []
        if self.searchBitmap:
            sw = self.searchBitmap.GetWidth()
            sh = self.searchBitmap.GetHeight()
        else:
            sw = 0
            sh = 0
        if self.cancelBitmap:
            cw = self.cancelBitmap.GetWidth()
            ch = self.cancelBitmap.GetHeight()
        else:
            cw = 0
            ch = 0
        btnsize.append((sw, sh))
        btnsize.append((cw, ch))
        return btnsize

    def OnLeftDown(self, event):
        # Mark whichever owner-drawn button the press landed on as pressed.
        btnpos = self.GetButtonsPos()
        btnsize = self.GetButtonsSize()
        self.CaptureMouse()
        for btn in range(2):
            if self.HitTest(btnpos[btn], event.GetPosition(), btnsize[btn]):
                if btn == 0:
                    if not self.searchButtonPressed:
                        self.searchButtonPressed = True
                        self.Refresh()
                if btn == 1:
                    if not self.cancelButtonPressed:
                        self.cancelButtonPressed = True
                        self.Refresh()

    def OnLeftUp(self, event):
        # Fire the button event only when the release happens over the button
        # that was pressed; otherwise just un-press and redraw it.
        btnpos = self.GetButtonsPos()
        btnsize = self.GetButtonsSize()
        if self.HasCapture():
            self.ReleaseMouse()
        for btn in range(2):
            if self.HitTest(btnpos[btn], event.GetPosition(), btnsize[btn]):
                if btn == 0:
                    if self.searchButtonPressed:
                        self.searchButtonPressed = False
                        self.Refresh()
                        self.SetFocus()
                        wx.PostEvent(self, SearchButton())
                if btn == 1:
                    if self.cancelButtonPressed:
                        self.cancelButtonPressed = False
                        self.Refresh()
                        self.SetFocus()
                        wx.PostEvent(self, CancelButton())
            else:
                if btn == 0:
                    if self.searchButtonPressed:
                        self.searchButtonPressed = False
                        self.Refresh()
                if btn == 1:
                    if self.cancelButtonPressed:
                        self.cancelButtonPressed = False
                        self.Refresh()

    def OnSize(self, event):
        self.resized = True
        self.Refresh()

    def OnEraseBk(self, event):
        # Intentionally empty: all drawing happens in OnPaint (BG_STYLE_PAINT).
        pass

    def UpdateElementsPos(self, dc):
        """Recompute button and edit-box positions for the current size."""
        rect = self.GetRect()
        if self.searchBitmap and self.isSearchButtonVisible:
            sw = self.searchBitmap.GetWidth()
            sh = self.searchBitmap.GetHeight()
        else:
            sw = 0
            sh = 0
        if self.cancelBitmap and self.isCancelButtonVisible:
            cw = self.cancelBitmap.GetWidth()
            ch = self.cancelBitmap.GetHeight()
        else:
            cw = 0
            ch = 0
        cwidth = rect.width
        cheight = rect.height
        # NOTE(review): the /2 divisions below yield floats on Python 3; wx
        # position APIs generally expect ints — confirm or switch to //.
        self.searchButtonX = self.padding
        self.searchButtonY = (cheight - sh) / 2
        self.cancelButtonX = cwidth - self.padding - cw
        self.cancelButtonY = (cheight - ch) / 2
        self.editX = self.searchButtonX + self.padding + sw
        editWidth, editHeight = self.EditBox.GetSize()
        self.editY = (cheight - editHeight) / 2
        self.EditBox.SetPosition((self.editX, self.editY))
        self.EditBox.SetSize((self.cancelButtonX - self.padding - self.editX, -1))

    def OnPaint(self, event):
        dc = wx.AutoBufferedPaintDC(self)
        bkColor = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
        sepColor = colorUtils.GetSuitable(bkColor, 0.2)
        rect = self.GetRect()
        if self.resized:
            # Rebuild the cached gradient background only after a resize.
            self.bkBitmap = drawUtils.RenderGradientBar(
                bkColor, rect.width, rect.height, 0.1, 0.1, 0.2, 2
            )
            self.UpdateElementsPos(dc)
            self.resized = False
        dc.DrawBitmap(self.bkBitmap, 0, 0)
        if self.isSearchButtonVisible:
            if self.searchBitmap:
                # Offset the bitmap by 1px while pressed for a "pushed" look.
                if self.searchButtonPressed:
                    spad = 1
                else:
                    spad = 0
                dc.DrawBitmap(
                    self.searchBitmapShadow,
                    self.searchButtonX + 1,
                    self.searchButtonY + 1,
                )
                dc.DrawBitmap(
                    self.searchBitmap,
                    self.searchButtonX + spad,
                    self.searchButtonY + spad,
                )
        if self.isCancelButtonVisible:
            if self.cancelBitmap:
                if self.cancelButtonPressed:
                    cpad = 1
                else:
                    cpad = 0
                dc.DrawBitmap(
                    self.cancelBitmapShadow,
                    self.cancelButtonX + 1,
                    self.cancelButtonY + 1,
                )
                dc.DrawBitmap(
                    self.cancelBitmap,
                    self.cancelButtonX + cpad,
                    self.cancelButtonY + cpad,
                )
        # Single-pixel separator line along the bottom edge.
        dc.SetPen(wx.Pen(sepColor, 1))
        dc.DrawLine(0, rect.height - 1, rect.width, rect.height - 1)

    def SetSearchBitmap(self, bitmap):
        # Pre-render the drop shadow once per bitmap change.
        self.searchBitmap = bitmap
        self.searchBitmapShadow = drawUtils.CreateDropShadowBitmap(bitmap, 0.2)

    def SetCancelBitmap(self, bitmap):
        self.cancelBitmap = bitmap
        self.cancelBitmapShadow = drawUtils.CreateDropShadowBitmap(bitmap, 0.2)

    def IsSearchButtonVisible(self):
        return self.isSearchButtonVisible

    def IsCancelButtonVisible(self):
        return self.isCancelButtonVisible

    def ShowSearchButton(self, show=True):
        self.isSearchButtonVisible = show

    def ShowCancelButton(self, show=True):
        self.isCancelButtonVisible = show

    def SetDescriptiveText(self, text):
        self.descriptiveText = text

    def GetDescriptiveText(self):
        return self.descriptiveText
|
docs | ext_argparse | """
Convert an argparse parser to option directives.
Inspired by sphinxcontrib.autoprogram but with a few differences:
- Contains some simple pre-processing on the help messages to make
the Sphinx version a bit prettier.
"""
import argparse
import importlib
import re
from textwrap import dedent

from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import unchanged
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
# Regexes used by process_help() to massage argparse help text into reST.
# ":\n\n  " — a paragraph introducing an indented literal block
_block_re = re.compile(r":\n{2}\s{2}")
# "Default is X." sentences (boldified later)
_default_re = re.compile(r"Default is (.+)\.\n")
# "Note: ..." paragraphs (turned into .. note:: directives)
_note_re = re.compile(r"Note: (.*?)(?:\n\n|\n*$)", re.DOTALL)
# Lines eligible for option cross-linking (skips usage/option/example lines)
_option_line_re = re.compile(r"^(?!\s{2,}%\(prog\)s|\s{2,}--\w[\w-]*\w\b|Example: )(.+)$", re.MULTILINE)
# A long option, optionally followed by "=VALUE"
_option_re = re.compile(r"(?:^|(?<=\s))(?P<arg>--\w[\w-]*\w)(?P<val>=\w+)?\b")
# argparse's %(prog)s placeholder
_prog_re = re.compile(r"%\(prog\)s")
# argparse-escaped percent signs
_percent_re = re.compile(r"%%")
# `inline code` spans delimited by single backticks
_inline_code_block_re = re.compile(r"(?<!`)`([^`]+?)`")
# code following "Example: " at line start
_example_inline_code_block_re = re.compile(r"(?<=^Example: )(.+)$", re.MULTILINE)
def get_parser(module_name, attr):
    """Import *module_name* and return its *attr* as a parser.

    If the attribute is callable (a parser factory), call it and return the
    result; otherwise return the attribute itself.

    Uses importlib.import_module instead of the low-level ``__import__``
    hook, which the Python docs discourage for ordinary imports.
    """
    module = importlib.import_module(module_name)
    parser = getattr(module, attr)
    return parser if not callable(parser) else parser()
def indent(value, length=4):
    """Return *value* with every line prefixed by *length* spaces."""
    prefix = " " * length
    return "\n".join(prefix + line for line in value.splitlines())
class ArgparseDirective(Directive):
    """Sphinx directive rendering an argparse parser as ``option`` directives.

    Options:
        :module: dotted module path containing the parser
        :attr:   attribute name of the parser (or a parser factory)
    """

    has_content = True
    option_spec = {
        "module": unchanged,
        "attr": unchanged,
    }
    # Headline underline characters, one per nesting depth of argument groups.
    _headlines = ["^", "~"]

    def process_help(self, helptext):
        """Massage an argparse help string into reST.

        Adds inline-code markup, cross-links known options, converts
        literal blocks and "Note:" paragraphs, and boldifies defaults.
        """
        # Dedent the help to make sure we are always dealing with
        # non-indented text.
        helptext = dedent(helptext)
        helptext = _inline_code_block_re.sub(
            lambda m: (":code:`{0}`".format(m.group(1).replace("\\", "\\\\"))),
            helptext,
        )
        helptext = _example_inline_code_block_re.sub(r":code:`\1`", helptext)
        # Replace option references with links.
        # Do this before indenting blocks and notes.
        helptext = _option_line_re.sub(
            lambda m: _option_re.sub(
                lambda m2: f":option:`{m2['arg']}{m2['val'] or ''}`" if m2["arg"] in self._available_options else m2[0],
                m[1],
            ),
            helptext,
        )
        # Create simple blocks.
        helptext = _block_re.sub("::\n\n  ", helptext)
        # Boldify the default value.
        helptext = _default_re.sub(r"Default is: **\1**.\n", helptext)
        # Create note directives from "Note: " paragraphs.
        helptext = _note_re.sub(
            lambda m: ".. note::\n\n" + indent(m.group(1)) + "\n\n",
            helptext,
        )
        # workaround to replace %(prog)s with streamlink
        helptext = _prog_re.sub("streamlink", helptext)
        # fix escaped chars for percent-formatted argparse help strings
        helptext = _percent_re.sub("%", helptext)
        # create cross-links for the "Metadata variables" and "Plugins" sections
        helptext = re.sub(
            r"the \"Metadata variables\" section",
            'the ":ref:`Metadata variables <cli/metadata:Variables>`" section',
            helptext,
        )
        helptext = re.sub(
            r"the \"Plugins\" section",
            'the ":ref:`Plugins <plugins:Plugins>`" section',
            helptext,
        )
        return indent(helptext)

    def generate_group_rst(self, group):
        """Yield reST lines for every non-suppressed action in *group*."""
        for action in group._group_actions:
            # don't document suppressed parameters
            if action.help == argparse.SUPPRESS:
                continue
            metavar = action.metavar
            if isinstance(metavar, tuple):
                metavar = " ".join(metavar)
            options = []
            # parameter(s) with metavar
            if action.option_strings and metavar:
                for arg in action.option_strings:
                    # optional parameter value
                    if action.nargs == "?":
                        metavar = f"[{metavar}]"
                    options.append(f"{arg} {metavar}")
            # positional parameter
            elif metavar:
                options.append(metavar)
            # parameter(s) without metavar
            else:
                options += action.option_strings
            directive = ".. option:: "
            # Align continuation lines under the first option string.
            options = f"\n{' ' * len(directive)}".join(options)
            yield f"{directive}{options}"
            yield ""
            for line in self.process_help(action.help).split("\n"):
                yield line
            yield ""

    def generate_parser_rst(self, parser, parent=None, depth=0):
        """Recursively yield reST for the parser's nested argument groups."""
        if depth >= len(self._headlines):
            return
        for group in parser.NESTED_ARGUMENT_GROUPS[parent]:
            is_parent = group in parser.NESTED_ARGUMENT_GROUPS
            # Exclude empty groups
            if not group._group_actions and not is_parent:
                continue
            title = group.title
            yield ""
            yield title
            yield self._headlines[depth] * len(title)
            yield from self.generate_group_rst(group)
            if is_parent:
                yield ""
                yield from self.generate_parser_rst(parser, group, depth + 1)

    def run(self):
        """Load the parser, generate its reST and parse it into nodes."""
        module = self.options.get("module")
        attr = self.options.get("attr")
        parser = get_parser(module, attr)
        # Collect option strings (or dest names for positionals) so that
        # process_help only cross-links options that actually exist.
        self._available_options = []
        for action in parser._actions:
            # positional parameters have an empty option_strings list
            self._available_options += action.option_strings or [action.dest]
        node = nodes.section()
        node.document = self.state.document
        result = ViewList()
        for line in self.generate_parser_rst(parser):
            result.append(line, "argparse")
        nested_parse_with_titles(self.state, result, node)
        return node.children
def setup(app):
    """Register the ``argparse`` directive with Sphinx."""
    app.add_directive("argparse", ArgparseDirective)
|
core | Player | #####################################################################
# -*- coding: utf-8 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 Alarian #
# 2008 myfingershurt #
# 2008 Glorandwarf #
# 2008 QQStarS #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import logging
from functools import total_ordering
import pygame
from fofix.core import VFS, Config, Microphone
from fofix.core.Language import _
from fofix.game import song
log = logging.getLogger(__name__)
@total_ordering
class ConfigOption:
    """Deprecated: also in fofix.core.ConfigDefs.

    A labelled option value: compares by ``id`` (total_ordering fills in
    the remaining comparisons) and displays as its ``text``.
    """

    def __init__(self, id, text):
        self.id = id
        self.text = text

    def __eq__(self, other):
        return self.id == other.id

    def __lt__(self, other):
        return self.id < other.id

    def __str__(self):
        return self.text

    # repr intentionally mirrors str: both show the option's text.
    __repr__ = __str__
def sortOptionsByKey(opts):
    """Deprecated: also in fofix.core.ConfigDefs

    Wrap each (key, text) pair of *opts* in a ConfigOption so the
    resulting dict's values compare/sort by option key.
    """
    return {key: ConfigOption(key, text) for key, text in opts.items()}
# Redoing this, sir. Redoing this...
# Each of the four controller slots owns 20 unique one-bit flags; pressed
# controls are OR-ed into a single bitfield (Controls.flags), so any
# combination of controls can be tested with plain bitwise arithmetic.
CONTROL1 = [1 << n for n in range(20)]
CONTROL2 = [1 << n for n in range(20, 40)]
CONTROL3 = [1 << n for n in range(40, 60)]
CONTROL4 = [1 << n for n in range(60, 80)]
CONTROLS = [CONTROL1, CONTROL2, CONTROL3, CONTROL4]
# Indices into a CONTROLn list, one per logical control on a controller.
LEFT = 0
RIGHT = 1
UP = 2
DOWN = 3
START = 4
CANCEL = 5
KEY1 = 6
KEY1A = 7
KEY2 = 8
KEY2A = 9
KEY3 = 10
KEY3A = 11
KEY4 = 12
KEY4A = 13
KEY5 = 14
KEY5A = 15
ACTION1 = 16
ACTION2 = 17
STAR = 18
KILL = 19
# note that the drum controls map to guitar controls. Controller type is important!
DRUM1 = 8
DRUM1A = 9
DRUM2 = 10
DRUM2A = 11
DRUM3 = 12
DRUM3A = 13
DRUM4 = 14
DRUM4A = 15
DRUM5 = 6
DRUM5A = 7
DRUMBASS = 16
DRUMBASSA = 17
# Controller `type` config values accepted by each instrument family
# (-1 appears in both guitar and drum lists).
GUITARTYPES = [-1, 0, 1, 4]
DRUMTYPES = [-1, 2, 3]
MICTYPES = [5]
# Per-slot bit values for the navigation controls (list index == slot).
lefts = [CONTROL1[LEFT], CONTROL2[LEFT], CONTROL3[LEFT], CONTROL4[LEFT]]
rights = [CONTROL1[RIGHT], CONTROL2[RIGHT], CONTROL3[RIGHT], CONTROL4[RIGHT]]
ups = [CONTROL1[UP], CONTROL2[UP], CONTROL3[UP], CONTROL4[UP]]
downs = [CONTROL1[DOWN], CONTROL2[DOWN], CONTROL3[DOWN], CONTROL4[DOWN]]
starts = [CONTROL1[START], CONTROL2[START], CONTROL3[START], CONTROL4[START]]
cancels = [CONTROL1[CANCEL], CONTROL2[CANCEL], CONTROL3[CANCEL], CONTROL4[CANCEL]]
stars = [
    CONTROL1[STAR],
    CONTROL2[STAR],
    CONTROL3[STAR],
    CONTROL4[STAR],
]  # XXX: don't know if this is needed but...
# The lists below are (re)populated by Controls.__init__ for the
# currently configured controllers.
key1s = []
key2s = []
key3s = []
key4s = []
key5s = []
keysolos = []
action1s = []
action2s = []
kills = []  # XXX: don't know if this is needed but...
drum1s = []
drum2s = []
drum3s = []
drum4s = []
drum5s = []
bassdrums = []
# Human-readable name for each control index, per controller family.
guitarkeynames = [
    "Left",
    "Right",
    "Up",
    "Down",
    "Cancel",
    "Select",
    "Fret 1",
    "Solo Fret 1",
    "Fret 2",
    "Solo Fret 2",
    "Fret 3",
    "Solo Fret 3",
    "Fret 4",
    "Solo Fret 4",
    "Fret 5",
    "Solo Fret 5",
    "Action 1",
    "Action 2",
    "Star Key",
    "Effects Key",
]
drumkey4names = [
    "Left",
    "Right",
    "Up",
    "Down",
    "Cancel",
    "Select",
    "Drum 4",
    "Drum 4 B",
    "Drum 1",
    "Drum 1 B",
    "Drum 2",
    "Drum 2 B",
    "Drum 3",
    "Drum 3 B",
    "None",
    "None",
    "Bass Drum",
    "Bass Drum B",
    "Star Key",
    "None",
]
drumkey5names = [
    "Left",
    "Right",
    "Up",
    "Down",
    "Cancel",
    "Select",
    "Drum 5",
    "Drum 5 B",
    "Drum 1",
    "Drum 1 B",
    "Cymbal 2",
    "Cymbal 2 B",
    "Drum 3",
    "Drum 3 B",
    "Cymbal 4",
    "Cymbal 4 B",
    "Bass Drum",
    "Bass Drum B",
    "Star Key",
    "None",
]
# Menu-navigation control bits; filled in by Controls.__init__.
menuUp = []
menuDown = []
menuPrev = []
menuNext = []
menuYes = []
menuNo = []
# Convenience groupings of the fret/action/drum bits, one per slot.
CONTROLLER1KEYS = [
    CONTROL1[KEY1],
    CONTROL1[KEY2],
    CONTROL1[KEY3],
    CONTROL1[KEY4],
    CONTROL1[KEY5],
    CONTROL1[KEY1A],
    CONTROL1[KEY2A],
    CONTROL1[KEY3A],
    CONTROL1[KEY4A],
    CONTROL1[KEY5A],
]
CONTROLLER2KEYS = [
    CONTROL2[KEY1],
    CONTROL2[KEY2],
    CONTROL2[KEY3],
    CONTROL2[KEY4],
    CONTROL2[KEY5],
    CONTROL2[KEY1A],
    CONTROL2[KEY2A],
    CONTROL2[KEY3A],
    CONTROL2[KEY4A],
    CONTROL2[KEY5A],
]
CONTROLLER3KEYS = [
    CONTROL3[KEY1],
    CONTROL3[KEY2],
    CONTROL3[KEY3],
    CONTROL3[KEY4],
    CONTROL3[KEY5],
    CONTROL3[KEY1A],
    CONTROL3[KEY2A],
    CONTROL3[KEY3A],
    CONTROL3[KEY4A],
    CONTROL3[KEY5A],
]
CONTROLLER4KEYS = [
    CONTROL4[KEY1],
    CONTROL4[KEY2],
    CONTROL4[KEY3],
    CONTROL4[KEY4],
    CONTROL4[KEY5],
    CONTROL4[KEY1A],
    CONTROL4[KEY2A],
    CONTROL4[KEY3A],
    CONTROL4[KEY4A],
    CONTROL4[KEY5A],
]
CONTROLLER1ACTIONS = [CONTROL1[ACTION1], CONTROL1[ACTION2]]
CONTROLLER2ACTIONS = [CONTROL2[ACTION1], CONTROL2[ACTION2]]
CONTROLLER3ACTIONS = [CONTROL3[ACTION1], CONTROL3[ACTION2]]
CONTROLLER4ACTIONS = [CONTROL4[ACTION1], CONTROL4[ACTION2]]
CONTROLLER1DRUMS = [
    CONTROL1[DRUM1],
    CONTROL1[DRUM1A],
    CONTROL1[DRUM2],
    CONTROL1[DRUM2A],
    CONTROL1[DRUM3],
    CONTROL1[DRUM3A],
    CONTROL1[DRUM4],
    CONTROL1[DRUM4A],
    CONTROL1[DRUM5],
    CONTROL1[DRUM5A],
    CONTROL1[DRUMBASS],
    CONTROL1[DRUMBASSA],
]
CONTROLLER2DRUMS = [
    CONTROL2[DRUM1],
    CONTROL2[DRUM1A],
    CONTROL2[DRUM2],
    CONTROL2[DRUM2A],
    CONTROL2[DRUM3],
    CONTROL2[DRUM3A],
    CONTROL2[DRUM4],
    CONTROL2[DRUM4A],
    CONTROL2[DRUM5],
    CONTROL2[DRUM5A],
    CONTROL2[DRUMBASS],
    CONTROL2[DRUMBASSA],
]
CONTROLLER3DRUMS = [
    CONTROL3[DRUM1],
    CONTROL3[DRUM1A],
    CONTROL3[DRUM2],
    CONTROL3[DRUM2A],
    CONTROL3[DRUM3],
    CONTROL3[DRUM3A],
    CONTROL3[DRUM4],
    CONTROL3[DRUM4A],
    CONTROL3[DRUM5],
    CONTROL3[DRUM5A],
    CONTROL3[DRUMBASS],
    CONTROL3[DRUMBASSA],
]
CONTROLLER4DRUMS = [
    CONTROL4[DRUM1],
    CONTROL4[DRUM1A],
    CONTROL4[DRUM2],
    CONTROL4[DRUM2A],
    CONTROL4[DRUM3],
    CONTROL4[DRUM3A],
    CONTROL4[DRUM4],
    CONTROL4[DRUM4A],
    CONTROL4[DRUM5],
    CONTROL4[DRUM5A],
    CONTROL4[DRUMBASS],
    CONTROL4[DRUMBASSA],
]
SCORE_MULTIPLIER = [0, 10, 20, 30]
BASS_GROOVE_SCORE_MULTIPLIER = [0, 10, 20, 30, 40, 50]
# Per-player control bit lists; assigned by pluginControls().
player0 = []
player1 = []
player2 = []
player3 = []
playerkeys = []
# define configuration keys
# Controller key bindings.  Where text= is a tuple, each element is the
# label shown for a different controller family (guitar, 4-drum, 5-drum...).
Config.define("controller", "name", str, tipText=_("Name your controller."))
Config.define("controller", "key_left", str, "K_LEFT", text=_("Move left"))
Config.define("controller", "key_right", str, "K_RIGHT", text=_("Move right"))
Config.define("controller", "key_up", str, "K_UP", text=_("Move up"))
Config.define("controller", "key_down", str, "K_DOWN", text=_("Move down"))
Config.define(
    "controller", "key_action1", str, "K_RETURN", text=(_("Pick"), _("Bass Drum"))
)
Config.define(
    "controller",
    "key_action2",
    str,
    "K_RSHIFT",
    text=(_("Secondary Pick"), _("Bass Drum 2")),
)
Config.define(
    "controller", "key_1", str, "K_F1", text=(_("Fret #1"), _("Drum #4"), _("Drum #5"))
)
Config.define("controller", "key_2", str, "K_F2", text=(_("Fret #2"), _("Drum #1")))
Config.define(
    "controller",
    "key_3",
    str,
    "K_F3",
    text=(_("Fret #3"), _("Drum #2"), _("Cymbal #2")),
)
Config.define("controller", "key_4", str, "K_F4", text=(_("Fret #4"), _("Drum #3")))
Config.define(
    "controller", "key_5", str, "K_F5", text=(_("Fret #5"), None, _("Cymbal #4"))
)
Config.define(
    "controller",
    "key_1a",
    str,
    "K_F6",
    text=(
        _("Solo Fret #1"),
        _("Solo Key"),
        _("Drum #4"),
        _("Drum #5"),
        _("Analog Slider"),
    ),
)
Config.define(
    "controller", "key_2a", str, "K_F7", text=(_("Solo Fret #2"), _("Drum #1"))
)
Config.define(
    "controller",
    "key_3a",
    str,
    "K_F8",
    text=(_("Solo Fret #3"), _("Drum #2"), _("Cymbal #2")),
)
Config.define(
    "controller", "key_4a", str, "K_F9", text=(_("Solo Fret #4"), _("Drum #3"))
)
Config.define(
    "controller", "key_5a", str, "K_F10", text=(_("Solo Fret #5"), None, _("Cymbal #4"))
)
Config.define("controller", "key_cancel", str, "K_ESCAPE", text=_("Cancel"))
Config.define("controller", "key_star", str, "K_PAGEDOWN", text=_("StarPower"))
Config.define("controller", "key_kill", str, "K_PAGEUP", text=_("Whammy"))
Config.define("controller", "key_start", str, "K_LCTRL", text=_("Start"))
Config.define(
    "controller",
    "two_chord_max",
    int,
    0,
    text=_("Two-Chord Max"),
    options={0: _("Off"), 1: _("On")},
    tipText=_("When enabled, the highest notes in large note chords are auto-played."),
)
Config.define(
    "controller",
    "type",
    int,
    0,
    text=_("Controller Type"),
    options=sortOptionsByKey(
        {
            0: _("Standard Guitar"),
            1: _("Solo Shift Guitar"),
            2: _("Drum Set (4-Drum)"),
            4: _("Analog Slide Guitar"),
            5: _("Microphone"),
        }
    ),
    tipText=_(
        "'Standard Guitar' is for keyboards and pre-WT GH-series guitars. 'Solo Shift Guitar' is for RB-series guitars and keyboards who want to use a shift key for solo frets. 'Analog Slide Guitar' is for guitars with an analog slider bar."
    ),
)  # , 3: _("Drum Set (3-Drum 2-Cymbal)")
# Analog input handling (star power, drums, slider, whammy).
Config.define(
    "controller",
    "analog_sp",
    int,
    0,
    text=_("Analog SP"),
    options={0: _("Disabled"), 1: _("Enabled")},
    tipText=_("Enables analog SP (as in the XBOX Xplorer controller.)"),
)
Config.define(
    "controller",
    "analog_sp_threshold",
    int,
    60,
    text=_("Analog SP Threshold"),
    options=dict([(n, n) for n in range(10, 101, 10)]),
    tipText=_("Sets a threshold level for activating SP in analog mode."),
)
Config.define(
    "controller",
    "analog_sp_sensitivity",
    int,
    4,
    text=_("Analog SP Sensitivity"),
    options=dict([(n, n + 1) for n in range(10)]),
    tipText=_("Sets the sensitivity for activating SP in analog mode."),
)
Config.define(
    "controller",
    "analog_drum",
    int,
    0,
    text=_("Analog Drums"),
    options={0: _("Disabled"), 1: _("PS2/PS3/Wii"), 2: _("XBOX"), 3: _("XBOX Inv.")},
    tipText=_("Enables analog drums as in RB2 and GH drumsets."),
)  # FIXME: Analog Drum
Config.define(
    "controller",
    "analog_slide",
    int,
    0,
    text=_("Analog Slider"),
    options={0: _("Default"), 1: _("Inverted")},
    tipText=_("Experimental testing for the analog slide mode."),
)
Config.define(
    "controller",
    "analog_kill",
    int,
    0,
    text=_("Analog Effects"),
    options={0: _("Disabled"), 1: _("PS2/PS3/Wii"), 2: _("XBOX"), 3: _("XBOX Inv.")},
    tipText=_(
        "Enables analog whammy bar. Set to the system your controller was designed for."
    ),
)
Config.define(
    "controller",
    "analog_fx",
    int,
    0,
    text=_("Sound FX Switch"),
    options={0: _("Switch"), 1: _("Cycle")},
)  # FIXME: Analog FX
# Microphone controller settings.
Config.define(
    "controller",
    "mic_device",
    int,
    -1,
    text=_("Microphone Device"),
    options=Microphone.getAvailableMics(),
)
Config.define(
    "controller",
    "mic_tap_sensitivity",
    int,
    5,
    text=_("Tap Sensitivity"),
    options=dict((n, n) for n in range(1, 21)),
    tipText=_("Sets how sensitive the microphone is to being tapped."),
)
Config.define(
    "controller",
    "mic_passthrough_volume",
    float,
    0.0,
    text=_("Passthrough Volume"),
    options=dict((n / 100.0, n) for n in range(101)),
    tipText=_("Sets how loud you hear yourself singing."),
)
# Per-player preferences; these mirror columns of the player cache DB.
Config.define("player", "name", str, "")
Config.define("player", "difficulty", int, song.MED_DIF)
Config.define("player", "part", int, song.GUITAR_PART)
Config.define("player", "neck", str, "")
# NOTE(review): declared as str but the default is the int 2 — confirm
# which type the config loader actually expects here.
Config.define(
    "player",
    "necktype",
    str,
    2,
    text=_("Neck Type"),
    options={0: _("Default Neck"), 1: _("Theme Neck"), 2: _("Specific Neck")},
)
Config.define(
    "player",
    "leftymode",
    int,
    0,
    text=_("Lefty Mode"),
    options={0: _("Off"), 1: _("On")},
)
Config.define(
    "player", "drumflip", int, 0, text=_("Drum Flip"), options={0: _("Off"), 1: _("On")}
)
Config.define(
    "player",
    "two_chord_max",
    int,
    0,
    text=_("Two-Chord Max"),
    options={0: _("Off"), 1: _("On")},
)
Config.define(
    "player",
    "assist_mode",
    int,
    0,
    text=_("Assist Mode"),
    options={0: _("Off"), 1: _("Easy Assist"), 2: _("Medium Assist")},
)
Config.define(
    "player",
    "auto_kick",
    int,
    0,
    text=_("Auto Kick"),
    options={0: _("Off"), 1: _("On")},
)
Config.define("player", "controller", int, 0)
# Overlay the users folder in /data with one in the user data folder.
VFS.mount(VFS.resolveRead("/data/users"), "users")
VFS.mountWritable(VFS.resolveWrite("/userdata/users"), "users")
# Virtual (VFS) directories holding controller and player ini files.
controlpath = "/users/controllers"
playerpath = "/users/players"
def _makeControllerIniName(name):
    """Turn a controller name into a virtual path to the appropriate ini."""
    return "{}/{}.ini".format(controlpath, name)
def _makePlayerIniName(name):
    """Turn a player name into a virtual path to the appropriate ini."""
    return "{}/{}.ini".format(playerpath, name)
# Module-level controller/player state, filled in by loadControls() /
# loadPlayers() and Controls.
control0 = None
control1 = None
control2 = None
control3 = None
controlDict = {}
controllerDict = {}
playername = []
playerpref = []
playerstat = []
# Load the player database and check that it is fully initialized.
_SCHEMA_VERSION = 3
_playerDB = VFS.openSqlite3("%s/%s" % (playerpath, "FoFiX-players.cache"))
_updateTables = 0
try:
    v = _playerDB.execute(
        "SELECT `value` FROM `config` WHERE `key` = 'version'"
    ).fetchone()[0]
    if int(v) != _SCHEMA_VERSION:
        _updateTables = 2  # an old version. We don't want to just burn old tables.
except Exception:
    _updateTables = 1  # no good table
# needs to handle old versions eventually.
# NOTE(review): despite the intent above, both cases currently drop every
# table and recreate the schema from scratch — old data is not migrated.
if _updateTables > 0:
    for tbl in _playerDB.execute(
        "SELECT `name` FROM `sqlite_master` WHERE `type` = 'table'"
    ).fetchall():
        # `tbl` is a one-element row tuple, so the single %s consumes it.
        _playerDB.execute("DROP TABLE `%s`" % tbl)
    _playerDB.commit()
    _playerDB.execute("VACUUM")
    _playerDB.execute("CREATE TABLE `config` (`key` STRING UNIQUE, `value` STRING)")
    _playerDB.execute(
        "CREATE TABLE `players` (`name` STRING UNIQUE, `lefty` INT, `drumflip` INT, `autokick` INT, `assist` INT, `twochord` INT, `necktype` INT, `neck` STRING, \
        `part` INT, `difficulty` INT, `upname` STRING, `control` INT, `changed` INT, `loaded` INT)"
    )
    _playerDB.execute(
        "CREATE TABLE `stats` (`song` STRING, `hash` STRING, `player` STRING)"
    )
    _playerDB.execute(
        "INSERT INTO `config` (`key`, `value`) VALUES (?, ?)",
        ("version", _SCHEMA_VERSION),
    )
    _playerDB.commit()
def resetStats(player=None):
    """Zero every per-song stat counter, or only *player*'s rows if given.

    NOTE(review): the `stats` table created for schema version 3 above
    only has `song`, `hash` and `player` columns — none of the `hit`,
    `notes`, `sc*`/`ha*` columns updated here, and the per-player branch
    filters on `name` rather than `player`.  This function looks out of
    sync with the current schema; confirm before relying on it.
    """
    if player is None:
        _playerDB.execute(
            "UPDATE `stats` SET `hit` = 0, `notes` = 0, `sc1` = 0, `sc2` = 0, `sc3` = 0, `sc4` = 0, `sc5` = 0, `ha1` = 0, `ha2` = 0, `ha3` = 0, `ha4` = 0, `ha5` = 0"
        )
    else:
        _playerDB.execute(
            "UPDATE `stats` SET `hit` = 0, `notes` = 0, `sc1` = 0, `sc2` = 0, `sc3` = 0, `sc4` = 0, `sc5` = 0, `ha1` = 0, `ha2` = 0, `ha3` = 0, `ha4` = 0, `ha5` = 0 WHERE `name` = ?",
            [player],
        )
    _playerDB.commit()
def loadPlayers():
    """Rebuild the module-level player lists from the ini files on disk.

    Scans the players directory for ``<name>.ini`` files (skipping
    ``default.ini``), fills ``playername`` with the bare names and
    ``playerpref`` with each player's cached preference tuple.  Players
    missing from the cache DB are imported from their ini file, or
    inserted with defaults when the ini cannot be read, and marked as
    loaded.  Returns 1 unconditionally.
    """
    global playername, playerpref, playerstat
    playername = []
    playerpref = []
    playerstat = []
    for name in VFS.listdir(playerpath):
        if name == "default.ini":
            continue
        if not (name.lower().endswith(".ini") and len(name) > 4):
            continue
        playername.append(name[:-4])
        pref = _playerDB.execute(
            "SELECT * FROM `players` WHERE `name` = ?", [playername[-1]]
        ).fetchone()
        try:
            # Cached row present: columns 1..10 are the preference fields
            # (lefty, drumflip, autokick, assist, twochord, necktype,
            # neck, part, difficulty, upname).
            if len(pref) == 14:
                playerpref.append(tuple(pref[1:11]))
        except TypeError:
            # pref is None -> not cached yet; import from the ini file.
            try:
                c = Config.load(
                    VFS.resolveRead(_makePlayerIniName(name[:-4])), type=2
                )
                lefty = c.get("player", "leftymode")
                drumf = c.get("player", "drumflip")
                autok = c.get("player", "auto_kick")
                assist = c.get("player", "assist_mode")
                twoch = c.get("player", "two_chord_max")
                neck = c.get("player", "neck")
                neckt = c.get("player", "necktype")
                part = c.get("player", "part")
                diff = c.get("player", "difficulty")
                upname = c.get("player", "name")
                control = c.get("player", "controller")
                del c
                _playerDB.execute(
                    "INSERT INTO `players` VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 0, 1)",
                    [
                        playername[-1],
                        lefty,
                        drumf,
                        autok,
                        assist,
                        twoch,
                        neckt,
                        neck,
                        part,
                        diff,
                        upname,
                        control,
                    ],
                )
                playerpref.append(
                    (
                        lefty,
                        drumf,
                        autok,
                        assist,
                        twoch,
                        neckt,
                        neck,
                        part,
                        diff,
                        upname,
                    )
                )
            except IOError:
                # Unreadable ini: insert a defaults row.  Empty strings
                # must be SQL string literals ('') — the previous
                # backtick form (``) is identifier quoting in SQLite and
                # made this statement invalid.
                _playerDB.execute(
                    "INSERT INTO `players` VALUES (?, 0, 0, 0, 0, 0, 0, '', 0, 2, '', 0, 0, 1)",
                    [playername[-1]],
                )
                playerpref.append((0, 0, 0, 0, 0, 0, "", 0, 2, "", 0))
        _playerDB.execute(
            "UPDATE `players` SET `loaded` = 1 WHERE `name` = ?", [playername[-1]]
        )
    _playerDB.commit()
    return 1
def _writePlayerPrefs(pref):
    """Write one player's DB row back to their ini file.

    Row column order: name, lefty, drumflip, autokick, assist, twochord,
    necktype, neck, part, difficulty, upname, control.
    """
    c = Config.load(VFS.resolveWrite(_makePlayerIniName(str(pref[0]))), type=2)
    c.set("player", "leftymode", int(pref[1]))
    c.set("player", "drumflip", int(pref[2]))
    c.set("player", "auto_kick", int(pref[3]))
    c.set("player", "assist_mode", int(pref[4]))
    c.set("player", "two_chord_max", int(pref[5]))
    c.set("player", "necktype", int(pref[6]))
    c.set("player", "neck", str(pref[7]))
    c.set("player", "part", int(pref[8]))
    c.set("player", "difficulty", int(pref[9]))
    c.set("player", "name", str(pref[10]))
    c.set("player", "controller", int(pref[11]))
    del c


def savePlayers():
    """Flush every changed player row from the cache DB to its ini file,
    then clear the `changed` and `loaded` flags.

    If writing fails (typically because the ini does not exist yet), an
    empty ini file is created and the write is retried.  Previously the
    13-line write sequence was duplicated verbatim in both paths; it now
    lives in _writePlayerPrefs().
    """
    for pref in _playerDB.execute(
        "SELECT * FROM `players` WHERE `changed` = 1"
    ).fetchall():
        try:
            _writePlayerPrefs(pref)
        except Exception:
            # The ini likely doesn't exist yet — create it, then retry.
            c = VFS.open(_makePlayerIniName(str(pref[0])), "w")
            c.close()
            _writePlayerPrefs(pref)
        _playerDB.execute(
            "UPDATE `players` SET `changed` = 0 WHERE `name` = ?", [pref[0]]
        )
    _playerDB.execute("UPDATE `players` SET `loaded` = 0")
    _playerDB.commit()
def updatePlayer(player, pref):
    """Create or update a player row, then persist and reload all players.

    `player` is the current (old) name; `pref` is a list starting with
    the (possibly new) name followed by the preference values.  When the
    name changed, the ini file is renamed to match.
    """
    a = _playerDB.execute(
        "SELECT * FROM `players` WHERE `name` = ?", [player]
    ).fetchone()
    try:
        # fetchone() returns None when no row exists; indexing it then
        # raises, leaving `a` as None in the except branch.
        a = a[0]
    except Exception:
        a = None
    if a is not None:
        _playerDB.execute(
            "UPDATE `players` SET `name` = ?, `lefty` = ?, `drumflip` = ?, `autokick` = ?, `assist` = ?, `twochord` = ?, `necktype` = ?, `neck` = ?, \
            `part` = 0, `difficulty` = 2, `upname` = ?, `control` = 0, `changed` = 1, `loaded` = 1 WHERE `name` = ?",
            pref + [player],
        )
        if player != pref[0]:
            VFS.rename(_makePlayerIniName(player), _makePlayerIniName(pref[0]))
    else:
        _playerDB.execute(
            "INSERT INTO `players` VALUES (?, ?, ?, ?, ?, ?, ?, ?, 0, 2, ?, 0, 1, 1)",
            pref,
        )
    _playerDB.commit()
    savePlayers()
    loadPlayers()
def deletePlayer(player):
    """Remove a player's DB row, ini file and avatar image, then refresh
    the player lists."""
    _playerDB.execute("DELETE FROM `players` WHERE `name` = ?", [player])
    VFS.unlink(_makePlayerIniName(player))
    avatar = "%s/%s.png" % (playerpath, player)
    if VFS.isfile(avatar):
        VFS.unlink(avatar)
    savePlayers()
    loadPlayers()
def loadControls():
    """(Re)build ``controllerDict`` from the controller ini files on disk
    and (re)declare the four ``game.controlN`` config keys.

    The three ``default?.ini`` files are hidden behind translated
    "Default ..." entries; the default-microphone entry is only offered
    when microphone support is available.
    """
    global controllerDict
    controllers = []
    default = ["defaultd.ini", "defaultg.ini", "defaultm.ini"]
    for name in VFS.listdir(controlpath):
        if name.lower().endswith(".ini") and len(name) > 4:
            if name in default:
                continue
            controllers.append(name[:-4])
    controllerDict = {str(c): c for c in controllers}
    controllerDict["defaultg"] = _("Default Guitar")
    controllerDict["defaultd"] = _("Default Drum")
    defMic = "None"
    if Microphone.supported:
        controllerDict["defaultm"] = _("Default Microphone")
        defMic = "defaultm"
    tsControl = _("Controller %d")
    tsControlTip = _("Select the controller for slot %d")
    Config.define(
        "game",
        "control0",
        str,
        "defaultg",
        text=tsControl % 1,
        options=controllerDict,
        tipText=tsControlTip % 1,
    )
    # "None" is only added after slot 0 is defined, so the first slot
    # always has an assigned controller.
    controllerDict[_("None")] = "None"
    Config.define(
        "game",
        "control1",
        str,
        "defaultd",
        text=tsControl % 2,
        options=controllerDict,
        tipText=tsControlTip % 2,
    )
    Config.define(
        "game",
        "control2",
        str,
        defMic,
        text=tsControl % 3,
        options=controllerDict,
        tipText=tsControlTip % 3,
    )
    Config.define(
        "game",
        "control3",
        str,
        "None",
        text=tsControl % 4,
        options=controllerDict,
        tipText=tsControlTip % 4,
    )
def deleteControl(control):
    """Delete a controller ini and unassign it from any slot using it.

    Slot 0 falls back to the default guitar; other slots fall back to
    "None".  NOTE(review): once slot 0 has fallen back to "defaultg",
    any later slot already set to "defaultg" is also cleared —
    presumably to avoid assigning the same controller twice; confirm
    that this is the intended behavior.
    """
    VFS.unlink(_makeControllerIniName(control))
    defaultUsed = -1
    for i in range(4):
        get = Config.get("game", "control%d" % i)
        if get == control:
            if i == 0:
                Config.set("game", "control%d" % i, "defaultg")
                defaultUsed = 0
            else:
                Config.set("game", "control%d" % i, "None")
        if get == "defaultg" and defaultUsed > -1:
            Config.set("game", "control%d" % i, "None")
    loadControls()
def renameControl(control, newname):
    """Rename a controller ini and update any slot that referenced it."""
    VFS.rename(_makeControllerIniName(control), _makeControllerIniName(newname))
    for slot in range(4):
        option = "control%d" % slot
        if Config.get("game", option) == control:
            Config.set("game", option, newname)
    loadControls()
def pluginControls(activeControls):
    """Bind each active player slot to its controller's bit list.

    *activeControls* maps player index -> controller slot (0-3); any
    other value leaves that player's entry as None.
    """
    global playerkeys, player0, player1, player2, player3
    playerkeys = [None] * 4
    for slot, ctrl in enumerate(activeControls):
        if ctrl in (0, 1, 2, 3):
            playerkeys[slot] = CONTROLS[ctrl]
    player0, player1, player2, player3 = playerkeys
class Controls:
    """Aggregate of the up-to-four active controller configurations.

    Loads each slot's controller ini (falling back to the default
    guitar/drum/microphone inis when slot 0 has no valid ini), rebuilds
    the module-level control-bit lists (key1s, drum1s, menuUp, ...), and
    builds ``controlMapping`` from keyboard/pygame key codes to control
    bits.  The pressed state of every control is kept in the single
    integer bitfield ``self.flags``.
    """

    def __init__(self):
        log.debug("Controls class init (Player.py)...")
        # Controller names configured for the four slots.
        self.controls = []
        self.controls.append(Config.get("game", "control0"))
        self.controls.append(Config.get("game", "control1"))
        self.controls.append(Config.get("game", "control2"))
        self.controls.append(Config.get("game", "control3"))
        self.config = []
        self.controlList = []
        self.maxplayers = 0
        self.guitars = 0
        self.drums = 0
        self.mics = 0
        # Human-readable conflict messages collected by checkMapping().
        self.overlap = []
        self.p2Nav = Config.get("game", "p2_menu_nav")
        self.drumNav = Config.get("game", "drum_navigation")
        self.keyCheckerMode = Config.get("game", "key_checker_mode")
        # Load each slot's ini; a slot whose ini is missing (or set to
        # "None") gets a None config and is reset in the game config.
        if VFS.isfile(_makeControllerIniName(self.controls[0])):
            self.config.append(
                Config.load(
                    VFS.resolveRead(_makeControllerIniName(self.controls[0])), type=1
                )
            )
            if (
                VFS.isfile(_makeControllerIniName(self.controls[1]))
                and self.controls[1] != "None"
            ):
                self.config.append(
                    Config.load(
                        VFS.resolveRead(_makeControllerIniName(self.controls[1])),
                        type=1,
                    )
                )
            else:
                self.config.append(None)
                Config.set("game", "control1", None)
                self.controls[1] = "None"
            if (
                VFS.isfile(_makeControllerIniName(self.controls[2]))
                and self.controls[2] != "None"
            ):
                self.config.append(
                    Config.load(
                        VFS.resolveRead(_makeControllerIniName(self.controls[2])),
                        type=1,
                    )
                )
            else:
                self.config.append(None)
                Config.set("game", "control2", None)
                self.controls[2] = "None"
            if (
                VFS.isfile(_makeControllerIniName(self.controls[3]))
                and self.controls[3] != "None"
            ):
                self.config.append(
                    Config.load(
                        VFS.resolveRead(_makeControllerIniName(self.controls[3])),
                        type=1,
                    )
                )
            else:
                self.config.append(None)
                Config.set("game", "control3", None)
                self.controls[3] = "None"
        else:
            # Slot 0's ini is missing: fall back to the bundled defaults
            # (guitar, drums and — when supported — microphone).
            confM = None
            if Microphone.supported:
                confM = Config.load(
                    VFS.resolveRead(_makeControllerIniName("defaultm")), type=1
                )
            self.config.append(
                Config.load(VFS.resolveRead(_makeControllerIniName("defaultg")), type=1)
            )
            self.config.append(
                Config.load(VFS.resolveRead(_makeControllerIniName("defaultd")), type=1)
            )
            self.config.append(confM)
            self.config.append(None)
            Config.set("game", "control0", "defaultg")
            Config.set("game", "control1", "defaultd")
            self.controls = ["defaultg", "defaultd"]
            if confM is not None:
                Config.set("game", "control2", "defaultm")
                self.controls.append("defaultm")
            else:
                Config.set("game", "control2", None)
                self.controls.append("None")
            Config.set("game", "control3", None)
            self.controls.append("None")
        # Per-slot settings read from each controller ini (None for an
        # unused slot).
        self.type = []
        self.analogKill = []
        self.analogSP = []
        self.analogSPThresh = []
        self.analogSPSense = []
        self.analogDrum = []  # FIXME: Analog Drum
        self.analogSlide = []
        self.analogFX = []  # FIXME: Analog FX
        self.twoChord = []
        self.micDevice = []
        self.micTapSensitivity = []
        self.micPassthroughVolume = []
        self.flags = 0
        for i in self.config:
            if i:
                type = i.get("controller", "type")
                if type == 5:
                    self.mics += 1
                elif type > 1:
                    # NOTE(review): types 2/3 are drum sets and 0/1 are
                    # guitars per DRUMTYPES/GUITARTYPES above, yet this
                    # branch counts type > 1 as guitars and the rest as
                    # drums — the counters look swapped; confirm intent.
                    self.guitars += 1
                else:
                    self.drums += 1
                self.type.append(type)
                self.analogKill.append(i.get("controller", "analog_kill"))
                self.analogSP.append(i.get("controller", "analog_sp"))
                self.analogSPThresh.append(i.get("controller", "analog_sp_threshold"))
                self.analogSPSense.append(i.get("controller", "analog_sp_sensitivity"))
                self.analogDrum.append(
                    i.get("controller", "analog_drum")
                )  # FIXME: Analog Drum
                self.analogSlide.append(i.get("controller", "analog_slide"))
                self.analogFX.append(
                    i.get("controller", "analog_fx")
                )  # FIXME: Analog FX
                self.micDevice.append(i.get("controller", "mic_device"))
                self.micTapSensitivity.append(
                    i.get("controller", "mic_tap_sensitivity")
                )
                self.micPassthroughVolume.append(
                    i.get("controller", "mic_passthrough_volume")
                )
                self.twoChord.append(i.get("controller", "two_chord_max"))
                self.controlList.append(i.get("controller", "name"))
            else:
                self.type.append(None)
                self.analogKill.append(None)
                self.analogSP.append(None)
                self.analogFX.append(None)  # FIXME: Analog FX
                self.twoChord.append(None)

        def keycode(name, config):
            # Resolve a binding to a comparable key code: "None" when
            # unbound/unconfigured, an int scancode, or a pygame K_*
            # constant looked up by name.
            if not config:
                return "None"
            k = config.get("controller", name)
            if k == "None":
                return "None"
            try:
                return int(k)
            except Exception:
                return getattr(pygame, k)

        self.controlMapping = {}
        # The module-level binding lists are rebuilt from scratch for
        # the currently active controllers.
        global menuUp, menuDown, menuNext, menuPrev, menuYes, menuNo
        global drum1s, drum2s, drum3s, drum4s, drum5s, bassdrums
        global key1s, key2s, key3s, key4s, key5s, keysolos, action1s, action2s, kills
        menuUp = []
        menuDown = []
        menuNext = []
        menuPrev = []
        menuYes = []
        menuNo = []
        drum1s = []
        drum2s = []
        drum3s = []
        drum4s = []
        drum5s = []
        bassdrums = []
        key1s = []
        key2s = []
        key3s = []
        key4s = []
        key5s = []
        keysolos = []
        action1s = []
        action2s = []
        kills = []
        for i, config in enumerate(self.config):
            if self.type[i] is None:
                # self.type[i] = -1
                controlMapping = {}
                continue
            if self.type[i] in DRUMTYPES:  # drum set
                drum1s.extend([CONTROLS[i][DRUM1], CONTROLS[i][DRUM1A]])
                drum2s.extend([CONTROLS[i][DRUM2], CONTROLS[i][DRUM2A]])
                drum3s.extend([CONTROLS[i][DRUM3], CONTROLS[i][DRUM3A]])
                drum4s.extend([CONTROLS[i][DRUM4], CONTROLS[i][DRUM4A]])
                drum5s.extend([CONTROLS[i][DRUM5], CONTROLS[i][DRUM5A]])
                bassdrums.extend([CONTROLS[i][DRUMBASS], CONTROLS[i][DRUMBASSA]])
                if self.p2Nav == 1 or (self.p2Nav == 0 and i == 0):
                    if self.drumNav:
                        menuUp.extend([CONTROLS[i][DRUM2], CONTROLS[i][DRUM2A]])
                        if self.type[i] == 3:
                            menuDown.extend([CONTROLS[i][DRUM4], CONTROLS[i][DRUM4A]])
                        else:
                            menuDown.extend([CONTROLS[i][DRUM3], CONTROLS[i][DRUM3A]])
                        menuYes.extend([CONTROLS[i][DRUM5], CONTROLS[i][DRUM5A]])
                        menuNo.extend([CONTROLS[i][DRUM1], CONTROLS[i][DRUM1A]])
                    menuYes.append(CONTROLS[i][START])
                    menuNo.append(CONTROLS[i][CANCEL])
                    menuUp.append(CONTROLS[i][UP])
                    menuDown.append(CONTROLS[i][DOWN])
                    menuNext.append(CONTROLS[i][RIGHT])
                    menuPrev.append(CONTROLS[i][LEFT])
            elif self.type[i] in MICTYPES:  # it's a mic
                if self.p2Nav == 1 or (self.p2Nav == 0 and i == 0):
                    menuUp.append(CONTROLS[i][UP])
                    menuDown.append(CONTROLS[i][DOWN])
                    menuNext.append(CONTROLS[i][RIGHT])
                    menuPrev.append(CONTROLS[i][LEFT])
                menuYes.append(CONTROLS[i][START])
                menuNo.append(CONTROLS[i][CANCEL])
            elif self.type[i] in GUITARTYPES:
                # Standard guitars (type 0) use the solo-A key as a
                # second fret 1; other guitar types reserve it.
                if self.type[i] == 0:
                    key1s.extend([CONTROLS[i][KEY1], CONTROLS[i][KEY1A]])
                else:
                    key1s.extend([CONTROLS[i][KEY1]])
                key2s.extend([CONTROLS[i][KEY2], CONTROLS[i][KEY2A]])
                key3s.extend([CONTROLS[i][KEY3], CONTROLS[i][KEY3A]])
                key4s.extend([CONTROLS[i][KEY4], CONTROLS[i][KEY4A]])
                key5s.extend([CONTROLS[i][KEY5], CONTROLS[i][KEY5A]])
                keysolos.extend(
                    [
                        CONTROLS[i][KEY1A],
                        CONTROLS[i][KEY2A],
                        CONTROLS[i][KEY3A],
                        CONTROLS[i][KEY4A],
                        CONTROLS[i][KEY5A],
                    ]
                )
                action1s.extend([CONTROLS[i][ACTION1]])
                action2s.extend([CONTROLS[i][ACTION2]])
                kills.extend([CONTROLS[i][KILL]])
                if self.p2Nav == 1 or (self.p2Nav == 0 and i == 0):
                    menuUp.extend([CONTROLS[i][ACTION1], CONTROLS[i][UP]])
                    menuDown.extend([CONTROLS[i][ACTION2], CONTROLS[i][DOWN]])
                    menuNext.extend(
                        [CONTROLS[i][RIGHT], CONTROLS[i][KEY4], CONTROLS[i][KEY4A]]
                    )
                    menuPrev.extend(
                        [CONTROLS[i][LEFT], CONTROLS[i][KEY3], CONTROLS[i][KEY3A]]
                    )
                menuYes.extend(
                    [CONTROLS[i][KEY1], CONTROLS[i][KEY1A], CONTROLS[i][START]]
                )
                menuNo.extend(
                    [CONTROLS[i][KEY2], CONTROLS[i][KEY2A], CONTROLS[i][CANCEL]]
                )
            # Build the key-code -> control-bit mapping for this slot.
            if self.type[i] == 3:
                # drums do not need special declarations!
                controlMapping = {
                    keycode("key_left", config): CONTROLS[i][LEFT],
                    keycode("key_right", config): CONTROLS[i][RIGHT],
                    keycode("key_up", config): CONTROLS[i][UP],
                    keycode("key_down", config): CONTROLS[i][DOWN],
                    keycode("key_star", config): CONTROLS[i][STAR],
                    keycode("key_cancel", config): CONTROLS[i][CANCEL],
                    keycode("key_1a", config): CONTROLS[i][
                        DRUM5A
                    ],  # order is important. This minimizes key conflicts.
                    keycode("key_2a", config): CONTROLS[i][DRUM1A],
                    keycode("key_3a", config): CONTROLS[i][DRUM2A],
                    keycode("key_4a", config): CONTROLS[i][DRUM3A],
                    keycode("key_5a", config): CONTROLS[i][DRUM4A],
                    keycode("key_action2", config): CONTROLS[i][DRUMBASSA],
                    keycode("key_1", config): CONTROLS[i][DRUM5],
                    keycode("key_2", config): CONTROLS[i][DRUM1],
                    keycode("key_3", config): CONTROLS[i][DRUM2],
                    keycode("key_4", config): CONTROLS[i][DRUM3],
                    keycode("key_5", config): CONTROLS[i][DRUM4],
                    keycode("key_action1", config): CONTROLS[i][DRUMBASS],
                    keycode("key_start", config): CONTROLS[i][START],
                }
            elif self.type[i] == 2:
                # drums do not need special declarations!
                controlMapping = {
                    keycode("key_left", config): CONTROLS[i][LEFT],
                    keycode("key_right", config): CONTROLS[i][RIGHT],
                    keycode("key_up", config): CONTROLS[i][UP],
                    keycode("key_down", config): CONTROLS[i][DOWN],
                    keycode("key_star", config): CONTROLS[i][STAR],
                    keycode("key_cancel", config): CONTROLS[i][CANCEL],
                    keycode("key_1a", config): CONTROLS[i][
                        DRUM5A
                    ],  # order is important. This minimizes key conflicts.
                    keycode("key_2a", config): CONTROLS[i][DRUM1A],
                    keycode("key_3a", config): CONTROLS[i][DRUM2A],
                    keycode("key_4a", config): CONTROLS[i][DRUM3A],
                    keycode("key_action2", config): CONTROLS[i][DRUMBASSA],
                    keycode("key_1", config): CONTROLS[i][DRUM5],
                    keycode("key_2", config): CONTROLS[i][DRUM1],
                    keycode("key_3", config): CONTROLS[i][DRUM2],
                    keycode("key_4", config): CONTROLS[i][DRUM3],
                    keycode("key_action1", config): CONTROLS[i][DRUMBASS],
                    keycode("key_start", config): CONTROLS[i][START],
                }
            elif self.type[i] > -1:
                # drums do not need special declarations!
                controlMapping = {
                    keycode("key_left", config): CONTROLS[i][LEFT],
                    keycode("key_right", config): CONTROLS[i][RIGHT],
                    keycode("key_up", config): CONTROLS[i][UP],
                    keycode("key_down", config): CONTROLS[i][DOWN],
                    keycode("key_cancel", config): CONTROLS[i][CANCEL],
                    keycode("key_star", config): CONTROLS[i][STAR],
                    keycode("key_kill", config): CONTROLS[i][KILL],
                    keycode("key_1a", config): CONTROLS[i][
                        KEY1A
                    ],  # order is important. This minimizes key conflicts.
                    keycode("key_2a", config): CONTROLS[i][KEY2A],
                    keycode("key_3a", config): CONTROLS[i][KEY3A],
                    keycode("key_4a", config): CONTROLS[i][KEY4A],
                    keycode("key_5a", config): CONTROLS[i][KEY5A],
                    keycode("key_1", config): CONTROLS[i][KEY1],
                    keycode("key_2", config): CONTROLS[i][KEY2],
                    keycode("key_3", config): CONTROLS[i][KEY3],
                    keycode("key_4", config): CONTROLS[i][KEY4],
                    keycode("key_5", config): CONTROLS[i][KEY5],
                    keycode("key_action2", config): CONTROLS[i][ACTION2],
                    keycode("key_action1", config): CONTROLS[i][ACTION1],
                    keycode("key_start", config): CONTROLS[i][START],
                }
            else:
                controlMapping = {}
            controlMapping = self.checkMapping(controlMapping)
            self.controlMapping.update(controlMapping)
        self.reverseControlMapping = dict(
            (value, key) for key, value in self.controlMapping.items()
        )
        # Multiple key support
        self.heldKeys = {}

    def checkMapping(self, newDict):
        """Record human-readable conflict messages for keys in *newDict*
        that are already bound, honoring key_checker_mode.  Always
        returns *newDict* unchanged; conflicts accumulate in
        self.overlap."""

        def keyName(value):
            # Translate a control bit back to "Controller N <name>".
            if value in CONTROL1:
                name = "Controller 1"
                control = CONTROL1
                n = 0
            elif value in CONTROL2:
                name = "Controller 2"
                control = CONTROL2
                n = 1
            elif value in CONTROL3:
                name = "Controller 3"
                control = CONTROL3
                n = 2
            else:
                name = "Controller 4"
                control = CONTROL4
                n = 3
            for j in range(20):
                if value == control[j]:
                    if self.type[n] == 2:
                        return name + " " + drumkey4names[j]
                    elif self.type[n] == 3:
                        return name + " " + drumkey5names[j]
                    else:
                        return name + " " + guitarkeynames[j]
            else:
                # for/else: only reached when no control bit matched.
                log.error("Key value not found.")
                return "Error"

        if self.keyCheckerMode == 0:
            return newDict
        # Navigation controls may share keys across controllers.
        okconflict = lefts + rights + ups + downs + starts + cancels
        a = []
        for key, value in newDict.items():
            if key == "None":
                continue
            if key in self.controlMapping.keys():
                if value in okconflict:
                    if self.getMapping(key) in okconflict:
                        continue
                a.append(
                    _("%s conflicts with %s")
                    % (keyName(value), keyName(self.getMapping(key)))
                )
        if len(a) == 0:
            return newDict
        self.overlap.extend(a)
        return newDict

    def getMapping(self, key):
        """Return the control bit bound to *key*, or None."""
        return self.controlMapping.get(key)

    def getReverseMapping(self, control):
        """Return the key bound to *control*, or None."""
        return self.reverseControlMapping.get(control)

    def keyPressed(self, key):
        """Mark *key*'s control as pressed; returns the control bit or
        None when the key is unmapped."""
        c = self.getMapping(key)
        if c:
            self.toggle(c, True)
            if c in self.heldKeys and key not in self.heldKeys[c]:
                self.heldKeys[c].append(key)
            return c
        return None

    def keyReleased(self, key):
        """Mark *key*'s control as released; returns the control bit or
        None.  For controls with multiple bound keys, the control only
        releases when the last held key is released."""
        c = self.getMapping(key)
        if c:
            if c in self.heldKeys:
                if key in self.heldKeys[c]:
                    self.heldKeys[c].remove(key)
                    if not self.heldKeys[c]:
                        self.toggle(c, False)
                        return c
                return None
            self.toggle(c, False)
            return c
        return None

    def toggle(self, control, state):
        """Set or clear *control* in the flags bitfield; returns truthy
        when the pressed/released edge actually changed state."""
        prevState = self.flags
        if state:
            self.flags |= control
            return not prevState & control
        else:
            self.flags &= ~control
            return prevState & control

    def getState(self, control):
        """Return the current (bitwise) pressed state of *control*."""
        return self.flags & control
# returns False if there are any key mapping conflicts
def isKeyMappingOK(config, start):
    """Check *config* for duplicate key bindings.

    Returns 0 when no key is bound to more than one option, otherwise a
    list of the display texts of the conflicting options.  *start* names
    the option currently being edited and is never itself reported.
    """

    def keycode(name, config):
        # Resolve a binding to a comparable key code: None when unbound,
        # an int scancode, or a pygame K_* constant looked up by name.
        k = config.get("controller", name)
        if k is None or k == "None":
            return None
        try:
            return int(k)
        except Exception:
            return getattr(pygame, k)

    # list of keys to look for
    keyNames = [
        "key_action1",
        "key_action2",
        "key_1",
        "key_2",
        "key_3",
        "key_4",
        "key_5",
        "key_1a",
        "key_2a",
        "key_3a",
        "key_4a",
        "key_5a",
        "key_start",
        "key_star",
        "key_kill",
        "key_cancel",
    ]
    overlap = []
    keyVal = []
    for i in keyNames:
        # Compare converted key codes, not raw config strings: the raw
        # string (e.g. "K_F1") never equals a converted code, so the
        # previous comparison could not detect any conflict.  Unbound
        # options (None) never conflict with each other.
        value = keycode(i, config)
        if value is not None and value in keyVal and i != start:
            overlap.append(config.prototype["controller"][i].text)
        else:
            keyVal.append(value)
    if len(overlap) > 0:
        return overlap
    # everything tests OK
    return 0
# sets the key mapping and checks for a conflict #FIXME
# restores the old mapping if a conflict occurred
def setNewKeyMapping(engine, config, section, option, key):
    """Bind *key* to *option* and handle conflicts.

    Behaviour follows the "key_checker_mode" setting: 0 = accept silently,
    1 = warn about conflicts but keep the binding, 2 = warn and revert to
    the previous binding.  Returns True when the new binding was kept,
    False when it was reverted.

    NOTE(review): reads the module-level ``Config`` (not the *config*
    argument) for key_checker_mode — presumably the global game config;
    confirm this is intentional.
    """
    oldKey = config.get(section, option)
    config.set(section, option, key)
    keyCheckerMode = Config.get("game", "key_checker_mode")
    # unbinding a key can never conflict
    if key == "None" or key is None:
        return True
    b = isKeyMappingOK(config, option)
    if b != 0:
        if keyCheckerMode > 0:
            # imported here rather than at module level
            from fofix.game import Dialogs

            Dialogs.showMessage(
                engine, _("This key conflicts with the following keys: %s") % str(b)
            )
        if keyCheckerMode == 2:  # enforce no conflicts!
            config.set(section, option, oldKey)
            return False
    return True
class Player(object):
    """One player's profile: name, control groups derived from the active
    controller layout, and per-player options loaded from the player
    database (``_playerDB``)."""

    def __init__(self, name, number):
        log.debug("Player class init (Player.py)...")
        self.name = name

        self.reset()

        # control-layout state; populated by configController()
        self.keyList = None
        self.progressKeys = []
        self.drums = []
        self.keys = []
        self.soloKeys = []
        self.soloShift = None
        self.soloSlide = False
        self.actions = []
        self.yes = []
        self.no = []
        self.conf = []
        self.up = []
        self.down = []
        self.left = []
        self.right = []
        self.controller = -1
        self.controlType = -1
        self.guitarNum = None
        self.number = number
        self.bassGrooveEnabled = False
        self.currentTheme = 1

        # per-player options, each fetched from the players table by name
        self.lefty = _playerDB.execute(
            "SELECT `lefty` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self.twoChordMax = _playerDB.execute(
            "SELECT `twochord` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self.drumflip = _playerDB.execute(
            "SELECT `drumflip` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self.assistMode = _playerDB.execute(
            "SELECT `assist` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self.autoKick = _playerDB.execute(
            "SELECT `autokick` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self.neck = _playerDB.execute(
            "SELECT `neck` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self.neckType = _playerDB.execute(
            "SELECT `necktype` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self.whichPart = _playerDB.execute(
            "SELECT `part` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self._upname = _playerDB.execute(
            "SELECT `upname` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]
        self._difficulty = _playerDB.execute(
            "SELECT `difficulty` FROM `players` WHERE `name` = ?", [self.name]
        ).fetchone()[0]

        # need to store selected practice mode and start position here
        self.practiceMode = False
        self.practiceSpeed = 1.0
        self.practiceSection = None
        self.startPos = 0.0
        self.hopoFreq = None

    def reset(self):
        # per-song scratch state
        self.twoChord = 0

    def configController(self):
        """Derive the key groups (fret keys, solo keys, drums, menu
        navigation keys, ...) from ``keyList`` for the current
        ``controlType``.  Returns True if a key list was configured,
        False when no key list is set."""
        if self.keyList:
            if self.controlType == 1:
                # type 1 controllers have no alternate (A) fret keys:
                # duplicate the primary frets into the alternate slots
                self.keys = [
                    self.keyList[KEY1],
                    self.keyList[KEY2],
                    self.keyList[KEY3],
                    self.keyList[KEY4],
                    self.keyList[KEY5],
                    self.keyList[KEY1],
                    self.keyList[KEY2],
                    self.keyList[KEY3],
                    self.keyList[KEY4],
                    self.keyList[KEY5],
                ]
                self.soloKeys = [
                    self.keyList[KEY1],
                    self.keyList[KEY2],
                    self.keyList[KEY3],
                    self.keyList[KEY4],
                    self.keyList[KEY5],
                ]
            else:
                self.keys = [
                    self.keyList[KEY1],
                    self.keyList[KEY2],
                    self.keyList[KEY3],
                    self.keyList[KEY4],
                    self.keyList[KEY5],
                    self.keyList[KEY1A],
                    self.keyList[KEY2A],
                    self.keyList[KEY3A],
                    self.keyList[KEY4A],
                    self.keyList[KEY5A],
                ]
                self.soloKeys = [
                    self.keyList[KEY1A],
                    self.keyList[KEY2A],
                    self.keyList[KEY3A],
                    self.keyList[KEY4A],
                    self.keyList[KEY5A],
                ]
            self.soloShift = self.keyList[KEY1A]
            self.drumSolo = []
            self.actions = [self.keyList[ACTION1], self.keyList[ACTION2]]
            self.drums = [
                self.keyList[DRUMBASS],
                self.keyList[DRUM1],
                self.keyList[DRUM2],
                self.keyList[DRUM3],
                self.keyList[DRUM5],
                self.keyList[DRUMBASSA],
                self.keyList[DRUM1A],
                self.keyList[DRUM2A],
                self.keyList[DRUM3A],
                self.keyList[DRUM5A],
            ]
            if self.controlType == 1:
                self.progressKeys = [
                    self.keyList[KEY1],
                    self.keyList[CANCEL],
                    self.keyList[START],
                    self.keyList[KEY2],
                ]
            else:
                self.progressKeys = [
                    self.keyList[KEY1],
                    self.keyList[KEY1A],
                    self.keyList[CANCEL],
                    self.keyList[START],
                    self.keyList[KEY2],
                    self.keyList[KEY2A],
                ]
            if self.controlType == 4:
                self.soloSlide = True
            else:
                self.soloSlide = False
            if self.controlType in GUITARTYPES:
                # guitar-style menu navigation
                self.yes = [self.keyList[KEY1]]
                if self.controlType == 1:
                    self.yes.append(self.keyList[KEY1A])
                self.no = [self.keyList[KEY2], self.keyList[KEY2A]]
                self.up = [self.keyList[ACTION1]]
                self.down = [self.keyList[ACTION2]]
            elif self.controlType == 2:
                # drum-style menu navigation
                self.yes = [self.keyList[DRUM5], self.keyList[DRUM5A]]
                self.no = [self.keyList[DRUM1], self.keyList[DRUM1A]]
                self.up = [self.keyList[DRUM2], self.keyList[DRUM2A]]
                self.down = [self.keyList[DRUM3], self.keyList[DRUM3A]]
                self.conf = [self.keyList[DRUMBASS], self.keyList[DRUMBASSA]]
            # dedicated navigation keys work for every controller type
            self.yes.append(self.keyList[START])
            self.no.append(self.keyList[CANCEL])
            self.up.append(self.keyList[UP])
            self.down.append(self.keyList[DOWN])
            self.left.append(self.keyList[LEFT])
            self.right.append(self.keyList[RIGHT])
            # add drum4 to the drums when ready
            return True
        else:
            return False

    def getName(self):
        # prefer the user-provided display name when one is set
        if self._upname == "" or self._upname is None:
            return self.name
        else:
            return self._upname

    def setName(self, name):
        # persist the display name and mark the row as changed
        _playerDB.execute(
            "UPDATE `players` SET `upname` = ?, `changed` = 1 WHERE `name` = ?",
            [name, self.name],
        )
        _playerDB.commit()
        self._upname = name

    def getDifficulty(self):
        return song.difficulties.get(self._difficulty)

    def setDifficulty(self, difficulty):
        # persist the selected difficulty id
        _playerDB.execute(
            "UPDATE `players` SET `difficulty` = ?, `changed` = 1 WHERE `name` = ?",
            [difficulty.id, self.name],
        )
        _playerDB.commit()
        self._difficulty = difficulty.id

    def getDifficultyInt(self):
        return self._difficulty

    def getPart(self):
        # this should not be reading from the ini file each time it wants to know the part
        if self.whichPart == -2:
            return "No Player 2"
        else:
            return song.parts.get(self.whichPart)

    def setPart(self, part):
        if part == "No Player 2":
            self.whichPart = (
                -2  # also need to set self.part here to avoid unnecessary ini reads
            )
        else:
            self.whichPart = (
                part.id
            )  # also need to set self.part here to avoid unnecessary ini reads
        _playerDB.execute(
            "UPDATE `players` SET `part` = ?, `changed` = 1 WHERE `name` = ?",
            [self.whichPart, self.name],
        )
        _playerDB.commit()

    difficulty = property(getDifficulty, setDifficulty)
    part = property(getPart, setPart)
    upname = property(getName, setName)
|
invesalius3 | setup | import logging
import os
import pathlib
import subprocess
import sys
import numpy
import setuptools
from Cython.Build import build_ext, cythonize
# Per-platform OpenMP compile/link flags for the Cython extensions below.
if sys.platform == "darwin":
    # Apple clang needs the preprocessor-forwarded form plus libomp
    unix_copt = ["-Xpreprocessor", "-fopenmp", "-lomp"]
    unix_lopt = ["-Xpreprocessor", "-fopenmp", "-lomp"]
else:
    unix_copt = [
        "-fopenmp",
    ]
    unix_lopt = [
        "-fopenmp",
    ]

# keyed by distutils compiler type (see build_ext_subclass)
copt = {"msvc": ["/openmp"], "mingw32": ["-fopenmp"], "unix": unix_copt}
lopt = {"mingw32": ["-fopenmp"], "unix": unix_lopt}
class build_ext_subclass(build_ext):
    """build_ext that injects per-compiler OpenMP flags and NumPy headers."""

    def build_extensions(self):
        compiler_type = self.compiler.compiler_type
        print("Compiler", compiler_type)
        # flags appropriate for the detected compiler (None if unknown)
        extra_compile = copt.get(compiler_type)
        extra_link = lopt.get(compiler_type)
        for extension in self.extensions:
            if extra_compile is not None:
                extension.extra_compile_args = extra_compile
            if extra_link is not None:
                extension.extra_link_args = extra_link
            # every extension needs the NumPy C headers
            extension.include_dirs = [numpy.get_include()]
        build_ext.build_extensions(self)
class BuildPluginsCommand(setuptools.Command):
    """
    A custom command to build all plugins with cython code.
    """

    description = "Build all plugins with cython code"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        """Compile each plugin's Cython extensions in place.

        Runs every plugin's own ``setup.py build_ext --inplace`` from the
        plugin's directory, always restoring the working directory.
        """
        compilable_plugins = ["porous_creation", "remove_tiny_objects"]
        inv_folder = pathlib.Path(__file__).parent.resolve()
        plugins_folder = inv_folder.joinpath("plugins")
        for p in compilable_plugins:
            plugin_folder = plugins_folder.joinpath(p)
            self.announce("Compiling plugin: {}".format(p))
            os.chdir(plugin_folder)
            try:
                subprocess.check_call(
                    [sys.executable, "setup.py", "build_ext", "--inplace"]
                )
            finally:
                # Restore the project root even when a plugin build fails,
                # so an exception doesn't leave the process in a stale cwd.
                os.chdir(inv_folder)
# Build configuration: register the custom commands above and declare the
# project's Cython extension modules.
setuptools.setup(
    cmdclass={
        # OpenMP/NumPy-aware build defined above
        "build_ext": build_ext_subclass,
        # `python setup.py build_plugins` compiles the bundled plugins
        "build_plugins": BuildPluginsCommand,
    },
    ext_modules=cythonize(
        [
            setuptools.Extension(
                "invesalius_cy.mips",
                ["invesalius_cy/mips.pyx"],
            ),
            setuptools.Extension(
                "invesalius_cy.interpolation",
                ["invesalius_cy/interpolation.pyx"],
            ),
            setuptools.Extension(
                "invesalius_cy.transforms",
                ["invesalius_cy/transforms.pyx"],
            ),
            # the two C++ extensions
            setuptools.Extension(
                "invesalius_cy.floodfill",
                ["invesalius_cy/floodfill.pyx"],
                language="c++",
            ),
            setuptools.Extension(
                "invesalius_cy.cy_mesh",
                ["invesalius_cy/cy_mesh.pyx"],
                language="c++",
            ),
        ]
    ),
)
|
scripts | dumper_post | # ***************************************************************************
# * Copyright (c) 2014 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import datetime
import Path.Post.Utils as PostUtils
import PathScripts.PathUtils as PathUtils
# Help text shown by FreeCAD for this postprocessor.
TOOLTIP = """
Dumper is an extremely simple postprocessor file for the Path workbench. It is used
to dump the command list from one or more Path objects for simple inspection. This post
doesn't do any manipulation of the path and doesn't write anything to disk. It just
shows the dialog so you can see it. Useful for debugging, but not much else.
"""

now = datetime.datetime.now()

# When True, export() pops up the G-code editor dialog for review.
SHOW_EDITOR = True

# to distinguish python built-in open function from the one declared below
if open.__module__ in ["__builtin__", "io"]:
    pythonopen = open
def export(objectslist, filename, argstring):
    """called when freecad exports a list of objects

    Dumps the raw Path commands of every object in *objectslist* as text;
    optionally shows the result in the G-code editor dialog (SHOW_EDITOR)
    and returns the final text.  Nothing is written to *filename*.
    """
    output = """(This output produced with the dump post processor)
(Dump is useful for inspecting the raw commands in your paths)
(but is not useful for driving machines.)
(Consider setting a default postprocessor in your project or )
(exporting your paths using a specific post that matches your machine)
"""
    for obj in objectslist:
        # bail out entirely on the first non-path selection
        if not hasattr(obj, "Path"):
            print(
                "the object "
                + obj.Name
                + " is not a path. Please select only path and Compounds."
            )
            return
        print("postprocessing...")
        output += parse(obj)
    if SHOW_EDITOR:
        # let the user inspect (and possibly edit) the dump
        dia = PostUtils.GCodeEditorDialog()
        dia.editor.setText(output)
        result = dia.exec_()
        if result:
            final = dia.editor.toPlainText()
        else:
            final = output
    else:
        final = output
    print("done postprocessing.")
    return final
def parse(pathobj):
    """Render *pathobj* — a single path or a group/compound — as text."""
    if hasattr(pathobj, "Group"):
        # Compound or project: header line plus the rendered children.
        pieces = ["(Group: " + pathobj.Label + ")\n"]
        pieces.extend(parse(member) for member in pathobj.Group)
        return "".join(pieces)
    # Groups might contain non-path things like stock: render those as nothing.
    if not hasattr(pathobj, "Path"):
        return ""
    pieces = ["(Path: " + pathobj.Label + ")\n"]
    for command in PathUtils.getPathWithPlacement(pathobj).Commands:
        pieces.append(str(command) + "\n")
    return "".join(pieces)
# print(__name__ + " gcode postprocessor loaded.")
|
Sandbox | Init | # ***************************************************************************
# * Copyright (c) 2001,2002 Juergen Riegel <juergen.riegel@web.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************/
# FreeCAD init script of the Sandbox module
import FreeCAD

# Register the Sandbox module's DRAWEXE exporter for .tcl files.
FreeCAD.addExportType("DRAWEXE script (*.tcl)", "exportDRAWEXE")
|
term2048 | keypress | # -*- coding: UTF-8 -*-
"""
Keyboard events processing
"""
# Common values

# Space Bar key to pause the game
SPACE = 32

# Vim keys
K, J, L, H = 107, 106, 108, 104

# Platform detection: termios only exists on POSIX systems, so a failed
# import selects the Windows (msvcrt) implementation of getKey().
try:
    import termios
except ImportError:
    # Windows
    import msvcrt

    UP, DOWN, RIGHT, LEFT = 72, 80, 77, 75

    # map vim-style keys onto the arrow-key codes
    __key_aliases = {
        K: UP,
        J: DOWN,
        L: RIGHT,
        H: LEFT,
    }

    def getKey():
        """Return a key pressed by the user"""
        while True:
            if msvcrt.kbhit():
                a = ord(msvcrt.getch())
                return __key_aliases.get(a, a)
else:
    # Linux/OSX
    # refs:
    # http://bytes.com/topic/python/answers/630206-check-keypress-linux-xterm
    # http://stackoverflow.com/a/2521032/735926
    import sys
    import tty

    # saved terminal settings, restored after each read
    __fd = sys.stdin.fileno()
    __old = termios.tcgetattr(__fd)

    # Arrow keys
    # they are preceded by 27 and 91, hence the double 'if' in getKey.
    UP, DOWN, RIGHT, LEFT = 65, 66, 67, 68

    # map vim-style keys onto the arrow-key codes
    __key_aliases = {
        K: UP,
        J: DOWN,
        L: RIGHT,
        H: LEFT,
    }

    def __getKey():
        """Return a key pressed by the user"""
        try:
            # cbreak mode: read single characters without waiting for Enter
            tty.setcbreak(sys.stdin.fileno())
            termios.tcflush(sys.stdin, termios.TCIOFLUSH)
            ch = sys.stdin.read(1)
            return ord(ch) if ch else None
        finally:
            termios.tcsetattr(__fd, termios.TCSADRAIN, __old)

    def getKey():
        """
        same as __getKey, but handle arrow keys
        """
        k = __getKey()
        if k == 27:
            k = __getKey()
            if k == 91:
                k = __getKey()
        return __key_aliases.get(k, k)

# legacy support
getArrowKey = getKey
|
Settings | PerObjectContainerStack | # Copyright (c) 2020 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Optional
from UM.Application import Application
from UM.Decorators import override
from UM.Settings.Interfaces import PropertyEvaluationContext
from UM.Settings.SettingInstance import InstanceState
from .CuraContainerStack import CuraContainerStack
class PerObjectContainerStack(CuraContainerStack):
    """Container stack holding per-model setting overrides.

    Falls back to the global stack's evaluation for anything the user has
    not overridden on the object itself, taking ``limit_to_extruder``
    redirection into account."""

    def isDirty(self):
        # This stack should never be auto saved, so always return that there is nothing to save.
        return False

    @override(CuraContainerStack)
    def getProperty(
        self,
        key: str,
        property_name: str,
        context: Optional[PropertyEvaluationContext] = None,
    ) -> Any:
        if context is None:
            context = PropertyEvaluationContext()
        context.pushContainer(self)

        global_stack = Application.getInstance().getGlobalContainerStack()
        if not global_stack:
            return None

        # Return the user defined value if present, otherwise, evaluate the value according to the default routine.
        if self.getContainer(0).hasProperty(key, property_name):
            if self.getContainer(0).getProperty(key, "state") == InstanceState.User:
                result = super().getProperty(key, property_name, context)
                context.popContainer()
                return result

        # Handle the "limit_to_extruder" property.
        limit_to_extruder = super().getProperty(key, "limit_to_extruder", context)
        if limit_to_extruder is not None:
            limit_to_extruder = str(limit_to_extruder)

        # if this stack has the limit_to_extruder "not overridden", use the original limit_to_extruder as the current
        # limit_to_extruder, so the values retrieved will be from the perspective of the original limit_to_extruder
        # stack.
        if limit_to_extruder == "-1":
            if "original_limit_to_extruder" in context.context:
                limit_to_extruder = context.context["original_limit_to_extruder"]

        # NOTE(review): the bound check below uses <= len(extruderList);
        # an index equal to len() would raise IndexError on the lookup —
        # confirm against upstream intent.
        if (
            limit_to_extruder is not None
            and limit_to_extruder != "-1"
            and int(limit_to_extruder) <= len(global_stack.extruderList)
        ):
            # set the original limit_to_extruder if this is the first stack that has a non-overridden limit_to_extruder
            if "original_limit_to_extruder" not in context.context:
                context.context["original_limit_to_extruder"] = limit_to_extruder

            if super().getProperty(key, "settable_per_extruder", context):
                result = global_stack.extruderList[int(limit_to_extruder)].getProperty(
                    key, property_name, context
                )
                if result is not None:
                    context.popContainer()
                    return result

        result = super().getProperty(key, property_name, context)
        context.popContainer()
        return result

    @override(CuraContainerStack)
    def setNextStack(self, stack: CuraContainerStack) -> None:
        super().setNextStack(stack)

        # trigger signal to re-evaluate all default settings
        for key in self.getContainer(0).getAllKeys():
            # only evaluate default settings
            if self.getContainer(0).getProperty(key, "state") != InstanceState.Default:
                continue

            self._collectPropertyChanges(key, "value")
        self._emitCollectedPropertyChanges()
|
migrations | 0046_reviewrating | # Generated by Django 3.0.7 on 2021-02-25 18:36
import django.db.models.deletion
from django.db import connection, migrations, models
from django.db.models import Q
from psycopg2.extras import execute_values
def convert_review_rating(app_registry, schema_editor):
    """take rating type Reviews and convert them to ReviewRatings

    A "rating" is a Review with no text content; each one gets a
    matching multi-table-inheritance child row in bookwyrm_reviewrating.
    """
    db_alias = schema_editor.connection.alias
    reviews = (
        app_registry.get_model("bookwyrm", "Review")
        .objects.using(db_alias)
        .filter(Q(content__isnull=True) | Q(content=""))
    )
    with connection.cursor() as cursor:
        # bulk insert via psycopg2 rather than one ORM save per row
        values = [(r.id,) for r in reviews]
        execute_values(
            cursor,
            """
            INSERT INTO bookwyrm_reviewrating(review_ptr_id)
            VALUES %s""",
            values,
        )
def unconvert_review_rating(app_registry, schema_editor):
    """Reverse hook for convert_review_rating: intentionally a no-op.

    Django drops the ReviewRating table itself when the migration is
    reversed; this callable only exists so RunPython accepts a valid
    reverse function.
    """
    return None
class Migration(migrations.Migration):
    """Create the ReviewRating proxy-child model and backfill it from
    existing content-less Reviews."""

    dependencies = [
        ("bookwyrm", "0045_auto_20210210_2114"),
    ]

    operations = [
        migrations.CreateModel(
            name="ReviewRating",
            fields=[
                # multi-table inheritance link back to Review
                (
                    "review_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="bookwyrm.Review",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
            bases=("bookwyrm.review",),
        ),
        # data migration: populate the new table (reverse is a no-op)
        migrations.RunPython(convert_review_rating, unconvert_review_rating),
    ]
|
analog | qa_dpll | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import analog, blocks, gr, gr_unittest
class test_dpll_bb(gr_unittest.TestCase):
    """QA for the analog.dpll_bb digital phase-locked loop block."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_dpll_bb_001(self):
        # Test set/gets
        period = 1.0
        gain = 0.1

        op = analog.dpll_bb(period, gain)

        op.set_gain(0.2)
        g = op.gain()
        self.assertAlmostEqual(g, 0.2)

        f = op.freq()
        self.assertEqual(1 / period, f)

        # decision threshold derives from the loop frequency
        d0 = 1.0 - 0.5 * f
        d1 = op.decision_threshold()
        self.assertAlmostEqual(d0, d1)

        p = op.phase()
        self.assertEqual(0, p)

    def test_dpll_bb_002(self):
        period = 4
        gain = 0.1

        # ten periods of a unit pulse train; a locked DPLL reproduces it
        src_data = 10 * (
            (period - 1)
            * [
                0,
            ]
            + [
                1,
            ]
        )
        expected_result = src_data

        src = blocks.vector_source_b(src_data)
        op = analog.dpll_bb(period, gain)
        dst = blocks.vector_sink_b()

        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 4)
# Run under the GNU Radio QA harness when executed directly.
if __name__ == "__main__":
    gr_unittest.run(test_dpll_bb)
|
invesalius | expanduser | #!/usr/bin/python
# -*- coding: utf-8 -*-
# From http://bugs.python.org/file23442/expanduser.py
import ctypes
from ctypes import windll, wintypes
class GUID(ctypes.Structure):
    """ctypes mirror of the Win32 GUID structure (for SHGetKnownFolderPath)."""

    _fields_ = [
        ("Data1", wintypes.DWORD),
        ("Data2", wintypes.WORD),
        ("Data3", wintypes.WORD),
        ("Data4", wintypes.BYTE * 8),
    ]

    def __init__(self, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8):
        """Create a new GUID."""
        self.Data1 = l
        self.Data2 = w1
        self.Data3 = w2
        self.Data4[:] = (b1, b2, b3, b4, b5, b6, b7, b8)

    def __repr__(self):
        b1, b2, b3, b4, b5, b6, b7, b8 = self.Data4
        return "GUID(%x-%x-%x-%x%x%x%x%x%x%x%x)" % (
            self.Data1,
            self.Data2,
            self.Data3,
            b1,
            b2,
            b3,
            b4,
            b5,
            b6,
            b7,
            b8,
        )
# constants to be used according to the version on shell32
# CSIDL id for the XP-era SHGetSpecialFolderPathW fallback
CSIDL_PROFILE = 40
# KNOWNFOLDERID of the user profile folder (Vista+ SHGetKnownFolderPath)
FOLDERID_Profile = GUID(
    0x5E6C858F, 0x0E22, 0x4760, 0x9A, 0xFE, 0xEA, 0x33, 0x17, 0xB6, 0x71, 0x73
)
def expand_user():
    """Return the Windows user-profile directory path.

    Uses SHGetKnownFolderPath where available (Vista and later) and
    falls back to the deprecated SHGetSpecialFolderPathW on XP.
    """
    # get the function that we can find from Vista up, not the one in XP
    get_folder_path = getattr(windll.shell32, "SHGetKnownFolderPath", None)
    # import pdb; pdb.set_trace()
    if get_folder_path is not None:
        # ok, we can use the new function which is recomended by the msdn
        ptr = ctypes.c_wchar_p()
        get_folder_path(ctypes.byref(FOLDERID_Profile), 0, 0, ctypes.byref(ptr))
        return ptr.value
    else:
        # use the deprecated one found in XP and on for compatibility reasons
        get_folder_path = getattr(windll.shell32, "SHGetSpecialFolderPathW", None)
        buf = ctypes.create_unicode_buffer(300)
        get_folder_path(None, buf, CSIDL_PROFILE, False)
        return buf.value
|
dtv | qa_dtv | #!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import os
from os.path import getsize
import numpy as np
import pmt
from gnuradio import blocks, digital, dtv, gr, gr_unittest
# Reference input transport stream and the expected modulator output,
# shipped alongside the tests ($srcdir is set by the test harness).
ts_in_file = os.path.join(os.getenv("srcdir"), "vv009-4kfft.ts")
complex_out_file = os.path.join(os.getenv("srcdir"), "vv009-4kfft.cfile")
class test_dtv(gr_unittest.TestCase):
    """End-to-end QA for the DVB-T2 transmit chain: runs a reference TS
    through every stage and compares the output against a golden cfile."""

    def setUp(self):
        self.tb = gr.top_block()
        self.outfile = "vv.cfile"

    def tearDown(self):
        self.tb = None
        os.remove(self.outfile)

    def test_000(self):
        infile = ts_in_file
        testfile = complex_out_file

        # source: raw MPEG transport stream bytes
        file_source = blocks.file_source(gr.sizeof_char * 1, infile, False, 0, 0)
        file_source.set_begin_tag(pmt.PMT_NIL)

        # baseband framing, scrambling and FEC (BCH + LDPC), rate 2/3
        bbheader = dtv.dvb_bbheader_bb(
            dtv.STANDARD_DVBT2,
            dtv.FECFRAME_NORMAL,
            dtv.C2_3,
            dtv.RO_0_35,
            dtv.INPUTMODE_NORMAL,
            dtv.INBAND_OFF,
            168,
            4000000,
        )
        bbscrambler = dtv.dvb_bbscrambler_bb(
            dtv.STANDARD_DVBT2, dtv.FECFRAME_NORMAL, dtv.C2_3
        )
        bch = dtv.dvb_bch_bb(dtv.STANDARD_DVBT2, dtv.FECFRAME_NORMAL, dtv.C2_3)
        ldpc = dtv.dvb_ldpc_bb(
            dtv.STANDARD_DVBT2, dtv.FECFRAME_NORMAL, dtv.C2_3, dtv.MOD_OTHER
        )

        # 64QAM mapping with constellation rotation, then interleaving
        interleaver = dtv.dvbt2_interleaver_bb(
            dtv.FECFRAME_NORMAL, dtv.C2_3, dtv.MOD_64QAM
        )
        modulator = dtv.dvbt2_modulator_bc(
            dtv.FECFRAME_NORMAL, dtv.MOD_64QAM, dtv.ROTATION_ON
        )
        cellinterleaver = dtv.dvbt2_cellinterleaver_cc(
            dtv.FECFRAME_NORMAL, dtv.MOD_64QAM, 3, 3
        )

        # T2 frame construction, 4K FFT, guard interval 1/32, PP7 pilots
        framemapper = dtv.dvbt2_framemapper_cc(
            dtv.FECFRAME_NORMAL,
            dtv.C2_3,
            dtv.MOD_64QAM,
            dtv.ROTATION_ON,
            3,
            3,
            dtv.CARRIERS_NORMAL,
            dtv.FFTSIZE_4K,
            dtv.GI_1_32,
            dtv.L1_MOD_16QAM,
            dtv.PILOT_PP7,
            2,
            8,
            dtv.PAPR_TR,
            dtv.VERSION_111,
            dtv.PREAMBLE_T2_SISO,
            dtv.INPUTMODE_NORMAL,
            dtv.RESERVED_OFF,
            dtv.L1_SCRAMBLED_OFF,
            dtv.INBAND_OFF,
        )
        freqinterleaver = dtv.dvbt2_freqinterleaver_cc(
            dtv.CARRIERS_NORMAL,
            dtv.FFTSIZE_4K,
            dtv.PILOT_PP7,
            dtv.GI_1_32,
            8,
            dtv.PAPR_TR,
            dtv.VERSION_111,
            dtv.PREAMBLE_T2_SISO,
        )
        pilotgenerator = dtv.dvbt2_pilotgenerator_cc(
            dtv.CARRIERS_NORMAL,
            dtv.FFTSIZE_4K,
            dtv.PILOT_PP7,
            dtv.GI_1_32,
            8,
            dtv.PAPR_TR,
            dtv.VERSION_111,
            dtv.PREAMBLE_T2_SISO,
            dtv.MISO_TX1,
            dtv.EQUALIZATION_OFF,
            dtv.BANDWIDTH_8_0_MHZ,
            4096,
        )
        paprtr = dtv.dvbt2_paprtr_cc(
            dtv.CARRIERS_NORMAL,
            dtv.FFTSIZE_4K,
            dtv.PILOT_PP7,
            dtv.GI_1_32,
            8,
            dtv.PAPR_TR,
            dtv.VERSION_111,
            3.0,
            10,
            4096,
        )

        # OFDM cyclic prefix (1/32 of the 4096-sample symbol) and P1 preamble
        ofdm_cyclic_prefixer = digital.ofdm_cyclic_prefixer(
            4096, 4096 + 4096 // 32, 0, ""
        )
        p1insertion = dtv.dvbt2_p1insertion_cc(
            dtv.CARRIERS_NORMAL,
            dtv.FFTSIZE_4K,
            dtv.GI_1_32,
            8,
            dtv.PREAMBLE_T2_SISO,
            dtv.SHOWLEVELS_OFF,
            3.01,
        )

        file_sink = blocks.file_sink(gr.sizeof_gr_complex * 1, self.outfile, False)
        file_sink.set_unbuffered(True)

        self.tb.connect(
            file_source,
            bbheader,
            bbscrambler,
            bch,
            ldpc,
            interleaver,
            modulator,
            cellinterleaver,
            framemapper,
            freqinterleaver,
            pilotgenerator,
            paprtr,
            ofdm_cyclic_prefixer,
            p1insertion,
            file_sink,
        )
        self.tb.run()
        file_sink.close()

        # compare size and sample values against the reference capture
        self.assertEqual(getsize(self.outfile), getsize(testfile))

        out_data = np.fromfile(self.outfile, dtype=np.float32)
        expected_data = np.fromfile(testfile, dtype=np.float32)
        self.assertFloatTuplesAlmostEqual(out_data, expected_data, 5)
# Run under the GNU Radio QA harness when executed directly.
if __name__ == "__main__":
    gr_unittest.run(test_dtv)
|
base | multi_account | # -*- coding: utf-8 -*-
import re
import time
from datetime import timedelta
from pyload.core.utils.purge import chars as remove_chars
from pyload.core.utils.purge import uniquify
from .account import BaseAccount
class MultiAccount(BaseAccount):
__name__ = "MultiAccount"
__type__ = "account"
__version__ = "0.24"
__status__ = "testing"
__config__ = [
("enabled", "bool", "Activated", True),
("mh_mode", "all;listed;unlisted", "Filter downloaders to use", "all"),
("mh_list", "str", "Downloader list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12),
]
__description__ = """Multi-downloader account plugin"""
__license__ = "GPLv3"
__authors__ = [
("Walter Purcaro", "vuolter@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
DOMAIN_REPLACEMENTS = [
(r"ddl\.to", "ddownload.com"),
(r"180upload\.com", "hundredeightyupload.com"),
(r"bayfiles\.net", "bayfiles.com"),
(r"cloudnator\.com", "shragle.com"),
(r"dfiles\.eu", "depositfiles.com"),
(r"easy-share\.com", "crocko.com"),
(r"freakshare\.net", "freakshare.com"),
(r"hellshare\.com", "hellshare.cz"),
(r"ifile\.it", "filecloud.io"),
(r"nowdownload\.\w+", "nowdownload.sx"),
(r"nowvideo\.\w+", "nowvideo.sx"),
(r"putlocker\.com", "firedrive.com"),
(r"share-?rapid\.cz", "multishare.cz"),
(r"ul\.to", "uploaded.to"),
(r"uploaded\.net", "uploaded.to"),
(r"uploadhero\.co", "uploadhero.com"),
(r"zshares\.net", "zshare.net"),
(r"^1", "one"),
(r"^2", "two"),
(r"^3", "three"),
(r"^4", "four"),
(r"^5", "five"),
(r"^6", "six"),
(r"^7", "seven"),
(r"^8", "eight"),
(r"^9", "nine"),
(r"^0", "zero"),
]
def init(self):
    """Account entry point: reset multi-hoster state, then locate and
    bind the matching downloader/decrypter plugin."""
    self.need_reactivate = False
    self.plugins = []            # cached hoster/domain list
    self.supported = []          # plugin names currently overridden by us
    self.pluginclass = None
    self.pluginmodule = None
    self.plugintype = None
    self.fail_count = 0          # consecutive reload failures (3 deactivates)

    self.init_plugin()
def init_plugin(self):
    """Resolve our own plugin class/module and schedule periodic reloads.

    Without a resolvable plugin reference the multi-downloader feature
    stays disabled (only a warning is logged).
    """
    plugin, self.plugintype = self.pyload.plugin_manager.find_plugin(self.classname)

    if plugin:
        self.pluginmodule = self.pyload.plugin_manager.load_module(
            self.plugintype, self.classname
        )
        self.pluginclass = self.pyload.plugin_manager.load_class(
            self.plugintype, self.classname
        )
        # re-apply overrides whenever plugins are hot-updated
        self.pyload.addon_manager.add_event("plugin_updated", self.plugins_updated)
        # first refresh shortly after startup; interval is adjusted later
        self.periodical.start(3, threaded=True)
    else:
        self.log_warning(
            self._(
                "Multi-downloader feature will be deactivated due missing plugin reference"
            )
        )
def plugins_updated(self, type_plugins):
    """Event hook: re-apply hoster overrides after a plugin hot-update.

    *type_plugins* is a list of (type, name) pairs; updates touching
    "base"/"addon" plugins require a core restart, so nothing is done.
    """
    if not any(
        t in ("base", "addon") for t, n in type_plugins
    ):  #: do nothing if restart required
        self.reactivate()
def periodical_task(self):
    # Scheduled refresh: re-fetch the hoster list and re-apply overrides.
    self.reactivate(refresh=True)
def replace_domains(self, list):
    """Normalize known domain aliases via DOMAIN_REPLACEMENTS.

    Each (pattern, replacement) pair is applied case-insensitively to
    every domain that matches it; non-matching domains pass through.
    """
    domains = list
    for pattern, repl in self.DOMAIN_REPLACEMENTS:
        compiled = re.compile(pattern, re.I | re.U)
        domains = [
            compiled.sub(repl, domain) if compiled.match(domain) else domain
            for domain in domains
        ]
    return domains
def parse_domains(self, list):
    """Extract bare, lower-cased, de-duplicated domain names from URLs,
    then normalize them through replace_domains()."""
    url_re = re.compile(
        r"^(?:https?://)?(?:www\.)?(?:\w+\.)*((?:(?:\d{1,3}\.){3}\d{1,3}|[\w\-^_]{3,63}(?:\.[a-zA-Z]{2,}){1,2})(?:\:\d+)?)",
        re.I | re.U,
    )
    found = []
    for url in list:
        for domain in url_re.findall(url):
            found.append(domain.strip().lower())
    return self.replace_domains(uniquify(found))
def _grab_hosters(self):
    """Fetch, normalize and cache the supported-hoster list.

    Wraps the subclass hook grab_hosters(); on any failure the cached
    list in ``info["data"]["hosters"]`` stays empty.  Always returns the
    (possibly empty) cached list.
    """
    self.info["data"]["hosters"] = []

    try:
        hosterlist = self.grab_hosters(
            self.user, self.info["login"]["password"], self.info["data"]
        )

        if hosterlist and isinstance(hosterlist, list):
            domains = self.parse_domains(hosterlist)
            self.info["data"]["hosters"] = sorted(domains)
            # push the refreshed data back to the account store
            self.sync(reverse=True)

    except Exception as exc:
        self.log_warning(
            self._("Error loading downloader list for user `{}`").format(self.user),
            exc,
            exc_info=self.pyload.debug > 1,
            stack_info=self.pyload.debug > 2,
        )

    finally:
        self.log_debug(
            "Downloader list for user `{}`: {}".format(
                self.user, self.info["data"]["hosters"]
            )
        )
        return self.info["data"]["hosters"]
def grab_hosters(self, user, password, data):
    """
    Load list of supported downloaders.

    Abstract hook: concrete multi-downloader accounts must override it.

    :return: List of domain names
    """
    raise NotImplementedError
def _override(self):
    """Redirect supported hoster/decrypter plugins to this plugin.

    Splits the hoster list into: plugins already covered by a valid
    premium account (excluded), known plugins to override in place
    (supported), and unknown domains handled by extending our own URL
    pattern (new_supported).  Previously overridden plugins that are no
    longer supported get unloaded.
    """
    prev_supported = self.supported
    new_supported = []
    excluded = []
    self.supported = []

    if self.plugintype == "downloader":
        plugin_map = {
            name.lower(): name
            for name in self.pyload.plugin_manager.downloader_plugins.keys()
        }
        # hosters the user already has a working premium account for
        account_list = [
            account.type.lower()
            for account in self.pyload.api.get_accounts(False)
            if account.valid and account.premium
        ]
    else:
        plugin_map = {}
        # strip a trailing "Folder" from decrypter names for comparison
        account_list = [
            name[::-1].replace("Folder"[::-1], "", 1).lower()[::-1]
            for name in self.pyload.plugin_manager.decrypter_plugins.keys()
        ]

    for plugin in self.get_plugins():
        name = remove_chars(plugin, "-.")

        if name in account_list:
            excluded.append(plugin)
        else:
            if name in plugin_map:
                self.supported.append(plugin_map[name])
            else:
                new_supported.append(plugin)

    # drop overrides for hosters no longer in the list
    removed = [plugin for plugin in prev_supported if plugin not in self.supported]
    if removed:
        self.log_debug(f"Unload: {', '.join(removed)}")
        for plugin in removed:
            self.unload_plugin(plugin)

    if not self.supported and not new_supported:
        self.log_error(self._("No {} loaded").format(self.plugintype))
        return

    #: Inject plugin plugin
    self.log_debug(
        "Overwritten {}s: {}".format(
            self.plugintype, ", ".join(sorted(self.supported))
        )
    )

    for plugin in self.supported:
        hdict = self.pyload.plugin_manager.plugins[self.plugintype][plugin]
        hdict["new_module"] = self.pluginmodule
        hdict["new_name"] = self.classname

    if excluded:
        self.log_info(
            self._("{}s not overwritten: {}").format(
                self.plugintype.capitalize(), ", ".join(sorted(excluded))
            )
        )

    if new_supported:
        plugins = sorted(new_supported)

        self.log_debug(f"New {self.plugintype}s: {', '.join(plugins)}")

        #: Create new regexp
        domains = "|".join(x.replace(".", r"\.") for x in plugins)
        pattern = rf".*(?P<DOMAIN>{domains}).*"

        # keep our original pattern too, when it looks like a real URL pattern
        if (
            hasattr(self.pluginclass, "__pattern__")
            and isinstance(self.pluginclass.__pattern__, str)
            and "://" in self.pluginclass.__pattern__
        ):
            pattern = rf"{self.pluginclass.__pattern__}|{pattern}"

        self.log_debug(f"Pattern: {pattern}")

        hdict = self.pyload.plugin_manager.plugins[self.plugintype][self.classname]
        hdict["pattern"] = pattern
        hdict["re"] = re.compile(pattern)
def get_plugins(self, cached=True):
    """Return the hoster list, honoring the user's include/exclude filter.

    Retries the remote fetch up to 5 times (one minute apart); returns
    [] when every attempt fails.  With *cached* True a previously
    fetched list is returned as-is.
    """
    if cached and self.plugins:
        return self.plugins

    for _ in range(5):
        try:
            plugin_set = set(self._grab_hosters())
            break

        except Exception as exc:
            self.log_warning(
                exc,
                self._("Waiting 1 minute and retry"),
                exc_info=self.pyload.debug > 1,
                stack_info=self.pyload.debug > 2,
            )
            time.sleep(60)
    else:
        # all retries exhausted
        self.log_warning(self._("No hoster list retrieved"))
        return []

    try:
        # apply the mh_mode / mh_list whitelist or blacklist
        mh_mode = self.config.get("mh_mode", "all")
        if mh_mode in ("listed", "unlisted"):
            mh_list = (
                self.config.get("mh_list", "")
                .replace("|", ",")
                .replace(";", ",")
                .split(",")
            )
            config_set = set(mh_list)

            if mh_mode == "listed":
                plugin_set &= config_set
            else:
                plugin_set -= config_set

    except Exception as exc:
        self.log_error(exc)

    self.plugins = list(plugin_set)

    return self.plugins
def unload_plugin(self, plugin):
    """Detach this account's overrides from *plugin*'s registry entry."""
    hdict = self.pyload.plugin_manager.plugins[self.plugintype][plugin]
    #: Reset module
    hdict.pop("pyload", None)
    if "new_module" in hdict:
        del hdict["new_module"]
        hdict.pop("new_name", None)
def reactivate(self, refresh=False):
reloading = self.info["data"].get("hosters") is not None
if self.info["login"]["valid"] is None:
return
else:
interval = self.config.get("mh_interval", 12) * 60 * 60
self.periodical.set_interval(interval)
if self.info["login"]["valid"] is False:
self.fail_count += 1
if self.fail_count < 3:
if reloading:
self.log_error(
self._(
"Could not reload hoster list - invalid account, retry in 5 minutes"
)
)
else:
self.log_error(
self._(
"Could not load hoster list - invalid account, retry in 5 minutes"
)
)
self.periodical.set_interval(timedelta(minutes=5).total_seconds())
else:
if reloading:
self.log_error(
self._(
"Could not reload hoster list - invalid account, deactivating"
)
)
else:
self.log_error(
self._(
"Could not load hoster list - invalid account, deactivating"
)
)
self.deactivate()
return
if not self.logged:
if not self.relogin():
self.fail_count += 1
if self.fail_count < 3:
if reloading:
self.log_error(
self._(
"Could not reload hoster list - login failed, retry in 5 minutes"
)
)
else:
self.log_error(
self._(
"Could not load hoster list - login failed, retry in 5 minutes"
)
)
self.periodical.set_interval(timedelta(minutes=5).total_seconds())
else:
if reloading:
self.log_error(
self._(
"Could not reload hoster list - login failed, deactivating"
)
)
else:
self.log_error(
self._(
"Could not load hoster list - login failed, deactivating"
)
)
self.deactivate()
return
self.pyload.addon_manager.add_event("plugin_updated", self.plugins_updated)
if refresh or not reloading:
if not self.get_plugins(cached=False):
self.fail_count += 1
if self.fail_count < 3:
self.log_error(
self._(
"Failed to load hoster list for user `{}`, retry in 5 minutes"
).format(self.user)
)
self.periodical.set_interval(timedelta(minutes=5).total_seconds())
else:
self.log_error(
self._(
"Failed to load hoster list for user `{}`, deactivating"
).format(self.user)
)
self.deactivate()
return
if self.fail_count:
self.fail_count = 0
interval = timedelta(
hours=self.config.get("mh_interval", 12)
).total_seconds()
self.periodical.set_interval(interval)
self._override()
def deactivate(self):
    """
    Undo every plugin override installed by this account and restore
    the default hoster pattern for the multi-hoster plugin itself.
    """
    self.log_info(self._("Reverting back to default hosters"))
    self.pyload.addon_manager.remove_event("plugin_updated", self.plugins_updated)
    self.periodical.stop()
    self.fail_count = 0
    if self.supported:
        self.log_debug(f"Unload: {', '.join(self.supported)}")
        for name in self.supported:
            self.unload_plugin(name)
    # Restore the plugin's original URL pattern (or a never-matching one).
    pattern = getattr(self.pluginclass, "__pattern__", r"^unmatchable$")
    entry = self.pyload.plugin_manager.plugins[self.plugintype][self.classname]
    entry["pattern"] = pattern
    entry["re"] = re.compile(pattern)
def update_accounts(self, user, password=None, options=None):
    """Update the account credentials/options and schedule reactivation.

    The first call only arms ``need_reactivate``; later calls restart
    the periodical (2s delay, threaded) so the hoster list is reloaded
    with the new credentials.
    """
    # `options` previously defaulted to a shared mutable {}; a None
    # sentinel avoids the mutable-default pitfall while passing the same
    # effective value to the base class.
    super().update_accounts(user, password, {} if options is None else options)
    if self.need_reactivate:
        interval = timedelta(
            hours=self.config.get("mh_interval", 12)
        ).total_seconds()
        self.periodical.restart(interval, threaded=True, delay=2)
    self.need_reactivate = True
def remove_account(self, user):
    """Tear down this account: revert all overrides, then remove it."""
    # Deactivate first (restores the plugin registry entries and default
    # pattern) before the base class deletes the account data.
    self.deactivate()
    super().remove_account(user)
|
migrations | 0167_feature_flag_override | # Generated by Django 3.1.12 on 2021-09-06 18:59
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Introduces per-user feature flag overrides: a user can pin a feature
    # flag to a specific JSON value within one team, with at most one
    # override per (user, feature_flag, team) combination.

    dependencies = [
        ("posthog", "0166_plugin_public_jobs"),
    ]

    operations = [
        migrations.CreateModel(
            name="FeatureFlagOverride",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Arbitrary JSON value the flag evaluates to for this user.
                ("override_value", models.JSONField()),
                (
                    "feature_flag",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="posthog.featureflag",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "team",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="posthog.team"
                    ),
                ),
            ],
        ),
        # Enforce uniqueness of (user, feature_flag, team) at the DB level.
        migrations.AddConstraint(
            model_name="featureflagoverride",
            constraint=models.UniqueConstraint(
                fields=("user", "feature_flag", "team"),
                name="unique feature flag for a user/team combo",
            ),
        ),
    ]
|
sequencer | seq | import copy
import math
from sglib import constants
from sglib.lib import util
from sglib.lib.translate import _
from sglib.lib.util import *
from sglib.log import LOG
from sglib.math import clip_min, clip_value
from sglib.models import theme
from sglib.models.daw import *
from sgui import shared as glbl_shared
from sgui import widgets
from sgui.daw import shared
from sgui.daw import strings as daw_strings
from sgui.daw.lib import item as item_lib
from sgui.daw.shared import *
from sgui.plugins import *
from sgui.sgqt import *
from sgui.util import get_font
from . import _shared, atm_context_menu, context_menu, header_context_menu
from .atm_item import SeqAtmItem
from .item import SequencerItem
from .midi_file_dialog import midi_file_dialog
def sequence_editor_set_delete_mode(a_enabled):
    """Enable or disable the sequencer's delete (eraser) mode.

    Entering delete mode disables rubber-band dragging; leaving it
    restores rubber-band selection and clears the selection memory.
    """
    sequencer = shared.SEQUENCER
    if a_enabled:
        sequencer.setDragMode(QGraphicsView.DragMode.NoDrag)
        _shared.SEQUENCE_EDITOR_DELETE_MODE = True
        return
    sequencer.setDragMode(QGraphicsView.DragMode.RubberBandDrag)
    _shared.SEQUENCE_EDITOR_DELETE_MODE = False
    sequencer.selected_item_strings = set()
class Silhouette(QGraphicsRectItem):
    """Drop-target preview rectangle shown while dragging over the sequencer.

    A single item-cell-sized rectangle that snaps to the beat/track grid
    under the drag cursor.
    """

    def __init__(self):
        super().__init__()
        width = float(_shared.SEQUENCER_PX_PER_BEAT)
        height = float(shared.SEQUENCE_EDITOR_TRACK_HEIGHT)
        self.setRect(QtCore.QRectF(0.0, 0.0, width, height))
        self.setBrush(
            QColor(theme.SYSTEM_COLORS.daw.drag_drop_silhouette),
        )

    def quantize(self, pos):
        """Snap this rectangle to the grid cell containing scene *pos*."""
        beat, track = _shared.pos_to_beat_and_track(pos)
        x = beat * _shared.SEQUENCER_PX_PER_BEAT
        y = (
            track * shared.SEQUENCE_EDITOR_TRACK_HEIGHT
            + _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
        )
        self.setPos(x, y)
class ItemSequencer(QGraphicsView, HoverCursorChange):
"""This is the sequencer QGraphicsView and QGraphicsScene on
the "Sequencer" tab
"""
def __init__(self):
    """Build the sequencer view: scene, event hooks, actions, defaults."""
    QGraphicsView.__init__(self)
    # Transient interaction state for draw-mode / point-drag gestures.
    self._is_drawing = False
    self._is_point_moving = False
    self.setToolTip(daw_strings.sequencer)
    self.setCacheMode(QGraphicsView.CacheModeFlag.CacheBackground)
    self.setViewportUpdateMode(
        QGraphicsView.ViewportUpdateMode.SmartViewportUpdate,
    )
    self.setOptimizationFlag(
        QGraphicsView.OptimizationFlag.DontSavePainterState,
    )
    # Antialiasing is a theme option; when off, tell Qt not to compensate.
    if theme.SYSTEM_COLORS.daw.seq_antialiasing:
        self.setRenderHint(QPainter.RenderHint.Antialiasing)
    else:
        self.setOptimizationFlag(
            QGraphicsView.OptimizationFlag.DontAdjustForAntialiasing,
        )
    # The below code is broken on Qt5.3.<=2, so not using it for
    # now, but this will obviously be quite desirable some day
    # self.opengl_widget = QOpenGLWidget()
    # self.surface_format = QSurfaceFormat()
    # self.surface_format.setRenderableType(QSurfaceFormat.OpenGL)
    # #self.surface_format.setSamples(4)
    # #self.surface_format.setSwapInterval(10)
    # self.opengl_widget.setFormat(self.surface_format)
    # self.setViewport(self.opengl_widget)
    self.ignore_moves = False
    self.ignore_selection_change = False
    # Playback position in beats; *_orig remembers where playback started.
    self.playback_pos = 0.0
    self.playback_pos_orig = 0.0
    # Selection memory: string identities survive full redraws.
    self.selected_item_strings = set()
    self.selected_point_strings = set()
    self.clipboard = []
    self.automation_points = []
    self.sequence_clipboard = None
    # Automation (ATM) interaction state.
    self.current_atm_point = None
    self.atm_select_pos_x = None
    self.atm_select_track = None
    self.atm_delete = False
    self.current_coord = None
    self.current_item = None
    self.reset_line_lists()
    self.h_zoom = 1.0
    self.v_zoom = 1.0
    self.header_y_pos = 0.0
    self.scene = QGraphicsScene(self)
    # self.scene.setItemIndexMethod(QGraphicsScene.NoIndex)
    self.scene.setItemIndexMethod(
        QGraphicsScene.ItemIndexMethod.BspTreeIndex,
    )
    # Route scene-level events into this view's own handlers.
    self.scene.dropEvent = self.sceneDropEvent
    self.scene.dragEnterEvent = self.sceneDragEnterEvent
    self.scene.dragMoveEvent = self.sceneDragMoveEvent
    self.scene.dragLeaveEvent = self.sceneDragLeaveEvent
    self.scene.contextMenuEvent = self.sceneContextMenuEvent
    self.scene.setBackgroundBrush(
        QColor(
            theme.SYSTEM_COLORS.daw.seq_background,
        )
    )
    # self.scene.selectionChanged.connect(self.highlight_selected)
    self.scene.mouseMoveEvent = self.sceneMouseMoveEvent
    self.scene.mouseReleaseEvent = self.sceneMouseReleaseEvent
    # self.scene.selectionChanged.connect(self.set_selected_strings)
    self.setAcceptDrops(True)
    self.setScene(self.scene)
    self.scene.installEventFilter(self)
    self.audio_items = []
    self.track = 0
    self.gradient_index = 0
    self.playback_px = 0.0
    # self.draw_header(0)
    self.setAlignment(
        QtCore.Qt.AlignmentFlag.AlignTop | QtCore.Qt.AlignmentFlag.AlignLeft
    )
    self.setDragMode(QGraphicsView.DragMode.RubberBandDrag)
    self.is_playing = False
    self.reselect_on_stop = []
    self.playback_cursor = None
    self.setVerticalScrollBarPolicy(
        QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff,
    )
    self.setHorizontalScrollBarPolicy(
        QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff,
    )
    # The shared context-menu singletons must exist before their actions
    # can be added to this widget.
    _shared.init()
    atm_context_menu.init()
    context_menu.init()
    self.addAction(_shared.copy_action)
    self.addAction(_shared.delete_action)
    self.addAction(atm_context_menu.break_atm_action)
    self.addAction(atm_context_menu.unbreak_atm_action)
    self.addAction(context_menu.cut_action)
    self.addAction(context_menu.glue_action)
    self.addAction(context_menu.rename_action)
    self.addAction(context_menu.transpose_action)
    self.addAction(context_menu.unlink_action)
    self.addAction(context_menu.unlink_selected_action)
    self.addAction(context_menu.unlink_unique_action)
    self.context_menu_enabled = True
def eventFilter(self, obj, event):
    """Intercept scene wheel events for zooming.

    CTRL+wheel zooms horizontally, SHIFT+wheel vertically.  Returns
    True when the event was consumed, False to let Qt handle it.
    """
    if event.type() != QtCore.QEvent.Type.GraphicsSceneWheel:
        return False
    delta = event.delta()
    # Ignore tiny deltas (e.g. high-resolution touchpad noise).
    if abs(delta) <= 20:
        return False
    zoom_in = delta > 0
    modifiers = event.modifiers()
    if modifiers == QtCore.Qt.KeyboardModifier.ControlModifier:
        shared.SEQ_WIDGET.inc_hzoom(zoom_in)
        event.accept()
        return True
    if modifiers == QtCore.Qt.KeyboardModifier.ShiftModifier:
        shared.SEQ_WIDGET.inc_vzoom(zoom_in)
        event.accept()
        return True
    return False
def show_context_menu(self):
    """Pop up the item or automation context menu for the current mode.

    Suppressed during playback, and once after an event that disabled
    the menu (the flag re-arms itself).
    """
    if glbl_shared.IS_PLAYING:
        return
    if not self.context_menu_enabled:
        self.context_menu_enabled = True
        return
    mode = _shared.SEQUENCE_EDITOR_MODE
    if mode == 0:
        context_menu.MENU.exec(QCursor.pos())
    elif mode == 1:
        atm_context_menu.MENU.exec(QCursor.pos())
    self.context_menu_enabled = False
def get_item(self, a_pos):
    """Return the topmost SequencerItem at scene position *a_pos*, or None."""
    return next(
        (x for x in self.scene.items(a_pos) if isinstance(x, SequencerItem)),
        None,
    )
def get_point(self, a_pos):
    """Return the topmost SeqAtmItem at scene position *a_pos*, or None."""
    return next(
        (x for x in self.scene.items(a_pos) if isinstance(x, SeqAtmItem)),
        None,
    )
def check_header(self, a_pos):
    """Return True when the timeline header is among the items at *a_pos*."""
    return any(x == self.header for x in self.scene.items(a_pos))
def _mp_item_draw(self, event, pos):
    """Handle a draw-mode mouse press in item mode.

    On empty space, create (or, with CTRL and a single-item clipboard,
    re-reference) an item at the clicked beat/track and start a draw
    drag; on an existing item, fall back to default press handling.
    """
    if self.get_item(pos):
        QGraphicsView.mousePressEvent(self, event)
        return
    self.scene.clearSelection()
    pos_x = pos.x()
    pos_y = pos.y() - _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
    f_beat = float(pos_x // _shared.SEQUENCER_PX_PER_BEAT)
    f_track = int(pos_y // shared.SEQUENCE_EDITOR_TRACK_HEIGHT)
    if (
        event.modifiers() == (QtCore.Qt.KeyboardModifier.ControlModifier)
        and len(_shared.SEQUENCE_CLIPBOARD) == 1
    ):
        # CTRL+draw with exactly one clipboard entry: reuse that item.
        item_ref = _shared.SEQUENCE_CLIPBOARD[0]
        f_uid = item_ref.item_uid
        length = item_ref.length_beats
    else:
        # Otherwise create a fresh, empty item named after the track.
        f_item_name = "{}-1".format(shared.TRACK_NAMES[f_track])
        f_uid = constants.DAW_PROJECT.create_empty_item(
            f_item_name,
        )
        length = _shared.LAST_ITEM_LENGTH
    self.draw_item_ref = sequencer_item(
        f_track,
        f_beat,
        length,
        f_uid,
    )
    shared.CURRENT_SEQUENCE.add_item_ref_by_uid(self.draw_item_ref)
    self.selected_item_strings = {str(self.draw_item_ref)}
    constants.DAW_PROJECT.save_sequence(shared.CURRENT_SEQUENCE)
    constants.DAW_PROJECT.commit(_("Add new item ref"))
    shared.SEQ_WIDGET.open_sequence()
    # Subsequent mouse moves stamp more copies (see _mm_item_draw).
    self._is_drawing = True
    self._draw_start_beat = f_beat
def _mp_atm_draw(self, event, pos):
    """Handle a draw-mode mouse press in automation mode.

    Adds a beat-quantized automation point on the clicked track (when
    that track has an automation lane selected) and begins dragging it.
    """
    if self.current_coord is None:
        return
    port, index = shared.TRACK_PANEL.has_automation(
        self.current_coord[0],
    )
    if port is None:
        # No automation lane on this track; default press handling.
        event.accept()
        QGraphicsView.mousePressEvent(self, event)
    else:
        track, beat, val = self.current_coord
        beat = _shared.quantize(beat)
        point = DawAtmPoint(
            beat,
            port,
            val,
            *shared.TRACK_PANEL.get_atm_params(track),
        )
        shared.ATM_SEQUENCE.add_point(point)
        self.scene.clearSelection()
        point_item = self.draw_point(point)
        point_item.setSelected(True)
        self.set_selected_point_strings()
        point_item.is_moving = True
        self.current_atm_point = point_item
        point_pos = point_item.pos()
        # Move it under the mouse cursor so the event will propagate to it
        point_item.setPos(
            pos.x() - (point_item.rect().width() * 0.5),
            pos.y() - (point_item.rect().height() * 0.5),
        )
        QGraphicsView.mousePressEvent(self, event)
        # Move it back to its quantized position
        point_item.setPos(point_pos)
def mousePressEvent(self, a_event):
    """Top-level mouse-press dispatcher.

    Routes by sequence-editor mode (0 = items, 1 = automation) and by
    the global editor tool (select / draw / erase / split).  Header
    clicks reposition playback; everything is ignored while playing.
    """
    if glbl_shared.IS_PLAYING:
        return
    f_pos = self.mapToScene(qt_event_pos(a_event))
    self.current_coord = self.get_item_coord(f_pos)
    self._start_pos = f_pos
    # Ignore clicks beyond the end of the sequence.
    if f_pos.x() > self.max_x:
        return
    # Left-clicking the timeline header repositions playback.
    if self.check_header(f_pos):
        if a_event.button() == QtCore.Qt.MouseButton.LeftButton:
            f_beat = int(f_pos.x() / _shared.SEQUENCER_PX_PER_BEAT)
            global_set_playback_pos(f_beat)
        return
    if self.ignore_moves:
        # One-shot guard: swallow this press and restore a sane h-zoom.
        self.ignore_moves = False
        shared.SEQ_WIDGET.force_hzoom(3)
        return
    if a_event.button() == QtCore.Qt.MouseButton.RightButton:
        if self.current_coord:
            if _shared.SEQUENCE_EDITOR_MODE == 0:
                # Select the clicked item (if not already selected)
                # before showing its context menu.
                self.current_item = self.get_item(f_pos)
                if self.current_item and not self.current_item.isSelected():
                    self.scene.clearSelection()
                    self.current_item.setSelected(True)
                    self.selected_item_strings = {
                        self.current_item.get_selected_string(),
                    }
            self.show_context_menu()
        a_event.accept()
        QGraphicsView.mousePressEvent(self, a_event)
    elif _shared.SEQUENCE_EDITOR_MODE == 0:
        # Item mode.
        self.current_item = self.get_item(f_pos)
        self.setDragMode(QGraphicsView.DragMode.RubberBandDrag)
        if shared.EDITOR_MODE == shared.EDITOR_MODE_SELECT:
            f_item = self.get_item(f_pos)
            if not f_item:
                a_event.accept()
                QGraphicsView.mousePressEvent(self, a_event)
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_DRAW:
            self._mp_item_draw(a_event, f_pos)
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_ERASE:
            # Begin collecting items swept by the eraser.
            self.deleted_items = []
            sequence_editor_set_delete_mode(True)
            a_event.accept()
            QGraphicsView.mousePressEvent(self, a_event)
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_SPLIT:
            QGraphicsView.mousePressEvent(self, a_event)
            return
    elif _shared.SEQUENCE_EDITOR_MODE == 1:
        # Automation mode.
        self.setDragMode(QGraphicsView.DragMode.NoDrag)
        self.atm_select_pos_x = None
        self.atm_select_track = None
        if shared.EDITOR_MODE == shared.EDITOR_MODE_SELECT:
            self.current_coord = self.get_item_coord(f_pos, True)
            self._is_point_moving = bool(self.get_point(f_pos))
            if not self._is_point_moving:
                # Start an x-range selection sweep on this track.
                self.scene.clearSelection()
                self.atm_select_pos_x = f_pos.x()
                self.atm_select_track = self.current_coord[0]
            a_event.accept()
            QGraphicsView.mousePressEvent(self, a_event)
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_ERASE:
            self.current_coord = self.get_item_coord(f_pos, True)
            self.scene.clearSelection()
            self.atm_select_pos_x = f_pos.x()
            self.atm_select_track = self.current_coord[0]
            self.atm_delete = True
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_DRAW:
            self._mp_atm_draw(a_event, f_pos)
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_SPLIT:
            a_event.accept()
            QGraphicsView.mousePressEvent(self, a_event)
    else:
        raise NotImplementedError("Did not expect to hit this else statement")
def _mm_item_draw(self, event):
    """Extend a draw-mode drag: stamp a copy of the item being drawn at
    the beat under the cursor, one copy per beat column."""
    scene_pos = event.scenePos()
    probe = QtCore.QPointF(scene_pos.x(), self._start_pos.y())
    # Never stamp on top of an existing item in this row.
    if self.get_item(probe):
        return
    beat = float(scene_pos.x() // _shared.SEQUENCER_PX_PER_BEAT)
    # Only draw forward from the beat where the drag started.
    if beat <= self._draw_start_beat:
        return
    ref = self.draw_item_ref
    ref.start_beat = beat
    shared.CURRENT_SEQUENCE.add_item_ref_by_uid(copy.deepcopy(ref))
    self.selected_item_strings.add(str(ref))
    constants.DAW_PROJECT.save_sequence(shared.CURRENT_SEQUENCE)
    constants.DAW_PROJECT.commit(_("Add new item ref"))
    shared.SEQ_WIDGET.open_sequence()
def _mm_atm_select(self, event):
if self.atm_select_pos_x is None:
return
f_pos_x = event.scenePos().x()
f_vals = sorted((f_pos_x, self.atm_select_pos_x))
for f_item in self.get_all_points(self.atm_select_track):
f_item_pos_x = f_item.pos().x()
if f_item_pos_x >= f_vals[0] and f_item_pos_x <= f_vals[1]:
f_item.setSelected(True)
else:
f_item.setSelected(False)
def sceneMouseMoveEvent(self, a_event):
    """Scene-level mouse move: default handling plus mode-specific
    eraser sweeps, draw-mode stamping and automation range selection."""
    QGraphicsScene.mouseMoveEvent(self.scene, a_event)
    if _shared.SEQUENCE_EDITOR_MODE == 0:
        if _shared.SEQUENCE_EDITOR_DELETE_MODE:
            # Eraser sweep: hide and queue each item under the cursor.
            f_item = self.get_item(a_event.scenePos())
            if f_item and not f_item.audio_item in self.deleted_items:
                f_item.hide()
                self.deleted_items.append(f_item.audio_item)
        elif self._is_drawing:
            self._mm_item_draw(a_event)
    elif _shared.SEQUENCE_EDITOR_MODE == 1:
        if shared.EDITOR_MODE == shared.EDITOR_MODE_SELECT:
            if not self._is_point_moving:
                self._mm_atm_select(a_event)
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_ERASE:
            self._mm_atm_select(a_event)
def sceneMouseReleaseEvent(self, a_event):
    """Scene-level mouse release: commit pending erase/draw edits and
    reset transient automation-interaction state."""
    self._is_drawing = False
    if _shared.SEQUENCE_EDITOR_MODE == 0:
        if _shared.SEQUENCE_EDITOR_DELETE_MODE:
            # Commit the eraser sweep as one undoable operation.
            sequence_editor_set_delete_mode(False)
            self.scene.clearSelection()
            self.selected_item_strings = set()
            for f_item in self.deleted_items:
                shared.CURRENT_SEQUENCE.remove_item_ref(f_item)
            constants.DAW_PROJECT.save_sequence(shared.CURRENT_SEQUENCE)
            constants.DAW_PROJECT.commit("Delete sequencer items")
            self.open_sequence()
            glbl_shared.clean_audio_pool()
        else:
            QGraphicsScene.mouseReleaseEvent(self.scene, a_event)
    elif _shared.SEQUENCE_EDITOR_MODE == 1:
        if self.atm_delete:
            self.delete_selected_atm(self.atm_select_track)
        elif shared.EDITOR_MODE == shared.EDITOR_MODE_DRAW:
            if self.current_atm_point:
                # Finalize the point that was being drag-placed.
                self.current_atm_point.set_point_value()
                self.current_atm_point = None
            self.automation_save_callback()
            QGraphicsScene.mouseReleaseEvent(self.scene, a_event)
        else:
            QGraphicsScene.mouseReleaseEvent(self.scene, a_event)
        # Reset the sweep/erase state for the next gesture.
        self.atm_select_pos_x = None
        self.atm_select_track = None
        self.atm_delete = False
def get_item_coord(self, a_pos, a_clip=False):
    """Convert a scene position to ``(track, beat, value)``.

    ``value`` is an automation value in 0..127.  Returns None when the
    position is outside the track area and *a_clip* is False; with
    *a_clip* the coordinates are clamped into range instead.
    """
    x = a_pos.x()
    y = a_pos.y() - _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
    inside = x > 0 and 0 < y < _shared.SEQUENCE_EDITOR_TOTAL_HEIGHT
    if not (a_clip or inside):
        return None
    x = clip_min(x, 0.0)
    y = clip_value(
        y,
        0.0,
        _shared.SEQUENCE_EDITOR_TOTAL_HEIGHT,
    )
    track = int(y / shared.SEQUENCE_EDITOR_TRACK_HEIGHT)
    # The value axis excludes the point diameter so a point at 0 or 127
    # still fits inside the track lane.
    usable_height = (
        shared.SEQUENCE_EDITOR_TRACK_HEIGHT - _shared.ATM_POINT_DIAMETER
    )
    offset_in_track = y - (track * shared.SEQUENCE_EDITOR_TRACK_HEIGHT)
    value = (1.0 - (offset_in_track / usable_height)) * 127.0
    beat = x / _shared.SEQUENCER_PX_PER_BEAT
    return track, round(beat, 6), round(value, 6)
def delete_selected_atm(self, a_track):
    """Copy, then delete, the selected automation points on *a_track*."""
    _shared.copy_selected()
    doomed = list(self.get_selected_points(a_track))
    self.scene.clearSelection()
    self.selected_point_strings = set()
    for point in doomed:
        self.automation_points.remove(point)
        shared.ATM_SEQUENCE.remove_point(point.item)
    self.automation_save_callback()
def get_selected_items(self):
    """Return the currently selected sequencer items."""
    return [item for item in self.audio_items if item.isSelected()]
def restore_selected(self):
    """Re-select items whose identity strings were previously remembered."""
    remembered = self.selected_item_strings
    for item in self.audio_items:
        if item.get_selected_string() in remembered:
            item.setSelected(True)
def set_selected_strings(self):
    """Snapshot the identity strings of the current item selection."""
    if self.ignore_selection_change:
        return
    selected = self.get_selected_items()
    self.selected_item_strings = {item.get_selected_string() for item in selected}
def clear_selected_item_strings(self):
    """Forget all remembered item-selection strings."""
    # Mutates the existing set in place rather than rebinding it.
    self.selected_item_strings.clear()
def set_selected_point_strings(self):
    """Snapshot the selected automation points as identity strings."""
    selected = self.get_selected_points()
    self.selected_point_strings = {str(point.item) for point in selected}
def get_all_points(self, a_track=None):
    """Yield automation points, optionally only those on *a_track*."""
    uid_map = shared.TRACK_PANEL.plugin_uid_map
    if a_track is None:
        yield from self.automation_points
        return
    track = int(a_track)
    for point in self.automation_points:
        if uid_map[point.item.index] == track:
            yield point
def get_selected_points(self, a_track=None):
    """Yield selected automation points, optionally filtered to *a_track*."""
    uid_map = shared.TRACK_PANEL.plugin_uid_map
    if a_track is None:
        for point in self.automation_points:
            if point.isSelected():
                yield point
        return
    track = int(a_track)
    for point in self.automation_points:
        if uid_map[point.item.index] == track and point.isSelected():
            yield point
def atm_select_all(self, track_num):
    """Select every automation point on *track_num*."""
    uid_map = shared.TRACK_PANEL.plugin_uid_map
    on_track = (
        p for p in self.automation_points
        if uid_map[p.item.index] == track_num
    )
    for point in on_track:
        point.setSelected(True)
def atm_select_left(self, track_num, beat):
    """Select *track_num*'s automation points strictly before *beat*."""
    uid_map = shared.TRACK_PANEL.plugin_uid_map
    matching = (
        p for p in self.automation_points
        if uid_map[p.item.index] == track_num and p.item.beat < beat
    )
    for point in matching:
        point.setSelected(True)
def atm_select_right(self, track_num, beat):
    """Select *track_num*'s automation points at or after *beat*."""
    uid_map = shared.TRACK_PANEL.plugin_uid_map
    matching = (
        p for p in self.automation_points
        if uid_map[p.item.index] == track_num and p.item.beat >= beat
    )
    for point in matching:
        point.setSelected(True)
def open_sequence(self):
    """Redraw the whole sequence: items, selection, automation, header.

    Painting is suspended during the rebuild and the horizontal scroll
    position is restored afterwards.
    """
    if _shared.SEQUENCE_EDITOR_MODE == 0:
        shared.SEQUENCER.setDragMode(QGraphicsView.DragMode.NoDrag)
    elif _shared.SEQUENCE_EDITOR_MODE == 1:
        shared.SEQUENCER.setDragMode(
            QGraphicsView.DragMode.RubberBandDrag,
        )
    self.enabled = False
    shared.ATM_SEQUENCE = constants.DAW_PROJECT.get_atm_sequence()
    f_items_dict = constants.DAW_PROJECT.get_items_dict()
    # Preserve scroll position across the rebuild.
    f_scrollbar = self.horizontalScrollBar()
    f_scrollbar_value = f_scrollbar.value()
    self.setUpdatesEnabled(False)
    self.clear_drawn_items()
    self.ignore_selection_change = True
    # , key=lambda x: x.bar_num,
    _shared.CACHED_SEQ_LEN = get_current_sequence_length()
    for f_item in sorted(shared.CURRENT_SEQUENCE.items, reverse=True):
        # Skip refs past the sequence end; re-select previously selected
        # items by their identity string.
        if f_item.start_beat < get_current_sequence_length():
            f_item_name = f_items_dict.get_name_by_uid(f_item.item_uid)
            f_new_item = self.draw_item(f_item_name, f_item)
            if f_new_item.get_selected_string() in self.selected_item_strings:
                f_new_item.setSelected(True)
    self.ignore_selection_change = False
    if _shared.SEQUENCE_EDITOR_MODE == 1:
        self.open_atm_sequence()
        shared.TRACK_PANEL.update_ccs_in_use()
    f_scrollbar.setValue(int(f_scrollbar_value))
    self.setUpdatesEnabled(True)
    self.update()
    self.enabled = True
    shared.ITEMLIST.open()
def open_atm_sequence(self):
    """Draw every automated track's points and their connecting lines."""
    self.atm_paths = {}
    for track in shared.TRACK_PANEL.tracks:
        port, plugin_index = shared.TRACK_PANEL.has_automation(track)
        if port is None:
            continue
        points = shared.ATM_SEQUENCE.get_points(plugin_index, port)
        if not points:
            continue
        point_items = [self.draw_point(point) for point in points]
        self.draw_atm_lines(track, point_items)
def draw_atm_lines(self, a_track_num, a_points):
    """Draw the polyline connecting one plugin's automation points,
    honoring each point's ``break_after`` flag as a gap in the line."""
    plugin_uid = a_points[0].item.index
    path = QPainterPath()
    point = a_points[0]
    pos = point.scenePos()
    x = pos.x() + _shared.ATM_POINT_RADIUS
    y = pos.y() + _shared.ATM_POINT_RADIUS
    # Lead-in from the left scene edge to the first point.
    path.moveTo(0.0, y)
    path.lineTo(x, y)
    break_after = point.item.break_after
    for point in a_points[1:]:
        pos = point.scenePos()
        x = pos.x() + _shared.ATM_POINT_RADIUS
        y = pos.y() + _shared.ATM_POINT_RADIUS
        if break_after:
            # The previous point breaks the line: jump without drawing.
            path.moveTo(x, y)
        else:
            path.lineTo(x, y)
        break_after = point.item.break_after
    if not break_after:
        # Extend the last value to the right edge of the scene.
        path.lineTo(self.sceneRect().right(), y)
    path_item = QGraphicsPathItem(path)
    path_item.setPen(
        QColor(
            theme.SYSTEM_COLORS.daw.seq_atm_line,
        ),
    )
    # path_item.setBrush(QtCore.Qt.GlobalColor.white)
    self.scene.addItem(path_item)
    self.atm_paths[plugin_uid] = path_item
def remove_atm_path(self, a_plugin_uid):
    """Remove the drawn automation line for *a_plugin_uid*, if any."""
    path_item = self.atm_paths.pop(a_plugin_uid, None)
    if path_item is not None:
        self.scene.removeItem(path_item)
def reset_line_lists(self):
    """Drop cached beat-number text and beat-line graphics references."""
    self.text_list, self.beat_line_list = [], []
def prepare_to_quit(self):
    """Release all scene items before application shutdown."""
    # NOTE(review): the selection is cleared before clear() — presumably
    # to avoid selection callbacks firing on destroyed items; order kept.
    self.scene.clearSelection()
    self.scene.clear()
def keyPressEvent(self, a_event):
    """Handle the Delete key directly; defer all other keys to Qt."""
    # Done this way to prevent the sequence editor from grabbing the key
    if a_event.key() == QtCore.Qt.Key.Key_Delete:
        self.delete_selected()
    else:
        QGraphicsView.keyPressEvent(self, a_event)
    QApplication.restoreOverrideCursor()
def set_header_y_pos(self, a_y=None):
    """Pin the timeline header at *a_y* (or at its last stored position)."""
    if a_y is None:
        y = self.header_y_pos
    else:
        self.header_y_pos = y = a_y
    self.header.setPos(0.0, y - 2.0)
def get_selected(self):
    """Return the currently selected sequencer items."""
    return list(filter(lambda item: item.isSelected(), self.audio_items))
def resizeEvent(self, a_event):
    """Pass through to the default QGraphicsView resize handling."""
    QGraphicsView.resizeEvent(self, a_event)
def sceneContextMenuEvent(self, a_event):
    """Show the mode-appropriate context menu, unless playing."""
    if glbl_shared.IS_PLAYING:
        return
    # Give individual scene items a chance to handle the event first.
    QGraphicsScene.contextMenuEvent(self.scene, a_event)
    self.show_context_menu()
def highlight_selected(self):
    """Refresh every item's (or point's) brush for the active editor mode."""
    self.setUpdatesEnabled(False)
    self.has_selected = False
    mode = _shared.SEQUENCE_EDITOR_MODE
    if mode == 0:
        targets = self.audio_items
    elif mode == 1:
        targets = list(self.get_all_points())
    else:
        targets = []
    for target in targets:
        target.set_brush()
        self.has_selected = True
    self.setUpdatesEnabled(True)
    self.update()
def sceneDragEnterEvent(self, a_event):
    """Accept the drag and show a grid-snapped silhouette under the cursor."""
    a_event.setAccepted(True)
    self.last_drag_item = None
    self.silhouette = Silhouette()
    self.scene.addItem(self.silhouette)
    self.silhouette.quantize(a_event.scenePos())
def sceneDragMoveEvent(self, a_event):
    """Track a drag: highlight the hovered item, or snap the silhouette
    to the grid when hovering empty space."""
    self.silhouette.quantize(a_event.scenePos())
    a_event.setDropAction(QtCore.Qt.DropAction.CopyAction)
    hovered = self.get_item(a_event.scenePos())
    if hovered:
        if hovered != self.last_drag_item:
            self.silhouette.hide()
            hovered.set_brush(override=True)
            if self.last_drag_item:
                self.last_drag_item.set_brush()
            self.last_drag_item = hovered
    else:
        self.silhouette.show()
        if self.last_drag_item:
            self.last_drag_item.set_brush()
def sceneDragLeaveEvent(self, a_event):
    """Hide the drop silhouette and restore any highlighted item's brush."""
    self.silhouette.hide()
    if self.last_drag_item:
        self.last_drag_item.set_brush()
    QGraphicsScene.dragLeaveEvent(self.scene, a_event)
def check_running(self):
    """Return True when the engine is currently playing.

    Used by drop/add handlers to refuse edits during playback.
    """
    # bool() keeps the strict True/False return of the former
    # if/return-True/return-False chain.
    return bool(glbl_shared.IS_PLAYING)
def sceneDropEvent(self, a_event):
    """Accept a drop of audio files, an existing item, or one MIDI file.

    The payload arrives through the shared ``*_TO_DROP`` globals, which
    are cleared when handling finishes.
    """
    self.silhouette.hide()
    LOG.info(
        [
            shared.AUDIO_ITEMS_TO_DROP,
            shared.MIDI_FILES_TO_DROP,
            shared.ITEM_TO_DROP,
        ]
    )
    f_pos = a_event.scenePos()
    if shared.AUDIO_ITEMS_TO_DROP:
        self.add_audio_items(f_pos, shared.AUDIO_ITEMS_TO_DROP)
    elif shared.ITEM_TO_DROP:
        self.add_existing_item(a_event, shared.ITEM_TO_DROP._uid)
    elif shared.MIDI_FILES_TO_DROP:
        # Only a single MIDI file is supported per drop.
        if len(shared.MIDI_FILES_TO_DROP) != 1:
            QMessageBox.warning(
                None,
                "Error",
                "Only one MIDI file can be dropped at a time",
            )
            shared.clear_seq_drop()
            return
        midi_path = shared.MIDI_FILES_TO_DROP[0]
        beat, track_index = _shared.pos_to_beat_and_track(f_pos)
        midi_file_dialog(midi_path, beat, track_index)
    shared.clear_seq_drop()
def replace_item(self, item, uid):
    """Repoint a single sequencer *item* at the item with *uid*,
    then save, commit and redraw the sequence."""
    item.audio_item.item_uid = uid
    constants.DAW_PROJECT.save_sequence(shared.CURRENT_SEQUENCE, a_notify=True)
    constants.DAW_PROJECT.commit("Added sequencer item")
    shared.SEQ_WIDGET.open_sequence()
def replace_all_instances_of_item(self, item_uid, uid):
    """Repoint every sequence reference to *item_uid* at *uid*,
    then save, commit and redraw the sequence."""
    for ref in shared.CURRENT_SEQUENCE.items:
        if ref.item_uid == item_uid:
            ref.item_uid = uid
    constants.DAW_PROJECT.save_sequence(shared.CURRENT_SEQUENCE, a_notify=True)
    constants.DAW_PROJECT.commit("Added sequencer item")
    shared.SEQ_WIDGET.open_sequence()
def add_existing_item(self, event, uid):
    """Drop an existing item (by *uid*) onto the sequencer.

    Dropping onto an occupied cell offers to replace that single item
    or all of its instances; dropping onto empty space places a new
    reference at the drop position with a sensible length.
    """
    if self.check_running():
        return
    self.set_selected_strings()
    scene_pos = event.scenePos()
    override_item = self.get_item(scene_pos)
    if override_item:
        # Occupied cell: let the user choose how to replace.
        menu = QMenu()
        action = menu.addAction("Replace this item")
        action.triggered.connect(lambda: self.replace_item(override_item, uid))
        action = menu.addAction(
            "Replace all instances of this item",
        )
        action.triggered.connect(
            lambda: self.replace_all_instances_of_item(
                override_item.audio_item.item_uid,
                uid,
            )
        )
        menu.exec(QCursor.pos())
        return
    beat, track = _shared.pos_to_beat_and_track(scene_pos)
    item = constants.DAW_PROJECT.get_item_by_uid(uid)
    # Reuse the longest existing reference's length; otherwise derive a
    # length from the item's content at the local tempo.
    refs = {x for x in shared.CURRENT_SEQUENCE.items if x.item_uid == uid}
    if refs:
        length = max(x.length_beats for x in refs)
    else:
        length = item.get_length(
            shared.CURRENT_SEQUENCE.get_tempo_at_pos(beat),
        )
        length = round(length + 0.49)
        if length < 1.0:
            length = 4.0
    item_ref = sequencer_item(track, beat, length, uid)
    shared.CURRENT_SEQUENCE.add_item_ref_by_uid(item_ref)
    # item_lib.save_item_by_uid(uid, item)
    constants.DAW_PROJECT.save_sequence(
        shared.CURRENT_SEQUENCE,
        a_notify=True,
    )
    constants.DAW_PROJECT.commit("Added sequencer item")
    shared.SEQ_WIDGET.open_sequence()
def add_audio_items(self, a_pos, a_item_list, a_single_item=None):
    """Drop audio files onto the sequencer at scene position *a_pos*.

    :param a_item_list: iterable of audio file paths.
    :param a_single_item: None -> ask the user when multiple files are
        dropped; True -> stack all files as lanes of one item; False ->
        one item per file on consecutive tracks.
    """
    if self.check_running():
        return
    if a_single_item is None:
        if len(a_item_list) > 1:
            # Ask how to lay the files out, then re-enter with the choice.
            menu = QMenu()
            multi_action = menu.addAction("Add each file to it's own track")
            multi_action.triggered.connect(
                lambda: self.add_audio_items(a_pos, a_item_list, False)
            )
            single_action = menu.addAction("Add all files to one item on one track")
            single_action.triggered.connect(
                lambda: self.add_audio_items(a_pos, a_item_list, True)
            )
            menu.exec(QCursor.pos())
            return
        else:
            a_single_item = True
    glbl_shared.APP.setOverrideCursor(QtCore.Qt.CursorShape.WaitCursor)
    f_beat_frac, f_track_num = _shared.pos_to_beat_and_track(a_pos)
    f_seconds_per_beat = 60.0 / shared.CURRENT_SEQUENCE.get_tempo_at_pos(
        f_beat_frac
    )
    # Set when a sample is long enough to require an engine restart.
    f_restart = False
    if a_single_item:
        lane_num = 0
        f_item_name = "{}-1".format(shared.TRACK_NAMES[f_track_num])
        f_item_uid = constants.DAW_PROJECT.create_empty_item(f_item_name)
        f_items = constants.DAW_PROJECT.get_item_by_uid(f_item_uid)
        f_item_ref = sequencer_item(
            f_track_num,
            f_beat_frac,
            1.0,
            f_item_uid,
        )
    # FIX: previously unbound when a_item_list was empty, which raised
    # NameError at the last_open_dir assignment below.
    f_file_name_str = None
    for f_file_name in a_item_list:
        glbl_shared.APP.processEvents()
        f_file_name_str = str(f_file_name)
        f_item_name = os.path.basename(f_file_name_str)
        if f_file_name_str:
            if not a_single_item:
                # One fresh item per file, named after the file.
                f_item_uid = constants.DAW_PROJECT.create_empty_item(f_item_name)
                f_items = constants.DAW_PROJECT.get_item_by_uid(f_item_uid)
            f_index = f_items.get_next_index()
            if f_index == -1:
                QMessageBox.warning(
                    self,
                    _("Error"),
                    _(
                        "No more available audio item slots, "
                        "max per sequence is {}"
                    ).format(MAX_AUDIO_ITEM_COUNT),
                )
                break
            f_uid = constants.PROJECT.get_wav_uid_by_name(f_file_name_str)
            f_graph = constants.PROJECT.get_sample_graph_by_uid(f_uid)
            f_delta = datetime.timedelta(seconds=f_graph.length_in_seconds)
            if not f_restart and glbl_shared.add_entropy(f_delta):
                f_restart = True
            f_length = math.ceil(f_graph.length_in_seconds / f_seconds_per_beat)
            if a_single_item:
                # Stack each file on its own lane; grow the single item
                # reference to fit the longest file.
                f_item = DawAudioItem(
                    f_uid,
                    a_start_bar=0,
                    a_start_beat=0.0,
                    a_lane_num=lane_num,
                )
                lane_num += 1
                f_items.add_item(f_index, f_item)
                if f_length > f_item_ref.length_beats:
                    f_item_ref.length_beats = f_length
            else:
                # One item per file, advancing one track per file.
                f_item_ref = sequencer_item(
                    f_track_num,
                    f_beat_frac,
                    f_length,
                    f_item_uid,
                )
                shared.CURRENT_SEQUENCE.add_item_ref_by_uid(f_item_ref)
                f_item = DawAudioItem(
                    f_uid,
                    a_start_bar=0,
                    a_start_beat=0.0,
                    a_lane_num=0,
                )
                f_items.add_item(f_index, f_item)
                item_lib.save_item_by_uid(f_item_uid, f_items)
                f_track_num += 1
                if f_track_num >= TRACK_COUNT_ALL:
                    break
    if a_single_item:
        shared.CURRENT_SEQUENCE.add_item_ref_by_uid(f_item_ref)
        item_lib.save_item_by_uid(f_item_uid, f_items)
    constants.DAW_PROJECT.save_sequence(
        shared.CURRENT_SEQUENCE,
        a_notify=not f_restart,
    )
    constants.DAW_PROJECT.commit("Added audio items")
    shared.SEQ_WIDGET.open_sequence()
    if f_file_name_str is not None:
        self.last_open_dir = os.path.dirname(f_file_name_str)
    if f_restart:
        glbl_shared.restart_engine()
    glbl_shared.APP.restoreOverrideCursor()
def get_beat_value(self):
    """Return the current playback position in beats."""
    return self.playback_pos
def set_playback_pos(self, a_beat=0.0):
    """Move the playback cursor to *a_beat*, auto-scrolling when the
    "follow" checkbox is enabled and the cursor leaves the viewport."""
    f_right = self.sceneRect().right()
    self.playback_pos = float(a_beat)
    f_pos = self.playback_pos * _shared.SEQUENCER_PX_PER_BEAT
    # Don't move the cursor past the right edge of the scene.
    if f_pos > f_right:
        return
    self.playback_cursor.setPos(f_pos, 0.0)
    if glbl_shared.IS_PLAYING and shared.SEQ_WIDGET.follow_checkbox.isChecked():
        f_port_rect = self.viewport().rect()
        f_rect = self.mapToScene(f_port_rect).boundingRect()
        # Scroll only when the cursor left the visible range.
        if not (f_pos > f_rect.left() and f_pos < f_rect.right()):
            f_pos = int(self.playback_pos) * _shared.SEQUENCER_PX_PER_BEAT
            shared.SEQ_WIDGET.scrollbar.setValue(int(f_pos))
def start_playback(self):
    """Freeze interaction and snapshot selection/position for playback."""
    self.setInteractive(False)
    # Remember where playback started so stop_playback() can rewind.
    self.playback_pos_orig = self.playback_pos
    if _shared.SEQUENCE_EDITOR_MODE == 0:
        self.set_selected_strings()
    elif _shared.SEQUENCE_EDITOR_MODE == 1:
        self.set_selected_point_strings()
def set_playback_clipboard(self):
    """Remember the selected items (as strings) to re-select after playback."""
    self.reselect_on_stop = [
        str(item.audio_item)
        for item in self.audio_items
        if item.isSelected()
    ]
def stop_playback(self):
    """Restore selection and playback position; re-enable interaction."""
    self.reset_selection()
    # Rewind to where playback started (recorded by start_playback()).
    global_set_playback_pos(self.playback_pos_orig)
    self.setInteractive(True)
def reset_selection(self):
    """Re-select items recorded by set_playback_clipboard()."""
    remembered = self.reselect_on_stop
    for item in self.audio_items:
        if str(item.audio_item) in remembered:
            item.setSelected(True)
def set_zoom(self, a_scale):
    """Set the horizontal zoom factor and refresh the view."""
    self.h_zoom = a_scale
    self.update_zoom()
def set_v_zoom(self, a_scale):
    """Set the vertical zoom factor and refresh the view."""
    self.v_zoom = a_scale
    self.update_zoom()
def update_zoom(self):
    """Apply the current zoom factors (currently a no-op placeholder)."""
    pass
    # set_SEQUENCER_zoom(self.h_zoom, self.v_zoom)
    def header_click_event(self, a_event):
        """Move the playback cursor to the clicked beat.

        Ignored while playing and for right clicks (those open the
        context menu instead).
        """
        if (
            not glbl_shared.IS_PLAYING
            and a_event.button() != QtCore.Qt.MouseButton.RightButton
        ):
            # snap to the whole beat that was clicked
            f_beat = int(a_event.scenePos().x() / _shared.SEQUENCER_PX_PER_BEAT)
            global_set_playback_pos(f_beat)
def check_line_count(self):
"""Check that there are not too many vertical
lines on the screen
"""
f_num_count = len(self.text_list)
if f_num_count == 0:
return
view_pct = float(self.width()) / float(self.max_x)
f_num_visible_count = int(f_num_count * view_pct)
if f_num_visible_count > 24:
for f_line in self.beat_line_list:
f_line.setVisible(False)
f_factor = f_num_visible_count // 24
if f_factor == 1:
for f_num in self.text_list:
f_num.setVisible(True)
else:
f_factor = int(round(f_factor / 2.0) * 2)
for f_num in self.text_list:
f_num.setVisible(False)
for f_num in self.text_list[::f_factor]:
f_num.setVisible(True)
else:
for f_line in self.beat_line_list:
f_line.setVisible(True)
for f_num in self.text_list:
f_num.setVisible(True)
def get_region_items(self):
f_sequence_start = shared.CURRENT_SEQUENCE.loop_marker.start_beat
f_sequence_end = shared.CURRENT_SEQUENCE.loop_marker.beat
f_result = []
for f_item in self.audio_items:
f_seq_item = f_item.audio_item
f_item_start = f_seq_item.start_beat
f_item_end = f_item_start + f_seq_item.length_beats
if f_item_start >= f_sequence_start and f_item_end <= f_sequence_end:
f_result.append(f_item)
return f_result
    def select_region_items(self):
        """Select exactly the items inside the loop/region markers."""
        self.scene.clearSelection()
        for f_item in self.get_region_items():
            f_item.setSelected(True)
        self.set_selected_strings()
    def select_all(self):
        """Select every sequencer item."""
        for item in self.audio_items:
            item.setSelected(True)
        self.set_selected_strings()
def select_start_right(self):
for item in self.audio_items:
seq_item = item.audio_item
start = seq_item.start_beat
if start >= self.header_event_pos:
item.setSelected(True)
else:
item.setSelected(False)
self.set_selected_strings()
def select_end_right(self):
for item in self.audio_items:
seq_item = item.audio_item
start = seq_item.start_beat
end = start + seq_item.length_beats
if end > self.header_event_pos:
item.setSelected(True)
else:
item.setSelected(False)
self.set_selected_strings()
def select_start_left(self):
beat = self.header_event_pos
for item in self.audio_items:
seq_item = item.audio_item
start = seq_item.start_beat
if start < self.header_event_pos:
item.setSelected(True)
else:
item.setSelected(False)
self.set_selected_strings()
def select_end_left(self):
beat = self.header_event_pos
for item in self.audio_items:
seq_item = item.audio_item
start = seq_item.start_beat
end = start + seq_item.length_beats
if end <= self.header_event_pos:
item.setSelected(True)
else:
item.setSelected(False)
self.set_selected_strings()
def track_select_all(self):
track, beat, val = self.current_coord
for item in self.audio_items:
seq_item = item.audio_item
if seq_item.track_num == track:
item.setSelected(True)
self.set_selected_strings()
def track_select_left(self):
track, beat, val = self.current_coord
for item in self.audio_items:
seq_item = item.audio_item
start = seq_item.start_beat
if start <= beat and seq_item.track_num == track:
item.setSelected(True)
self.set_selected_strings()
def track_select_right(self):
track, beat, val = self.current_coord
for item in self.audio_items:
seq_item = item.audio_item
start = seq_item.start_beat
if start >= beat and seq_item.track_num == track:
item.setSelected(True)
self.set_selected_strings()
def get_loop_pos(self, a_warn=True):
if self.loop_start is None:
if a_warn:
QMessageBox.warning(
self,
_("Error"),
_(
"You must set the sequence markers first by "
"right-clicking on the scene header"
),
)
return None
else:
return self.loop_start, self.loop_end
    def draw_header(self, a_cursor_pos=None):
        """Rebuild the timeline header and the playback cursor.

        Draws the header background, the loop/region highlight, tempo
        markers (with time-signature text) and text markers, then the
        playback cursor line across all tracks.

        ``a_cursor_pos`` is accepted but not read anywhere in this body.
        """
        self.loop_start = self.loop_end = None
        f_sequence_length = get_current_sequence_length()
        f_size = _shared.SEQUENCER_PX_PER_BEAT * f_sequence_length
        self.max_x = f_size
        self.setSceneRect(
            -3.0,
            0.0,
            f_size + self.width() + 3.0,
            float(_shared.SEQUENCE_EDITOR_TOTAL_HEIGHT),
        )
        self.header = QGraphicsRectItem(
            0.0,
            0.0,
            float(f_size),
            float(_shared.SEQUENCE_EDITOR_HEADER_HEIGHT),
        )
        self.header.setToolTip(
            "The project timeline. Left click to set the playback cursor,\n"
            "right click to set region, tempo, or text markers"
        )
        self.header.setZValue(1500.0)
        self.header.setBrush(
            QColor(
                theme.SYSTEM_COLORS.daw.seq_header,
            )
        )
        self.header.mousePressEvent = self.header_click_event
        self.header.contextMenuEvent = header_context_menu.show
        self.scene.addItem(self.header)
        text_item = None
        last_tsig = None
        start_num = 1
        for f_marker in shared.CURRENT_SEQUENCE.get_markers():
            if f_marker.type == 1: # Loop/Region
                self.loop_start = f_marker.start_beat
                self.loop_end = f_marker.beat
                f_x = f_marker.start_beat * _shared.SEQUENCER_PX_PER_BEAT
                width = (
                    f_marker.beat - f_marker.start_beat
                ) * _shared.SEQUENCER_PX_PER_BEAT
                region = QGraphicsRectItem(
                    float(f_x),
                    0.0,
                    float(width),
                    float(_shared.SEQUENCE_EDITOR_HEADER_HEIGHT),
                    self.header,
                )
                region.setZValue(0.0)
                region_brush = QColor(
                    theme.SYSTEM_COLORS.daw.seq_header_region,
                )
                region.setBrush(region_brush)
            elif f_marker.type == 2: # Tempo
                f_text = "{} : {}/{}".format(
                    f_marker.tempo,
                    f_marker.tsig_num,
                    f_marker.tsig_den,
                )
                item = QGraphicsEllipseItem(
                    0.0,
                    0.0,
                    12.0,
                    12.0,
                    self.header,
                )
                item.setZValue(1.0)
                item.setBrush(
                    QColor(
                        theme.SYSTEM_COLORS.daw.seq_tempo_marker,
                    )
                )
                item.setPos(
                    f_marker.beat * _shared.SEQUENCER_PX_PER_BEAT,
                    _shared.SEQUENCE_EDITOR_HEADER_ROW_HEIGHT + 3.0,
                )
                # Hide the previous tempo marker's text if it overlaps this
                # tempo marker
                if text_item and text_item.sceneBoundingRect().intersects(
                    item.sceneBoundingRect(),
                ):
                    text_item.hide()
                text_item = get_font().QGraphicsSimpleTextItem(
                    f_text,
                    self.header,
                )
                tooltip = "Tempo: {} BPM\nTime Signature: {}/{}".format(
                    f_marker.tempo,
                    f_marker.tsig_num,
                    f_marker.tsig_den,
                )
                # NOTE(review): setToolTip here takes a reformat kwarg, so it
                # appears to be a project wrapper, not Qt's - confirm.
                item.setToolTip(tooltip, reformat=False)
                text_item.setZValue(1.0)
                text_item.setBrush(
                    QColor(
                        theme.SYSTEM_COLORS.daw.seq_header_text,
                    ),
                )
                text_item.setPos(
                    (f_marker.beat * _shared.SEQUENCER_PX_PER_BEAT) + 12.0,
                    _shared.SEQUENCE_EDITOR_HEADER_ROW_HEIGHT,
                )
                item.mousePressEvent = header_context_menu.TempoMarkerEvent(
                    f_marker.beat,
                ).mouse_press
                # restart bar numbering whenever the time signature changes
                tsig = (f_marker.tsig_num, f_marker.tsig_den)
                if tsig != last_tsig:
                    start_num = 1
                    last_tsig = tsig
                self.draw_sequence(f_marker, start_num)
                start_num += f_marker.length
            elif f_marker.type == 3: # Text
                f_item = get_font().QGraphicsSimpleTextItem(
                    f_marker.text,
                    self.header,
                )
                f_item.setZValue(1.0)
                f_item.setBrush(
                    QColor(
                        theme.SYSTEM_COLORS.daw.seq_header_text,
                    ),
                )
                f_item.setPos(
                    f_marker.beat * _shared.SEQUENCER_PX_PER_BEAT,
                    _shared.SEQUENCE_EDITOR_HEADER_ROW_HEIGHT * 2,
                )
            else:
                assert False, "Invalid marker type"
        f_total_height = (
            _shared.SEQUENCE_EDITOR_TRACK_COUNT * shared.SEQUENCE_EDITOR_TRACK_HEIGHT
        ) + _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
        playback_cursor_pen = QPen(
            QColor(
                theme.SYSTEM_COLORS.widgets.playback_cursor,
            ),
            2.0,
        )
        self.playback_cursor = self.scene.addLine(
            0.0,
            0.0,
            0.0,
            f_total_height,
            playback_cursor_pen,
        )
        self.playback_cursor.setZValue(1000.0)
        self.set_playback_pos(self.playback_pos)
        self.check_line_count()
        self.set_header_y_pos()
    def draw_sequence(self, a_marker, start_num: int):
        """Draw grid lines, bar numbers and track separators for the span
        governed by one tempo marker.

        ``a_marker`` supplies the starting beat, length and time signature;
        ``start_num`` is the beat counter carried over from the previous
        tempo marker so bar numbering stays continuous.
        """
        f_sequence_length = get_current_sequence_length()
        f_size = _shared.SEQUENCER_PX_PER_BEAT * f_sequence_length
        bar_pen = QPen(
            QColor(
                theme.SYSTEM_COLORS.daw.seq_bar_line,
            )
        )
        f_beat_pen = QPen(
            QColor(
                theme.SYSTEM_COLORS.daw.seq_beat_line,
            ),
        )
        f_16th_pen = QPen(
            QColor(
                theme.SYSTEM_COLORS.daw.seq_16th_line,
            ),
        )
        track_pen = QPen(
            QColor(
                theme.SYSTEM_COLORS.daw.seq_track_line,
            ),
        )
        f_total_height = (
            _shared.SEQUENCE_EDITOR_TRACK_COUNT * shared.SEQUENCE_EDITOR_TRACK_HEIGHT
        ) + _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
        f_x_offset = a_marker.beat * _shared.SEQUENCER_PX_PER_BEAT
        # i3 walks one beat-width per iteration, starting at the marker
        i3 = f_x_offset
        number_brush = QColor(
            theme.SYSTEM_COLORS.daw.seq_header_text,
        )
        for i in range(start_num, start_num + int(a_marker.length)):
            # NOTE(review): with tsig_num == 1, i % 1 is never 1, so no bar
            # numbers would be drawn - confirm whether that is intended.
            if i % a_marker.tsig_num == 1:
                f_number = get_font().QGraphicsSimpleTextItem(
                    str((i // a_marker.tsig_num) + 1),
                    self.header,
                )
                f_number.setFlag(
                    QGraphicsItem.GraphicsItemFlag.ItemIgnoresTransformations,
                )
                f_number.setBrush(number_brush)
                f_number.setZValue(1000.0)
                self.text_list.append(f_number)
                if shared.SEQ_WIDGET.last_hzoom >= 3:
                    self.scene.addLine(
                        i3,
                        0.0,
                        i3,
                        f_total_height,
                        bar_pen,
                    )
                f_number.setPos(i3 + 3.0, 2)
                if _shared.SEQ_LINES_ENABLED and _shared.DRAW_SEQUENCER_GRAPHS:
                    # sub-beat (snap) lines within this bar-start beat
                    for f_i4 in range(1, _shared.SEQ_SNAP_RANGE):
                        f_sub_x = i3 + (_shared.SEQUENCER_QUANTIZE_PX * f_i4)
                        f_line = self.scene.addLine(
                            f_sub_x,
                            _shared.SEQUENCE_EDITOR_HEADER_HEIGHT,
                            f_sub_x,
                            f_total_height,
                            f_16th_pen,
                        )
                        self.beat_line_list.append(f_line)
            elif _shared.DRAW_SEQUENCER_GRAPHS:
                f_beat_x = i3
                f_line = self.scene.addLine(
                    f_beat_x,
                    0.0,
                    f_beat_x,
                    f_total_height,
                    f_beat_pen,
                )
                self.beat_line_list.append(f_line)
                if _shared.SEQ_LINES_ENABLED:
                    for f_i4 in range(1, _shared.SEQ_SNAP_RANGE):
                        f_sub_x = f_beat_x + (_shared.SEQUENCER_QUANTIZE_PX * f_i4)
                        f_line = self.scene.addLine(
                            f_sub_x,
                            _shared.SEQUENCE_EDITOR_HEADER_HEIGHT,
                            f_sub_x,
                            f_total_height,
                            f_16th_pen,
                        )
                        self.beat_line_list.append(f_line)
            i3 += _shared.SEQUENCER_PX_PER_BEAT
        # closing bar line at the end of the marker's span
        self.scene.addLine(
            i3,
            _shared.SEQUENCE_EDITOR_HEADER_HEIGHT,
            i3,
            f_total_height,
            bar_pen,
        )
        # horizontal separators between tracks
        for i2 in range(_shared.SEQUENCE_EDITOR_TRACK_COUNT):
            f_y = (
                shared.SEQUENCE_EDITOR_TRACK_HEIGHT * (i2 + 1)
            ) + _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
            line = self.scene.addLine(
                f_x_offset,
                f_y,
                f_size,
                f_y,
                track_pen,
            )
            line.setZValue(1000.0)
    def clear_drawn_items(self):
        """Wipe the scene and redraw the header.

        Selection-change handling is suppressed while clearing so stale
        items do not trigger callbacks.
        """
        self.reset_line_lists()
        self.audio_items = []
        self.automation_points = []
        self.ignore_selection_change = True
        self.scene.clear()
        self.ignore_selection_change = False
        self.draw_header()
    def draw_item(self, a_name, a_item):
        """Create a SequencerItem for ``a_item``, add it to the scene and
        return it."""
        f_item = SequencerItem(
            a_name,
            a_item,
            # only draw the resize handle when zoomed in far enough
            draw_handle=shared.SEQ_WIDGET.last_hzoom >= 3,
        )
        self.audio_items.append(f_item)
        self.scene.addItem(f_item)
        return f_item
    def draw_point(self, a_point):
        """Draw one automation point and return the scene item.

        Returns None (after logging) when the point's plugin uid is not
        mapped to a visible track.
        """
        if a_point.index not in shared.TRACK_PANEL.plugin_uid_map:
            LOG.info(
                "{} not in {}".format(a_point.index, shared.TRACK_PANEL.plugin_uid_map)
            )
            return
        f_track = shared.TRACK_PANEL.plugin_uid_map[a_point.index]
        # vertical bounds of the point within its track lane
        f_min = (
            f_track * shared.SEQUENCE_EDITOR_TRACK_HEIGHT
        ) + _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
        f_max = f_min + shared.SEQUENCE_EDITOR_TRACK_HEIGHT - _shared.ATM_POINT_DIAMETER
        f_item = SeqAtmItem(
            a_point,
            self.automation_save_callback,
            f_min,
            f_max,
        )
        self.scene.addItem(f_item)
        f_item.setPos(self.get_pos_from_point(a_point))
        self.automation_points.append(f_item)
        # restore selection state from the saved selection snapshot
        if str(a_point) in self.selected_point_strings:
            f_item.setSelected(True)
        return f_item
    def get_pos_from_point(self, a_point):
        """Map an automation point (beat, cc value 0-127) to scene
        coordinates within its track lane."""
        f_track_height = (
            shared.SEQUENCE_EDITOR_TRACK_HEIGHT - _shared.ATM_POINT_DIAMETER
        )
        f_track = shared.TRACK_PANEL.plugin_uid_map[a_point.index]
        return QtCore.QPointF(
            float(a_point.beat * _shared.SEQUENCER_PX_PER_BEAT),
            float(
                # higher cc values map to a smaller y (drawn higher up)
                (f_track_height * (1.0 - (a_point.cc_val / 127.0)))
                + (shared.SEQUENCE_EDITOR_TRACK_HEIGHT * f_track)
                + _shared.SEQUENCE_EDITOR_HEADER_HEIGHT
            ),
        )
    def automation_save_callback(self, a_open=True):
        """Persist the automation sequence; optionally re-open the
        sequence to redraw."""
        constants.DAW_PROJECT.save_atm_sequence(shared.ATM_SEQUENCE)
        if a_open:
            self.open_sequence()
|
commands | reset | # -*- coding: utf-8 -*-
from os import path
from dbsettings.models import Setting
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.flush import Command as Flush
from .fake import Command as Fake
from .migrate import Command as Migrate
class Command(BaseCommand):
    """Management command: flush the database, re-run migrations for the
    project's own apps from zero, then seed fake data."""
    help = "Reapplies core migrations and generates fake data."
    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # cache the user model and its username field for subclasses/helpers
        self.UserModel = get_user_model()
        self.username_field = self.UserModel._meta.get_field(
            self.UserModel.USERNAME_FIELD
        )
        # Disable system checks for reset.
        self.requires_system_checks = False
    def add_arguments(self, parser):
        # accept both migrate's and fake's command-line options
        Migrate().add_arguments(parser)
        Fake().add_arguments(parser)
    def handle(self, *args, **options):
        """Flush, migrate (zero then forward), clear cache, seed fakes."""
        verbosity = options["verbosity"]
        # Flush all existing database records.
        flush = Flush()
        flush.handle(**options)
        if verbosity > 0:
            self.stdout.write(self.style.SUCCESS("Database flushed."))
        # Remove all site-wide settings.
        Setting.objects.all().delete()
        # Run migrations for all Baby Buddy apps.
        for config in apps.app_configs.values():
            # an app is "ours" when its parent directory is named babybuddy
            if path.split(path.split(config.path)[0])[1] == "babybuddy":
                migrate = Migrate()
                # unapply everything first so migrations run from scratch
                options["app_label"] = config.name
                options["migration_name"] = "zero"
                try:
                    migrate.handle(*args, **options)
                except CommandError:
                    # Ignore apps without migrations.
                    pass
        # Run other migrations.
        migrate = Migrate()
        options["app_label"] = None
        options["migration_name"] = None
        migrate.handle(*args, **options)
        # Clear cache.
        cache.clear()
        if verbosity > 0:
            self.stdout.write(self.style.SUCCESS("Cache cleared."))
        # Populate database with fake data.
        fake = Fake()
        fake.handle(*args, **options)
        if verbosity > 0:
            self.stdout.write(self.style.SUCCESS("Database reset complete."))
|
translators | translating_nodes | from __future__ import unicode_literals
import random
import six
from .friendly_nodes import *
from .pyjsparserdata import *
if six.PY3:
from functools import reduce
xrange = range
unicode = str
# number of characters above which expression will be split to multiple lines in order to avoid python parser stack overflow
# still experimental so I suggest to set it to 400 in order to avoid common errors
# set it to smaller value only if you have problems with parser stack overflow
LINE_LEN_LIMIT = (
    400  # characters; the larger the value, the lower the chance of hoisting errors
)
class ForController:
    """Tracks whether translation is currently inside a for-loop body and
    remembers that loop's update expression."""
    def __init__(self):
        self.inside = [False]  # stack of flags, one per nested construct
        self.update = ""
    def enter_for(self, update):
        self.inside.append(True)
        self.update = update
    def leave_for(self):
        self.inside.pop()
    def enter_other(self):
        # any non-for construct masks the enclosing for
        self.inside.append(False)
    def leave_other(self):
        self.inside.pop()
    def is_inside(self):
        # True only when the innermost construct is a for loop
        return self.inside[-1]
class InlineStack:
    """Collects definitions (functions, object literals) that must be
    emitted before the expression line that references them, since python
    has no inline definitions."""

    NAME = "PyJs_%s_%d_"

    def __init__(self):
        self.reps = {}   # lval name -> code block that defines it
        self.names = []  # insertion order of reserved lval names

    def inject_inlines(self, source):
        """Insert every pending definition before its first use in *source*."""
        for lval in self.names:  # first in first out! Its important by the way
            source = inject_before_lval(source, lval, self.reps[lval])
        return source

    def require(self, typ):
        """Reserve and return a fresh, unique lval name of the given type."""
        name = self.NAME % (typ, len(self.names))
        self.names.append(name)
        return name

    def define(self, name, val):
        """Attach the defining code block to a previously reserved name."""
        self.reps[name] = val

    def reset(self):
        # BUG FIX: this used to assign ``self.rel = {}`` (a typo), leaving
        # every stale definition in self.reps across translations.
        self.reps = {}
        self.names = []
class ContextStack:
    """Tracks the variables to register and the hoisted function
    definitions for the scope currently being translated."""

    def __init__(self):
        self.to_register = set([])
        self.to_define = {}

    def reset(self):
        self.to_register = set([])
        self.to_define = {}

    def register(self, var):
        """Mark *var* as a variable belonging to this scope."""
        self.to_register.add(var)

    def define(self, name, code):
        """Hoist function *name* (defined by *code*) to the scope top."""
        self.to_define[name] = code
        self.register(name)

    def get_code(self):
        """Return the scope prologue: registrations + hoisted functions."""
        code = "var.registers([%s])\n" % ", ".join(repr(e) for e in self.to_register)
        # dict.values() iterates identically on python 2 and 3; the key was
        # unused, so six.iteritems() was unnecessary here.
        for func_code in self.to_define.values():
            code += func_code
        return code
def clean_stacks():
    """Reset the module-level translation state (scope context and
    pending inline definitions)."""
    global Context, inline_stack
    Context = ContextStack()
    inline_stack = InlineStack()
def to_key(literal_or_identifier):
    """Return the string form of an Identifier/Literal node (None for
    any other node type)."""
    node_type = literal_or_identifier["type"]
    if node_type == "Identifier":
        return literal_or_identifier["name"]
    if node_type != "Literal":
        return None
    value = literal_or_identifier["value"]
    # order matters: float before regex/bool/None, same as the value checks
    if isinstance(value, float):
        return unicode(float_repr(value))
    if "regex" in literal_or_identifier:
        return compose_regex(value)
    if isinstance(value, bool):
        return "true" if value else "false"
    if value is None:
        return "null"
    return unicode(value)
def trans(ele, standard=False):
    """Translates esprima syntax tree to python by delegating to appropriate translating node"""
    try:
        # translators are module-level functions named after the node type
        node = globals().get(ele["type"])
        if not node:
            raise NotImplementedError("%s is not supported!" % ele["type"])
        if standard:
            # @limited wraps translators and keeps the raw one under
            # .standard; use it when the caller must not get a hoisted lval
            node = node.__dict__["standard"] if "standard" in node.__dict__ else node
        return node(**ele)
    except:
        # print ele
        raise
def limited(func):
    """Decorator limiting resulting line length in order to avoid python parser stack overflow -
    If expression longer than LINE_LEN_LIMIT characters then it will be moved to upper line
    USE ONLY ON EXPRESSIONS!!!"""
    # Note: the wrapper accepts a ``standard`` kwarg but never uses it; the
    # undecorated translator is reached via f.standard instead (see trans()).
    def f(standard=False, **args):
        insert_pos = len(
            inline_stack.names
        )  # in case line is longer than limit we will have to insert the lval at current position
        # this is because calling func will change inline_stack.
        # we cant use inline_stack.require here because we dont know whether line overflows yet
        res = func(**args)
        if len(res) > LINE_LEN_LIMIT:
            # hoist: reserve a name, then move it back to the position
            # recorded before translating so definitions keep dependency order
            name = inline_stack.require("LONG")
            inline_stack.names.pop()
            inline_stack.names.insert(insert_pos, name)
            res = "def %s(var=var):\n return %s\n" % (name, res)
            inline_stack.define(name, res)
            return name + "()"
        else:
            return res
    f.__dict__["standard"] = func
    return f
# ==== IDENTIFIERS AND LITERALS =======
inf = float("inf")
def Literal(type, value, raw, regex=None):
    """Translate a JS literal (regex, null, string, bool, number).

    Fix: ``repr()`` of ``-inf``/``nan`` is not valid python source
    (``Js(-inf)`` / ``Js(nan)`` would raise NameError in the generated
    code), so all non-finite floats are routed through ``float("...")``
    like positive infinity already was.
    """
    if regex:  # regex
        return "JsRegExp(%s)" % repr(compose_regex(value))
    elif value is None:  # null
        return 'var.get(u"null")'
    # Todo template
    # String, Bool, Float
    if isinstance(value, float):
        if value != value:  # NaN (never equal to itself)
            return 'Js(float("nan"))'
        if value == float("inf"):
            return 'Js(float("inf"))'
        if value == float("-inf"):
            return 'Js(float("-inf"))'
    return "Js(%s)" % repr(value)
def Identifier(type, name):
    """An identifier is a scope lookup in the generated code."""
    return "var.get({0})".format(repr(name))
@limited
def MemberExpression(type, computed, object, property):
    """obj.prop / obj[prop] -> a .get(...) call on the translated object."""
    far_left = trans(object)
    if computed:  # obj[prop] type accessor
        # may be literal which is the same in every case so we can save some time on conversion
        if property["type"] == "Literal":
            prop = repr(to_key(property))
        else:  # worst case
            prop = trans(property)
    else:  # always the same since not computed (obj.prop accessor)
        prop = repr(to_key(property))
    return far_left + ".get(%s)" % prop
def ThisExpression(type):
    """JS `this` is just the scope entry named "this"."""
    return "var.get(u\"this\")"
@limited
def CallExpression(type, callee, arguments):
    """f(args) / obj.m(args) -> direct call or .callprop() on the object."""
    arguments = [trans(e) for e in arguments]
    if callee["type"] == "MemberExpression":
        far_left = trans(callee["object"])
        if callee["computed"]:  # obj[prop] type accessor
            # may be literal which is the same in every case so we can save some time on conversion
            if callee["property"]["type"] == "Literal":
                prop = repr(to_key(callee["property"]))
            else:  # worst case
                prop = trans(callee["property"])  # its not a string literal! so no repr
        else:  # always the same since not computed (obj.prop accessor)
            prop = repr(to_key(callee["property"]))
        # callprop(prop, *args) preserves the JS `this` binding
        arguments.insert(0, prop)
        return far_left + ".callprop(%s)" % ", ".join(arguments)
    else:  # standard call
        return trans(callee) + "(%s)" % ", ".join(arguments)
# ========== ARRAYS ============
def ArrayExpression(type, elements):  # todo fix null inside problem
    """JS array literal; holes (falsy elements) become python None."""
    translated = [trans(e) if e else "None" for e in elements]
    return "Js([%s])" % ", ".join(translated)
# ========== OBJECTS =============
def ObjectExpression(type, properties):
    """Translate an object literal.

    Plain (init) properties go straight into the Js({...}) literal;
    getters and setters need define_own_property calls appended after the
    literal's inline definition. Returns the hoisted lval name.
    """
    name = inline_stack.require("Object")
    elems = []
    after = ""
    for p in properties:
        if p["kind"] == "init":
            elems.append("%s:%s" % Property(**p))
        elif p["kind"] == "set":
            k, setter = Property(
                **p
            )  # setter is just a lval referring to that function, it will be defined in InlineStack automatically
            after += (
                '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n'
                % (name, k, setter)
            )
        elif p["kind"] == "get":
            k, getter = Property(**p)
            after += (
                '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n'
                % (name, k, getter)
            )
        else:
            # fixed typo in the error message ("propery" -> "property")
            raise RuntimeError("Unexpected object property kind")
    obj = "%s = Js({%s})\n" % (name, ",".join(elems))
    inline_stack.define(name, obj + after)
    return name
def Property(type, kind, key, computed, value, method, shorthand):
    """Return (repr(key), translated_value) for one object property."""
    if computed or shorthand:
        raise NotImplementedError("Shorthand and Computed properties not implemented!")
    prop_key = to_key(key)
    if prop_key is None:
        raise SyntaxError("Invalid key in dictionary! Or bug in Js2Py")
    translated_value = trans(value)
    return repr(prop_key), translated_value
# ========== EXPRESSIONS ============
@limited
def UnaryExpression(type, operator, argument, prefix):
    """Unary operator; `delete` and `typeof` get special handling, the
    rest are delegated to the UNARY map."""
    a = trans(
        argument, standard=True
    )  # unary involve some complex operations so we cant use line shorteners here
    if operator == "delete":
        if argument["type"] in {"Identifier", "MemberExpression"}:
            # means that operation is valid
            return js_delete(a)
        return (
            "PyJsComma(%s, Js(True))" % a
        )  # otherwise not valid, just perform expression and return true.
    elif operator == "typeof":
        return js_typeof(a)
    return UNARY[operator](a)
@limited
def BinaryExpression(type, operator, left, right):
    """Binary operator; the python form lives in the BINARY map."""
    a = trans(left)
    b = trans(right)
    # delegate to our friends
    return BINARY[operator](a, b)
@limited
def UpdateExpression(type, operator, argument, prefix):
    """++/-- (prefix or postfix) via js_postfix."""
    a = trans(
        argument, standard=True
    )  # also complex operation involving parsing of the result so no line length reducing here
    return js_postfix(a, operator == "++", not prefix)
@limited
def AssignmentExpression(type, operator, left, right):
    """Assignment (=, +=, ...) -> var.put / .put calls.

    The trailing '=' is stripped from the operator; what remains is the
    compound operator ('' for plain assignment).

    Fix: removed a stray no-op ``six`` expression statement that followed
    this function at module level (dead code).
    """
    operator = operator[:-1]
    if left["type"] == "Identifier":
        if operator:
            return "var.put(%s, %s, %s)" % (
                repr(to_key(left)),
                trans(right),
                repr(operator),
            )
        else:
            return "var.put(%s, %s)" % (repr(to_key(left)), trans(right))
    elif left["type"] == "MemberExpression":
        far_left = trans(left["object"])
        if left["computed"]:  # obj[prop] type accessor
            # literal keys skip a runtime conversion
            if left["property"]["type"] == "Literal":
                prop = repr(to_key(left["property"]))
            else:  # worst case
                prop = trans(left["property"])  # its not a string literal! so no repr
        else:  # always the same since not computed (obj.prop accessor)
            prop = repr(to_key(left["property"]))
        if operator:
            return far_left + ".put(%s, %s, %s)" % (prop, trans(right), repr(operator))
        else:
            return far_left + ".put(%s, %s)" % (prop, trans(right))
    else:
        raise SyntaxError("Invalid left hand side in assignment!")
@limited
def SequenceExpression(type, expressions):
    """Comma operator: evaluate left to right, value is the last one."""
    return reduce(js_comma, (trans(e) for e in expressions))
@limited
def NewExpression(type, callee, arguments):
    """new Callee(args) -> Callee.create(args)."""
    target = trans(callee)
    args = ", ".join(trans(e) for e in arguments)
    return target + ".create(%s)" % args
@limited
def ConditionalExpression(
    type, test, consequent, alternate
):  # caused plenty of problems in my home-made translator :)
    """test ? consequent : alternate -> python conditional expression."""
    # translation order preserved: consequent, test, alternate
    on_true = trans(consequent)
    condition = trans(test)
    on_false = trans(alternate)
    return "(%s if %s else %s)" % (on_true, condition, on_false)
# =========== STATEMENTS =============
def BlockStatement(type, body):
    """A {...} block; delegates to StatementList."""
    return StatementList(
        body
    )  # never returns empty string! In the worst case returns pass\n
def ExpressionStatement(type, expression):
    """An expression used as a statement, terminated with a newline."""
    return "%s\n" % trans(expression)
def BreakStatement(type, label):
    """break; labeled breaks raise the label's dedicated exception."""
    if not label:
        return "break\n"
    return 'raise %s("Breaked")\n' % get_break_label(label["name"])
def ContinueStatement(type, label):
    """continue; labeled continues raise the label's dedicated exception."""
    if not label:
        return "continue\n"
    return 'raise %s("Continued")\n' % get_continue_label(label["name"])
def ReturnStatement(type, argument):
    """return expr; a bare return yields JS undefined."""
    if argument:
        return "return %s\n" % trans(argument)
    return "return var.get('undefined')\n"
def EmptyStatement(type):
    """An empty JS statement is a python no-op."""
    return "pass\n"
def DebuggerStatement(type):
    """`debugger` has no python equivalent; emit a no-op."""
    return "pass\n"
def DoWhileStatement(type, body, test):
    """do/while: run the body once, then break when the test fails."""
    # translation order preserved: body first, then test
    translated_body = trans(body)
    check = "if not %s:\n" % trans(test) + indent("break\n")
    return "while 1:\n" + indent(translated_body + check)
def ForStatement(type, init, test, update, body):
    """Classic for loop -> init statements plus a while loop.

    When an update expression exists, the body is wrapped in
    try/finally so the update still runs on `continue`.
    """
    update = indent(trans(update)) if update else ""
    init = trans(init) if init else ""
    if not init.endswith("\n"):
        init += "\n"
    test = trans(test) if test else "1"
    if not update:
        result = "#for JS loop\n%swhile %s:\n%s%s\n" % (
            init,
            test,
            indent(trans(body)),
            update,
        )
    else:
        result = "#for JS loop\n%swhile %s:\n" % (init, test)
        body = "try:\n%sfinally:\n %s\n" % (indent(trans(body)), update)
        result += indent(body)
    return result
def ForInStatement(type, left, right, body, each):
    """for (x in obj) -> python for over the translated iterable, putting
    each key into the loop variable's scope slot."""
    res = "for PyJsTemp in %s:\n" % trans(right)
    if left["type"] == "VariableDeclaration":
        addon = trans(left)  # make sure variable is registered
        if addon != "pass\n":
            res = addon + res  # we have to execute this expression :(
        # now extract the name
        try:
            name = left["declarations"][0]["id"]["name"]
        except:
            raise RuntimeError("Unusual ForIn loop")
    elif left["type"] == "Identifier":
        name = left["name"]
    else:
        raise RuntimeError("Unusual ForIn loop")
    res += indent("var.put(%s, PyJsTemp)\n" % repr(name) + trans(body))
    return res
def IfStatement(type, test, consequent, alternate):
    """if/else translation.

    `elif` is deliberately not used: a function definition inside an elif
    body could not be hoisted properly.
    """
    code = "if %s:\n" % trans(test)
    code += indent(trans(consequent))
    if alternate:
        code += "else:\n" + indent(trans(alternate))
    return code
def LabeledStatement(type, label, body):
    """Labeled statement: break/continue on the label become dedicated
    exception classes caught around (and inside) the translated body."""
    # todo consider using smarter approach!
    inside = trans(body)
    defs = ""
    if (
        inside.startswith("while ")
        or inside.startswith("for ")
        or inside.startswith("#for")
    ):
        # we have to add contine label as well...
        # 3 or 1 since #for loop type has more lines before real for.
        sep = 1 if not inside.startswith("#for") else 3
        cont_label = get_continue_label(label["name"])
        temp = inside.split("\n")
        # wrap only the loop body (after `sep` header lines) for continue
        injected = "try:\n" + "\n".join(temp[sep:])
        injected += "except %s:\n pass\n" % cont_label
        inside = "\n".join(temp[:sep]) + "\n" + indent(injected)
        defs += "class %s(Exception): pass\n" % cont_label
    break_label = get_break_label(label["name"])
    inside = "try:\n%sexcept %s:\n pass\n" % (indent(inside), break_label)
    defs += "class %s(Exception): pass\n" % break_label
    return defs + inside
def StatementList(lis):
    """Join translated statements; the result is never empty (worst case
    "pass\\n") so the caller's indentation stays valid."""
    if not lis:
        return "pass\n"
    code = "".join(trans(e) for e in lis)
    return code or "pass\n"
def PyimportStatement(type, imp):
    """Translate the custom `pyimport` extension statement."""
    lib = imp["name"]
    alias = "PyImport_%s" % lib
    code = "import %s as %s\n" % (lib, alias)
    # reject anything that is not a legal python module name
    try:
        compile(code, "", "exec")
    except:
        raise SyntaxError("Invalid Python module name (%s) in pyimport statement" % lib)
    # var.pyimport will handle module conversion to PyJs object
    code += "var.pyimport(%s, %s)\n" % (repr(lib), alias)
    return code
def SwitchStatement(type, discriminant, cases):
    """switch -> a single-pass while loop with a SWITCHED fall-through
    flag; each case chains via strict equality on CONDITION."""
    # TODO there will be a problem with continue in a switch statement.... FIX IT
    code = "while 1:\n" + indent("SWITCHED = False\nCONDITION = (%s)\n")
    code = code % trans(discriminant)
    for case in cases:
        case_code = None
        if case["test"]:  # case (x):
            # once SWITCHED is set, later cases execute too (fall-through)
            case_code = "if SWITCHED or PyJsStrictEq(CONDITION, %s):\n" % (
                trans(case["test"])
            )
        else:  # default:
            case_code = "if True:\n"
        case_code += indent("SWITCHED = True\n")
        case_code += indent(StatementList(case["consequent"]))
        # one more indent for whole
        code += indent(case_code)
    # prevent infinite loop and sort out nested switch...
    code += indent("SWITCHED = True\nbreak\n")
    return code
def ThrowStatement(type, argument):
    """throw expr -> convert to a python exception and raise it."""
    translated = trans(argument)
    return (
        "PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n"
        % translated
    )
def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer):
    """try/catch/finally translation.

    The catch block binds the caught value to the catch identifier inside
    the scope, stashing the identifier's previous value in a uniquely
    named holder variable so it can be restored afterwards.
    """
    result = "try:\n%s" % indent(trans(block))
    # complicated catch statement...
    if handler:
        identifier = handler["param"]["name"]
        # random suffix keeps nested catches with the same name distinct
        holder = "PyJsHolder_%s_%d" % (to_hex(identifier), random.randrange(1e8))
        identifier = repr(identifier)
        result += "except PyJsException as PyJsTempException:\n"
        # fill in except ( catch ) block and remember to recover holder variable to its previous state
        result += indent(
            TRY_CATCH.replace("HOLDER", holder)
            .replace("NAME", identifier)
            .replace("BLOCK", indent(trans(handler["body"])))
        )
    # translate finally statement if present
    if finalizer:
        result += "finally:\n%s" % indent(trans(finalizer))
    return result
def LexicalDeclaration(type, declarations, kind):
    """let / const declarations are not supported by this translator."""
    raise NotImplementedError(
        "let and const not implemented yet but they will be soon! Check github for updates."
    )
def VariableDeclarator(type, id, init):
    """Register the declared name; emit a put only when initialised."""
    name = id["name"]
    Context.register(name)  # ensure the name exists in this scope
    if not init:
        return ""
    return "var.put(%s, %s)\n" % (repr(name), trans(init))
def VariableDeclaration(type, declarations, kind):
    """One or more var declarators; empty output becomes a no-op."""
    parts = [trans(d) for d in declarations]
    code = "".join(parts)
    return code or "pass\n"
def WhileStatement(type, test, body):
    """while loop; test is translated before the body (order matters for
    hoisted inline definitions)."""
    head = "while %s:\n" % trans(test)
    return head + indent(trans(body))
def WithStatement(type, object, body):
    """JS `with` blocks are not supported by this translator."""
    raise NotImplementedError("With statement not implemented!")
def Program(type, body):
    """Translate a whole parsed program to python source."""
    inline_stack.reset()
    code = "".join(trans(e) for e in body)
    # here add hoisted elements (register variables and define functions)
    code = Context.get_code() + code
    # replace all inline variables
    code = inline_stack.inject_inlines(code)
    return code
# ======== FUNCTIONS ============
def FunctionDeclaration(type, id, params, defaults, body, generator, expression):
    """Translate a named function declaration.

    The function is translated in a fresh ContextStack, wrapped with @Js,
    and hoisted to the top of the enclosing scope via Context.define();
    the statement itself therefore becomes a no-op ("pass").
    """
    if generator:
        raise NotImplementedError("Generators not supported")
    if defaults:
        raise NotImplementedError("Defaults not supported")
    if not id:
        # anonymous declarations are handled as expressions instead
        return FunctionExpression(
            type, id, params, defaults, body, generator, expression
        )
    JsName = id["name"]
    PyName = "PyJsHoisted_%s_" % JsName
    PyName = PyName if is_valid_py_name(PyName) else "PyJsHoistedNonPyName"
    # this is quite complicated
    global Context
    previous_context = Context
    # change context to the context of this function
    Context = ContextStack()
    # translate body within current context
    code = trans(body)
    # get arg names
    vars = [v["name"] for v in params]
    # args are automaticaly registered variables
    Context.to_register.update(vars)
    # add all hoisted elements inside function
    code = Context.get_code() + code
    # check whether args are valid python names:
    used_vars = []
    for v in vars:
        if is_valid_py_name(v):
            used_vars.append(v)
        else:  # invalid arg in python, for example $, replace with alternatice arg
            used_vars.append("PyJsArg_%s_" % to_hex(v))
    header = "@Js\n"
    header += "def %s(%sthis, arguments, var=var):\n" % (
        PyName,
        ", ".join(used_vars) + (", " if vars else ""),
    )
    # transfer names from Py scope to Js scope
    arg_map = dict(zip(vars, used_vars))
    arg_map.update({"this": "this", "arguments": "arguments"})
    arg_conv = "var = Scope({%s}, var)\n" % ", ".join(
        repr(k) + ":" + v for k, v in six.iteritems(arg_map)
    )
    # and finally set the name of the function to its real name:
    footer = "%s.func_name = %s\n" % (PyName, repr(JsName))
    footer += "var.put(%s, %s)\n" % (repr(JsName), PyName)
    whole_code = header + indent(arg_conv + code) + footer
    # restore context
    Context = previous_context
    # define in upper context
    Context.define(JsName, whole_code)
    return "pass\n"
def FunctionExpression(type, id, params, defaults, body, generator, expression):
    """Translate a function expression.

    Like FunctionDeclaration, but the definition is registered with the
    inline stack (so it gets injected before its first use) and the
    expression evaluates to the generated unique lval name.
    """
    if generator:
        raise NotImplementedError("Generators not supported")
    if defaults:
        raise NotImplementedError("Defaults not supported")
    JsName = id["name"] if id else "anonymous"
    if not is_valid_py_name(JsName):
        ScriptName = "InlineNonPyName"
    else:
        ScriptName = JsName
    PyName = inline_stack.require(ScriptName)  # this is unique
    # again quite complicated
    global Context
    previous_context = Context
    # change context to the context of this function
    Context = ContextStack()
    # translate body within current context
    code = trans(body)
    # get arg names
    vars = [v["name"] for v in params]
    # args are automaticaly registered variables
    Context.to_register.update(vars)
    # add all hoisted elements inside function
    code = Context.get_code() + code
    # check whether args are valid python names:
    used_vars = []
    for v in vars:
        if is_valid_py_name(v):
            used_vars.append(v)
        else:  # invalid arg in python, for example $, replace with alternatice arg
            used_vars.append("PyJsArg_%s_" % to_hex(v))
    header = "@Js\n"
    header += "def %s(%sthis, arguments, var=var):\n" % (
        PyName,
        ", ".join(used_vars) + (", " if vars else ""),
    )
    # transfer names from Py scope to Js scope
    arg_map = dict(zip(vars, used_vars))
    arg_map.update({"this": "this", "arguments": "arguments"})
    if id:  # make self available from inside...
        if id["name"] not in arg_map:
            arg_map[id["name"]] = PyName
    arg_conv = "var = Scope({%s}, var)\n" % ", ".join(
        repr(k) + ":" + v for k, v in six.iteritems(arg_map)
    )
    # and finally set the name of the function to its real name:
    footer = "%s._set_name(%s)\n" % (PyName, repr(JsName))
    whole_code = header + indent(arg_conv + code) + footer
    # restore context
    Context = previous_context
    # define in upper context
    inline_stack.define(PyName, whole_code)
    return PyName
# && and || translate exactly like the other binary operators
LogicalExpression = BinaryExpression
# postfix ++/-- share the update-expression translator
PostfixExpression = UpdateExpression
# initialise the module-level Context and inline_stack used by trans()
clean_stacks()
if __name__ == "__main__":
    # ad-hoc benchmark: translate esp.js and write the python source to res.py
    import codecs
    import time
    import pyjsparser
    c = None  #'''`ijfdij`'''
    if not c:
        with codecs.open("esp.js", "r", "utf-8") as f:
            c = f.read()
    print("Started")
    t = time.time()
    res = trans(pyjsparser.PyJsParser().parse(c))
    dt = time.time() - t + 0.000000001  # epsilon avoids division by zero below
    print("Translated everyting in", round(dt, 5), "seconds.")
    print("Thats %d characters per second" % int(len(c) / dt))
    with open("res.py", "w") as f:
        f.write(res)
|
downloaders | RehostTo | # -*- coding: utf-8 -*-
from ..base.multi_downloader import MultiDownloader
class RehostTo(MultiDownloader):
    """Multi-downloader plugin for the rehost.to premium link service."""

    __name__ = "RehostTo"
    __type__ = "downloader"
    __version__ = "0.30"
    __status__ = "testing"

    __pattern__ = r"https?://.*rehost\.to\..+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", False),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
        ("revert_failed", "bool", "Revert to standard download if fails", True),
    ]

    #: BUGFIX: description previously said "Rehost.com", but the plugin
    #: (name, __pattern__ and download URL) targets rehost.to.
    __description__ = """Rehost.to multi-downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("RaNaN", "RaNaN@pyload.net")]

    def handle_premium(self, pyfile):
        """Download *pyfile* through rehost.to using the account session token."""
        self.download(
            "http://rehost.to/process_download.php",
            get={
                "user": "cookie",
                "pass": self.account.get_data("session"),
                "dl": pyfile.url,
            },
            disposition=True,
        )
|
extractor | tvc | # coding: utf-8
from __future__ import unicode_literals
import re
from ..utils import clean_html, int_or_none
from .common import InfoExtractor
class TVCIE(InfoExtractor):
    """Extractor for the tvc.ru iframe player (tvc.ru/video/iframe/id/<id>)."""

    _VALID_URL = r"https?://(?:www\.)?tvc\.ru/video/iframe/id/(?P<id>\d+)"
    _TEST = {
        "url": "http://www.tvc.ru/video/iframe/id/74622/isPlay/false/id_stat/channel/?acc_video_id=/channel/brand/id/17/show/episodes/episode_id/39702",
        "md5": "bbc5ff531d1e90e856f60fc4b3afd708",
        "info_dict": {
            "id": "74622",
            "ext": "mp4",
            "title": 'События. "События". Эфир от 22.05.2015 14:30',
            "thumbnail": r"re:^https?://.*\.jpg$",
            "duration": 1122,
        },
    }

    @classmethod
    def _extract_url(cls, webpage):
        """Return the src URL of the first embedded TVC iframe, or None."""
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:http:)?//(?:www\.)?tvc\.ru/video/iframe/id/[^"]+)\1',
            webpage,
        )
        if mobj:
            return mobj.group("url")

    def _real_extract(self, url):
        # The JSON endpoint carries the metadata plus available qualities
        # under path.quality.
        video_id = self._match_id(url)
        video = self._download_json(
            "http://www.tvc.ru/video/json/id/%s" % video_id, video_id
        )
        formats = []
        for info in video.get("path", {}).get("quality", []):
            video_url = info.get("url")
            if not video_url:
                continue
            # Best effort: derive a format id from the ".../cdnvideo/<id>-..."
            # URL segment; may legitimately be absent.
            format_id = self._search_regex(
                r"cdnvideo/([^/]+?)(?:-[^/]+?)?/", video_url, "format id", default=None
            )
            formats.append(
                {
                    "url": video_url,
                    "format_id": format_id,
                    "width": int_or_none(info.get("width")),
                    "height": int_or_none(info.get("height")),
                    "tbr": int_or_none(info.get("bitrate")),
                }
            )
        self._sort_formats(formats)
        return {
            "id": video_id,
            "title": video["title"],
            "thumbnail": video.get("picture"),
            "duration": int_or_none(video.get("duration")),
            "formats": formats,
        }
class TVCArticleIE(InfoExtractor):
    """Extractor for tvc.ru article pages; delegates to TVCIE via og:video."""

    # Negative lookahead excludes the iframe URLs handled by TVCIE.
    _VALID_URL = r"https?://(?:www\.)?tvc\.ru/(?!video/iframe/id/)(?P<id>[^?#]+)"
    _TESTS = [
        {
            "url": "http://www.tvc.ru/channel/brand/id/29/show/episodes/episode_id/39702/",
            "info_dict": {
                "id": "74622",
                "ext": "mp4",
                "title": 'События. "События". Эфир от 22.05.2015 14:30',
                "description": "md5:ad7aa7db22903f983e687b8a3e98c6dd",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 1122,
            },
        },
        {
            "url": "http://www.tvc.ru/news/show/id/69944",
            "info_dict": {
                "id": "75399",
                "ext": "mp4",
                "title": "Эксперты: в столице встал вопрос о максимально безопасных остановках",
                "description": "md5:f2098f71e21f309e89f69b525fd9846e",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 278,
            },
        },
        {
            "url": "http://www.tvc.ru/channel/brand/id/47/show/episodes#",
            "info_dict": {
                "id": "2185",
                "ext": "mp4",
                "title": "Ещё не поздно. Эфир от 03.08.2013",
                "description": "md5:51fae9f3f8cfe67abce014e428e5b027",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 3316,
            },
        },
    ]

    def _real_extract(self, url):
        webpage = self._download_webpage(url, self._match_id(url))
        # Hand the embedded player URL off to TVCIE but keep this page's
        # title/description/thumbnail (url_transparent).
        return {
            "_type": "url_transparent",
            "ie_key": "TVC",
            "url": self._og_search_video_url(webpage),
            "title": clean_html(self._og_search_title(webpage)),
            "description": clean_html(self._og_search_description(webpage)),
            "thumbnail": self._og_search_thumbnail(webpage),
        }
|
params | dtypes | # Copyright 2008-2017 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import builtins
import re
from .. import Constants, blocks
# Blacklist certain ids; the list is not complete, but it should help.
ID_BLACKLIST = ["self"] + dir(builtins)
try:
    from gnuradio import gr

    # Also reserve the public attribute names of a gr.top_block instance,
    # since generated flow graphs live in that namespace.
    ID_BLACKLIST.extend(
        attr for attr in dir(gr.top_block()) if not attr.startswith("_")
    )
except (ImportError, AttributeError):
    # GNU Radio itself may not be importable here; keep the static list only.
    pass
# Registry mapping a param dtype name to the function that validates it.
validators = {}


def validates(*dtypes):
    """Class decorator factory: register the wrapped function as the
    validator for each dtype name in *dtypes* and return it unchanged."""

    def register(func):
        for name in dtypes:
            # Guard against typos: only known param type names are allowed.
            assert name in Constants.PARAM_TYPE_NAMES
            validators[name] = func
        return func

    return register
class ValidateError(Exception):
    """Raised by the validate_* functions when a parameter value is invalid."""
@validates("id")
def validate_block_id(param, black_listed_ids):
    """Validate a block "id" parameter.

    The value must be a usable Python identifier, must not be blacklisted,
    and must be unique among the enabled blocks of the flow graph.
    Returns the value on success; raises ValidateError otherwise.
    """
    value = param.value
    # Can python use this as a variable?
    # BUGFIX: the class previously read [a-z|A-Z], which made '|' a literal
    # member of the character class and accepted ids starting with '|'.
    if not re.match(r"^[a-zA-Z]\w*$", value):
        raise ValidateError(
            'ID "{}" must begin with a letter and may contain letters, numbers, '
            "and underscores.".format(value)
        )
    if value in (black_listed_ids + ID_BLACKLIST) and not getattr(
        param.parent_block, "exempt_from_id_validation", False
    ):
        # Grant blacklist exemption to epy blocks and modules
        raise ValidateError('ID "{}" is blacklisted.'.format(value))
    block_names = [block.name for block in param.parent_flowgraph.iter_enabled_blocks()]
    # Id should only appear once, or zero times if block is disabled
    if param.key == "id" and block_names.count(value) > 1:
        raise ValidateError('ID "{}" is not unique.'.format(value))
    elif value not in block_names:
        raise ValidateError('ID "{}" does not exist.'.format(value))
    return value
@validates("name")
def validate_name(param, black_listed_ids):
    """Validate a "name" parameter: a function/block name emitted literally
    into generated code (not as a string). Blank values are allowed."""
    value = param.value
    # Allow blank to pass validation
    # Can python use this as a variable?
    # BUGFIX: [a-z|A-Z] accepted a literal '|' as first character; use [a-zA-Z].
    if not re.match(r"^([a-zA-Z]\w*)?$", value):
        raise ValidateError(
            'ID "{}" must begin with a letter and may contain letters, numbers, '
            "and underscores.".format(value)
        )
    return value
@validates("stream_id")
def validate_stream_id(param, black_listed_ids):
    """Validate a virtual sink/source stream id.

    A virtual sink's stream id must be unique among enabled virtual sinks;
    a virtual source's stream id must refer to an existing virtual sink.
    """
    value = param.value
    # All stream ids of the enabled virtual sinks in this flow graph.
    stream_ids = [
        block.params["stream_id"].value
        for block in param.parent_flowgraph.iter_enabled_blocks()
        if isinstance(block, blocks.VirtualSink)
    ]
    # Check that the virtual sink's stream id is unique
    if (
        isinstance(param.parent_block, blocks.VirtualSink)
        and stream_ids.count(value) >= 2
    ):
        # Id should only appear once, or zero times if block is disabled
        raise ValidateError('Stream ID "{}" is not unique.'.format(value))
    # Check that the virtual source's stream id is found
    elif (
        isinstance(param.parent_block, blocks.VirtualSource) and value not in stream_ids
    ):
        raise ValidateError('Stream ID "{}" is not found.'.format(value))
@validates("complex", "real", "float", "int")
def validate_scalar(param, black_listed_ids):
    """Ensure the evaluated value is an instance of the dtype's Python types."""
    expected = Constants.PARAM_TYPE_MAP[param.dtype]
    if isinstance(param.get_evaluated(), expected):
        return
    raise ValidateError(
        "Expression {!r} is invalid for type {!r}.".format(
            param.get_evaluated(), param.dtype
        )
    )
@validates("complex_vector", "real_vector", "float_vector", "int_vector")
def validate_vector(param, black_listed_ids):
    """Validate a vector dtype: the expression must evaluate to a sequence
    whose items all match the scalar base type of the vector dtype."""
    # todo: check vector types

    if param.get_evaluated() is None:
        raise ValidateError(
            # BUGFIX: added the missing space ("type{!r}" -> "type {!r}").
            "Expression {!r} is invalid for type {!r}.".format(
                param.get_evaluated(), param.dtype
            )
        )

    # e.g. "int_vector" -> scalar base type "int".
    valid_types = Constants.PARAM_TYPE_MAP[param.dtype.split("_", 1)[0]]
    if not all(isinstance(item, valid_types) for item in param.get_evaluated()):
        raise ValidateError(
            "Expression {!r} is invalid for type {!r}.".format(
                param.get_evaluated(), param.dtype
            )
        )
@validates("gui_hint")
def validate_gui_hint(param, black_listed_ids):
    """Parse the GUI hint value, converting any failure into ValidateError."""
    try:
        # Only parse the param if there are no errors
        if len(param.get_error_messages()) > 0:
            return
        param.parse_gui_hint(param.value)
    except Exception as e:
        # Chain the original exception so the real cause stays visible
        # in tracebacks instead of being flattened to its str().
        raise ValidateError(str(e)) from e
|
addons | AntiVirus | # -*- coding: utf-8 -*-
import os
import shutil
import subprocess
from pyload.core.utils.convert import to_str
from ..base.addon import BaseAddon, expose, threaded
from ..helpers import exists
try:
import send2trash
except ImportError:
pass
class AntiVirus(BaseAddon):
    """Scan finished (and optionally failed) downloads with an external
    antivirus executable and delete or quarantine infected files."""

    __name__ = "AntiVirus"
    __type__ = "addon"
    __version__ = "0.23"
    __status__ = "broken"

    __config__ = [
        ("enabled", "bool", "Activated", False),
        (
            "action",
            "Antivirus default;Delete;Quarantine",
            "Manage infected files",
            "Antivirus default",
        ),
        ("quardir", "folder", "Quarantine folder", ""),
        ("deltotrash", "bool", "Move to trash instead delete", True),
        ("scanfailed", "bool", "Scan failed downloads", False),
        ("avfile", "file", "Antivirus executable", ""),
        ("avargs", "str", "Executable arguments", ""),
        ("avtarget", "file;folder", "Scan target", "file"),
        ("ignore-err", "bool", "Ignore scan errors", False),
    ]

    __description__ = """Scan downloaded files with antivirus program"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    @expose
    @threaded
    def scan(self, pyfile, thread):
        """Run the configured antivirus on the download and act on the result.

        Executed on its own thread (@threaded) and exposed to the API
        (@expose). A non-zero scanner exit code is treated as "infected".
        """
        avfile = os.fsdecode(self.config.get("avfile"))
        avargs = os.fsdecode(self.config.get("avargs").strip())

        if not os.path.isfile(avfile):
            self.fail(self._("Antivirus executable not found"))

        scanfolder = self.config.get("avtarget") == "folder"
        if scanfolder:
            dl_folder = self.pyload.config.get("general", "storage_folder")
            package_folder = (
                pyfile.package().folder
                if self.pyload.config.get("general", "folder_per_package")
                else ""
            )
            target = os.path.join(dl_folder, package_folder, pyfile.name)
            #: BUGFIX: parenthesize the `or` - the old expression evaluated as
            #: ("Folder: " + package_folder) or dl_folder, which never fell
            #: back to dl_folder because "Folder: " is always truthy.
            target_repr = "Folder: " + (package_folder or dl_folder)
        else:
            target = os.fsdecode(pyfile.plugin.last_download)
            target_repr = "File: " + os.path.basename(pyfile.plugin.last_download)

        if not exists(target):
            return

        thread.add_active(pyfile)
        pyfile.set_custom_status(self._("virus scanning"))
        pyfile.set_progress(0)

        try:
            #: BUGFIX: capture output - without PIPE, communicate() returns
            #: (None, None) and the scanner's report was silently lost.
            #: NOTE(review): avargs is passed as a single argv element; multiple
            #: arguments in one string are not split - confirm intended.
            p = subprocess.Popen(
                [avfile, avargs, target],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            out, err = (to_str(x).strip() for x in p.communicate())

            if out:
                self.log_info(target_repr, out)

            if err:
                self.log_warning(target_repr, err)
                if not self.config.get("ignore-err"):
                    self.log_debug("Delete/Quarantine task aborted due scan error")
                    return

            if p.returncode:
                action = self.config.get("action")
                if scanfolder:
                    #: NOTE(review): in folder scan mode no delete/quarantine is
                    #: performed; the warning only fires for the default action
                    #: as in the original code - confirm intended.
                    if action == "Antivirus default":
                        self.log_warning(
                            self._("Delete/Quarantine task skipped in folder scan mode")
                        )
                    return

                pyfile.error = self._("Infected file")
                try:
                    if action == "Delete":
                        if not self.config.get("deltotrash"):
                            os.remove(target)
                        else:
                            try:
                                send2trash.send2trash(target)
                            except NameError:
                                #: send2trash import at module top may have failed.
                                self.log_warning(
                                    self._(
                                        "Send2Trash lib not found, moving to quarantine instead"
                                    )
                                )
                                pyfile.set_custom_status(self._("file moving"))
                                shutil.move(target, self.config.get("quardir"))
                            except Exception as exc:
                                self.log_warning(
                                    self._(
                                        "Unable to move file to trash: {}, moving to quarantine instead"
                                    ).format(exc)
                                )
                                pyfile.set_custom_status(self._("file moving"))
                                shutil.move(target, self.config.get("quardir"))
                            else:
                                self.log_debug("Successfully moved file to trash")
                    elif action == "Quarantine":
                        pyfile.set_custom_status(self._("file moving"))
                        shutil.move(target, self.config.get("quardir"))
                except (IOError, shutil.Error) as exc:
                    self.log_error(target_repr, action + " action failed!", exc)
            elif not err:
                self.log_debug(target_repr, "No infected file found")
        finally:
            pyfile.set_progress(100)
            thread.finish_file(pyfile)

    def download_finished(self, pyfile):
        """Scan every successfully finished download."""
        return self.scan(pyfile)

    def download_failed(self, pyfile):
        """Optionally scan failed downloads as well."""
        #: Check if pyfile is still "failed", maybe might has been restarted in meantime
        if pyfile.status == 8 and self.config.get("scanfailed"):
            return self.scan(pyfile)
|
parts | _base | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Base types for parts.
"""
import collections
import listmodel
import ly.dom
from PyQt5.QtWidgets import (
QCheckBox,
QComboBox,
QGridLayout,
QHBoxLayout,
QLabel,
QSpinBox,
)
# A category in the part-selection UI: a title, the items it offers, and an icon.
Category = collections.namedtuple("Category", "title items icon")


def translate(*args):
    """Translate the arguments using the application's language."""
    return _(*args)
class Base:
    """Base class for both Part and Container."""

    @staticmethod
    def title(_=translate):
        """Should return a title.

        If a translator is given, it is used instead of the builtin.
        """

    @staticmethod
    def short(_=translate):
        """Should return an abbreviated title.

        If a translator is given, it is used instead of the builtin.
        """

    def createWidgets(self, layout):
        """Should create widgets to adjust settings."""
        # Default: a single label telling the user there is nothing to set.
        self.noSettingsLabel = QLabel()
        layout.addWidget(self.noSettingsLabel)

    def translateWidgets(self):
        """Should set the text in the widgets when the language changes."""
        self.noSettingsLabel.setText("({})".format(_("No settings available.")))

    def accepts(self):
        """Should return a tuple of classes this part item accepts as child items."""
        return ()

    def build(self, data, builder):
        """Should populate the PartData (see build.py)."""
        # Default: leave only a marker comment in the generated document.
        data.nodes.append(ly.dom.Comment(f"Part {self.__class__.__name__}"))
class Part(Base):
    """Base class for Parts (leaf items that can't contain other parts)."""
class Container(Base):
    """Base class for "part" types that can contain others, like a Staff Group or Score, Book etc."""

    def accepts(self):
        # Any part or nested container may become a child.
        return (Part, Container)
class Group(Container):
    """Base class for "part" types that are a group such as Book, BookPart and Score."""

    # Marker subclass: adds no behaviour of its own in this module.
# Mixin- or base classes with basic behaviour
class SingleVoicePart(Part):
    """Base class for a part creating a single single-voice staff."""

    # Subclasses override these class attributes:
    midiInstrument = ""  # MIDI instrument name; empty string means none
    clef = None  # clef name, or None for the default clef
    octave = 1  # default octave for the assigned music
    transposition = None  # or a three tuple (octave, note, alteration)

    def build(self, data, builder):
        """Create one Staff with the assigned music and append it to data.nodes."""
        a = data.assignMusic(None, self.octave, self.transposition)
        staff = ly.dom.Staff()
        builder.setInstrumentNamesFromPart(staff, self, data)
        if self.midiInstrument:
            builder.setMidiInstrument(staff, self.midiInstrument)
        seq = ly.dom.Seqr(staff)
        if self.clef:
            ly.dom.Clef(self.clef, seq)
        ly.dom.Identifier(a.name, seq)
        data.nodes.append(staff)
class PianoStaffPart(Part):
    """Base class for parts creating a piano staff (two staves, upper/lower)."""

    midiInstruments = ()  # may contain a list of MIDI instruments.

    def createWidgets(self, layout):
        """Create the voice-count spinboxes (and MIDI selector if applicable)."""
        self.label = QLabel(wordWrap=True)
        self.upperVoicesLabel = QLabel()
        self.lowerVoicesLabel = QLabel()
        self.upperVoices = QSpinBox(minimum=1, maximum=4, value=1)
        self.lowerVoices = QSpinBox(minimum=1, maximum=4, value=1)
        self.upperVoicesLabel.setBuddy(self.upperVoices)
        self.lowerVoicesLabel.setBuddy(self.lowerVoices)
        layout.addWidget(self.label)
        grid = QGridLayout()
        grid.addWidget(self.upperVoicesLabel, 0, 0)
        grid.addWidget(self.upperVoices, 0, 1)
        grid.addWidget(self.lowerVoicesLabel, 1, 0)
        grid.addWidget(self.lowerVoices, 1, 1)
        layout.addLayout(grid)
        if self.midiInstruments:
            self.createMidiInstrumentWidgets(layout)

    def createMidiInstrumentWidgets(self, layout):
        """Create the MIDI instrument combo box, pre-selecting the default."""
        self.midiInstrumentLabel = QLabel()
        self.midiInstrumentSelection = QComboBox()
        self.midiInstrumentSelection.addItems(self.midiInstruments)
        self.midiInstrumentSelection.setCurrentIndex(
            self.midiInstruments.index(self.midiInstrument)
        )
        box = QHBoxLayout()
        layout.addLayout(box)
        box.addWidget(self.midiInstrumentLabel)
        box.addWidget(self.midiInstrumentSelection)

    def translateWidgets(self):
        """Re-set all widget texts in the current UI language."""
        self.label.setText(
            "{} <i>({})</i>".format(
                _("Adjust how many separate voices you want on each staff."),
                _(
                    "This is primarily useful when you write polyphonic music "
                    "like a fugue."
                ),
            )
        )
        self.upperVoicesLabel.setText(_("Right hand:"))
        self.lowerVoicesLabel.setText(_("Left hand:"))
        if self.midiInstruments:
            self.translateMidiInstrumentWidgets()

    def translateMidiInstrumentWidgets(self):
        self.midiInstrumentLabel.setText(_("MIDI instrument:"))

    def buildStaff(
        self, data, builder, name, octave, numVoices=1, node=None, clef=None
    ):
        """Build a staff with the given number of voices and name."""
        staff = ly.dom.Staff(name, parent=node)
        if self.midiInstruments:
            midiInstrument = self.midiInstrumentSelection.currentText()
            if midiInstrument == "percussive organ" and name != "right":
                # The Hammond B3, which this MIDI instrument is intended
                # to emulate, only supports percussion on the upper manual
                midiInstrument = "drawbar organ"
            builder.setMidiInstrument(staff, midiInstrument)
        else:
            builder.setMidiInstrument(staff, self.midiInstrument)
        c = ly.dom.Seqr(staff)
        if clef:
            ly.dom.Clef(clef, c)
        if numVoices == 1:
            a = data.assignMusic(name, octave)
            ly.dom.Identifier(a.name, c)
        else:
            # Several voices: separate them with voice separators.
            # NOTE(review): ly.util is used below but only ly.dom is imported
            # at the top of this file - confirm ly.util is reachable.
            c = ly.dom.Sim(c)
            for i in range(1, numVoices):
                a = data.assignMusic(name + ly.util.int2text(i), octave)
                ly.dom.Identifier(a.name, c)
                ly.dom.VoiceSeparator(c)
            a = data.assignMusic(name + ly.util.int2text(numVoices), octave)
            ly.dom.Identifier(a.name, c)
        return staff

    def build(self, data, builder):
        """Setup structure for a 2-staff PianoStaff."""
        p = ly.dom.PianoStaff()
        builder.setInstrumentNamesFromPart(p, self, data)
        s = ly.dom.Sim(p)
        # add two staves, with a respective number of voices.
        self.buildStaff(data, builder, "right", 1, self.upperVoices.value(), s)
        self.buildStaff(data, builder, "left", 0, self.lowerVoices.value(), s, "bass")
        data.nodes.append(p)
class ChordNames:
    """Mixin adding a chord-names staff (and optional fret diagrams) to a part."""

    def createWidgets(self, layout):
        """Create the chord style combo and the fret-diagrams checkbox."""
        self.chordStyleLabel = QLabel()
        self.chordStyle = QComboBox()
        self.chordStyleLabel.setBuddy(self.chordStyle)
        self.chordStyle.setModel(
            listmodel.ListModel(
                chordNameStyles, self.chordStyle, display=listmodel.translate
            )
        )
        self.guitarFrets = QCheckBox()

        box = QHBoxLayout()
        box.addWidget(self.chordStyleLabel)
        box.addWidget(self.chordStyle)
        layout.addLayout(box)
        layout.addWidget(self.guitarFrets)

    def translateWidgets(self):
        self.chordStyleLabel.setText(_("Chord style:"))
        self.guitarFrets.setText(_("Guitar fret diagrams"))
        self.guitarFrets.setToolTip(
            _(
                "Show predefined guitar fret diagrams below the chord names "
                "(LilyPond 2.12 and above)."
            )
        )
        self.chordStyle.model().update()

    def build(self, data, builder):
        """Append a ChordNames context (and FretBoards if requested) to data."""
        p = ly.dom.ChordNames()
        a = data.assign("chordNames")
        ly.dom.Identifier(a.name, p)
        s = ly.dom.ChordMode(a)
        ly.dom.Identifier(data.globalName, s).after = 1
        # Index 0 is the default style; indices 1..4 map onto the language
        # commands below.
        i = self.chordStyle.currentIndex()
        if i > 0:
            ly.dom.Line(
                "\\{}Chords".format(
                    ("german", "semiGerman", "italian", "french")[i - 1]
                ),
                s,
            )
        ly.dom.LineComment(_("Chords follow here."), s)
        ly.dom.BlankLine(s)
        data.nodes.append(p)
        if self.guitarFrets.isChecked():
            f = ly.dom.FretBoards()
            ly.dom.Identifier(a.name, f)
            data.nodes.append(f)
            data.includes.append("predefined-guitar-fretboards.ly")
# Display names for the chord style combo box; wrapped in lambdas so the
# translation is looked up lazily in the current UI language.
chordNameStyles = (
    lambda: _("Default"),
    lambda: _("German"),
    lambda: _("Semi-German"),
    lambda: _("Italian"),
    lambda: _("French"),
)
|
extractor | bitchute | # coding: utf-8
from __future__ import unicode_literals
import itertools
import re
from ..utils import orderedSet, unified_strdate, urlencode_postdata
from .common import InfoExtractor
class BitChuteIE(InfoExtractor):
    """Extractor for single BitChute videos (video/embed/torrent URLs)."""

    _VALID_URL = r"https?://(?:www\.)?bitchute\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)"
    _TESTS = [
        {
            "url": "https://www.bitchute.com/video/szoMrox2JEI/",
            "md5": "66c4a70e6bfc40dcb6be3eb1d74939eb",
            "info_dict": {
                "id": "szoMrox2JEI",
                "ext": "mp4",
                "title": "Fuck bitches get money",
                "description": "md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "uploader": "Victoria X Rave",
                "upload_date": "20170813",
            },
        },
        {
            "url": "https://www.bitchute.com/embed/lbb5G1hjPhw/",
            "only_matching": True,
        },
        {
            "url": "https://www.bitchute.com/torrent/Zee5BE49045h/szoMrox2JEI.webtorrent",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # A desktop Chrome user agent is sent explicitly when fetching the page.
        webpage = self._download_webpage(
            "https://www.bitchute.com/video/%s" % video_id,
            video_id,
            headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36",
            },
        )

        # Title fallbacks: page heading, <title>, then description metadata.
        title = (
            self._html_search_regex(
                (r'<[^>]+\bid=["\']video-title[^>]+>([^<]+)', r"<title>([^<]+)"),
                webpage,
                "title",
                default=None,
            )
            or self._html_search_meta("description", webpage, "title", default=None)
            or self._og_search_description(webpage)
        )

        # Collect direct media URLs from addWebSeed(...) calls and "as=" params.
        format_urls = []
        for mobj in re.finditer(
            r'addWebSeed\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage
        ):
            format_urls.append(mobj.group("url"))
        format_urls.extend(re.findall(r'as=(https?://[^&"\']+)', webpage))

        formats = [{"url": format_url} for format_url in orderedSet(format_urls)]

        if not formats:
            # Fall back to the HTML5 <video> sources on the page.
            formats = self._parse_html5_media_entries(url, webpage, video_id)[0][
                "formats"
            ]

        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        description = self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=["\']full hidden[^>]+>(.+?)</div>',
            webpage,
            "description",
            fatal=False,
        )
        thumbnail = self._og_search_thumbnail(
            webpage, default=None
        ) or self._html_search_meta("twitter:image:src", webpage, "thumbnail")
        uploader = self._html_search_regex(
            (
                r'(?s)<div class=["\']channel-banner.*?<p\b[^>]+\bclass=["\']name[^>]+>(.+?)</p>',
                r'(?s)<p\b[^>]+\bclass=["\']video-author[^>]+>(.+?)</p>',
            ),
            webpage,
            "uploader",
            fatal=False,
        )
        upload_date = unified_strdate(
            self._search_regex(
                r'class=["\']video-publish-date[^>]+>[^<]+ at \d+:\d+ UTC on (.+?)\.',
                webpage,
                "upload date",
                fatal=False,
            )
        )

        return {
            "id": video_id,
            "title": title,
            "description": description,
            "thumbnail": thumbnail,
            "uploader": uploader,
            "upload_date": upload_date,
            "formats": formats,
        }
class BitChuteChannelIE(InfoExtractor):
    """Extractor yielding all videos of a BitChute channel as a playlist."""

    _VALID_URL = r"https?://(?:www\.)?bitchute\.com/channel/(?P<id>[^/?#&]+)"
    _TEST = {
        "url": "https://www.bitchute.com/channel/victoriaxrave/",
        "playlist_mincount": 185,
        "info_dict": {
            "id": "victoriaxrave",
        },
    }

    # Static CSRF token accepted by the /extend/ pagination endpoint.
    _TOKEN = "zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7"

    def _entries(self, channel_id):
        """Page through the channel's /extend/ endpoint, yielding url_results."""
        channel_url = "https://www.bitchute.com/channel/%s/" % channel_id
        offset = 0
        for page_num in itertools.count(1):
            data = self._download_json(
                "%sextend/" % channel_url,
                channel_id,
                "Downloading channel page %d" % page_num,
                data=urlencode_postdata(
                    {
                        "csrfmiddlewaretoken": self._TOKEN,
                        "name": "",
                        "offset": offset,
                    }
                ),
                headers={
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                    "Referer": channel_url,
                    "X-Requested-With": "XMLHttpRequest",
                    "Cookie": "csrftoken=%s" % self._TOKEN,
                },
            )
            # Stop on explicit failure, empty payload, or no more video links.
            if data.get("success") is False:
                break
            html = data.get("html")
            if not html:
                break
            video_ids = re.findall(
                r'class=["\']channel-videos-image-container[^>]+>\s*<a\b[^>]+\bhref=["\']/video/([^"\'/]+)',
                html,
            )
            if not video_ids:
                break
            offset += len(video_ids)
            for video_id in video_ids:
                yield self.url_result(
                    "https://www.bitchute.com/video/%s" % video_id,
                    ie=BitChuteIE.ie_key(),
                    video_id=video_id,
                )

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        return self.playlist_result(self._entries(channel_id), playlist_id=channel_id)
|
qtui | folderpage | # Copyright (C) 2011 Chris Dekter
# Copyright (C) 2018 Thomas Hess <thomas.hess@udo.edu>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import autokey.configmanager.configmanager_constants as cm_constants
import autokey.qtui.common as ui_common
from autokey.model.folder import Folder
from PyQt5.QtWidgets import QMessageBox
# NOTE(review): __import__ is used (instead of a plain import) presumably to
# resolve the attribute path "autokey.logger" at module load - confirm before
# simplifying.
logger = __import__("autokey.logger").logger.get_logger(__name__)

# Title and body template of the message box shown when validation fails.
PROBLEM_MSG_PRIMARY = "Some problems were found"
PROBLEM_MSG_SECONDARY = "{}\n\nYour changes have not been saved."
class FolderPage(*ui_common.inherits_from_ui_file_with_name("folderpage")):
    """Settings page showing and editing the properties of a single Folder."""

    def __init__(self):
        super(FolderPage, self).__init__()
        self.setupUi(self)
        self.current_folder = None  # type: Folder

    def load(self, folder: Folder):
        """Show *folder* in the page widgets."""
        self.current_folder = folder
        self.showInTrayCheckbox.setChecked(folder.show_in_tray_menu)
        self.settingsWidget.load(folder)
        if self.is_new_item():
            self.urlLabel.setEnabled(False)
            self.urlLabel.setText("(Unsaved)")  # TODO: i18n
        else:
            ui_common.set_url_label(self.urlLabel, self.current_folder.path)

    def save(self):
        """Persist the folder; True when it is outside the default config dir."""
        self.current_folder.show_in_tray_menu = self.showInTrayCheckbox.isChecked()
        self.settingsWidget.save()
        self.current_folder.persist()
        ui_common.set_url_label(self.urlLabel, self.current_folder.path)
        return not self.current_folder.path.startswith(
            cm_constants.CONFIG_DEFAULT_FOLDER
        )

    def get_current_item(self):
        """Returns the currently held item."""
        return self.current_folder

    def set_item_title(self, title: str):
        self.current_folder.title = title

    def rebuild_item_path(self):
        self.current_folder.rebuild_path()

    def is_new_item(self):
        # A folder that was never persisted has no path yet.
        return self.current_folder.path is None

    def reset(self):
        """Discard unsaved edits by re-loading the current folder."""
        self.load(self.current_folder)

    def validate(self):
        """Validate the settings widget; show errors, return True when valid."""
        # Check settings
        errors = self.settingsWidget.validate()
        if errors:
            msg = PROBLEM_MSG_SECONDARY.format("\n".join([str(e) for e in errors]))
            QMessageBox.critical(self.window(), PROBLEM_MSG_PRIMARY, msg)
        return not bool(errors)

    def set_dirty(self):
        self.window().set_dirty()

    # --- Signal handlers

    def on_showInTrayCheckbox_stateChanged(self, state: int):
        # Qt's stateChanged signal passes a Qt.CheckState int, not a bool.
        self.set_dirty()

    @staticmethod
    def on_urlLabel_leftClickedUrl(url: str = None):
        if url:
            subprocess.Popen(["/usr/bin/xdg-open", url])
|
plugins | afreeca | """
$description TV and live video game broadcasts, artist performances and personal daily-life video blogs & shows.
$url play.afreecatv.com
$type live
"""
import logging
import re
from streamlink.plugin import Plugin, pluginargument, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream, HLSStreamReader, HLSStreamWriter
log = logging.getLogger(__name__)
class AfreecaHLSStreamWriter(HLSStreamWriter):
    """HLS writer that also drops AfreecaTV's "preloading" filler segments."""

    def should_filter_sequence(self, sequence):
        # Filter segments whose URI marks them as preloading placeholders.
        return "preloading" in sequence.segment.uri or super().should_filter_sequence(sequence)
class AfreecaHLSStreamReader(HLSStreamReader):
    """HLS reader wired to the AfreecaTV-specific writer."""

    __writer__ = AfreecaHLSStreamWriter
class AfreecaHLSStream(HLSStream):
    """HLS stream wired to the AfreecaTV-specific reader."""

    __reader__ = AfreecaHLSStreamReader
@pluginmatcher(
    re.compile(
        # BUGFIX: the optional broadcast-number group previously read
        # (?P<bno>:\d+), requiring a literal ':' before the digits; it never
        # matched real URLs like play.afreecatv.com/<user>/<bno>, so the page
        # was always scraped for the number.
        r"https?://play\.afreecatv\.com/(?P<username>\w+)(?:/(?P<bno>\d+))?",
    )
)
@pluginargument(
    "username",
    sensitive=True,
    requires=["password"],
    metavar="USERNAME",
    help="The username used to register with afreecatv.com.",
)
@pluginargument(
    "password",
    sensitive=True,
    metavar="PASSWORD",
    help="A afreecatv.com account password to use with --afreeca-username.",
)
@pluginargument(
    "purge-credentials",
    action="store_true",
    help="Purge cached AfreecaTV credentials to initiate a new session and reauthenticate.",
)
class AfreecaTV(Plugin):
    """Streamlink plugin for AfreecaTV live broadcasts."""

    # Fallback: scrape the broadcast number from the channel page.
    _re_bno = re.compile(r"var nBroadNo = (?P<bno>\d+);")

    CHANNEL_API_URL = "http://live.afreecatv.com/afreeca/player_live_api.php"
    CHANNEL_RESULT_OK = 1
    QUALITYS = ["original", "hd", "sd"]
    QUALITY_WEIGHTS = {
        "original": 1080,
        "hd": 720,
        "sd": 480,
    }

    _schema_channel = validate.Schema(
        {
            "CHANNEL": {
                "RESULT": validate.transform(int),
                validate.optional("BPWD"): str,
                validate.optional("BNO"): str,
                validate.optional("RMD"): str,
                validate.optional("AID"): str,
                validate.optional("CDN"): str,
            },
        },
        validate.get("CHANNEL"),
    )
    _schema_stream = validate.Schema(
        {
            validate.optional("view_url"): validate.url(
                scheme=validate.any("rtmp", "http"),
            ),
            "stream_status": str,
        },
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Consider the session authenticated only if all login cookies exist.
        self._authed = (
            self.session.http.cookies.get("PdboxBbs")
            and self.session.http.cookies.get("PdboxSaveTicket")
            and self.session.http.cookies.get("PdboxTicket")
            and self.session.http.cookies.get("PdboxUser")
            and self.session.http.cookies.get("RDB")
        )

    @classmethod
    def stream_weight(cls, key):
        """Weight AfreecaTV's named qualities; defer to Plugin otherwise."""
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, "afreeca"
        return Plugin.stream_weight(key)

    def _get_channel_info(self, broadcast, username):
        """Query the live API for channel metadata (result, bno, rmd, ...)."""
        data = {
            "bid": username,
            "bno": broadcast,
            "from_api": "0",
            "mode": "landing",
            "player_type": "html5",
            "pwd": "",
            "stream_type": "common",
            "type": "live",
        }
        res = self.session.http.post(self.CHANNEL_API_URL, data=data)
        return self.session.http.json(res, schema=self._schema_channel)

    def _get_hls_key(self, broadcast, username, quality):
        """Request the HLS access key (AID) for the given quality."""
        data = {
            "bid": username,
            "bno": broadcast,
            "from_api": "0",
            "mode": "landing",
            "player_type": "html5",
            "pwd": "",
            "quality": quality,
            "stream_type": "common",
            "type": "aid",
        }
        res = self.session.http.post(self.CHANNEL_API_URL, data=data)
        return self.session.http.json(res, schema=self._schema_channel)

    def _get_stream_info(self, broadcast, quality, rmd):
        """Resolve the CDN playlist URL for a broadcast/quality pair."""
        params = {
            "return_type": "gs_cdn_pc_web",
            "broad_key": f"{broadcast}-common-{quality}-hls",
        }
        res = self.session.http.get(f"{rmd}/broad_stream_assign.html", params=params)
        return self.session.http.json(res, schema=self._schema_stream)

    def _get_hls_stream(self, broadcast, username, quality, rmd):
        """Return an AfreecaHLSStream for *quality*, or None if unavailable."""
        keyjson = self._get_hls_key(broadcast, username, quality)
        if keyjson["RESULT"] != self.CHANNEL_RESULT_OK:
            return
        key = keyjson["AID"]

        info = self._get_stream_info(broadcast, quality, rmd)
        if "view_url" in info:
            return AfreecaHLSStream(self.session, info["view_url"], params={"aid": key})

    def _login(self, username, password):
        """Log in and cache the session cookies; True on success."""
        data = {
            "szWork": "login",
            "szType": "json",
            "szUid": username,
            "szPassword": password,
            "isSaveId": "true",
            "isSavePw": "false",
            "isSaveJoin": "false",
            "isLoginRetain": "Y",
        }
        res = self.session.http.post("https://login.afreecatv.com/app/LoginAction.php", data=data)
        data = self.session.http.json(res)
        log.trace(f"{data!r}")
        if data["RESULT"] != self.CHANNEL_RESULT_OK:
            return False
        self.save_cookies()
        return True

    def _get_streams(self):
        login_username = self.get_option("username")
        login_password = self.get_option("password")

        self.session.http.headers.update({"Referer": self.url, "Origin": "http://play.afreecatv.com"})

        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed")

        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif login_username and login_password:
            log.debug("Attempting to login using username and password")
            if self._login(login_username, login_password):
                log.info("Login was successful")
            else:
                log.error("Failed to login")

        m = self.match.groupdict()
        username = m["username"]
        bno = m["bno"]
        if bno is None:
            # Broadcast number not in the URL: scrape it from the page.
            res = self.session.http.get(self.url)
            m = self._re_bno.search(res.text)
            if not m:
                log.error("Could not find broadcast number.")
                return
            bno = m.group("bno")

        channel = self._get_channel_info(bno, username)
        log.trace(f"{channel!r}")
        if channel.get("BPWD") == "Y":
            log.error("Stream is Password-Protected")
            return
        elif channel.get("RESULT") == -6:
            log.error("Login required")
            return
        elif channel.get("RESULT") != self.CHANNEL_RESULT_OK:
            return

        (broadcast, rmd) = (channel["BNO"], channel["RMD"])
        if not (broadcast and rmd):
            return

        # Try each named quality, best first.
        for qkey in self.QUALITYS:
            hls_stream = self._get_hls_stream(broadcast, username, qkey, rmd)
            if hls_stream:
                yield qkey, hls_stream
__plugin__ = AfreecaTV
|
preferences | lilypond | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
LilyPond preferences page
"""
import functools
import json
import os
import platform
import shutil
import sys
import tempfile
from dataclasses import dataclass
import app
import icons
import lilypondinfo
import linux
import preferences
import qsettings
import qutil
import userguide
import widgets.listedit
import widgets.urlrequester
from PyQt5.QtCore import QSettings, Qt, QTimer, QUrl, pyqtSignal
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
from PyQt5.QtWidgets import (
QAbstractItemView,
QAction,
QCheckBox,
QDialog,
QDialogButtonBox,
QFileDialog,
QGridLayout,
QHBoxLayout,
QLabel,
QLineEdit,
QListWidgetItem,
QMenu,
QMessageBox,
QPushButton,
QRadioButton,
QTabWidget,
QVBoxLayout,
QWidget,
)
def settings():
    """Return a QSettings object already scoped to the "lilypond_settings" group."""
    qs = QSettings()
    qs.beginGroup("lilypond_settings")
    return qs
@functools.cache
def _network_manager():
    """Return the process-wide QNetworkAccessManager, created lazily on first use.

    functools.cache makes this a singleton: every caller shares one manager.
    """
    return QNetworkAccessManager()
class LilyPondPrefs(preferences.ScrolledGroupsPage):
    """Top-level preferences page bundling all LilyPond-related option groups."""

    def __init__(self, dialog):
        super().__init__(dialog)
        vbox = QVBoxLayout()
        self.scrolledWidget.setLayout(vbox)
        # Order matters only for on-screen placement, not behavior.
        for group in (Versions(self), Target(self), Running(self)):
            vbox.addWidget(group)
class Versions(preferences.Group):
    """Preference group listing the LilyPond installations Frescobaldi may use.

    The default installation is tracked in ``self._defaultCommand`` (the
    command string of the default LilyPond); child widgets (InfoList,
    InfoItem) read it back through parentWidget().
    """

    def __init__(self, page):
        super().__init__(page)
        layout = QVBoxLayout()
        self.setLayout(layout)
        # Editable list of installations with Add/Edit/Remove and a
        # "Set as Default" button.
        self.instances = InfoList(self)
        self.instances.changed.connect(self.changed)
        self.instances.defaultButton.clicked.connect(self.defaultButtonClicked)
        layout.addWidget(self.instances)
        self.autoVersion = QCheckBox(clicked=self.changed)
        layout.addWidget(self.autoVersion)
        app.translateUI(self)
        userguide.openWhatsThis(self)

    def defaultButtonClicked(self):
        """Make the currently selected installation the default one."""
        item = self.instances.listBox.currentItem()
        # This holds because otherwise the "Set as Default" button would
        # be greyed out.
        assert isinstance(item.state, InstalledState)
        # Register the current default instance (identified by its command)
        self._defaultCommand = item.state.info.command
        # Remove the [default] marker from the previous item that had it.
        for item in self.instances.items():
            item.display()
        self.changed.emit()

    def translateUI(self):
        """Set or refresh all translatable texts (called via app.translateUI)."""
        self.setTitle(_("LilyPond versions to use"))
        self.autoVersion.setText(
            _("Automatically choose LilyPond version from document")
        )
        self.autoVersion.setToolTip(
            _(
                "If checked, the document's version determines the LilyPond version to use.\n"
                'See "What\'s This" for more information.'
            )
        )
        self.autoVersion.setWhatsThis(
            userguide.html("prefs_lilypond_autoversion")
            + _("See also {link}.").format(link=userguide.link("prefs_lilypond"))
        )

    def loadSettings(self):
        """Populate the widgets from QSettings."""
        s = settings()
        default = lilypondinfo.default()
        self._defaultCommand = s.value("default", default.command, str)
        self.autoVersion.setChecked(s.value("autoversion", False, bool))
        infos = sorted(lilypondinfo.infos(), key=lambda i: i.version())
        if not infos:
            infos = [default]
        items = [InfoItem(InstalledState(info)) for info in infos]
        self.instances.setItems(items)
        # Make the default version selected initially
        for item in items:
            if item.state.info.command == self._defaultCommand:
                self.instances.setCurrentItem(item)
                break

    def saveSettings(self):
        """Persist the list, warning about (and aborting) unfinished downloads.

        Raises preferences.CancelClosingPreferences if the user chooses to
        keep waiting for pending downloads instead of closing.
        """
        infos = []
        pending_items = []
        for item in self.instances.items():
            if isinstance(item.state, DownloadingState):
                pending_items.append(item)
            elif isinstance(item.state, InstalledState):
                infos.append(item.state.info)
            # nothing to do for DownloadErrorState, just discard these
        if pending_items:
            answer = QMessageBox.warning(
                self,
                app.caption("Downloads pending"),
                _("LilyPond downloads are in progress. Stop them?"),
                QMessageBox.Ok | QMessageBox.Cancel,
            )
            if answer == QMessageBox.Cancel:
                raise preferences.CancelClosingPreferences
            else:
                # Stop unfinished downloads
                for item in pending_items:
                    item.abortDownload()
        # Ensure _defaultCommand refers to one of the saved infos; fall back
        # to the first info (or a fresh default) otherwise.
        if infos:
            for info in infos:
                if info.command == self._defaultCommand:
                    break
            else:
                self._defaultCommand = infos[0].command
        else:
            infos = [lilypondinfo.default()]
            self._defaultCommand = infos[0].command
        s = settings()
        s.setValue("default", self._defaultCommand)
        s.setValue("autoversion", self.autoVersion.isChecked())
        lilypondinfo.setinfos(infos)
        lilypondinfo.saveinfos()
class InfoList(widgets.listedit.ListEdit):
    """Editable list of LilyPond installations, extended with a
    "Set as Default" button and an "Add" button that opens a menu."""

    def __init__(self, group):
        # Compared to a basic ListEdit, this widget also has a "Set as Default"
        # button. This must be initialized before the base class __init__, because
        # it calls updateSelection().
        self.defaultButton = QPushButton()
        super().__init__(group)
        # The "Add" button is transformed into a menu.
        self.addMenu = AddMenu(self)
        self.addButton.setMenu(self.addMenu)
        self.layout().addWidget(self.defaultButton, 3, 1)
        self.layout().addWidget(self.listBox, 0, 0, 5, 1)

    def translateUI(self):
        super().translateUI()
        self.defaultButton.setText(_("Set as &Default"))

    def updateSelection(self):
        """Overridden to grey out the "Edit" and "Set as Default" buttons in more cases."""
        super().updateSelection()
        # "Edit" and "Set as Default" are greyed out not only when there are
        # no items at all, but also when the current item is for a version that
        # is being downloaded. In theory, "Edit" could be made to work (with
        # some refactorings) on such items, but the use case for auto-managed
        # versions is not clear. Likewise, we could make "Set as Default" work,
        # but it is problematic to know what to do if a version is set as
        # default while being downloaded, and then the download fails (which
        # becomes the default afterwards?). Practically, not being able to
        # edit an installation that is being downloaded is probably intuitively
        # normal for most people.
        item = self.listBox.currentItem()
        is_installed_item = bool(item) and isinstance(item.state, InstalledState)
        self.defaultButton.setEnabled(is_installed_item)
        self.editButton.setEnabled(is_installed_item)

    def createItem(self):
        """Create a fresh item for the "Custom" add action.

        Returns None inside a Flatpak sandbox, where custom binaries are
        unusable; openEditor() treats None as "cancel".
        """
        if linux.inside_flatpak():
            QMessageBox.critical(
                self,
                app.caption(_("Error")),
                _(
                    """You have installed Frescobaldi from Flathub using \
Flatpak. Because Flatpak isolates apps from the rest of the system, it is not possible \
to use custom LilyPond binaries in this configuration. Please either let Frescobaldi auto-install \
a given LilyPond version, using the “Add…” menu, or if you wish to use a custom build, \
install Frescobaldi using a distribution package or from source."""
                ),
            )
            return None
        return InfoItem(InstalledState(lilypondinfo.LilyPondInfo("lilypond")))

    def openEditor(self, item):
        """Open the InfoDialog for `item`; return True if the user accepted."""
        if item is None:  # Flatpak case above
            return False
        # Either we're adding a custom version and the item is created in createItem
        # above, or we're editing an existing version because the "Edit" button was clicked,
        # and it's disabled for versions that are being downloaded or failed to download.
        # Either way, we can only edit fully installed versions.
        assert isinstance(item.state, InstalledState)
        dlg = InfoDialog(self, item.state.info)
        # Since we use the command as the way to remember which version is the
        # default, and the user may edit the command here, we need to update
        # it. The check "item in self.items()" is true if this is an existing
        # item that we are updating, with the "Edit" button, but false if it's
        # a fresh item as created by the "Add" button in createItem() above.
        was_default = (
            item in self.items()
            and item.state.info.command == self.parentWidget()._defaultCommand
        )
        if dlg.exec_():
            item.state.info = dlg.newInfo()
            if was_default:
                self.parentWidget()._defaultCommand = item.state.info.command
            return True
        return False

    def itemChanged(self, item):
        # Refresh the item's text/icon and keep it selected.
        item.display()
        self.setCurrentItem(item)

    def itemRemoved(self, item):
        """Clean up after an item is removed from the list."""
        if isinstance(item.state, DownloadingState):
            # Cancel a pending download
            item.abortDownload()
        elif isinstance(item.state, InstalledState):
            # Wipe out if auto-managed
            item.state.info.forget()
        # If this version of LilyPond is no longer installed, the menu needs to know
        # about it, to reenable the action.
        self.addButton.menu().updateActions()
# The three concrete subclasses below form a small tagged union; InfoItem
# and InfoList dispatch on them with isinstance().
class ItemState:
    """Base class for classes holding the state of an InfoItem."""
@dataclass
class DownloadingState(ItemState):
    """Data for an InfoItem whose LilyPond is being downloaded from the network."""

    version_string: str  # e.g. "2.24.1"
    download_progress: int = 0  # percentage
@dataclass
class DownloadErrorState(ItemState):
    """Data for an InfoItem whose LilyPond could not be downloaded."""

    version_string: str  # version the failed download was for
@dataclass
class InstalledState(ItemState):
    """Data for an InfoItem whose LilyPond is fully installed on the system."""

    # Full metadata (command, version, tool commands) of the installation.
    info: lilypondinfo.LilyPondInfo
# GitLab "generic packages" URL for official LilyPond binary archives;
# format()-ed with a version string and a platform-specific archive name.
_LILYPOND_PACKAGE_URL = (
    "https://gitlab.com/api/v4/projects/lilypond%2Flilypond/packages/"
    + "generic/lilypond/{version_string}/{archive}"
)
class InfoItem(QListWidgetItem):
    """An item in the list of LilyPond versions.
    The `state` attribute holds an instance of `ItemState`.
    """

    # TODO: Currently, download progress is displayed with a percentage in text,
    # but it would be very nice to have it shown as a translucent progress bar
    # overlaid behind the item.
    def __init__(self, state):
        super().__init__()
        self.state = state

    @classmethod
    def fromNetwork(cls, infolist, version):
        """Make an InfoItem that will auto-download and install a given LilyPond version.

        `infolist` is the parent InfoList; `version` is a (major, minor,
        micro) tuple. Starts the download immediately; progress and
        completion are handled via the QNetworkReply signals below.
        """
        major, minor, micro = version
        version_string = f"{major}.{minor}.{micro}"
        self = cls(DownloadingState(version_string))
        self.parent = infolist
        # Pick the official binary archive for the current platform.
        if platform.system() == "Linux":
            archive = f"lilypond-{major}.{minor}.{micro}-linux-x86_64.tar.gz"
            self.archive_format = "gztar"
        elif platform.system() == "Darwin":
            archive = f"lilypond-{major}.{minor}.{micro}-darwin-x86_64.tar.gz"
            self.archive_format = "gztar"
        elif platform.system() == "Windows":
            archive = f"lilypond-{major}.{minor}.{micro}-mingw-x86_64.zip"
            self.archive_format = "zip"
        else:
            raise AssertionError
        # Download an archive of LilyPond binaries from GitLab
        archive_url = QUrl(
            _LILYPOND_PACKAGE_URL.format(version_string=version_string, archive=archive)
        )
        self.reply = _network_manager().get(QNetworkRequest(archive_url))
        self.reply.downloadProgress.connect(self.updateProgress)
        self.reply.finished.connect(self.downloadFinished)
        self.reply.errorOccurred.connect(self.downloadError)
        if platform.system() == "Windows":
            exec_name = "lilypond.exe"
        else:
            exec_name = "lilypond"
        # Where the lilypond executable will live once the archive is unpacked.
        self.executable_path = os.path.join(
            lilypondinfo.LILYPOND_AUTOINSTALL_DIR,
            f"lilypond-{version_string}",
            "bin",
            exec_name,
        )
        return self

    def updateProgress(self, bytes_received, bytes_total):
        """Slot for QNetworkReply.downloadProgress; update the percentage shown."""
        if isinstance(self.state, DownloadErrorState):
            # This happens when the download is cancelled with abortDownload();
            # Qt triggers the downloadProgress signal one last time.
            return
        if bytes_total <= 0:
            # Qt reports bytesTotal == -1 when the total size is unknown, and
            # (0, 0) in some abort cases; guard against a ZeroDivisionError.
            return
        progress = round(bytes_received / bytes_total * 100)
        assert isinstance(self.state, DownloadingState)
        self.state.download_progress = progress
        self.display()

    def downloadFinished(self):
        """Slot for QNetworkReply.finished; unpack the archive and switch state."""
        if isinstance(self.state, DownloadErrorState):
            return  # the finished signal is called even in the error case
        assert isinstance(self.state, DownloadingState)
        # Unpack the downloaded archive
        with tempfile.NamedTemporaryFile() as tfile:
            tfile.write(self.reply.readAll())
            shutil.unpack_archive(
                tfile.name,
                lilypondinfo.LILYPOND_AUTOINSTALL_DIR,
                format=self.archive_format,
            )
        # Switch to "installed" state
        self.state = InstalledState(lilypondinfo.LilyPondInfo(self.executable_path))
        self.display()
        # If this item is still selected, the parent will need to reenable
        # the "Edit" and "Set as Default" buttons.
        self.parent.updateSelection()

    def downloadError(self, code):
        """Slot for QNetworkReply.errorOccurred; switch to the error state."""
        assert isinstance(self.state, DownloadingState)
        self.state = DownloadErrorState(self.state.version_string)
        self.display()

    def abortDownload(self):
        """Cancel a pending download."""
        self.reply.abort()  # also calls downloadError()

    def display(self):
        """Refresh this item's icon and text from its current state."""
        if isinstance(self.state, DownloadErrorState):
            icon = "dialog-error"
            text = _("Could not download LilyPond {version}").format(
                version=self.state.version_string
            )
        elif isinstance(self.state, DownloadingState):
            icon = "view-refresh"
            text = _("Downloading LilyPond {version}... {progress}%").format(
                version=self.state.version_string, progress=self.state.download_progress
            )
        elif isinstance(self.state, InstalledState):
            if self.state.info.version():
                icon = "lilypond-run"
            else:
                icon = "dialog-error"
            text = self.state.info.prettyName()
            # Walk up to the Versions group to learn the current default.
            default = self.listWidget().parentWidget().parentWidget()._defaultCommand
            if self.state.info.command == default:
                text += " [{}]".format(_("default"))
        self.setIcon(icons.get(icon))
        self.setText(text)
# GitLab API endpoint listing LilyPond releases (numeric project id 18695663).
_LILYPOND_RELEASES_API_URL = "https://gitlab.com/api/v4/projects/18695663/releases"
class AddMenu(QMenu):
    """Menu added to the "Add" button of the version list.
    It offers several methods of adding: auto-download, or use a local install.
    There is a set of actions for auto-download and one "Custom" action.
    Initially, the auto-download part displays a greyed-out action while
    downloading the list of available LilyPond versions. When the download
    completes, the normal buttons are added. If the download fails, the
    greyed-out action is replaced with another one to signal the failure.
    """

    def __init__(self, parent):
        super().__init__(parent)
        # Fake menu item as placeholder while the download is in progress.
        self.addAction(_("Downloading LilyPond version list...")).setEnabled(False)
        # Action to add custom LilyPond version anywhere on the file system.
        self.customAction = QAction(
            _("Button to register a custom LilyPond installation", "Custom...")
        )
        self.addAction(self.customAction)
        # In most ListEdit's, this is the slot connected to the "Add"
        # button, here it's connected to the "Custom" button.
        self.customAction.triggered.connect(parent.addClicked)
        # Get the list of available LilyPond versions using the GitLab API.
        url = QUrl(_LILYPOND_RELEASES_API_URL)
        self.reply = _network_manager().get(QNetworkRequest(url))
        self.reply.finished.connect(self.downloadFinished)
        self.reply.errorOccurred.connect(self.downloadError)
        self.hasError = False
        self.downloadSuccessful = False

    def downloadFinished(self):
        """Rebuild the menu from the downloaded GitLab release list."""
        if self.hasError:
            return  # self.reply.finished is triggered even in case of error
        self.downloadSuccessful = True
        # Decode the JSON data returned by the API
        data = json.loads(bytes(self.reply.readAll()))
        versions = []
        for item in data:
            tag = item["tag_name"]
            # This release dates back to before LilyPond binaries became simple
            # for us to install. (Its tag also uses a different naming convention.)
            if tag == "release/2.22.2-1":
                continue
            # tag has the form v2.x.y
            version = tuple(int(n) for n in tag.lstrip("v").split("."))
            versions.append(version)
        versions.sort(reverse=True)  # most recent version first
        self.clear()  # rebuild the menu
        # (version tuple, QAction) pairs for every auto-download action.
        # Renamed from `self.actions`, which shadowed the inherited
        # QWidget.actions() method and broke that Qt API on this menu.
        self._version_actions = []
        # Find the latest stable version
        for major, minor, micro in versions:
            if minor % 2 == 0:
                latest_stable = (major, minor, micro)
                stable_text = _(
                    "latest stable LilyPond release",
                    "{major}.{minor}.{micro} (latest &stable)",
                ).format(major=major, minor=minor, micro=micro)
                # Make a dedicated menu item for this version, to make it
                # reachable very easily.
                self.makeVersionAction(latest_stable, stable_text, self)
                versions.remove(latest_stable)
                break
        else:  # no `break` reached
            raise AssertionError  # at least 2.24 should be in the list
        # Likewise, find the latest unstable version and add a menu item
        for major, minor, micro in versions:
            if minor % 2 == 1:
                latest_unstable = (major, minor, micro)
                unstable_text = _(
                    "latest unstable LilyPond release",
                    "{major}.{minor}.{micro} (latest &unstable)",
                ).format(major=major, minor=minor, micro=micro)
                self.makeVersionAction(latest_unstable, unstable_text, self)
                versions.remove(latest_unstable)
                break
        else:
            raise AssertionError
        # Add a submenu for all other versions.
        other_text = _(
            "LilyPond versions other than latest stable/unstable", "Other..."
        )
        self.otherMenu = self.addMenu(other_text)
        for version in versions:
            major, minor, micro = version
            text = f"{major}.{minor}.{micro}"
            self.makeVersionAction(version, text, self.otherMenu)
        # Add the "Custom" action that we already had.
        self.addAction(self.customAction)
        # Update which actions are enabled
        self.updateActions()

    def makeVersionAction(self, version, text, parent):
        """Create and register a menu action that auto-downloads `version`."""
        action = QAction(text, parent)
        parent.addAction(action)
        # Connect the action
        action.triggered.connect(self.autoDownload(version))
        self._version_actions.append((version, action))

    def downloadError(self, code):
        """Replace the placeholder with a failure notice if the list download failed."""
        self.hasError = True
        self.clear()  # rebuild the menu
        self.addAction(_("Could not download LilyPond version list")).setEnabled(False)
        self.addAction(self.customAction)

    def autoDownload(self, version):
        """Return a function that downloads a certain version, for connecting to menu actions."""

        def download_inner():
            item = InfoItem.fromNetwork(self.parent(), version)
            self.parent().addItem(item)
            self.updateActions()

        return download_inner

    def updateActions(self):
        """
        Disable a menu action if there is already an auto-managed LilyPond installation for the same version.
        Prevents the user from doing something silly and spares us error
        handling downstream to cope with duplicate LilyPondInfo's for the same
        installation (e.g., when removing them).
        """
        if not self.downloadSuccessful:
            # We aren't done downloading the list of versions yet (or there was
            # an error), so we don't have the version actions and we needn't care
            # about this.
            return
        disabled_versions = set()
        for item in self.parent().items():
            if isinstance(item.state, InstalledState) and item.state.info.isAutoManaged:
                disabled_versions.add(item.state.info.versionString())
            elif isinstance(item.state, DownloadingState):
                disabled_versions.add(item.state.version_string)
        for (major, minor, micro), action in self._version_actions:
            enable = f"{major}.{minor}.{micro}" not in disabled_versions
            action.setEnabled(enable)
class InfoDialog(QDialog):
    """Modal dialog to edit one LilyPondInfo: label, command, and tool commands."""

    def __init__(self, parent, info):
        super().__init__(parent)
        self.setWindowModality(Qt.WindowModal)
        layout = QVBoxLayout()
        layout.setSpacing(10)
        self.setLayout(layout)
        self.tab = QTabWidget()
        tab_general = QWidget()
        tab_toolcommands = QWidget()
        self.tab.addTab(tab_general, "")
        self.tab.addTab(tab_toolcommands, "")
        # general tab
        vbox = QVBoxLayout()
        vbox.setSpacing(4)
        tab_general.setLayout(vbox)
        hbox = QHBoxLayout()
        self.lilyname = QLineEdit()
        self.lilyname.setText(info.name)
        self.lilynameLabel = l = QLabel()
        l.setBuddy(self.lilyname)
        hbox.addWidget(l)
        hbox.addWidget(self.lilyname)
        vbox.addLayout(hbox)
        # For auto-managed installation, don't show the installation
        # path to the user or let them edit it.
        self.haveCommandEdit = not info.isAutoManaged
        if self.haveCommandEdit:
            self.lilypond = widgets.urlrequester.UrlRequester()
            self.lilypond.setFileMode(QFileDialog.ExistingFile)
            self.lilypond.setPath(info.command)
            self.lilypond.lineEdit.setFocus()
            self.lilypondLabel = l = QLabel()
            l.setBuddy(self.lilypond)
            vbox.addWidget(l)
            vbox.addWidget(self.lilypond)
        else:
            # Auto-managed: remember the fixed command for newInfo().
            self.lilypondCommand = info.command
        self.autoVersionIncluded = QCheckBox()
        self.autoVersionIncluded.setChecked(info.autoVersionIncluded)
        vbox.addWidget(self.autoVersionIncluded)
        vbox.addStretch(1)
        # toolcommands tab
        grid = QGridLayout()
        grid.setSpacing(4)
        tab_toolcommands.setLayout(grid)
        # Maps tool name -> (label widget, line-edit widget).
        self.ly_tool_widgets = {}
        row = 0
        for name, gui in self.toolnames():
            w = QLineEdit()
            l = QLabel()
            l.setBuddy(w)
            grid.addWidget(l, row, 0)
            grid.addWidget(w, row, 1)
            row += 1
            self.ly_tool_widgets[name] = (l, w)
            self.ly_tool_widgets[name][1].setText(info.ly_tool(name))
        layout.addWidget(self.tab)
        layout.addWidget(widgets.Separator())
        b = self.buttons = QDialogButtonBox(self)
        layout.addWidget(b)
        b.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        b.accepted.connect(self.accept)
        b.rejected.connect(self.reject)
        userguide.addButton(b, "prefs_lilypond")
        app.translateUI(self)
        qutil.saveDialogSize(self, "/preferences/lilypond/lilypondinfo/dialog/size")

    def toolnames(self):
        """Yield tuples (name, GUI name) for the sub tools we allow to be configured."""
        yield "convert-ly", _("Convert-ly:")
        yield "lilypond-book", _("LilyPond-book:")
        yield "midi2ly", _("Midi2ly:")
        yield "musicxml2ly", _("MusicXML2ly:")
        yield "abc2ly", _("ABC2ly:")

    def translateUI(self):
        """Set or refresh all translatable texts (called via app.translateUI)."""
        self.setWindowTitle(app.caption(_("LilyPond")))
        self.lilynameLabel.setText(_("Label:"))
        self.lilynameLabel.setToolTip(
            _("How this version of LilyPond will be displayed.")
        )
        if self.haveCommandEdit:
            self.lilypondLabel.setText(_("LilyPond Command:"))
            self.lilypond.lineEdit.setToolTip(
                _("Name or full path of the LilyPond program.")
            )
        self.autoVersionIncluded.setText(_("Include in automatic version selection"))
        self.tab.setTabText(0, _("General"))
        self.tab.setTabText(1, _("Tool Commands"))
        for name, gui in self.toolnames():
            self.ly_tool_widgets[name][0].setText(gui)

    def newInfo(self):
        """Returns a new LilyPondInfo instance for our settings."""
        if not self.haveCommandEdit:
            info = lilypondinfo.LilyPondInfo(self.lilypondCommand)
        elif platform.system() == "Darwin" and self.lilypond.path().endswith(".app"):
            # A macOS .app bundle was chosen; point inside it at the binary.
            info = lilypondinfo.LilyPondInfo(
                self.lilypond.path() + "/Contents/Resources/bin/lilypond"
            )
        else:
            info = lilypondinfo.LilyPondInfo(self.lilypond.path())
        # Only use the label if it is not empty or whitespace-only.
        if self.lilyname.text() and not self.lilyname.text().isspace():
            info.name = self.lilyname.text()
        info.autoVersionIncluded = self.autoVersionIncluded.isChecked()
        for name, gui in self.toolnames():
            info.set_ly_tool(name, self.ly_tool_widgets[name][1].text())
        return info
class Running(preferences.Group):
    """Preference group with options controlling how LilyPond jobs are run."""

    def __init__(self, page):
        super().__init__(page)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.saveDocument = QCheckBox(clicked=self.changed)
        self.deleteFiles = QCheckBox(clicked=self.changed)
        self.embedSourceCode = QCheckBox(clicked=self.changed)
        self.noTranslation = QCheckBox(clicked=self.changed)
        self.includeLabel = QLabel()
        self.include = widgets.listedit.FilePathEdit()
        self.include.listBox.setDragDropMode(QAbstractItemView.InternalMove)
        self.include.changed.connect(self.changed)
        for widget in (
            self.saveDocument,
            self.deleteFiles,
            self.embedSourceCode,
            self.noTranslation,
            self.includeLabel,
            self.include,
        ):
            layout.addWidget(widget)
        app.translateUI(self)

    def _checkboxes(self):
        """Yield (checkbox, settings key, default) for each boolean option."""
        yield self.saveDocument, "save_on_run", False
        yield self.deleteFiles, "delete_intermediate_files", True
        yield self.embedSourceCode, "embed_source_code", False
        yield self.noTranslation, "no_translation", False

    def translateUI(self):
        """Set or refresh all translatable texts (called via app.translateUI)."""
        self.setTitle(_("Running LilyPond"))
        for checkbox, text, tooltip in (
            (
                self.saveDocument,
                _("Save document if possible"),
                _(
                    "If checked, the document is saved when it is local and modified.\n"
                    "Otherwise a temporary file is used to run LilyPond."
                ),
            ),
            (
                self.deleteFiles,
                _("Delete intermediate output files"),
                _("If checked, LilyPond will delete intermediate PostScript files."),
            ),
            (
                self.embedSourceCode,
                _("Embed Source Code files in publish mode"),
                _(
                    "If checked, the LilyPond source files will be embedded in the PDF\n"
                    "when LilyPond is started in publish mode."
                ),
            ),
            (
                self.noTranslation,
                _("Run LilyPond with English messages"),
                _(
                    "If checked, LilyPond's output messages will be in English.\n"
                    "This can be useful for bug reports."
                ),
            ),
        ):
            checkbox.setText(text)
            checkbox.setToolTip(tooltip)
        self.includeLabel.setText(_("LilyPond include path:"))

    def loadSettings(self):
        """Populate the widgets from QSettings."""
        s = settings()
        for checkbox, key, default in self._checkboxes():
            checkbox.setChecked(s.value(key, default, bool))
        self.include.setValue(qsettings.get_string_list(s, "include_path"))

    def saveSettings(self):
        """Persist the widget states back to QSettings."""
        s = settings()
        for checkbox, key, _default in self._checkboxes():
            s.setValue(key, checkbox.isChecked())
        s.setValue("include_path", self.include.value())
class Target(preferences.Group):
    """Preference group selecting the default output format and viewer behavior."""

    def __init__(self, page):
        super().__init__(page)
        grid = QGridLayout()
        self.setLayout(grid)
        self.targetPDF = QRadioButton(toggled=page.changed)
        self.targetSVG = QRadioButton(toggled=page.changed)
        self.openDefaultView = QCheckBox(clicked=page.changed)
        grid.addWidget(self.targetPDF, 0, 0)
        grid.addWidget(self.targetSVG, 0, 1)
        grid.addWidget(self.openDefaultView, 1, 0, 1, 5)
        app.translateUI(self)

    def translateUI(self):
        """Set or refresh all translatable texts (called via app.translateUI)."""
        self.setTitle(_("Default output format"))
        self.targetPDF.setText(_("PDF"))
        self.targetPDF.setToolTip(
            _("Create PDF (Portable Document Format) documents by default.")
        )
        self.targetSVG.setText(_("SVG"))
        self.targetSVG.setToolTip(
            _("Create SVG (Scalable Vector Graphics) documents by default.")
        )
        self.openDefaultView.setText(_("Open default viewer after successful compile"))
        self.openDefaultView.setToolTip(
            _(
                "Shows the PDF or SVG music view when a compile job finishes "
                "successfully."
            )
        )

    def loadSettings(self):
        """Populate the widgets from QSettings (anything but "svg" means PDF)."""
        s = settings()
        want_svg = s.value("default_output_target", "pdf", str) == "svg"
        self.targetSVG.setChecked(want_svg)
        self.targetPDF.setChecked(not want_svg)
        self.openDefaultView.setChecked(s.value("open_default_view", True, bool))

    def saveSettings(self):
        """Persist the widget states back to QSettings."""
        s = settings()
        fmt = "svg" if self.targetSVG.isChecked() else "pdf"
        s.setValue("default_output_target", fmt)
        s.setValue("open_default_view", self.openDefaultView.isChecked())
|
downloaders | ExashareCom | # -*- coding: utf-8 -*-
from ..base.xfs_downloader import XFSDownloader
class ExashareCom(XFSDownloader):
    """Downloader plugin for exashare.com, built on the XFileSharing (XFS) base."""

    # Plugin metadata read by the host's plugin loader.
    __name__ = "ExashareCom"
    __type__ = "downloader"
    __version__ = "0.08"
    __status__ = "testing"
    # URLs this plugin handles (12-character file ids).
    __pattern__ = r"http://(?:www\.)?exashare\.com/\w{12}"
    # User-configurable options: (name, type, description, default).
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """Exashare.com downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    PLUGIN_DOMAIN = "exashare.com"

    # Matched against the file page by the XFS base class: NAME = file name,
    # S / U = file size value and unit.
    INFO_PATTERN = r">(?P<NAME>.+?)<small>\( (?P<S>[\d.,]+) (?P<U>[\w^_]+)"

    def setup(self):
        """Per-download transfer settings consumed by the base downloader."""
        self.multi_dl = True
        self.chunk_limit = 1
        # Resuming interrupted downloads is only allowed with a premium account.
        self.resume_download = self.premium
|
plugins | sync_inoreader | # Canto Inoreader Plugin
# by Jack Miller
# v0.4
# DEPENDENCIES
# This plugin requires the 'requests' module, which can usually be found in
# your distro's package manager as python3-requests (or python-requests on
# Arch).
# IMPORTANT NOTES
# - When this plugin is enabled, canto will synchronize your subscribed feeds
# with Inoreader. If you've been using canto, you should export your feeds
# (canto-remote export > feeds.opml) and import them into Inoreader if you
# don't want to lose your feeds because Inoreader's info is assumed to be more
# correct than ours.
#
# - Feed subscriptions are only synchronized *from Inoreader* on startup, so if
# you add / remove feeds with Inoreader, you will have to restart the daemon to
# pick up the changes. Adding or removing feeds with canto works properly all
# the time.
#
# - You must have a standard Inoreader account, not an OAuth (Google/Facebook
# login).
# CONFIGURATION
# Inoreader credentials
EMAIL = "somebody@somewhere.com"
PASSWORD = "passw0rd"
# You don't *have* to change these, but the API is rate limited. So if you want
# to avoid rate limit issues, register your own application Preferences ->
# Developer options on the Inoreader site and replace these.
APP_ID = "1000001299"
APP_KEY = "i0UOUtLQjj2WTre8WA3a9GWt_cgDhpkO"
# Root of Inoreader's Google-Reader-compatible API; request paths below are
# appended to this.
BASE_URL = "https://www.inoreader.com/reader/"
# === You shouldn't have to change anything past this line. ===
from canto_next.plugins import check_program
# Only activate this plugin when loaded by the daemon ("canto-daemon"),
# not by other canto programs.
check_program("canto-daemon")
import json
import logging
import time
import traceback
from urllib.parse import quote, urlencode
import requests
from canto_next.config import config
from canto_next.feed import DaemonFeedPlugin, allfeeds
from canto_next.fetch import DaemonFetchThreadPlugin
from canto_next.hooks import call_hook, on_hook
# Module-level logger; all plugin output is tagged "SYNC-INOREADER".
log = logging.getLogger("SYNC-INOREADER")
class InoreaderReqFailed(Exception):
    """Raised when an HTTP request to Inoreader fails (timeout, network error, 503)."""
    pass
class InoreaderAuthFailed(Exception):
    """Raised when ClientLogin rejects the configured credentials."""
    pass
class CantoInoreaderAPI:
    """Thin wrapper around the Inoreader (Google-Reader-style) HTTP API.

    Handles ClientLogin authentication with automatic re-auth and retry,
    and batches tag additions/removals until flush_changes() is called.
    Marks itself `dead` (and stops issuing requests) on auth failure or
    rate limiting.
    """

    def __init__(self):
        self.extra_headers = {
            "User-Agent": "Canto/0.9.0 + http://codezen.org/canto-ng",
            "AppKey": APP_KEY,
            "AppID": APP_ID,
        }
        try:
            self.authorization = self.auth()
        except Exception:
            # Startup must not fail outright; inoreader_req() will retry
            # authorization on demand. (Was a bare except:, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            self.authorization = None
        self.dead = False
        # tag name -> [inoreader item ids], queued for batched edit-tag calls.
        self.add_tags_queued = {}
        self.del_tags_queued = {}

    def auth(self):
        """Perform ClientLogin and return the Auth token string.

        Raises InoreaderReqFailed on a transport error and
        InoreaderAuthFailed when the credentials are rejected or no
        Auth= line is found.
        """
        headers = self.extra_headers.copy()
        headers["Email"] = EMAIL
        headers["Passwd"] = PASSWORD
        try:
            # NOTE(review): `headers` is passed positionally, so requests
            # treats it as `params` and everything (including credentials)
            # goes out as URL query parameters, not HTTP headers. Inoreader
            # evidently accepts this form, so the behavior is preserved;
            # confirm against the API docs before changing to headers=...
            r = requests.get(
                "https://www.inoreader.com/accounts/ClientLogin", headers, timeout=1
            )
        except Exception as e:
            raise InoreaderReqFailed(str(e))
        if r.status_code != 200:
            raise InoreaderAuthFailed(
                "Failed to authorize: [%s] %s" % (r.status_code, r.text)
            )
        for line in r.text.splitlines():
            if line.startswith("Auth="):
                log.debug("authorization: %s", line[5:])
                return line[5:]
        raise InoreaderAuthFailed("Failed to find Auth= in auth response")

    def inoreader_req(self, path, query=None):
        """GET BASE_URL + path with authorization, retrying up to 3 times.

        Re-authenticates on 401, marks the API dead on 429 (rate limit),
        and raises InoreaderReqFailed on timeouts / 503. Returns the
        requests.Response on success; returns None implicitly when all
        tries are exhausted (long-standing behavior callers cope with).
        `query` defaults to None instead of a shared mutable {}.
        """
        tries = 3
        r = {}
        while tries and not self.dead:
            tries -= 1
            if not self.authorization:
                try:
                    self.authorization = self.auth()
                except InoreaderReqFailed as e:
                    log.debug("Auth request failed: %s", e)
                    continue
                except InoreaderAuthFailed:
                    log.error(
                        "Inoreader authorization failed, please check your credentials in sync-inoreader.py"
                    )
                    self.dead = True
                    raise
            headers = self.extra_headers.copy()
            headers["Authorization"] = "GoogleLogin auth=" + self.authorization
            try:
                r = requests.get(
                    BASE_URL + path, params=query, headers=headers, timeout=1
                )
            except requests.exceptions.Timeout:
                raise InoreaderReqFailed
            if r.status_code != 200:
                log.debug("STATUS %s", r.status_code)
                log.debug(r.headers)
                log.debug(r.text)
            else:
                return r
            # No authorization, attempt to get another code on the next try.
            if r.status_code == 401:
                self.authorization = None
            elif r.status_code == 429:
                log.error("Inoreader rate limit reached.")
                self.dead = True
            elif r.status_code == 503:
                log.error("Inoreader appears down, state may be lost")
                raise InoreaderReqFailed

    # Convert special tags into /state/com.google/tag and others into
    # /label/tag, useful when matching without knowing the user.
    def full_ino_tag_suffix(self, tag):
        if tag in ["read", "starred", "fresh"]:
            return "/state/com.google/" + tag
        return "/label/" + tag

    # Add the user/- prefix to go upstream to Inoreader.
    def full_ino_tag(self, tag):
        return "user/-" + self.full_ino_tag_suffix(tag)

    # Do the opposite, convert an Inoreader tag into a natural name. (i.e.)
    # /user/whatever/state/com.google/read -> read
    def strip_ino_tag(self, tag):
        tag = tag.split("/", 3)
        if tag[2] == "state":
            return tag[3].split("/", 1)[1]
        return tag[3]

    # Return whether Inoreader data includes this natural tag
    def has_tag(self, item, tag):
        if "canto_inoreader_categories" not in item:
            return False
        suff = self.full_ino_tag_suffix(tag)
        for category in item["canto_inoreader_categories"]:
            if category.endswith(suff):
                return True
        return False

    def add_tag(self, item, tag):
        """Queue adding `tag` to `item` upstream (no-op if it already has it)."""
        ino_id = item["canto_inoreader_id"]
        if not self.has_tag(item, tag):
            if tag in self.add_tags_queued:
                self.add_tags_queued[tag].append(ino_id)
            else:
                self.add_tags_queued[tag] = [ino_id]

    def remove_tag(self, item, tag):
        """Queue removing `tag` from `item` upstream (no-op if it lacks it)."""
        ino_id = item["canto_inoreader_id"]
        if self.has_tag(item, tag):
            if tag in self.del_tags_queued:
                self.del_tags_queued[tag].append(ino_id)
            else:
                self.del_tags_queued[tag] = [ino_id]

    def _urllimit(self, prefix, ino_ids):
        """Issue `prefix` with as many &i= ids as fit in ~2048 chars.

        Returns the ids that did not fit (to be sent in a follow-up call),
        or [] when everything was sent.
        """
        t = prefix
        url_len = len(t)
        for i, ino_id in enumerate(ino_ids):
            if url_len + len(ino_id) > 2048:
                self.inoreader_req(t)
                return ino_ids[i:]
            t += ino_id
            url_len += len(ino_id)
        self.inoreader_req(t)
        return []

    def flush_changes(self):
        """Send all queued tag additions/removals as batched edit-tag requests."""
        for key in self.add_tags_queued:
            to_add = ["&i=" + quote(x) for x in self.add_tags_queued[key]]
            while to_add:
                to_add = self._urllimit(
                    "api/0/edit-tag?a=" + quote(self.full_ino_tag(key)), to_add
                )
        for key in self.del_tags_queued:
            to_del = ["&i=" + quote(x) for x in self.del_tags_queued[key]]
            while to_del:
                to_del = self._urllimit(
                    "api/0/edit-tag?r=" + quote(self.full_ino_tag(key)), to_del
                )
        self.add_tags_queued = {}
        self.del_tags_queued = {}

    def get_subs(self):
        """Return the list of subscription dicts from Inoreader."""
        return self.inoreader_req("api/0/subscription/list").json()["subscriptions"]

    def add_sub(self, feed_url, title):
        """Subscribe to `feed_url` upstream with the given title."""
        query = {"ac": "subscribe", "s": "feed/" + feed_url, "t": title}
        self.inoreader_req("api/0/subscription/edit", query)

    def del_sub(self, feed_url):
        """Unsubscribe from `feed_url` upstream."""
        query = {"ac": "unsubscribe", "s": "feed/" + feed_url}
        self.inoreader_req("api/0/subscription/edit", query)
api = CantoInoreaderAPI()
# Given a change set, and the current attributes of a canto item, tell
# Inoreader about it.
def sync_state_to(changes, attrs, add_only=False):
    """Push canto state/tag changes for one item up to Inoreader.

    changes: dict possibly containing "canto-state" and/or "canto-tags".
    attrs: current item attributes (must carry the canto_inoreader_* keys
        for the api tag queries to work).
    add_only: when True, only add state/tags upstream, never remove — used
        on first pairing, when canto's local state is authoritative.
    """
    if "canto-state" in changes:
        if "read" in changes["canto-state"]:
            api.add_tag(attrs, "read")
        elif not add_only:
            # Item is unread locally; clear the read tag upstream if present.
            if api.has_tag(attrs, "read"):
                api.remove_tag(attrs, "read")
    if "canto-tags" in changes:
        for tag in changes["canto-tags"]:
            tag = tag.split(":", 1)[1]  # strip user: or category: prefix
            if not api.has_tag(attrs, tag):
                api.add_tag(attrs, tag)
        if add_only:
            return
        # Remove upstream tags that no longer exist locally.
        for tag in attrs["canto_inoreader_categories"]:
            tag = api.strip_ino_tag(tag)
            if "user:" + tag not in changes["canto-tags"]:
                api.remove_tag(attrs, tag)
class CantoFeedInoReader(DaemonFeedPlugin):
    """Feed plugin that merges Inoreader's copy of a feed into canto's items
    and reconciles read/tag state in both directions."""

    def __init__(self, feed):
        self.plugin_attrs = {
            "edit_inoreader_sync": self.edit_inoreader_sync,
            "additems_inoreader": self.additems_inoreader,
        }
        self.feed = feed
        # Raw item list fetched from Inoreader; populated by
        # additems_inoreader and consumed (then dropped) by
        # edit_inoreader_sync.
        self.ino_data = None

    def _list_add(self, item, attr, new):
        """Append *new* to list attribute *attr* of *item*, without dupes."""
        if attr not in item:
            item[attr] = [new]
        elif new not in item[attr]:
            item[attr].append(new)

    def add_utag(self, item, tags_to_add, tag):
        """Record user tag *tag* on *item* and queue it for the daemon."""
        self._list_add(item, "canto-tags", "user:" + tag)
        tags_to_add.append((item, "user:" + tag))

    def add_state(self, item, state):
        """Record state (e.g. "read") on *item*."""
        self._list_add(item, "canto-state", state)

    def additems_inoreader(
        self, feed, newcontent, tags_to_add, tags_to_remove, remove_items
    ):
        """Fetch this feed's items from Inoreader and merge them into
        *newcontent*, de-duplicating against items we inserted last run."""
        stream_id = quote("feed/" + feed.URL, [])
        query = {"n": 1000}
        # Collect all of the items
        self.ino_data = []
        content_path = "api/0/stream/contents/" + stream_id
        try:
            r = api.inoreader_req(content_path, query).json()
            self.ino_data.extend(r["items"])
        except (InoreaderAuthFailed, InoreaderReqFailed):
            # Couldn't reach Inoreader; carry on with canto's own content.
            return (tags_to_add, tags_to_remove, remove_items)
        except Exception as e:
            log.debug("EXCEPT: %s", traceback.format_exc())
            raise
        # Find items that were inserted last time, and remove them, potentially
        # adding them to our fresh Inoreader data.
        # This keeps us from getting dupes when Inoreader finds an item, we
        # insert it, and then a real copy comes to canto but canto doesn't
        # detect the dupe since the ids are different.
        for canto_entry in newcontent["entries"][:]:
            if "canto-from-inoreader" not in canto_entry:
                continue
            remove_items.append(canto_entry)
            tags_to_add = [x for x in tags_to_add if x[0] != canto_entry]
            newcontent["entries"].remove(canto_entry)
            for ino_entry in self.ino_data[:]:
                if canto_entry["id"] == ino_entry["id"]:
                    break
            else:
                # Not in the fresh fetch; keep the previously-inserted copy.
                self.ino_data.append(canto_entry)
        # Now insert (or re-insert) items that aren't already in our data.
        # NOTE: It's okay if re-inserted items are also in remove_ids, since
        # that's processed first, and will be cancelled out by adding the tags
        # afterwards.
        for ino_entry in self.ino_data:
            for canto_entry in newcontent["entries"]:
                if ino_entry["canonical"][0]["href"] != canto_entry["link"]:
                    continue
                if ino_entry["id"] == canto_entry["id"]:
                    canto_entry["canto-from-inoreader"] = True
                break
            else:
                if "canto-from-inoreader" not in ino_entry:
                    # feedparser compatibility
                    ino_entry["summary"] = ino_entry["summary"]["content"]
                    ino_entry["link"] = ino_entry["canonical"][0]["href"]
                    # mark this item as from inoreader (missing from feed)
                    ino_entry["canto-from-inoreader"] = True
                newcontent["entries"].append(ino_entry)
                tags_to_add.append((ino_entry, "maintag:" + feed.name))
        return (tags_to_add, tags_to_remove, remove_items)

    def edit_inoreader_sync(
        self, feed, newcontent, tags_to_add, tags_to_remove, remove_items
    ):
        """Attach Inoreader ids/categories to items and reconcile state
        between canto and Inoreader (both directions)."""
        # Add inoreader_id/categories information to the items
        # This is very similar to the loop in additems_inoreader, but needs to
        # be separate in case other plugins add items that inoreader might
        # track.
        for ino_entry in self.ino_data:
            for canto_entry in newcontent["entries"][:]:
                if ino_entry["canonical"][0]["href"] != canto_entry["link"]:
                    continue
                canto_entry["canto_inoreader_id"] = ino_entry["id"]
                canto_entry["canto_inoreader_categories"] = ino_entry["categories"]
                break
        # Drop the data.
        self.ino_data = None
        for entry in newcontent["entries"]:
            # If we didn't get an id for this item, skip it
            if "canto_inoreader_id" not in entry:
                continue
            # Translate Inoreader categories into canto state/tags.
            for category in entry["canto_inoreader_categories"]:
                if category.endswith("/state/com.google/read"):
                    self.add_state(entry, "read")
                    continue
                cat = category.split("/", 3)
                if len(cat) < 4:
                    log.debug("Weird category? %s", cat)
                    continue
                if cat[2] == "state":
                    if cat[3] == "com.google/starred":
                        self.add_utag(entry, tags_to_add, "starred")
                elif cat[2] == "label":
                    self.add_utag(entry, tags_to_add, cat[3])
            # If this is the first time we've paired an item up with its
            # Inoreader data, our state is better, so sync it to Inoreader, and
            # then skip the remainder of the logic to remove canto state/tags
            if "canto-inoreader-sync" not in entry:
                sync_state_to(entry, entry, True)
                entry["canto-inoreader-sync"] = True
                continue
            if "canto-state" not in entry or type(entry["canto-state"]) != list:
                continue
            # It appears that if an item is "fresh" it will resist all attempts
            # to set it as read?
            if "read" in entry["canto-state"] and not (
                api.has_tag(entry, "read") or api.has_tag(entry, "fresh")
            ):
                log.debug("Marking unread from Inoreader")
                entry["canto-state"].remove("read")
            if "canto-tags" not in entry or type(entry["canto-tags"]) != list:
                continue
            # Drop local tags that Inoreader no longer has.
            for tag in entry["canto-tags"][:]:
                if not api.has_tag(entry, tag.split(":", 1)[1]):
                    entry["canto-tags"].remove(tag)
                    tags_to_remove.append((entry, tag))
        api.flush_changes()
        return (tags_to_add, tags_to_remove, remove_items)
# For canto communicating to Inoreader, we tap into the relevant hooks to
# pickup state / tag changes, and convert that into Inoreader API calls.
def post_setattributes(socket, args):
    """Hook: push locally-set attribute changes up to Inoreader.

    *args* maps item ids (JSON strings) to the attribute dict just set.
    Items never paired with an Inoreader id are skipped.
    """
    for item_id in args.keys():
        dict_id = json.loads(item_id)
        feed = allfeeds.get_feed(dict_id["URL"])
        attrs = feed.get_attributes(
            [item_id],
            {
                item_id: [
                    "canto_inoreader_id",
                    "canto_inoreader_categories",
                    "canto-state",
                    "canto-tags",
                ]
            },
        )
        attrs = attrs[item_id]
        # If the canto_inoreader_id isn't right (likely empty since get_attributes
        # will sub in "") then skip synchronizing this item.
        ino_id = attrs["canto_inoreader_id"]
        if not ino_id.startswith("tag:google.com,2005:reader/item/"):
            continue
        sync_state_to(args[item_id], attrs)
    api.flush_changes()


on_hook("daemon_post_setattributes", post_setattributes)
def post_setconfigs(socket, args):
    """Hook: mirror feeds just added to canto as Inoreader subscriptions."""
    for feed in args.get("feeds", []):
        api.add_sub(feed["url"], feed["name"])


on_hook("daemon_post_setconfigs", post_setconfigs)
def post_delconfigs(socket, args):
    """Hook: remove Inoreader subscriptions for feeds deleted from canto."""
    for feed in args.get("feeds", []):
        api.del_sub(feed["url"])


on_hook("daemon_post_delconfigs", post_delconfigs)
# Do the initial feed synchronization. This only occurs once per run, and
# assumes Inoreader knows everything.
def on_daemon_serving():
    """One-shot hook: reconcile canto's feed list with Inoreader's.

    Inoreader is treated as authoritative — canto feeds Inoreader no longer
    knows are removed, and Inoreader subscriptions missing from canto are
    added.
    """
    log.debug("Synchronizing subscriptions.")
    ino_subs = api.get_subs()
    # Pass 1: drop canto feeds that Inoreader no longer has.
    for c_feed in config.json["feeds"]:
        url = c_feed["url"]
        for sub in ino_subs:
            if sub["url"] == url:
                break
        else:
            # No Inoreader sub matched this canto feed.
            log.debug("Old feed: %s", url)
            call_hook("daemon_del_configs", [None, {"feeds": [c_feed]}])
    # Pass 2: add Inoreader subscriptions canto doesn't have yet.
    for sub in ino_subs:
        url = sub["url"]
        name = sub["title"]
        for c_feed in config.json["feeds"]:
            if c_feed["url"] == url:
                break
            if c_feed["name"] == name:
                log.info("Found feed with same name, but not URL? Skipping.")
                break
        else:
            # Neither URL nor name matched: genuinely new to canto.
            log.debug("New feed: %s", url)
            call_hook(
                "daemon_set_configs", [None, {"feeds": [{"name": name, "url": url}]}]
            )


on_hook("daemon_serving", on_daemon_serving)
|
views | torrents | import json
from ipaddress import ip_address
from urllib.parse import quote
import flask
from nyaa import backend, forms, models, torrents
from nyaa.extensions import db
from nyaa.utils import cached_function
from sqlalchemy.orm import joinedload
from werkzeug.datastructures import CombinedMultiDict
app = flask.current_app
bp = flask.Blueprint("torrents", __name__)
@bp.route("/view/<int:torrent_id>", endpoint="view", methods=["GET", "POST"])
def view_torrent(torrent_id):
    """Render a torrent's detail page; a POST submits a new comment."""
    if flask.request.method == "POST":
        torrent = models.Torrent.by_id(torrent_id)
    else:
        # Eager-load the file list on GET to avoid a second query when
        # rendering the page.
        torrent = (
            models.Torrent.query.options(joinedload("filelist"))
            .filter_by(id=torrent_id)
            .first()
        )
    if not torrent:
        flask.abort(404)
    # Only allow admins see deleted torrents
    if torrent.deleted and not (flask.g.user and flask.g.user.is_moderator):
        flask.abort(404)
    comment_form = None
    # Commenting requires login; locked comment sections are moderator-only.
    if flask.g.user and (not torrent.comment_locked or flask.g.user.is_moderator):
        comment_form = forms.CommentForm()
    if flask.request.method == "POST":
        if not comment_form:
            flask.abort(403)
        if comment_form.validate():
            comment_text = (comment_form.comment.data or "").strip()
            comment = models.Comment(
                torrent_id=torrent_id, user_id=flask.g.user.id, text=comment_text
            )
            db.session.add(comment)
            # Flush so the new comment is counted by update_comment_count().
            db.session.flush()
            torrent_count = torrent.update_comment_count()
            db.session.commit()
            flask.flash("Comment successfully posted.", "success")
            # Redirect straight to the new comment's anchor.
            return flask.redirect(
                flask.url_for(
                    "torrents.view",
                    torrent_id=torrent_id,
                    _anchor="com-" + str(torrent_count),
                )
            )
    # Only allow owners and admins to edit torrents
    can_edit = flask.g.user and (
        flask.g.user is torrent.user or flask.g.user.is_moderator
    )
    files = None
    if torrent.filelist:
        files = json.loads(torrent.filelist.filelist_blob.decode("utf-8"))
    torrent_comments = models.Comment.query.filter_by(torrent_id=torrent_id).order_by(
        models.Comment.id.asc()
    )
    report_form = forms.ReportForm()
    return flask.render_template(
        "view.html",
        torrent=torrent,
        files=files,
        comment_form=comment_form,
        comments=torrent_comments,
        can_edit=can_edit,
        report_form=report_form,
    )
@bp.route("/view/<int:torrent_id>/edit", endpoint="edit", methods=["GET", "POST"])
def edit_torrent(torrent_id):
    """Show and process a torrent's edit page.

    GET renders the form pre-filled from the torrent; POST either saves an
    edit (form.submit) or hands off to _delete_torrent for the
    delete/ban/undelete/unban buttons.
    """
    torrent = models.Torrent.by_id(torrent_id)
    form = forms.EditForm(flask.request.form)
    form.category.choices = _create_upload_category_choices()
    delete_form = forms.DeleteForm()
    ban_form = None
    editor = flask.g.user
    if not torrent:
        flask.abort(404)
    # Only allow admins edit deleted torrents
    if torrent.deleted and not (editor and editor.is_moderator):
        flask.abort(404)
    # Only allow torrent owners or admins edit torrents
    if not editor or not (editor is torrent.user or editor.is_moderator):
        flask.abort(403)
    torrent_user_level = torrent.user and torrent.user.level
    # Moderators get a ban form, but only when they outrank the uploader
    # (or the uploader is anonymous/unknown).
    if (
        editor
        and editor.is_moderator
        and (torrent_user_level is None or editor.level > torrent_user_level)
    ):
        ban_form = forms.BanForm()
    if flask.request.method == "POST" and form.submit.data and form.validate():
        # Form has been sent, edit torrent with data.
        (
            torrent.main_category_id,
            torrent.sub_category_id,
        ) = form.category.parsed_data.get_category_ids()
        torrent.display_name = backend.sanitize_string(
            (form.display_name.data or "").strip()
        )
        torrent.information = backend.sanitize_string(
            (form.information.data or "").strip()
        )
        torrent.description = backend.sanitize_string(
            (form.description.data or "").strip()
        )
        torrent.hidden = form.is_hidden.data
        torrent.remake = form.is_remake.data
        torrent.complete = form.is_complete.data
        torrent.anonymous = form.is_anonymous.data
        if editor.is_trusted:
            torrent.trusted = form.is_trusted.data
        if editor.is_moderator:
            locked_changed = torrent.comment_locked != form.is_comment_locked.data
            torrent.comment_locked = form.is_comment_locked.data
        url = flask.url_for("torrents.view", torrent_id=torrent.id)
        # Record comment-lock toggles in the admin log.
        if editor.is_moderator and locked_changed:
            log = "Torrent [#{0}]({1}) marked as {2}".format(
                torrent.id,
                url,
                "comments locked" if torrent.comment_locked else "comments unlocked",
            )
            adminlog = models.AdminLog(log=log, admin_id=editor.id)
            db.session.add(adminlog)
        db.session.commit()
        flask.flash(
            flask.Markup(
                "Torrent has been successfully edited! Changes might take a few minutes to show up."
            ),
            "success",
        )
        url = flask.url_for("torrents.view", torrent_id=torrent.id)
        return flask.redirect(url)
    elif (
        flask.request.method == "POST"
        and delete_form.validate()
        and (not ban_form or ban_form.validate())
    ):
        # Not a plain edit: one of the delete/ban buttons was pressed.
        return _delete_torrent(torrent, delete_form, ban_form)
    else:
        if flask.request.method != "POST":
            # Fill form data only if the POST didn't fail
            form.category.data = torrent.sub_category.id_as_string
            form.display_name.data = torrent.display_name
            form.information.data = torrent.information
            form.description.data = torrent.description
            form.is_hidden.data = torrent.hidden
            form.is_remake.data = torrent.remake
            form.is_complete.data = torrent.complete
            form.is_anonymous.data = torrent.anonymous
            form.is_trusted.data = torrent.trusted
            form.is_comment_locked.data = torrent.comment_locked
        ipbanned = None
        if editor.is_moderator:
            torrent_ip_banned = True
            user_ip_banned = True
            # Archived torrents do not have a null uploader_ip
            if torrent.uploader_ip:
                torrent_ip_banned = models.Ban.banned(None, torrent.uploader_ip).first()
            if torrent.user:
                user_ip_banned = models.Ban.banned(
                    None, torrent.user.last_login_ip
                ).first()
            ipbanned = torrent_ip_banned and user_ip_banned
        return flask.render_template(
            "edit.html",
            form=form,
            delete_form=delete_form,
            ban_form=ban_form,
            torrent=torrent,
            ipbanned=ipbanned,
        )
def _delete_torrent(torrent, form, banform):
    """Perform the delete/ban/undelete/unban actions from the edit page.

    *form* is the DeleteForm carrying which button was pressed; *banform*
    (a BanForm or None) additionally allows banning the uploader and/or
    their IPs.  Logs moderator actions to the admin log and redirects to
    the appropriate page.
    """
    editor = flask.g.user
    uploader = torrent.user
    # Only allow admins edit deleted torrents
    if torrent.deleted and not (editor and editor.is_moderator):
        flask.abort(404)
    action = None
    url = flask.url_for("main.home")
    ban_torrent = form.ban.data
    if banform:
        ban_torrent = ban_torrent or banform.ban_user.data or banform.ban_userip.data
    if form.delete.data and not torrent.deleted:
        action = "deleted"
        torrent.deleted = True
        db.session.add(torrent)
    elif ban_torrent and not torrent.banned and editor.is_moderator:
        action = "banned"
        torrent.banned = True
        if not torrent.deleted:
            torrent.deleted = True
            action = "deleted and banned"
        # Tell the tracker to drop the torrent and zero its peer stats.
        db.session.add(models.TrackerApi(torrent.info_hash, "remove"))
        torrent.stats.seed_count = 0
        torrent.stats.leech_count = 0
        db.session.add(torrent)
    elif form.undelete.data and torrent.deleted:
        action = "undeleted"
        torrent.deleted = False
        if torrent.banned:
            action = "undeleted and unbanned"
            torrent.banned = False
            db.session.add(models.TrackerApi(torrent.info_hash, "insert"))
        db.session.add(torrent)
    elif form.unban.data and torrent.banned:
        action = "unbanned"
        torrent.banned = False
        db.session.add(models.TrackerApi(torrent.info_hash, "insert"))
        db.session.add(torrent)
    # No state change and no ban requested: the button made no sense.
    if not action and not ban_torrent:
        flask.flash(flask.Markup("What the fuck are you doing?"), "danger")
        return flask.redirect(flask.url_for("torrents.edit", torrent_id=torrent.id))
    if action and editor.is_moderator:
        url = flask.url_for("torrents.view", torrent_id=torrent.id)
        # Only log when a moderator acts on someone else's torrent.
        if editor is not uploader:
            log = "Torrent [#{0}]({1}) has been {2}".format(torrent.id, url, action)
            adminlog = models.AdminLog(log=log, admin_id=editor.id)
            db.session.add(adminlog)
    if action:
        db.session.commit()
        flask.flash(
            flask.Markup("Torrent has been successfully {0}.".format(action)), "success"
        )
    # Done unless the uploader (or their IP) should also be banned.
    if not banform or not (banform.ban_user.data or banform.ban_userip.data):
        return flask.redirect(url)
    if banform.ban_userip.data:
        tbanned = models.Ban.banned(None, torrent.uploader_ip).first()
        ubanned = True
        if uploader:
            ubanned = models.Ban.banned(None, uploader.last_login_ip).first()
        ipbanned = tbanned and ubanned
    # Refuse to re-ban an already-banned user / already-banned IPs.
    if (banform.ban_user.data and (not uploader or uploader.is_banned)) or (
        banform.ban_userip.data and ipbanned
    ):
        flask.flash(flask.Markup("What the fuck are you doing?"), "danger")
        return flask.redirect(flask.url_for("torrents.edit", torrent_id=torrent.id))
    flavor = "Nyaa" if app.config["SITE_FLAVOR"] == "nyaa" else "Sukebei"
    eurl = flask.url_for("torrents.view", torrent_id=torrent.id, _external=True)
    reason = "[{0}#{1}]({2}) {3}".format(flavor, torrent.id, eurl, banform.reason.data)
    # ban1 covers the uploader account and/or their login IP; ban2 covers
    # the torrent's upload IP when it differs from the login IP.
    ban1 = models.Ban(admin_id=editor.id, reason=reason)
    ban2 = models.Ban(admin_id=editor.id, reason=reason)
    db.session.add(ban1)
    if uploader:
        uploader.status = models.UserStatusType.BANNED
        db.session.add(uploader)
        ban1.user_id = uploader.id
        ban2.user_id = uploader.id
        if banform.ban_userip.data:
            if not ubanned:
                ban1.user_ip = ip_address(uploader.last_login_ip)
            if not tbanned:
                uploader_ip = ip_address(torrent.uploader_ip)
                # Avoid inserting two identical IP bans.
                if ban1.user_ip != uploader_ip:
                    ban2.user_ip = uploader_ip
                    db.session.add(ban2)
    else:
        ban1.user_ip = ip_address(torrent.uploader_ip)
    uploader_str = "Anonymous"
    if uploader:
        uploader_url = flask.url_for("users.view_user", user_name=uploader.username)
        uploader_str = "[{0}]({1})".format(uploader.username, uploader_url)
    if ban1.user_ip:
        uploader_str += " IP({0})".format(ban1.user_ip)
        ban1.user_ip = ban1.user_ip.packed
    if ban2.user_ip:
        uploader_str += " IP({0})".format(ban2.user_ip)
        ban2.user_ip = ban2.user_ip.packed
    # BUGFIX: the original passed a stray fourth argument (action) to this
    # three-placeholder format string; str.format silently ignored it.
    log = "Uploader {0} of torrent [#{1}]({2}) has been banned.".format(
        uploader_str,
        torrent.id,
        flask.url_for("torrents.view", torrent_id=torrent.id),
    )
    adminlog = models.AdminLog(log=log, admin_id=editor.id)
    db.session.add(adminlog)
    db.session.commit()
    flask.flash(flask.Markup("Uploader has been successfully banned."), "success")
    return flask.redirect(url)
@bp.route("/view/<int:torrent_id>/magnet")
def redirect_magnet(torrent_id):
    """Redirect the client to the magnet URI of a torrent (404 if unknown)."""
    torrent = models.Torrent.by_id(torrent_id)
    if not torrent:
        flask.abort(404)
    magnet_uri = torrents.create_magnet(torrent)
    return flask.redirect(magnet_uri)
@bp.route("/view/<int:torrent_id>/torrent")
@bp.route("/download/<int:torrent_id>.torrent", endpoint="download")
def download_torrent(torrent_id):
    """Serve the .torrent file for a torrent.

    404s when the torrent is unknown, has no stored torrent file, or is
    deleted and the viewer is not a moderator.
    """
    torrent = models.Torrent.by_id(torrent_id)
    if not torrent or not torrent.has_torrent:
        flask.abort(404)
    # Deleted torrents are only downloadable by moderators.
    if torrent.deleted and not (flask.g.user and flask.g.user.is_moderator):
        flask.abort(404)
    torrent_file, torrent_file_size = _make_torrent_file(torrent)
    # RFC 6266 style: plain filename plus UTF-8 filename* fallback.
    encoded_name = quote(torrent.torrent_name.encode("utf-8"))
    disposition = "inline; filename=\"{0}\"; filename*=UTF-8''{0}".format(encoded_name)
    resp = flask.Response(torrent_file)
    resp.headers["Content-Type"] = "application/x-bittorrent"
    resp.headers["Content-Disposition"] = disposition
    resp.headers["Content-Length"] = torrent_file_size
    return resp
@bp.route("/view/<int:torrent_id>/comment/<int:comment_id>/edit", methods=["POST"])
def edit_comment(torrent_id, comment_id):
    """Edit a comment's text in place; only its author may edit.

    Returns the new text as JSON on success; aborts with 403/404/400
    following the same permission model as delete_comment.
    """
    if not flask.g.user:
        flask.abort(403)
    torrent = models.Torrent.by_id(torrent_id)
    if not torrent:
        flask.abort(404)
    comment = models.Comment.query.get(comment_id)
    if not comment:
        flask.abort(404)
    if comment.user.id != flask.g.user.id:
        flask.abort(403)
    # BUGFIX/consistency: delete_comment rejects URLs whose torrent id does
    # not match the comment's torrent; editing must enforce the same pairing.
    if torrent_id != comment.torrent_id:
        flask.abort(400)
    if torrent.comment_locked and not flask.g.user.is_moderator:
        flask.abort(403)
    if comment.editing_limit_exceeded:
        flask.abort(
            flask.make_response(
                flask.jsonify({"error": "Editing time limit exceeded."}), 400
            )
        )
    form = forms.CommentForm(flask.request.form)
    if not form.validate():
        error_str = " ".join(form.errors)
        flask.abort(flask.make_response(flask.jsonify({"error": error_str}), 400))
    comment.text = form.comment.data
    db.session.commit()
    return flask.jsonify({"comment": comment.text})
@bp.route("/view/<int:torrent_id>/comment/<int:comment_id>/delete", methods=["POST"])
def delete_comment(torrent_id, comment_id):
    """Delete a comment; allowed for its author or a superadmin."""
    if not flask.g.user:
        flask.abort(403)
    torrent = models.Torrent.by_id(torrent_id)
    if not torrent:
        flask.abort(404)
    comment = models.Comment.query.filter_by(id=comment_id).first()
    if not comment:
        flask.abort(404)
    if not (comment.user.id == flask.g.user.id or flask.g.user.is_superadmin):
        flask.abort(403)
    # Reject URLs whose torrent id doesn't match the comment's torrent.
    if torrent_id != comment.torrent_id:
        flask.abort(400)
    if torrent.comment_locked and not flask.g.user.is_moderator:
        flask.abort(403)
    # Authors can only delete within the editing window; superadmins always.
    if comment.editing_limit_exceeded and not flask.g.user.is_superadmin:
        flask.abort(403)
    db.session.delete(comment)
    # Flush so the comment count query below sees the deletion.
    db.session.flush()
    torrent.update_comment_count()
    url = flask.url_for("torrents.view", torrent_id=torrent.id)
    if flask.g.user.is_moderator:
        log = "Comment deleted on torrent [#{}]({})".format(torrent.id, url)
        adminlog = models.AdminLog(log=log, admin_id=flask.g.user.id)
        db.session.add(adminlog)
    db.session.commit()
    flask.flash("Comment successfully deleted.", "success")
    return flask.redirect(url)
@bp.route("/view/<int:torrent_id>/submit_report", endpoint="report", methods=["POST"])
def submit_report(torrent_id):
    """File a user report against a torrent."""
    # Reporting requires a logged-in account older than the rate-limit age.
    if not flask.g.user or flask.g.user.age < app.config["RATELIMIT_ACCOUNT_AGE"]:
        flask.abort(403)
    form = forms.ReportForm(flask.request.form)
    torrent = models.Torrent.by_id(torrent_id)
    if not torrent:
        flask.abort(404)
    if torrent.banned:
        flask.flash("The torrent you've tried to report is already banned.", "danger")
        flask.abort(404)
    if flask.request.method == "POST" and form.validate():
        report_reason = form.reason.data
        current_user_id = flask.g.user.id
        report = models.Report(
            torrent_id=torrent_id, user_id=current_user_id, reason=report_reason
        )
        db.session.add(report)
        db.session.commit()
        flask.flash("Successfully reported torrent!", "success")
    # Validation failed: the only user-fixable cause is an empty reason.
    # NOTE(review): this assumes form.reason.data is a string here (len() on
    # None would raise) — confirm ReportForm always populates the field.
    elif len(form.reason.data) == 0:
        flask.flash("Please give a report reason!", "danger")
    return flask.redirect(flask.url_for("torrents.view", torrent_id=torrent_id))
@bp.route("/upload", methods=["GET", "POST"])
def upload():
    """Render the upload form and handle new torrent submissions."""
    upload_form = forms.UploadForm(
        CombinedMultiDict((flask.request.files, flask.request.form))
    )
    upload_form.category.choices = _create_upload_category_choices()
    show_ratelimit = False
    next_upload_time = None
    ratelimit_count = 0
    # Anonymous uploaders and non-trusted uploaders
    no_or_new_account = not flask.g.user or (
        flask.g.user.age < app.config["RATELIMIT_ACCOUNT_AGE"]
        and not flask.g.user.is_trusted
    )
    if app.config["RATELIMIT_UPLOADS"] and no_or_new_account:
        now, ratelimit_count, next_upload_time = backend.check_uploader_ratelimit(
            flask.g.user
        )
        show_ratelimit = ratelimit_count >= app.config["MAX_UPLOAD_BURST"]
        # Only surface a next-allowed time when it is still in the future.
        next_upload_time = next_upload_time if next_upload_time > now else None
    if flask.request.method == "POST" and upload_form.validate():
        try:
            torrent = backend.handle_torrent_upload(upload_form, flask.g.user)
            return flask.redirect(flask.url_for("torrents.view", torrent_id=torrent.id))
        except backend.TorrentExtraValidationException:
            # Extra validation errors were attached to the form; fall
            # through and re-render it with a 400 below.
            pass
    # If we get here with a POST, it means the form data was invalid: return a non-okay status
    status_code = 400 if flask.request.method == "POST" else 200
    return (
        flask.render_template(
            "upload.html",
            upload_form=upload_form,
            show_ratelimit=show_ratelimit,
            ratelimit_count=ratelimit_count,
            next_upload_time=next_upload_time,
        ),
        status_code,
    )
@cached_function
def _create_upload_category_choices():
    """Turn categories in the database into a list of choice tuples.

    The first entry is the placeholder; the rest are
    (id_string, display_name, is_main_category) tuples.
    """
    id_map = backend.get_category_id_map()
    choices = [("", "[Select a category]")]
    for key in sorted(id_map):
        cat_names = id_map[key]
        # Main categories have ids of the form "<main>_0".
        choices.append((key, " - ".join(cat_names), key.endswith("_0")))
    return choices
def _make_torrent_file(torrent):
    """Wrap the torrent's stored bencoded info dict into a complete
    .torrent file.  Returns (bencoded_bytes, byte_length)."""
    with open(torrent.info_dict_path, "rb") as in_file:
        bencoded_info = in_file.read()
    torrent_bytes = torrents.create_bencoded_torrent(torrent, bencoded_info)
    return torrent_bytes, len(torrent_bytes)
|
dataset-tools | create_pascal_tf_record_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_pascal_tf_record.py."""
import os
import numpy as np
import PIL.Image
import tensorflow as tf
from app.object_detection.dataset_tools import create_pascal_tf_record
class DictToTFExampleTest(tf.test.TestCase):
    """Tests create_pascal_tf_record.dict_to_tf_example's conversion of a
    PASCAL-VOC annotation dict into a tf.train.Example."""

    def _assertProtoEqual(self, proto_field, expectation):
        """Helper function to assert if a proto field equals some value.
        Args:
          proto_field: The protobuf field to compare.
          expectation: The expected value of the protobuf field.
        """
        proto_list = [p for p in proto_field]
        self.assertListEqual(proto_list, expectation)

    def test_dict_to_tf_example(self):
        """One image, one 'person' box: check every encoded feature."""
        image_file_name = "tmp_image.jpg"
        # NOTE(review): PIL.Image.fromarray with a float64 array and an
        # explicit "RGB" mode is fragile — modern Pillow rejects or misreads
        # float data; a uint8 array may be required. TODO confirm against
        # the Pillow version this project pins.
        image_data = np.random.rand(256, 256, 3)
        save_path = os.path.join(self.get_temp_dir(), image_file_name)
        image = PIL.Image.fromarray(image_data, "RGB")
        image.save(save_path)
        # Minimal PASCAL-VOC style annotation dict, one centered box.
        data = {
            "folder": "",
            "filename": image_file_name,
            "size": {
                "height": 256,
                "width": 256,
            },
            "object": [
                {
                    "difficult": 1,
                    "bndbox": {
                        "xmin": 64,
                        "ymin": 64,
                        "xmax": 192,
                        "ymax": 192,
                    },
                    "name": "person",
                    "truncated": 0,
                    "pose": "",
                },
            ],
        }
        label_map_dict = {
            "background": 0,
            "person": 1,
            "notperson": 2,
        }
        example = create_pascal_tf_record.dict_to_tf_example(
            data, self.get_temp_dir(), label_map_dict, image_subdirectory=""
        )
        self._assertProtoEqual(
            example.features.feature["image/height"].int64_list.value, [256]
        )
        self._assertProtoEqual(
            example.features.feature["image/width"].int64_list.value, [256]
        )
        # NOTE(review): bytes_list values are bytes under Python 3 while
        # these expectations are str — verify the comparisons against the
        # runtime in use.
        self._assertProtoEqual(
            example.features.feature["image/filename"].bytes_list.value,
            [image_file_name],
        )
        self._assertProtoEqual(
            example.features.feature["image/source_id"].bytes_list.value,
            [image_file_name],
        )
        self._assertProtoEqual(
            example.features.feature["image/format"].bytes_list.value, ["jpeg"]
        )
        # Box coordinates are normalized to [0, 1] by the image size.
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/xmin"].float_list.value, [0.25]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/ymin"].float_list.value, [0.25]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/xmax"].float_list.value, [0.75]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/ymax"].float_list.value, [0.75]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/class/text"].bytes_list.value,
            ["person"],
        )
        self._assertProtoEqual(
            example.features.feature["image/object/class/label"].int64_list.value, [1]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/difficult"].int64_list.value, [1]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/truncated"].int64_list.value, [0]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/view"].bytes_list.value, [""]
        )
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    tf.test.main()
|
src | class_singleCleaner | """
The `singleCleaner` class is a timer-driven thread that cleans data structures
to free memory, resends messages when a remote node doesn't respond, and
sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
- inventory (moves data to the on-disk sql database)
- inventorySets (clears then reloads data out of sql database)
It cleans these tables on the disk:
- inventory (clears expired objects)
- pubkeys (clears pubkeys older than 4 weeks old which we have not used
personally)
- knownNodes (clears addresses which have not been online for over 3 days)
It resends messages when there has been no response:
- resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...)
- resends msg messages in 5 days (then 10 days, then 20 days, etc...)
"""
import gc
import os
import time
import queues
import state
from bmconfigparser import config
from helper_sql import sqlExecute, sqlQuery
from inventory import Inventory
from network import BMConnectionPool, StoppableThread, knownnodes
from tr import _translate
#: Equals 4 weeks. You could make this longer if you want
#: but making it shorter would not be advisable because
#: there is a very small possibility that it could keep you
#: from obtaining a needed pubkey for a period of time.
lengthOfTimeToHoldOnToAllPubkeys = 2419200
class singleCleaner(StoppableThread):
"""The singleCleaner thread class"""
name = "singleCleaner"
cycleLength = 300
expireDiscoveredPeers = 300
def run(self): # pylint: disable=too-many-branches
gc.disable()
timeWeLastClearedInventoryAndPubkeysTables = 0
try:
state.maximumLengthOfTimeToBotherResendingMessages = (
config.getfloat("bitmessagesettings", "stopresendingafterxdays")
* 24
* 60
* 60
) + (
config.getfloat("bitmessagesettings", "stopresendingafterxmonths")
* (60 * 60 * 24 * 365)
/ 12
)
except: # noqa:E722
# Either the user hasn't set stopresendingafterxdays and
# stopresendingafterxmonths yet or the options are missing
# from the config file.
state.maximumLengthOfTimeToBotherResendingMessages = float("inf")
while state.shutdown == 0:
self.stop.wait(self.cycleLength)
queues.UISignalQueue.put(
(
"updateStatusBar",
"Doing housekeeping (Flushing inventory in memory to disk...)",
)
)
Inventory().flush()
queues.UISignalQueue.put(("updateStatusBar", ""))
# If we are running as a daemon then we are going to fill up the UI
# queue which will never be handled by a UI. We should clear it to
# save memory.
# FIXME redundant?
if state.thisapp.daemon or not state.enableGUI:
queues.UISignalQueue.queue.clear()
tick = int(time.time())
if timeWeLastClearedInventoryAndPubkeysTables < tick - 7380:
timeWeLastClearedInventoryAndPubkeysTables = tick
Inventory().clean()
queues.workerQueue.put(("sendOnionPeerObj", ""))
# pubkeys
sqlExecute(
"DELETE FROM pubkeys WHERE time<? AND usedpersonally='no'",
tick - lengthOfTimeToHoldOnToAllPubkeys,
)
# Let us resend getpubkey objects if we have not yet heard
# a pubkey, and also msg objects if we have not yet heard
# an acknowledgement
queryreturn = sqlQuery(
"SELECT toaddress, ackdata, status FROM sent"
" WHERE ((status='awaitingpubkey' OR status='msgsent')"
" AND folder='sent' AND sleeptill<? AND senttime>?)",
tick,
tick - state.maximumLengthOfTimeToBotherResendingMessages,
)
for toAddress, ackData, status in queryreturn:
if status == "awaitingpubkey":
self.resendPubkeyRequest(toAddress)
elif status == "msgsent":
self.resendMsg(ackData)
try:
# Cleanup knownnodes and handle possible severe exception
# while writing it to disk
if state.enableNetwork:
knownnodes.cleanupKnownNodes()
except Exception as err:
if "Errno 28" in str(err):
self.logger.fatal(
"(while writing knownnodes to disk)"
" Alert: Your disk or data storage volume is full."
)
queues.UISignalQueue.put(
(
"alert",
(
_translate("MainWindow", "Disk full"),
_translate(
"MainWindow",
"Alert: Your disk or data storage volume"
" is full. Bitmessage will now exit.",
),
True,
),
)
)
# FIXME redundant?
if state.thisapp.daemon or not state.enableGUI:
os._exit(1) # pylint: disable=protected-access
# inv/object tracking
for connection in BMConnectionPool().connections():
connection.clean()
# discovery tracking
exp = time.time() - singleCleaner.expireDiscoveredPeers
reaper = (k for k, v in state.discoveredPeers.items() if v < exp)
for k in reaper:
try:
del state.discoveredPeers[k]
except KeyError:
pass
# ..todo:: cleanup pending upload / download
gc.collect()
def resendPubkeyRequest(self, address):
"""Resend pubkey request for address"""
self.logger.debug(
"It has been a long time and we haven't heard a response to our"
" getpubkey request. Sending again."
)
try:
# We need to take this entry out of the neededPubkeys structure
# because the queues.workerQueue checks to see whether the entry
# is already present and will not do the POW and send the message
# because it assumes that it has already done it recently.
del state.neededPubkeys[address]
except KeyError:
pass
except RuntimeError:
self.logger.warning(
"Can't remove %s from neededPubkeys, requesting pubkey will be delayed",
address,
exc_info=True,
)
queues.UISignalQueue.put(
(
"updateStatusBar",
"Doing work necessary to again attempt to request a public key...",
)
)
sqlExecute(
"UPDATE sent SET status = 'msgqueued'"
" WHERE toaddress = ? AND folder = 'sent'",
address,
)
queues.workerQueue.put(("sendmessage", ""))
def resendMsg(self, ackdata):
    """Resend message by ackdata"""
    self.logger.debug(
        "It has been a long time and we haven't heard an acknowledgement"
        " to our msg. Sending again."
    )
    # Re-queue the sent row so the worker thread picks the message up again.
    sqlExecute(
        "UPDATE sent SET status = 'msgqueued'"
        " WHERE ackdata = ? AND folder = 'sent'",
        ackdata,
    )
    queues.workerQueue.put(("sendmessage", ""))
    notification = (
        "updateStatusBar",
        "Doing work necessary to again attempt to deliver a message...",
    )
    queues.UISignalQueue.put(notification)
|
scorewiz | dialog | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The Score Wizard dialog.
"""
import app
import indent
import ly.document
import ly.dom
import ly.music
import ly.util
import qutil
import userguide
from PyQt5.QtCore import QSettings, QUrl, pyqtSignal
from PyQt5.QtWidgets import (
QDialog,
QDialogButtonBox,
QGroupBox,
QTabWidget,
QVBoxLayout,
QWidget,
)
class ScoreWizardDialog(QDialog):
    """The Score Setup Wizard dialog.

    Hosts three lazily-created tab pages (Header, Parts, Settings) and,
    when accepted, builds a new LilyPond document from their contents.
    """

    # Emitted with the new language name when the pitch-name language changes.
    pitchLanguageChanged = pyqtSignal(str)

    def __init__(self, mainwindow):
        super().__init__(mainwindow)
        self.addAction(mainwindow.actionCollection.help_whatsthis)
        self._pitchLanguage = None
        self.mainwindow = mainwindow
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.tabs = QTabWidget()
        b = self.dialogButtons = QDialogButtonBox()
        b.setStandardButtons(
            QDialogButtonBox.Reset | QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        )
        b.accepted.connect(self.accept)
        b.rejected.connect(self.reject)
        userguide.addButton(b, "scorewiz")
        # "Reset" clears only the currently shown page (see reset()).
        b.button(QDialogButtonBox.Reset).clicked.connect(self.reset)
        self.previewButton = b.addButton("", QDialogButtonBox.ActionRole)
        self.previewButton.clicked.connect(self.showPreview)
        layout.addWidget(self.tabs)
        layout.addWidget(b)
        self.header = Header(self)
        self.tabs.addTab(self.header, "")
        self.parts = Parts(self)
        self.tabs.addTab(self.parts, "")
        self.settings = Settings(self)
        self.tabs.addTab(self.settings, "")
        self.tabs.setCurrentIndex(0)
        self.tabs.widget(0).widget()  # activate it
        self.tabs.currentChanged.connect(self.slotCurrentChanged)
        qutil.saveDialogSize(self, "scorewiz/dialog/size")
        app.translateUI(self)
        self.accepted.connect(self.slotAccepted)

    def translateUI(self):
        """(Re)translate all user-visible strings of the dialog."""
        self.setWindowTitle(app.caption(_("Score Setup Wizard")))
        for i in range(self.tabs.count()):
            self.tabs.setTabText(i, self.tabs.widget(i).title())
        self.dialogButtons.button(QDialogButtonBox.Reset).setText(_("Clear"))
        self.dialogButtons.button(QDialogButtonBox.Reset).setToolTip(
            _("Clears the current page of the Score Wizard.")
        )
        self.previewButton.setText(_("Preview"))

    def slotCurrentChanged(self, i):
        """Lazy-loads the tab's page if shown for the first time."""
        self.tabs.widget(i).widget()

    def reset(self):
        """Clear only the page currently shown in the tab widget."""
        self.tabs.currentWidget().widget().clear()

    def resetAll(self):
        """Clear all three pages (used e.g. before reading an existing score)."""
        for tab in self.header, self.parts, self.settings:
            tab.widget().clear()

    def setPitchLanguage(self, language):
        """Set the pitch-name language, emitting pitchLanguageChanged on change."""
        if language != self._pitchLanguage:
            self._pitchLanguage = language
            self.pitchLanguageChanged.emit(language)

    def pitchLanguage(self):
        """Return the pitch-name language, loading it from QSettings on first use."""
        if self._pitchLanguage is None:
            # load setting; saving occurs in .settings.py
            lang = QSettings().value("scorewiz/lilypond/pitch_language", "", str)
            from .scoreproperties import keyNames
            # Fall back to the default language for unknown setting values.
            if lang not in keyNames:
                lang = ""
            self._pitchLanguage = lang
        return self._pitchLanguage

    def slotAccepted(self):
        """Makes the score and puts it in the editor."""
        from . import build
        builder = build.Builder(self)  # get the builder
        doc = builder.document()  # get the ly.dom document tree
        if not self.settings.widget().generalPreferences.relpitch.isChecked():
            # remove pitches from \relative commands
            for n in doc.find(ly.dom.Relative):
                for n1 in n.find(ly.dom.Pitch, 1):
                    n.remove(n1)
        text = builder.text(doc)  # convert to LilyPond source text
        lydoc = ly.document.Document(text)  # temporarily store it in a lydoc
        cursor = ly.document.Cursor(lydoc)  # make a cursor selecting it
        indent.indenter().indent(cursor)  # indent it according to user prefs
        doc = app.openUrl(QUrl())  # get a new Frescobaldi document
        doc.setPlainText(lydoc.plaintext())  # write the text in it
        doc.setModified(False)  # make it "not modified"
        self.parent().setCurrentDocument(doc)

    def showPreview(self):
        """Shows a preview."""
        # get the document and fill in some example music
        from . import build, preview
        builder = build.Builder(self)
        doc = builder.document()
        preview.examplify(doc)
        # preview it
        import musicpreview
        dlg = musicpreview.MusicPreviewDialog(self)
        dlg.preview(builder.text(doc), _("Score Preview"))
        dlg.exec_()
        dlg.cleanup()

    def readScore(self):
        """Read the score of an existing document."""
        self.resetAll()
        cursor = self.mainwindow.textCursor()
        text = cursor.document().toPlainText()
        # Parse the music
        music = ly.music.document(ly.document.Document(text))
        for item in music:
            if isinstance(item, ly.music.items.Header):
                self.header.readFromMusicItem(item)
            elif isinstance(item, ly.music.items.Assignment):
                # "global" holds the score settings; "...Part" names are parts.
                name = item.name()
                if name == "global":
                    self.settings.readFromMusicItem(item)
                elif name.endswith("Part"):
                    self.parts.readFromMusicItem(item)
class Page(QWidget):
    """A lazily-populated page in the wizard's tab widget.

    The actual page widget is only created the first time widget()
    is called, keeping dialog startup cheap.
    """

    def __init__(self, dialog):
        super().__init__(dialog)
        self._widget = None

    def title(self):
        """Return the (translated) tab title; implemented by subclasses."""

    def widget(self):
        """Return the page widget, creating and embedding it on first call."""
        if self._widget is not None:
            return self._widget
        vbox = QVBoxLayout()
        vbox.setContentsMargins(0, 0, 0, 0)
        self.setLayout(vbox)
        self._widget = self.createWidget(self)
        vbox.addWidget(self._widget)
        return self._widget

    def createWidget(self, parent):
        """Instantiate the real page widget; implemented by subclasses."""
class Header(Page):
    """Wizard page holding the score's titles and header fields."""

    def title(self):
        return _("&Titles and Headers")

    def createWidget(self, parent):
        from . import header
        return header.HeaderWidget(parent)

    def readFromMusicItem(self, headerBlock):
        """Read an existing header block as a ly.music.items.Header."""
        edits = self.widget().edits
        for entry in headerBlock:
            field = entry.name()
            contents = entry.value()
            # Only fields the header widget knows about are filled in.
            if field in edits:
                edits[field].setText(contents.plaintext())
class Parts(Page):
    """Wizard page listing the parts (instruments, voices) of the score."""

    def __init__(self, dialog):
        super().__init__(dialog)
        # Dictionary mapping assignment identifiers to the corresponding
        # Part class. For example: self._partTypes['jazzGuitar'] == JazzGuitar
        self._partTypes = {}

    def title(self):
        return _("&Parts")

    def createWidget(self, parent):
        from . import score
        return score.ScorePartsWidget(parent)

    def readFromMusicItem(self, assignment):
        """Read a part definition from an ly.music.items.Assignment."""
        from . import score
        widget = self.widget()
        if not self._partTypes:
            # This is only needed when reading from an existing score,
            # so generate it the first time it is used.
            from . import parts
            for category in parts.categories:
                for item in category.items:
                    self._partTypes[ly.util.mkid(item.__name__)] = item
        name = assignment.name()
        # Make sure this is, in fact, a part definition before proceeding.
        # We already check this in ScoreWizardDialog.readScore(), but it never
        # hurts to be safe...
        if name.endswith("Part"):
            # Keep the try narrow so only the part-type lookup can trigger
            # the fallback, not unrelated KeyErrors from the widget code.
            try:
                part = self._partTypes[name[:-4]]
            except KeyError:
                # Unrecognized part type; fall back on a piano staff since
                # they can hold almost anything.  (Previously this branch
                # assigned an unused variable and dropped the part entirely.)
                part = self._partTypes["piano"]
            # TODO: Handle containers
            parent = widget.scoreView
            box = QGroupBox(widget.partSettings)
            score.PartItem(parent, part, box)
class Settings(Page):
    """Wizard page with the general, instrumental and score settings."""

    def title(self):
        return _("&Score settings")

    def createWidget(self, parent):
        from . import settings
        return settings.SettingsWidget(parent)

    def readFromMusicItem(self, assignment):
        """Read settings from an ly.music.items.Assignment."""
        from . import scoreproperties
        sp = self.widget().scoreProperties
        for node in assignment.value():
            if isinstance(node, ly.music.items.KeySignature):
                pitch = node.pitch()
                # pitch.alter is a fractions.Fraction
                signature = (pitch.note, pitch.alter.numerator)
                if signature in scoreproperties.keys:
                    sp.keyNote.setCurrentIndex(
                        scoreproperties.keys.index(signature)
                    )
                for mode, translation in scoreproperties.modes:
                    if mode == node.mode():
                        sp.keyMode.setCurrentText(translation())
                        break
            elif isinstance(node, ly.music.items.Partial):
                length = node.partial_length()
                duration = (length.denominator, length.numerator)
                if duration in scoreproperties.midiDurations:
                    # index 0 is "None"
                    sp.pickup.setCurrentIndex(
                        scoreproperties.midiDurations.index(duration) + 1
                    )
            elif isinstance(node, ly.music.items.Tempo):
                fraction = node.fraction()
                duration = (fraction.denominator, fraction.numerator)
                if duration in scoreproperties.midiDurations:
                    sp.metronomeNote.setCurrentIndex(
                        scoreproperties.midiDurations.index(duration)
                    )
                tempo = node.tempo()
                if tempo:
                    sp.metronomeValue.setCurrentText(str(tempo[0]))
                if node.text():
                    sp.tempo.setText(node.text().plaintext())
            elif isinstance(node, ly.music.items.TimeSignature):
                # Note node.fraction().numerator is always 1
                sp.timeSignature.setCurrentText(
                    "{}/{}".format(node.numerator(), node.fraction().denominator)
                )
|
client | escape | # Methods used for rendering a ClickHouse query, given a template string and a
# set of parameters.
#
# This uses the `escape_param` function from the `clickhouse-driver` package,
# but passes an empty `Context` object to it. Prior to
# https://github.com/mymarilyn/clickhouse-driver/commit/87090902f0270ed51a0b6754d5cbf0dc8544ec4b
# the `escape_param` function didn't take a `Context` object. As of
# `clickhouse-driver` 0.2.4 all it uses the context for is to determine the
# "server" timezone, so passing an empty context maintains the existing
# behaviour of `clickhouse-driver` 0.2.1, the version we were previously using.
#
# This is of course a bit of a hack but we want to be able to render queries
# without the need of having a connection, which seems like a reasonable thing
# to be able to do. Having a dependency on a connection to render a query is a
# little over the top.
#
# NOTE: this change is necessary because the `clickhouse-driver` package up to
# 0.2.3 uses an invalid `python_requires` in its `setup.py`, at least for
# recent versions of setuptools. This was highlighted as a consequence of
# upgrading to Python 3.10. See
# https://github.com/mymarilyn/clickhouse-driver/pull/291 for further context.
from typing import Any
from clickhouse_driver.connection import ServerInfo
from clickhouse_driver.context import Context
from clickhouse_driver.util.escape import escape_param
def substitute_params(query, params):
    """Render `query` with `params` substituted, without needing a connection.

    This mirrors clickhouse-driver's `substitute_params`, minus its requirement
    that a server connection exist before params can be escaped. We previously
    hit a bug where params were substituted before the connection was
    established (presumably on initial worker startup only), which made the
    query fail, hence this standalone copy.

    See
    https://github.com/mymarilyn/clickhouse-driver/blob/87090902f0270ed51a0b6754d5cbf0dc8544ec4b/clickhouse_driver/client.py#L593
    for the original function.
    """
    # Guard clause: only dict-shaped parameters are supported.
    if isinstance(params, dict):
        return query % escape_params(params)
    raise ValueError("Parameters are expected in dict form")
def escape_params(params):
    """Escape every value of `params`, without needing a server connection.

    Connection-free copy of clickhouse-driver's `escape_params`; see
    https://github.com/mymarilyn/clickhouse-driver/blob/87090902f0270ed51a0b6754d5cbf0dc8544ec4b/clickhouse_driver/util/escape.py#L60
    for the original function.
    """
    return {
        key: escape_param_for_clickhouse(value) for key, value in params.items()
    }
def escape_param_for_clickhouse(param: Any) -> str:
    """Escape one value via clickhouse-driver's `escape_param`.

    A placeholder `Context` is passed in purely so the call can run: the only
    thing the real `escape_param` reads from it is the server timezone, which
    we assume to be UTC.

    See
    https://github.com/mymarilyn/clickhouse-driver/blob/87090902f0270ed51a0b6754d5cbf0dc8544ec4b/clickhouse_driver/util/escape.py#L31
    for the wrapped function.
    """
    # All fields except the timezone are irrelevant to escaping.
    filler = "placeholder server_info value"
    context = Context()
    context.server_info = ServerInfo(
        name=filler,
        version_major=filler,
        version_minor=filler,
        version_patch=filler,
        revision=filler,
        display_name=filler,
        timezone="UTC",
    )
    return escape_param(param, context=context)
|
schemas | challenges | from CTFd.models import Challenges, ma
from marshmallow import validate
from marshmallow.exceptions import ValidationError
from marshmallow_sqlalchemy import field_for
class ChallengeRequirementsValidator(validate.Validator):
    """Validate the `requirements` payload attached to a challenge.

    The payload must be a dict; its optional "prerequisites" list must not
    contain any null/falsy entries.
    """

    default_message = "Error parsing challenge requirements"

    def __init__(self, error=None):
        # Message used when the payload is not a dict; callers may override it.
        self.error = error or self.default_message

    def __call__(self, value):
        if not isinstance(value, dict):
            # Fix: honor the configured message instead of always raising
            # the class-level default (previously `error` was silently ignored).
            raise ValidationError(self.error)
        prereqs = value.get("prerequisites", [])
        # all() is True for an empty list, so absent prerequisites are fine.
        if all(prereqs) is False:
            raise ValidationError(
                "Challenge requirements cannot have a null prerequisite"
            )
        return value
class ChallengeSchema(ma.ModelSchema):
    """Marshmallow schema (de)serializing `Challenges` rows.

    Adds length limits on the text columns and structural validation of the
    `requirements` JSON blob on top of the model-derived fields.
    """

    class Meta:
        model = Challenges
        include_fk = True
        # `id` is generated by the database; never accepted from input.
        dump_only = ("id",)

    # Max lengths mirror the column sizes so errors surface at the API layer
    # instead of as database errors.
    name = field_for(
        Challenges,
        "name",
        validate=[
            validate.Length(
                min=0,
                max=80,
                error="Challenge could not be saved. Challenge name too long",
            )
        ],
    )

    category = field_for(
        Challenges,
        "category",
        validate=[
            validate.Length(
                min=0,
                max=80,
                error="Challenge could not be saved. Challenge category too long",
            )
        ],
    )

    description = field_for(
        Challenges,
        "description",
        allow_none=True,
        validate=[
            validate.Length(
                min=0,
                max=65535,
                error="Challenge could not be saved. Challenge description too long",
            )
        ],
    )

    requirements = field_for(
        Challenges,
        "requirements",
        validate=[ChallengeRequirementsValidator()],
    )
|
process-manager | process | from __future__ import annotations
import logging
import os
import sqlite3
import time
from datetime import datetime, timedelta
from enum import Enum
from typing import TYPE_CHECKING, Any, List, Optional
import psutil
from tribler.core.utilities.process_manager import sql_scripts
from tribler.core.utilities.process_manager.utils import with_retry
from tribler.core.version import version_id
if TYPE_CHECKING:
from tribler.core.utilities.process_manager import ProcessManager
class ProcessKind(Enum):
    """Kind of Tribler process recorded in the processes database."""
    GUI = "gui"
    Core = "core"
class TriblerProcess:
    """One row of the shared `processes` table, describing a GUI or Core process.

    An instance mirrors a database row; `save()` writes changes back, using
    the `row_version` column for optimistic-locking style updates.
    """

    def __init__(
        self,
        pid: int,
        kind: ProcessKind,
        app_version: str,
        started_at: int,
        row_version: int = 0,
        rowid: Optional[int] = None,
        creator_pid: Optional[int] = None,
        primary: bool = False,
        canceled: bool = False,
        api_port: Optional[int] = None,
        finished_at: Optional[int] = None,
        exit_code: Optional[int] = None,
        error_msg: Optional[str] = None,
        manager: Optional[ProcessManager] = None,
    ):
        self._manager = manager
        self.rowid = rowid
        self.row_version = row_version
        self.pid = pid
        self.kind = kind
        self.primary = primary
        self.canceled = canceled
        self.app_version = app_version
        self.started_at = started_at
        self.creator_pid = creator_pid
        self.api_port = api_port
        self.finished_at = finished_at
        self.exit_code = exit_code
        self.error_msg = error_msg

    @property
    def manager(self) -> ProcessManager:
        """Return the owning ProcessManager; raise if none was attached."""
        if self._manager is None:
            raise RuntimeError("Tribler process manager is not set in process object")
        return self._manager

    @manager.setter
    def manager(self, manager: ProcessManager):
        self._manager = manager

    @property
    def logger(self) -> logging.Logger:
        """Used by the `with_retry` decorator"""
        return self.manager.logger

    @property
    def connection(self) -> Optional[sqlite3.Connection]:
        """Used by the `with_retry` decorator"""
        return self.manager.connection

    @with_retry
    def save(self):
        """Saves object into the database"""
        with self.manager.connect() as connection:
            # rowid is None until the first insert assigns one.
            if self.rowid is None:
                self._insert(connection)
            else:
                self._update(connection)

    @classmethod
    def from_row(cls, manager: ProcessManager, row: tuple) -> TriblerProcess:
        """Constructs an object from the database row"""
        (
            rowid,
            row_version,
            pid,
            kind,
            primary,
            canceled,
            app_version,
            started_at,
            creator_pid,
            api_port,
            finished_at,
            exit_code,
            error_msg,
        ) = row
        return TriblerProcess(
            manager=manager,
            rowid=rowid,
            row_version=row_version,
            pid=pid,
            kind=ProcessKind(kind),
            primary=primary,
            canceled=canceled,
            app_version=app_version,
            started_at=started_at,
            creator_pid=creator_pid,
            api_port=api_port,
            finished_at=finished_at,
            exit_code=exit_code,
            error_msg=error_msg,
        )

    def __str__(self) -> str:
        """Return a human-readable one-line summary of the process row."""
        kind = self.kind.value.capitalize()
        elements: List[str] = []
        append = elements.append
        append(
            "finished" if self.finished_at or self.exit_code is not None else "running"
        )
        if self.is_current_process():
            append("current process")
        if self.primary:
            append("primary")
        if self.canceled:
            append("canceled")
        append(f"pid={self.pid}")
        if self.creator_pid is not None:
            append(f"gui_pid={self.creator_pid}")
        started = datetime.utcfromtimestamp(self.started_at)
        append(f"version='{self.app_version}'")
        append(f"started='{started.strftime('%Y-%m-%d %H:%M:%S')}'")
        if self.api_port is not None:
            append(f"api_port={self.api_port}")
        if self.finished_at:
            finished = datetime.utcfromtimestamp(self.finished_at)
            duration = finished - started
        else:
            duration = timedelta(seconds=int(time.time()) - self.started_at)
        append(f"duration='{duration}'")
        if self.exit_code is not None:
            append(f"exit_code={self.exit_code}")
        if self.error_msg:
            append(f"error={repr(self.error_msg)}")
        # Fix: previously the f-string was passed through a pointless
        # `"".join(result)` (joining a str with "" returns the same str).
        return f'{kind}Process({", ".join(elements)})'

    @classmethod
    def current_process(
        cls,
        kind: ProcessKind,
        creator_pid: Optional[int] = None,
        manager: Optional[ProcessManager] = None,
    ) -> TriblerProcess:
        """Constructs an object for a current process, specifying the PID value of the current process"""
        pid = os.getpid()
        psutil_process = psutil.Process(pid)
        started_at = int(psutil_process.create_time())
        return cls(
            manager=manager,
            row_version=0,
            pid=pid,
            kind=kind,
            app_version=version_id,
            started_at=started_at,
            creator_pid=creator_pid,
        )

    def is_current_process(self) -> bool:
        """Returns True if the object represents the current process"""
        return self.pid == os.getpid() and self.is_running()

    @with_retry
    def become_primary(self) -> bool:
        """
        If there is no primary process already, makes the current process primary and returns the primary status
        """
        with self.manager.connect():
            # for a new process object self.rowid is None
            primary_rowid = self.manager.primary_process_rowid(self.kind)
            if primary_rowid is None or primary_rowid == self.rowid:
                self.primary = True
            else:
                self.canceled = True
            self.save()
        return bool(self.primary)

    def is_running(self):
        """Returns True if the object represents a running process"""
        if not psutil.pid_exists(self.pid):
            return False
        try:
            psutil_process = psutil.Process(self.pid)
            status = psutil_process.status()
        except psutil.Error as e:
            self.logger.warning(e)
            return False
        if status == psutil.STATUS_ZOMBIE:
            return False
        psutil_process_create_time = int(psutil_process.create_time())
        if psutil_process_create_time > self.started_at:
            # The same PID value was reused for a new process, so the previous process is not running anymore
            return False
        return True

    def set_api_port(self, api_port: int):
        """Record the REST API port and persist the row."""
        self.api_port = api_port
        self.save()

    def set_error(self, error: Any, replace: bool = False):
        """Record an error message; keep the first one unless `replace` is True."""
        # It is expected for `error` to be str or an instance of exception, but values of other types
        # are handled gracefully as well: everything except None is converted to str
        if error is not None and not isinstance(error, str):
            error = f"{error.__class__.__name__}: {error}"
        if replace or not self.error_msg:
            self.error_msg = error
        self.save()

    def finish(self, exit_code: Optional[int] = None):
        """Mark the process as finished and persist the final state."""
        self.primary = False
        self.finished_at = int(time.time())
        # if exit_code is specified, it overrides the previously set exit code
        if exit_code is not None:
            self.exit_code = exit_code
        # if no exit code is specified, use exit code 0 (success) as a default value
        if self.exit_code is None:
            self.exit_code = 0 if not self.error_msg else 1
        self.save()

    def _insert(self, connection: sqlite3.Connection):
        """Insert a new row into the table"""
        self.row_version = 0
        cursor = connection.cursor()
        cursor.execute(
            """
            INSERT INTO processes (
                pid, kind, "primary", canceled, app_version, started_at,
                creator_pid, api_port, finished_at, exit_code, error_msg
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            [
                self.pid,
                self.kind.value,
                int(self.primary),
                int(self.canceled),
                self.app_version,
                self.started_at,
                self.creator_pid,
                self.api_port,
                self.finished_at,
                self.exit_code,
                self.error_msg,
            ],
        )
        self.rowid = cursor.lastrowid

    def _update(self, connection: sqlite3.Connection):
        """Update an existing row in the table"""
        prev_version = self.row_version
        self.row_version += 1
        cursor = connection.cursor()
        # The WHERE clause re-checks the immutable columns plus the previous
        # row_version, so a concurrent writer cannot be silently overwritten.
        cursor.execute(
            """
            UPDATE processes
            SET row_version = ?, "primary" = ?, canceled = ?, creator_pid = ?, api_port = ?,
                finished_at = ?, exit_code = ?, error_msg = ?
            WHERE rowid = ? and row_version = ? and pid = ? and kind = ? and app_version = ? and started_at = ?
            """,
            [
                self.row_version,
                int(self.primary),
                int(self.canceled),
                self.creator_pid,
                self.api_port,
                self.finished_at,
                self.exit_code,
                self.error_msg,
                self.rowid,
                prev_version,
                self.pid,
                self.kind.value,
                self.app_version,
                self.started_at,
            ],
        )
        if cursor.rowcount == 0:
            self.logger.error(
                f"Row {self.rowid} with row version {prev_version} was not found"
            )

    def get_core_process(self) -> Optional[TriblerProcess]:
        """
        Returns Core process created by the current GUI process, or None if the Core process was not found in the DB.
        """
        if self.kind != ProcessKind.GUI:
            raise TypeError(
                "The `get_core_process` method can only be used for a GUI process"
            )
        with self.manager.connect() as connection:
            cursor = connection.execute(
                f"""
                SELECT {sql_scripts.SELECT_COLUMNS}
                FROM processes WHERE "primary" = 1 and kind = ? and creator_pid = ?
                """,
                [ProcessKind.Core.value, self.pid],
            )
            rows = cursor.fetchall()
            if len(rows) > 1:  # should not happen
                raise RuntimeError(
                    "Multiple Core processes were found for a single GUI process"
                )
            return TriblerProcess.from_row(self.manager, rows[0]) if rows else None
|
extractor | telequebec | # coding: utf-8
from __future__ import unicode_literals
from ..compat import compat_str
from ..utils import int_or_none, smuggle_url, try_get, unified_timestamp
from .common import InfoExtractor
class TeleQuebecBaseIE(InfoExtractor):
    """Shared helpers for the Télé-Québec extractors."""

    BRIGHTCOVE_URL_TEMPLATE = (
        "http://players.brightcove.net/%s/%s_default/index.html?videoId=%s"
    )

    @staticmethod
    def _brightcove_result(brightcove_id, player_id, account_id="6150020952001"):
        # Build the Brightcove player URL and geo-restrict playback to Canada.
        player_url = TeleQuebecBaseIE.BRIGHTCOVE_URL_TEMPLATE % (
            account_id,
            player_id,
            brightcove_id,
        )
        return {
            "_type": "url_transparent",
            "url": smuggle_url(player_url, {"geo_countries": ["CA"]}),
            "ie_key": "BrightcoveNew",
        }
class TeleQuebecIE(TeleQuebecBaseIE):
    """Extractor for on-demand media on zonevideo/coucou.telequebec.tv."""

    _VALID_URL = r"""(?x)
        https?://
        (?:
            zonevideo\.telequebec\.tv/media|
            coucou\.telequebec\.tv/videos
        )/(?P<id>\d+)
        """
    _TESTS = [
        {
            # available till 01.01.2023
            "url": "http://zonevideo.telequebec.tv/media/37578/un-petit-choc-et-puis-repart/un-chef-a-la-cabane",
            "info_dict": {
                "id": "6155972771001",
                "ext": "mp4",
                "title": "Un petit choc et puis repart!",
                "description": "md5:b04a7e6b3f74e32d7b294cffe8658374",
                "timestamp": 1589262469,
                "uploader_id": "6150020952001",
                "upload_date": "20200512",
            },
            "params": {
                "format": "bestvideo",
            },
            "add_ie": ["BrightcoveNew"],
        },
        {
            "url": "https://zonevideo.telequebec.tv/media/55267/le-soleil/passe-partout",
            "info_dict": {
                "id": "6167180337001",
                "ext": "mp4",
                "title": "Le soleil",
                "description": "md5:64289c922a8de2abbe99c354daffde02",
                "uploader_id": "6150020952001",
                "upload_date": "20200625",
                "timestamp": 1593090307,
            },
            "params": {
                "format": "bestvideo",
            },
            "add_ie": ["BrightcoveNew"],
        },
        {
            # no description
            "url": "http://zonevideo.telequebec.tv/media/30261",
            "only_matching": True,
        },
        {
            "url": "https://coucou.telequebec.tv/videos/41788/idee-de-genie/l-heure-du-bain",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        media_id = self._match_id(url)
        # The media metadata endpoint also lists available stream providers.
        media = self._download_json(
            "https://mnmedias.api.telequebec.tv/api/v3/media/" + media_id, media_id
        )["media"]
        # Pick the Brightcove source; raises StopIteration if none exists.
        source_id = next(
            source_info["sourceId"]
            for source_info in media["streamInfos"]
            if source_info.get("source") == "Brightcove"
        )
        info = self._brightcove_result(source_id, "22gPKdt7f")
        product = media.get("product") or {}
        season = product.get("season") or {}
        info.update(
            {
                "description": try_get(
                    media, lambda x: x["descriptions"][-1]["text"], compat_str
                ),
                "series": try_get(season, lambda x: x["serie"]["titre"]),
                "season": season.get("name"),
                "season_number": int_or_none(season.get("seasonNo")),
                "episode": product.get("titre"),
                "episode_number": int_or_none(product.get("episodeNo")),
            }
        )
        return info
class TeleQuebecSquatIE(InfoExtractor):
    """Extractor for squat.telequebec.tv; delegates playback to TeleQuebecIE."""

    _VALID_URL = r"https://squat\.telequebec\.tv/videos/(?P<id>\d+)"
    _TESTS = [
        {
            "url": "https://squat.telequebec.tv/videos/9314",
            "info_dict": {
                "id": "d59ae78112d542e793d83cc9d3a5b530",
                "ext": "mp4",
                "title": "Poupeflekta",
                "description": "md5:2f0718f8d2f8fece1646ee25fb7bce75",
                "duration": 1351,
                "timestamp": 1569057600,
                "upload_date": "20190921",
                "series": "Miraculous : Les Aventures de Ladybug et Chat Noir",
                "season": "Saison 3",
                "season_number": 3,
                "episode_number": 57,
            },
            "params": {
                "skip_download": True,
            },
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video = self._download_json(
            "https://squat.api.telequebec.tv/v1/videos/%s" % video_id, video_id
        )
        # sourceId refers to the corresponding zonevideo media id.
        media_id = video["sourceId"]
        return {
            "_type": "url_transparent",
            "url": "http://zonevideo.telequebec.tv/media/%s" % media_id,
            "ie_key": TeleQuebecIE.ie_key(),
            "id": media_id,
            "title": video.get("titre"),
            "description": video.get("description"),
            "timestamp": unified_timestamp(video.get("datePublication")),
            "series": video.get("container"),
            "season": video.get("saison"),
            "season_number": int_or_none(video.get("noSaison")),
            "episode_number": int_or_none(video.get("episode")),
        }
class TeleQuebecEmissionIE(InfoExtractor):
    """Extractor for show pages; scrapes the media id and hands off to TeleQuebecIE."""

    _VALID_URL = r"""(?x)
        https?://
        (?:
            [^/]+\.telequebec\.tv/emissions/|
            (?:www\.)?telequebec\.tv/
        )
        (?P<id>[^?#&]+)
        """
    _TESTS = [
        {
            "url": "http://lindicemcsween.telequebec.tv/emissions/100430013/des-soins-esthetiques-a-377-d-interets-annuels-ca-vous-tente",
            "info_dict": {
                "id": "6154476028001",
                "ext": "mp4",
                "title": "Des soins esthétiques à 377 % d’intérêts annuels, ça vous tente?",
                "description": "md5:cb4d378e073fae6cce1f87c00f84ae9f",
                "upload_date": "20200505",
                "timestamp": 1588713424,
                "uploader_id": "6150020952001",
            },
            "params": {
                "format": "bestvideo",
            },
        },
        {
            "url": "http://bancpublic.telequebec.tv/emissions/emission-49/31986/jeunes-meres-sous-pression",
            "only_matching": True,
        },
        {
            "url": "http://www.telequebec.tv/masha-et-michka/epi059masha-et-michka-3-053-078",
            "only_matching": True,
        },
        {
            "url": "http://www.telequebec.tv/documentaire/bebes-sur-mesure/",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The numeric media id is embedded in the page's JavaScript.
        media_id = self._search_regex(r"mediaId\s*:\s*(?P<id>\d+)", webpage, "media id")
        return self.url_result(
            "http://zonevideo.telequebec.tv/media/" + media_id, TeleQuebecIE.ie_key()
        )
class TeleQuebecLiveIE(TeleQuebecBaseIE):
    """Extractor for the Télé-Québec live stream (fixed Brightcove id)."""

    _VALID_URL = r"https?://zonevideo\.telequebec\.tv/(?P<id>endirect)"
    _TEST = {
        "url": "http://zonevideo.telequebec.tv/endirect/",
        "info_dict": {
            "id": "6159095684001",
            "ext": "mp4",
            "title": "re:^Télé-Québec [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$",
            "is_live": True,
            "description": "Canal principal de Télé-Québec",
            "uploader_id": "6150020952001",
            "timestamp": 1590439901,
            "upload_date": "20200525",
        },
        "params": {
            "skip_download": True,
        },
    }

    def _real_extract(self, url):
        # The live channel always maps to the same Brightcove video/player ids.
        return self._brightcove_result("6159095684001", "skCsmi2Uw")
class TeleQuebecVideoIE(TeleQuebecBaseIE):
    """Extractor for video.telequebec.tv player pages (Brightcove Beacon API)."""

    _VALID_URL = r"https?://video\.telequebec\.tv/player(?:-live)?/(?P<id>\d+)"
    _TESTS = [
        {
            "url": "https://video.telequebec.tv/player/31110/stream",
            "info_dict": {
                "id": "6202570652001",
                "ext": "mp4",
                "title": "Le coût du véhicule le plus vendu au Canada / Tous les frais liés à la procréation assistée",
                "description": "md5:685a7e4c450ba777c60adb6e71e41526",
                "upload_date": "20201019",
                "timestamp": 1603115930,
                "uploader_id": "6101674910001",
            },
            "params": {
                "format": "bestvideo",
            },
        },
        {
            "url": "https://video.telequebec.tv/player-live/28527",
            "only_matching": True,
        },
    ]

    def _call_api(self, path, video_id):
        """Fetch `path` from the Brightcove Beacon API and return its "data"."""
        return self._download_json(
            "http://beacon.playback.api.brightcove.com/telequebec/api/assets/" + path,
            video_id,
            query={"device_layout": "web", "device_type": "web"},
        )["data"]

    def _real_extract(self, url):
        asset_id = self._match_id(url)
        asset = self._call_api(asset_id, asset_id)["asset"]
        # Only the first listed stream is used.
        stream = self._call_api(
            asset_id + "/streams/" + asset["streams"][0]["id"], asset_id
        )["stream"]
        stream_url = stream["url"]
        account_id = (
            try_get(stream, lambda x: x["video_provider_details"]["account_id"])
            or "6101674910001"
        )
        info = self._brightcove_result(stream_url, "default", account_id)
        info.update(
            {
                "description": asset.get("long_description")
                or asset.get("short_description"),
                "series": asset.get("series_original_name"),
                "season_number": int_or_none(asset.get("season_number")),
                "episode": asset.get("original_name"),
                "episode_number": int_or_none(asset.get("episode_number")),
            }
        )
        return info
|
machine | keyboard | # -*- coding: utf-8 -*-
# Copyright (c) 2010 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"For use with a computer keyboard (preferably NKRO) as a steno machine."
from plover import _
from plover.machine.base import StenotypeBase
from plover.misc import boolean
from plover.oslayer.keyboardcontrol import KeyboardCapture
# i18n: Machine name.
_._("Keyboard")
class Keyboard(StenotypeBase):
    """Standard stenotype interface for a computer keyboard.
    This class implements the three methods necessary for a standard
    stenotype interface: start_capture, stop_capture, and
    add_callback.
    """
    # Physical key names, row by row; the keymap binds these to steno keys.
    KEYS_LAYOUT = """
    Escape F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12
    ` 1 2 3 4 5 6 7 8 9 0 - = \\ BackSpace Insert Home Page_Up
    Tab q w e r t y u i o p [ ] Delete End Page_Down
    a s d f g h j k l ; ' Return
    z x c v b n m , . / Up
    space Left Down Right
    """
    # Special (non steno-key) keymap actions supported by this machine.
    ACTIONS = ("arpeggiate",)

    def __init__(self, params):
        """Monitor the keyboard's events.

        params: options dict; see get_option_info() for keys and defaults.
        """
        super().__init__()
        # Option: a stroke is only sent when the dedicated arpeggiate key
        # is part of it (keys may then be pressed one at a time).
        self._arpeggiate = params["arpeggiate"]
        # Option: send the stroke on the FIRST key release instead of
        # waiting for every key to come up.
        self._first_up_chord_send = params["first_up_chord_send"]
        # The two modes are mutually exclusive: arpeggiate waits for all
        # keys to be released, first-up sends before that can happen.
        if self._arpeggiate and self._first_up_chord_send:
            self._error()
            raise RuntimeError(
                "Arpeggiate and first-up chord send cannot both be enabled!"
            )
        # Whether bound keys are currently being suppressed from the OS.
        self._is_suppressed = False
        # Currently held keys.
        self._down_keys = set()
        if self._first_up_chord_send:
            # If this is True, the first key in a stroke has already been released
            # and subsequent key-up events should not send more strokes
            self._chord_already_sent = False
        else:
            # Collect the keys in the stroke, in case first_up_chord_send is False
            self._stroke_keys = set()
        # Set while capturing; None otherwise (see start/stop_capture).
        self._keyboard_capture = None
        # Key-down counts, used by suppress_last_stroke() to know how many
        # backspaces erase the raw text produced by the last stroke.
        self._last_stroke_key_down_count = 0
        self._stroke_key_down_count = 0
        self._update_bindings()

    def _update_suppression(self):
        # Tell the OS-level capture which keys to swallow: every bound key
        # while suppression is enabled, none otherwise.
        if self._keyboard_capture is None:
            return
        suppressed_keys = self._bindings.keys() if self._is_suppressed else ()
        self._keyboard_capture.suppress(suppressed_keys)

    def _update_bindings(self):
        # Rebuild the physical-key -> steno-key mapping from the keymap,
        # resolving the special "no-op" and "arpeggiate" actions.
        self._arpeggiate_key = None
        self._bindings = dict(self.keymap.get_bindings())
        for key, mapping in list(self._bindings.items()):
            if "no-op" == mapping:
                # Key is still suppressed but contributes no steno key.
                self._bindings[key] = None
            elif "arpeggiate" == mapping:
                if self._arpeggiate:
                    # Arpeggiate key is suppressed but is not a steno key.
                    self._bindings[key] = None
                    self._arpeggiate_key = key
                else:
                    # Don't suppress arpeggiate key if it's not used.
                    del self._bindings[key]
        self._update_suppression()

    def set_keymap(self, keymap):
        # Re-derive bindings whenever the keymap changes.
        super().set_keymap(keymap)
        self._update_bindings()

    def start_capture(self):
        """Begin listening for output from the stenotype machine."""
        self._initializing()
        try:
            self._keyboard_capture = KeyboardCapture()
            self._keyboard_capture.key_down = self._key_down
            self._keyboard_capture.key_up = self._key_up
            self._keyboard_capture.start()
            self._update_suppression()
        except:
            # Report the failure to the engine, then let it propagate.
            self._error()
            raise
        self._ready()

    def stop_capture(self):
        """Stop listening for output from the stenotype machine."""
        if self._keyboard_capture is not None:
            # Stop suppressing keys before tearing the capture down.
            self._is_suppressed = False
            self._update_suppression()
            self._keyboard_capture.cancel()
            self._keyboard_capture = None
        self._stopped()

    def set_suppression(self, enabled):
        # Toggle swallowing of bound keys (on when steno output is enabled).
        self._is_suppressed = enabled
        self._update_suppression()

    def suppress_last_stroke(self, send_backspaces):
        # Erase the raw characters typed by the last stroke's key presses,
        # then reset the count so the erase cannot be repeated.
        send_backspaces(self._last_stroke_key_down_count)
        self._last_stroke_key_down_count = 0

    def _key_down(self, key):
        """Called when a key is pressed."""
        assert key is not None
        self._stroke_key_down_count += 1
        self._down_keys.add(key)
        if self._first_up_chord_send:
            # A new press starts/extends a chord, so the next release
            # should send again.
            self._chord_already_sent = False
        else:
            self._stroke_keys.add(key)

    def _key_up(self, key):
        """Called when a key is released."""
        assert key is not None
        self._down_keys.discard(key)
        if self._first_up_chord_send:
            # Only the first release of a chord triggers a send.
            if self._chord_already_sent:
                return
        else:
            # A stroke is complete if all pressed keys have been released,
            # and — when arpeggiate mode is enabled — the arpeggiate key
            # is part of it.
            if (
                self._down_keys
                or not self._stroke_keys
                or (self._arpeggiate and self._arpeggiate_key not in self._stroke_keys)
            ):
                return
        self._last_stroke_key_down_count = self._stroke_key_down_count
        if self._first_up_chord_send:
            # Chord = keys still held plus the one just released.
            steno_keys = {self._bindings.get(k) for k in self._down_keys | {key}}
            self._chord_already_sent = True
        else:
            steno_keys = {self._bindings.get(k) for k in self._stroke_keys}
            self._stroke_keys.clear()
        # Unbound and no-op keys map to None; drop them.
        steno_keys -= {None}
        if steno_keys:
            self._notify(steno_keys)
        self._stroke_key_down_count = 0

    @classmethod
    def get_option_info(cls):
        # Supported options as {name: (default, converter)}.
        return {
            "arpeggiate": (False, boolean),
            "first_up_chord_send": (False, boolean),
        }
|
conversions | releaseMar2016 | """
Conversion pack for January 2016 (YC118.1) release
"""
CONVERSIONS = {
# Renamed items
"Basic Gyrostabilizer": "'Basic' Gyrostabilizer",
"Basic Damage Control": "'Basic' Damage Control",
"Micro Capacitor Battery I": "'Micro' Cap Battery",
"ECM Burst I": "Burst Jammer I",
"Small Capacitor Battery I": "Small Cap Battery I",
"Basic Heat Sink": "'Basic' Heat Sink",
"ECM - Ion Field Projector I": "Magnetometric ECM I",
"Basic Signal Amplifier": "'Basic' Signal Amplifier",
"Basic Tracking Enhancer": "'Basic' Tracking Enhancer",
"ECM - Spatial Destabilizer I": "Gravimetric ECM I",
"ECM - White Noise Generator I": "Radar ECM I",
"ECM - Multispectral Jammer I": "Multispectral ECM I",
"ECM - Phase Inverter I": "Ladar ECM I",
"Medium Capacitor Battery I": "Medium Cap Battery I",
"Large Capacitor Battery I": "Large Cap Battery I",
"ECM Burst II": "Burst Jammer II",
"Guristas Nova Citadel Cruise Missile": "Guristas Nova XL Cruise Missile",
"Guristas Scourge Citadel Cruise Missile": "Guristas Scourge XL Cruise Missile",
"Guristas Inferno Citadel Cruise Missile": "Guristas Inferno XL Cruise Missile",
"Guristas Mjolnir Citadel Cruise Missile": "Guristas Mjolnir XL Cruise Missile",
"ECM - Phase Inverter II": "Ladar ECM II",
"ECM - Ion Field Projector II": "Magnetometric ECM II",
"ECM - Multispectral Jammer II": "Multispectral ECM II",
"ECM - Spatial Destabilizer II": "Gravimetric ECM II",
"ECM - White Noise Generator II": "Radar ECM II",
"Small Capacitor Battery II": "Small Cap Battery II",
"Medium Capacitor Battery II": "Medium Cap Battery II",
"Large Capacitor Battery II": "Large Cap Battery II",
"'Limos' Citadel Cruise Launcher I": "'Limos' XL Cruise Launcher I",
"Shock 'Limos' Citadel Torpedo Bay I": "Shock 'Limos' XL Torpedo Bay I",
"X5 Prototype Engine Enervator": "X5 Enduring Stasis Webifier",
"Fleeting Propulsion Inhibitor I": "Fleeting Compact Stasis Webifier",
"Caldari Fuel Block": "Nitrogen Fuel Block",
"Minmatar Fuel Block": "Hydrogen Fuel Block",
"Amarr Fuel Block": "Helium Fuel Block",
"Gallente Fuel Block": "Oxygen Fuel Block",
"Small Ld-Acid Capacitor Battery I": "Small Compact Pb-Acid Cap Battery ",
"Large Ld-Acid Capacitor Battery I": "Large Compact Pb-Acid Cap Battery",
"F-23 Reciprocal Remote Sensor Booster": "F-23 Compact Remote Sensor Booster",
"Coadjunct Linked Remote Sensor Booster": "Coadjunct Scoped Remote Sensor Booster",
"Linked Remote Sensor Booster": "Linked Enduring Sensor Booster",
"Low Frequency Sensor Suppressor I": "LFT Enduring Sensor Dampener",
"Kapteyn Sensor Array Inhibitor I": "Kapteyn Compact Sensor Dampener",
"Phased Muon Sensor Disruptor I": "Phased Muon Scoped Sensor Dampener",
"F-293 Nutation Remote Tracking Computer": "F-293 Scoped Remote Tracking Computer",
"Phase Switching Remote Tracking Computer": "P-S Compact Remote Tracking Computer",
"Alfven Surface Remote Tracking Computer": "Alfven Enduring Remote Tracking Computer",
"'Deluge' ECM Burst I": "Deluge Enduring Burst Jammer",
"'Rash' ECM Emission I": "Rash Compact Burst Jammer",
"'Cetus' ECM Shockwave I": "Cetus Scoped Burst Jammer",
"J5 Prototype Warp Disruptor I": "J5 Enduring Warp Disruptor",
"Faint Warp Disruptor I": "Faint Scoped Warp Disruptor",
"Initiated Warp Disruptor I": "Initiated Compact Warp Disruptor",
"J5b Phased Prototype Warp Scrambler I": "J5b Enduring Warp Scrambler",
"Faint Epsilon Warp Scrambler I": "Faint Epsilon Scoped Warp Scrambler",
"Initiated Harmonic Warp Scrambler I": "Initiated Compact Warp Scrambler",
"Internal Force Field Array I": "IFFA Compact Damage Control",
"Extruded Heat Sink I": "Extruded Compact Heat Sink",
"Counterbalanced Weapon Mounts I": "Counterbalanced Compact Gyrostabilizer",
"Medium Ld-Acid Capacitor Battery I": "Medium Compact Pb-Acid Cap Battery",
"Alumel-Wired Sensor Augmentation": "Alumel-Wired Enduring Sensor Booster",
"F-90 Positional Sensor Subroutines": "F-90 Compact Sensor Booster",
"Optical Tracking Computer I": "Optical Compact Tracking Computer",
"F-12 Nonlinear Tracking Processor": "F-12 Enduring Tracking Computer",
"F-89 Synchronized Signal Amplifier": "F-89 Compact Signal Amplifier",
"Fourier Transform Tracking Program": "Fourier Compact Tracking Enhancer",
"Initiated Multispectral ECM I": "Initiated Enduring Multispectral ECM",
"Basic Magnetic Field Stabilizer": "'Basic' Magnetic Field Stabilizer",
"Magnetic Vortex Stabilizer I": "Vortex Compact Magnetic Field Stabilizer",
"Mizuro's Modified Warp Disruptor": "Mizuro's Modified Heavy Warp Disruptor",
"Hakim's Modified Warp Disruptor": "Hakim's Modified Heavy Warp Disruptor",
"Gotan's Modified Warp Disruptor": "Gotan's Modified Heavy Warp Disruptor",
"Tobias' Modified Warp Disruptor": "Tobias' Modified Heavy Warp Disruptor",
"Mizuro's Modified Warp Scrambler": "Mizuro's Modified Heavy Warp Scrambler",
"Hakim's Modified Warp Scrambler": "Hakim's Modified Heavy Warp Scrambler",
"Gotan's Modified Warp Scrambler": "Gotan's Modified Heavy Warp Scrambler",
"Tobias' Modified Warp Scrambler": "Tobias' Modified Heavy Warp Scrambler",
"Cross-linked Bolt Array I": "Crosslink Compact Ballistic Control System",
"Citadel Torpedo Battery": "XL Torpedo Battery",
"Mjolnir Citadel Torpedo": "Mjolnir XL Torpedo",
"Scourge Citadel Torpedo": "Scourge XL Torpedo",
"Inferno Citadel Torpedo": "Inferno XL Torpedo",
"Nova Citadel Torpedo": "Nova XL Torpedo",
"Peripheral Weapon Navigation Diameter": "Peripheral Compact Target Painter",
"Parallel Weapon Navigation Transmitter": "Parallel Enduring Target Painter",
"Phased Weapon Navigation Array Generation Extron": "Phased Scoped Target Painter",
"Induced Ion Field ECM I": "Morpheus Enduring Magnetometric ECM",
"Compulsive Ion Field ECM I": "Aergia Compact Magnetometric ECM",
"'Hypnos' Ion Field ECM I": "Hypnos Scoped Magnetometric ECM",
"Induced Multispectral ECM I": "Induced Compact Multispectral ECM",
"Compulsive Multispectral ECM I": "Compulsive Scoped Multispectral ECM",
"Languid Phase Inversion ECM I": "Languid Enduring Ladar ECM",
"Halting Phase Inversion ECM I": "Halting Compact Ladar ECM",
"Enfeebling Phase Inversion ECM I": "Enfeebling Scoped Ladar ECM",
"FZ-3a Disruptive Spatial Destabilizer ECM": "FZ-3a Enduring Gravimetric ECM",
"CZ-4 Concussive Spatial Destabilizer ECM": "CZ-4 Compact Gravimetric ECM",
"BZ-5 Neutralizing Spatial Destabilizer ECM": "BZ-5 Scoped Gravimetric ECM",
"'Gloom' White Noise ECM": "Gloom Enduring Radar ECM",
"'Shade' White Noise ECM": "Shade Compact Radar ECM",
"'Umbra' White Noise ECM": "Umbra Scoped Radar ECM",
"Dread Guristas ECM Multispectral Jammer": "Dread Guristas Multispectral ECM",
"Kaikka's Modified ECM Multispectral Jammer": "Kaikka's Modified Multispectral ECM",
"Thon's Modified ECM Multispectral Jammer": "Thon's Modified Multispectral ECM",
"Vepas' Modified ECM Multispectral Jammer": "Vepas' Modified Multispectral ECM",
"Estamel's Modified ECM Multispectral Jammer": "Estamel's Modified Multispectral ECM",
"Citadel Torpedo Launcher I": "XL Torpedo Launcher I",
"'Marshall' Ion Field Projector": "'Marshall' Magnetometric ECM",
"'Gambler' Phase Inverter": "'Gambler' Ladar ECM",
"'Plunderer' Spatial Destabilizer": "'Plunderer' Gravimetric ECM",
"'Heist' White Noise Generator": "'Heist' Radar ECM",
"'Ghost' ECM Burst": "'Ghost' Burst Jammer",
"'Full Duplex' Ballistic Targeting System": "'Full Duplex' Ballistic Control System",
"'Kindred' Stabilization Actuator I": "'Kindred' Gyrostabilizer",
"Process-Interruptive Warp Disruptor": "'Interruptive' Warp Disruptor",
"'Inception' Target Painter I": "'Inception' Target Painter",
"Citadel Torpedoes": "XL Torpedoes",
"'Shady' ECCM - Gravimetric I": "'Shady' Sensor Booster",
"'Monopoly' Magnetic Field Stabilizer I": "'Monopoly' Magnetic Field Stabilizer",
"'Bootleg' ECCM Projector I": "'Bootleg' Remote Sensor Booster",
"'Marketeer' Tracking Computer I": "'Marketeer' Tracking Computer",
"'Executive' Remote Sensor Dampener I": "'Executive' Remote Sensor Dampener",
"'Radical' Damage Control I": "'Radical' Damage Control",
"'Crucible' Small Capacitor Battery I": "'Crucible' Small Cap Battery",
"'Censer' Medium Capacitor Battery I": "'Censer' Medium Cap Battery",
"'Thurifer' Large Capacitor Battery I": "'Thurifer' Large Cap Battery",
"Guristas Citadel Torpedo Battery": "Guristas XL Torpedo Battery",
"Dread Guristas Citadel Torpedo Battery": "Dread Guristas XL Torpedo Battery",
"Legion ECM Ion Field Projector": "Legion Magnetometric ECM",
"Legion ECM Multispectral Jammer": "Legion Multispectral ECM",
"Legion ECM Phase Inverter": "Legion Ladar ECM",
"Legion ECM Spatial Destabilizer": "Legion Gravimetric ECM",
"Legion ECM White Noise Generator": "Legion Radar ECM",
"Guristas Nova Citadel Torpedo": "Guristas Nova XL Torpedo",
"Guristas Inferno Citadel Torpedo": "Guristas Inferno XL Torpedo",
"Guristas Scourge Citadel Torpedo": "Guristas Scourge XL Torpedo",
"Guristas Mjolnir Citadel Torpedo": "Guristas Mjolnir XL Torpedo",
"Citadel Cruise Missiles": "XL Cruise Missiles",
"Scourge Citadel Cruise Missile": "Scourge XL Cruise Missile",
"Nova Citadel Cruise Missile": "Nova XL Cruise Missile",
"Inferno Citadel Cruise Missile": "Inferno XL Cruise Missile",
"Mjolnir Citadel Cruise Missile": "Mjolnir XL Cruise Missile",
"Citadel Cruise Launcher I": "XL Cruise Launcher I",
# Converted items
"Muon Coil Bolt Array I": "Crosslink Compact Ballistic Control System",
"Multiphasic Bolt Array I": "Crosslink Compact Ballistic Control System",
"'Pandemonium' Ballistic Enhancement": "Crosslink Compact Ballistic Control System",
"Piercing ECCM Emitter I": "Coadjunct Scoped Remote Sensor Booster",
"1Z-3 Subversive ECM Eruption": "Cetus Scoped Burst Jammer",
"ECCM - Radar I": "Sensor Booster I",
"ECCM - Ladar I": "Sensor Booster I",
"ECCM - Magnetometric I": "Sensor Booster I",
"ECCM - Gravimetric I": "Sensor Booster I",
"ECCM - Omni I": "Sensor Booster I",
"Beta-Nought Tracking Mode": "'Basic' Tracking Enhancer",
"Azimuth Descalloping Tracking Enhancer": "'Basic' Tracking Enhancer",
"F-AQ Delay-Line Scan Tracking Subroutines": "'Basic' Tracking Enhancer",
"Beam Parallax Tracking Program": "'Basic' Tracking Enhancer",
"GLFF Containment Field": "'Basic' Damage Control",
"Interior Force Field Array": "'Basic' Damage Control",
"F84 Local Damage System": "'Basic' Damage Control",
"Systematic Damage Control": "'Basic' Damage Control",
"'Boss' Remote Sensor Booster": "'Bootleg' Remote Sensor Booster",
"'Entrepreneur' Remote Sensor Booster": "'Bootleg' Remote Sensor Booster",
"Fleeting Progressive Warp Scrambler I": "Faint Epsilon Scoped Warp Scrambler",
"ECCM Projector I": "Remote Sensor Booster I",
"Sigma-Nought Tracking Mode I": "Fourier Compact Tracking Enhancer",
"Auto-Gain Control Tracking Enhancer I": "Fourier Compact Tracking Enhancer",
"F-aQ Phase Code Tracking Subroutines": "Fourier Compact Tracking Enhancer",
"Monophonic Stabilization Actuator I": "'Kindred' Gyrostabilizer",
"Initiated Ion Field ECM I": "Hypnos Scoped Magnetometric ECM",
"Insulated Stabilizer Array": "'Basic' Magnetic Field Stabilizer",
"Linear Flux Stabilizer": "'Basic' Magnetic Field Stabilizer",
"Gauss Field Balancer": "'Basic' Magnetic Field Stabilizer",
"Magnetic Vortex Stabilizer": "'Basic' Magnetic Field Stabilizer",
"'Capitalist' Magnetic Field Stabilizer I": "'Monopoly' Magnetic Field Stabilizer",
"Emergency Damage Control I": "IFFA Compact Damage Control",
"F85 Peripheral Damage System I": "IFFA Compact Damage Control",
"Pseudoelectron Containment Field I": "IFFA Compact Damage Control",
"Micro Ld-Acid Capacitor Battery I": "'Micro' Cap Battery",
"Micro Ohm Capacitor Reserve I": "'Micro' Cap Battery",
"Micro F-4a Ld-Sulfate Capacitor Charge Unit": "'Micro' Cap Battery",
"Micro Peroxide Capacitor Power Cell": "'Micro' Cap Battery",
"Micro Capacitor Battery II": "'Micro' Cap Battery",
"Lateral Gyrostabilizer": "'Basic' Gyrostabilizer",
"F-M2 Weapon Inertial Suspensor": "'Basic' Gyrostabilizer",
"Hydraulic Stabilization Actuator": "'Basic' Gyrostabilizer",
"Stabilized Weapon Mounts": "'Basic' Gyrostabilizer",
"'Hypnos' Multispectral ECM I": "Compulsive Scoped Multispectral ECM",
"Fleeting Warp Disruptor I": "Faint Scoped Warp Disruptor",
"'Mangonel' Heat Sink I": "'Trebuchet' Heat Sink I",
"Heat Exhaust System": "'Basic' Heat Sink",
"C3S Convection Thermal Radiator": "'Basic' Heat Sink",
"'Boreas' Coolant System": "'Basic' Heat Sink",
"Stamped Heat Sink": "'Basic' Heat Sink",
"Extra Radar ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Extra Ladar ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Extra Gravimetric ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Extra Magnetometric ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Gravimetric Positional ECCM Sensor System I": "F-90 Compact Sensor Booster",
"Radar Positional ECCM Sensor System I": "F-90 Compact Sensor Booster",
"Omni Positional ECCM Sensor System I": "F-90 Compact Sensor Booster",
"Ladar Positional ECCM Sensor System I": "F-90 Compact Sensor Booster",
"Magnetometric Positional ECCM Sensor System I": "F-90 Compact Sensor Booster",
"Conjunctive Radar ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Conjunctive Ladar ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Conjunctive Gravimetric ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Conjunctive Magnetometric ECCM Scanning Array I": "F-90 Compact Sensor Booster",
"Supplemental Scanning CPU I": "F-90 Compact Sensor Booster",
"'Gonzo' Damage Control I": "'Radical' Damage Control",
"'Penumbra' White Noise ECM": "Umbra Scoped Radar ECM",
"ECCM - Omni II": "Sensor Booster II",
"ECCM - Gravimetric II": "Sensor Booster II",
"ECCM - Ladar II": "Sensor Booster II",
"ECCM - Magnetometric II": "Sensor Booster II",
"ECCM - Radar II": "Sensor Booster II",
"Scattering ECCM Projector I": "Linked Enduring Sensor Booster",
"Gravimetric Backup Array I": "Signal Amplifier I",
"Ladar Backup Array I": "Signal Amplifier I",
"Magnetometric Backup Array I": "Signal Amplifier I",
"Multi Sensor Backup Array I": "Signal Amplifier I",
"RADAR Backup Array I": "Signal Amplifier I",
"Large Ohm Capacitor Reserve I": "Large Compact Pb-Acid Cap Battery",
"Large F-4a Ld-Sulfate Capacitor Charge Unit": "Large Compact Pb-Acid Cap Battery",
"Large Peroxide Capacitor Power Cell": "Large Compact Pb-Acid Cap Battery",
"ECCM Projector II": "Remote Sensor Booster II",
"Cross-Lateral Gyrostabilizer I": "Counterbalanced Compact Gyrostabilizer",
"F-M3 Munition Inertial Suspensor": "Counterbalanced Compact Gyrostabilizer",
"Pneumatic Stabilization Actuator I": "Counterbalanced Compact Gyrostabilizer",
"'Langour' Drive Disruptor I": "X5 Enduring Stasis Webifier",
"Patterned Stasis Web I": "Fleeting Compact Stasis Webifier",
"'Tycoon' Remote Tracking Computer": "'Enterprise' Remote Tracking Computer",
"'Economist' Tracking Computer I": "'Marketeer' Tracking Computer",
"Gravimetric Backup Array II": "Signal Amplifier II",
"Ladar Backup Array II": "Signal Amplifier II",
"Magnetometric Backup Array II": "Signal Amplifier II",
"Multi Sensor Backup Array II": "Signal Amplifier II",
"RADAR Backup Array II": "Signal Amplifier II",
"Faint Phase Inversion ECM I": "Enfeebling Scoped Ladar ECM",
"'Prayer' Remote Tracking Computer": "P-S Compact Remote Tracking Computer",
"Partial Weapon Navigation": "Phased Scoped Target Painter",
"Basic RADAR Backup Array": "'Basic' Signal Amplifier",
"Basic Ladar Backup Array": "'Basic' Signal Amplifier",
"Basic Gravimetric Backup Array": "'Basic' Signal Amplifier",
"Basic Magnetometric Backup Array": "'Basic' Signal Amplifier",
"Basic Multi Sensor Backup Array": "'Basic' Signal Amplifier",
"Emergency Magnetometric Scanners": "'Basic' Signal Amplifier",
"Emergency Multi-Frequency Scanners": "'Basic' Signal Amplifier",
"Emergency RADAR Scanners": "'Basic' Signal Amplifier",
"Emergency Ladar Scanners": "'Basic' Signal Amplifier",
"Emergency Gravimetric Scanners": "'Basic' Signal Amplifier",
"Sealed RADAR Backup Cluster": "'Basic' Signal Amplifier",
"Sealed Magnetometric Backup Cluster": "'Basic' Signal Amplifier",
"Sealed Multi-Frequency Backup Cluster": "'Basic' Signal Amplifier",
"Sealed Ladar Backup Cluster": "'Basic' Signal Amplifier",
"Sealed Gravimetric Backup Cluster": "'Basic' Signal Amplifier",
"Surplus RADAR Reserve Array": "'Basic' Signal Amplifier",
"F-42 Reiterative RADAR Backup Sensors": "'Basic' Signal Amplifier",
"Surplus Magnetometric Reserve Array": "'Basic' Signal Amplifier",
"F-42 Reiterative Magnetometric Backup Sensors": "'Basic' Signal Amplifier",
"Surplus Multi-Frequency Reserve Array": "'Basic' Signal Amplifier",
"F-42 Reiterative Multi-Frequency Backup Sensors": "'Basic' Signal Amplifier",
"Surplus Ladar Reserve Array": "'Basic' Signal Amplifier",
"F-42 Reiterative Ladar Backup Sensors": "'Basic' Signal Amplifier",
"Surplus Gravimetric Reserve Array": "'Basic' Signal Amplifier",
"F-42 Reiterative Gravimetric Backup Sensors": "'Basic' Signal Amplifier",
"Amplitude Signal Enhancer": "'Basic' Signal Amplifier",
"'Acolyth' Signal Booster": "'Basic' Signal Amplifier",
"Type-E Discriminative Signal Augmentation": "'Basic' Signal Amplifier",
"F-90 Positional Signal Amplifier": "'Basic' Signal Amplifier",
"Gravimetric Firewall": "'Firewall' Signal Amplifier",
"Ladar Firewall": "'Firewall' Signal Amplifier",
"Magnetometric Firewall": "'Firewall' Signal Amplifier",
"Multi Sensor Firewall": "'Firewall' Signal Amplifier",
"RADAR Firewall": "'Firewall' Signal Amplifier",
"'Pacifier' Large Remote Armor Repairer": "'Peace' Large Remote Armor Repairer",
"Monopulse Tracking Mechanism I": "F-12 Enduring Tracking Computer",
"Alumel Radar ECCM Sensor Array I": "Alumel-Wired Enduring Sensor Booster",
"Alumel Ladar ECCM Sensor Array I": "Alumel-Wired Enduring Sensor Booster",
"Alumel Gravimetric ECCM Sensor Array I": "Alumel-Wired Enduring Sensor Booster",
"Alumel Omni ECCM Sensor Array I": "Alumel-Wired Enduring Sensor Booster",
"Alumel Magnetometric ECCM Sensor Array I": "Alumel-Wired Enduring Sensor Booster",
"Supplemental Ladar ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Supplemental Gravimetric ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Supplemental Omni ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Supplemental Radar ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Supplemental Magnetometric ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Incremental Radar ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Incremental Ladar ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Incremental Gravimetric ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Incremental Magnetometric ECCM Scanning Array I": "Alumel-Wired Enduring Sensor Booster",
"Prototype ECCM Radar Sensor Cluster": "Alumel-Wired Enduring Sensor Booster",
"Prototype ECCM Ladar Sensor Cluster": "Alumel-Wired Enduring Sensor Booster",
"Prototype ECCM Gravimetric Sensor Cluster": "Alumel-Wired Enduring Sensor Booster",
"Prototype ECCM Omni Sensor Cluster": "Alumel-Wired Enduring Sensor Booster",
"Prototype ECCM Magnetometric Sensor Cluster": "Alumel-Wired Enduring Sensor Booster",
"Prototype Sensor Booster": "Alumel-Wired Enduring Sensor Booster",
"Thermal Exhaust System I": "Extruded Compact Heat Sink",
"C4S Coiled Circuit Thermal Radiator": "Extruded Compact Heat Sink",
"'Skadi' Coolant System I": "Extruded Compact Heat Sink",
"'Forger' ECCM - Magnetometric I": "'Shady' Sensor Booster",
"Insulated Stabilizer Array I": "Vortex Compact Magnetic Field Stabilizer",
"Linear Flux Stabilizer I": "Vortex Compact Magnetic Field Stabilizer",
"Gauss Field Balancer I": "Vortex Compact Magnetic Field Stabilizer",
"'Broker' Remote Sensor Dampener I": "'Executive' Remote Sensor Dampener",
"'Orion' Tracking CPU I": "Optical Compact Tracking Computer",
"Spot Pulsing ECCM I": "F-23 Compact Remote Sensor Booster",
"Phased Muon ECCM Caster I": "F-23 Compact Remote Sensor Booster",
"Connected Remote Sensor Booster": "F-23 Compact Remote Sensor Booster",
"Small Ohm Capacitor Reserve I": "Small Compact Pb-Acid Cap Battery",
"Small F-4a Ld-Sulfate Capacitor Charge Unit": "Small Compact Pb-Acid Cap Battery",
"Small Peroxide Capacitor Power Cell": "Small Compact Pb-Acid Cap Battery",
"FZ-3 Subversive Spatial Destabilizer ECM": "BZ-5 Scoped Gravimetric ECM",
"Medium Ohm Capacitor Reserve I": "Medium Compact Pb-Acid Cap Battery",
"Medium F-4a Ld-Sulfate Capacitor Charge Unit": "Medium Compact Pb-Acid Cap Battery",
"Medium Peroxide Capacitor Power Cell": "Medium Compact Pb-Acid Cap Battery",
"Ballistic 'Purge' Targeting System I": "'Full Duplex' Ballistic Control System",
"Protected Gravimetric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Protected Ladar Backup Cluster I": "F-89 Compact Signal Amplifier",
"Protected Magnetometric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Protected Multi-Frequency Backup Cluster I": "F-89 Compact Signal Amplifier",
"Protected RADAR Backup Cluster I": "F-89 Compact Signal Amplifier",
"Reserve Gravimetric Scanners": "F-89 Compact Signal Amplifier",
"Reserve Ladar Scanners": "F-89 Compact Signal Amplifier",
"Reserve Magnetometric Scanners": "F-89 Compact Signal Amplifier",
"Reserve Multi-Frequency Scanners": "F-89 Compact Signal Amplifier",
"Reserve RADAR Scanners": "F-89 Compact Signal Amplifier",
"Secure Gravimetric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Secure Ladar Backup Cluster I": "F-89 Compact Signal Amplifier",
"Secure Magnetometric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Secure Radar Backup Cluster I": "F-89 Compact Signal Amplifier",
"F-43 Repetitive Gravimetric Backup Sensors": "F-89 Compact Signal Amplifier",
"F-43 Repetitive Ladar Backup Sensors": "F-89 Compact Signal Amplifier",
"F-43 Repetitive Magnetometric Backup Sensors": "F-89 Compact Signal Amplifier",
"F-43 Repetitive Multi-Frequency Backup Sensors": "F-89 Compact Signal Amplifier",
"F-43 Repetitive RADAR Backup Sensors": "F-89 Compact Signal Amplifier",
"Shielded Gravimetric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Shielded Ladar Backup Cluster I": "F-89 Compact Signal Amplifier",
"Shielded Magnetometric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Shielded Radar Backup Cluster I": "F-89 Compact Signal Amplifier",
"Surrogate Gravimetric Reserve Array I": "F-89 Compact Signal Amplifier",
"Surrogate Ladar Reserve Array I": "F-89 Compact Signal Amplifier",
"Surrogate Magnetometric Reserve Array I": "F-89 Compact Signal Amplifier",
"Surrogate Multi-Frequency Reserve Array I": "F-89 Compact Signal Amplifier",
"Surrogate RADAR Reserve Array I": "F-89 Compact Signal Amplifier",
"Warded Gravimetric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Warded Ladar Backup Cluster I": "F-89 Compact Signal Amplifier",
"Warded Magnetometric Backup Cluster I": "F-89 Compact Signal Amplifier",
"Warded Radar Backup Cluster I": "F-89 Compact Signal Amplifier",
"'Mendicant' Signal Booster I": "F-89 Compact Signal Amplifier",
"Wavelength Signal Enhancer I": "F-89 Compact Signal Amplifier",
"Type-D Attenuation Signal Augmentation": "F-89 Compact Signal Amplifier",
"Indirect Scanning Dampening Unit I": "Phased Muon Scoped Sensor Dampener",
}
|
api | notebook | from typing import Any, Dict, List, Optional
import structlog
from django.db import transaction
from django.db.models import Q, QuerySet
from django.utils.timezone import now
from django_filters.rest_framework import DjangoFilterBackend
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import (
OpenApiExample,
OpenApiParameter,
extend_schema,
extend_schema_view,
)
from posthog.api.forbid_destroy_model import ForbidDestroyModel
from posthog.api.routing import StructuredViewSetMixin
from posthog.api.shared import UserBasicSerializer
from posthog.exceptions import Conflict
from posthog.models import User
from posthog.models.activity_logging.activity_log import (
Change,
Detail,
changes_between,
load_activity,
log_activity,
)
from posthog.models.activity_logging.activity_page import activity_page_response
from posthog.models.notebook.notebook import Notebook
from posthog.models.utils import UUIDT
from posthog.permissions import (
ProjectMembershipNecessaryPermissions,
TeamMemberAccessPermission,
)
from posthog.settings import DEBUG
from posthog.utils import relative_date_parse
from rest_framework import request, serializers, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
logger = structlog.get_logger(__name__)
def depluralize(string: str | None) -> str | None:
if not string:
return None
if string.endswith("ies"):
return string[:-3] + "y"
elif string.endswith("s"):
return string[:-1]
else:
return string
def log_notebook_activity(
    activity: str,
    notebook_id: str,
    notebook_short_id: str,
    notebook_name: str,
    organization_id: UUIDT,
    team_id: int,
    user: User,
    changes: Optional[List[Change]] = None,
) -> None:
    """Record an entry in the activity log for a Notebook.

    Thin wrapper around log_activity() that fixes the scope to
    "Notebook" and packs the short id, name, and change list into a
    Detail payload.
    """
    detail = Detail(
        changes=changes,
        short_id=notebook_short_id,
        name=notebook_name,
    )
    log_activity(
        organization_id=organization_id,
        team_id=team_id,
        user=user,
        item_id=notebook_id,
        scope="Notebook",
        activity=activity,
        detail=detail,
    )
class NotebookSerializer(serializers.ModelSerializer):
    """Serializer for Notebooks.

    Logs "created"/"updated" activity entries and enforces optimistic
    concurrency on content changes via the `version` field.
    """

    class Meta:
        model = Notebook
        fields = [
            "id",
            "short_id",
            "title",
            "content",
            "text_content",
            "version",
            "deleted",
            "created_at",
            "created_by",
            "last_modified_at",
            "last_modified_by",
        ]
        read_only_fields = [
            "id",
            "short_id",
            "created_at",
            "created_by",
            "last_modified_at",
            "last_modified_by",
        ]

    created_by = UserBasicSerializer(read_only=True)
    last_modified_by = UserBasicSerializer(read_only=True)

    def create(self, validated_data: Dict, *args, **kwargs) -> Notebook:
        """Create a notebook for the request's team and log the creation."""
        request = self.context["request"]
        team = self.context["get_team"]()

        created_by = validated_data.pop("created_by", request.user)
        notebook = Notebook.objects.create(
            team=team,
            created_by=created_by,
            last_modified_by=request.user,
            **validated_data,
        )

        log_notebook_activity(
            activity="created",
            # Stringified for consistency with update() and with the
            # `notebook_id: str` parameter of log_notebook_activity.
            notebook_id=str(notebook.id),
            notebook_short_id=str(notebook.short_id),
            notebook_name=notebook.title,
            organization_id=request.user.current_organization_id,
            team_id=team.id,
            user=request.user,
        )

        return notebook

    def update(self, instance: Notebook, validated_data: Dict, **kwargs) -> Notebook:
        """Update a notebook under a row lock and log the change set.

        Raises Conflict when a content update carries a stale `version`
        (someone else saved in the meantime).
        """
        # Snapshot the pre-update state so changes_between can diff it.
        try:
            before_update = Notebook.objects.get(pk=instance.id)
        except Notebook.DoesNotExist:
            before_update = None

        with transaction.atomic():
            # select_for_update locks the database row so we ensure version updates are atomic
            locked_instance = Notebook.objects.select_for_update().get(pk=instance.pk)

            if validated_data.keys():
                locked_instance.last_modified_at = now()
                locked_instance.last_modified_by = self.context["request"].user

                if validated_data.get("content"):
                    # Optimistic concurrency: the client must echo back the
                    # version it loaded; a mismatch means a concurrent edit.
                    if validated_data.get("version") != locked_instance.version:
                        raise Conflict("Someone else edited the Notebook")
                    validated_data["version"] = locked_instance.version + 1

            updated_notebook = super().update(locked_instance, validated_data)

        changes = changes_between(
            "Notebook", previous=before_update, current=updated_notebook
        )

        log_notebook_activity(
            activity="updated",
            notebook_id=str(updated_notebook.id),
            notebook_short_id=str(updated_notebook.short_id),
            notebook_name=updated_notebook.title,
            organization_id=self.context["request"].user.current_organization_id,
            team_id=self.context["team_id"],
            user=self.context["request"].user,
            changes=changes,
        )

        return updated_notebook
@extend_schema(
    description="The API for interacting with Notebooks. This feature is in early access and the API can have "
    "breaking changes without announcement.",
)
@extend_schema_view(
    list=extend_schema(
        parameters=[
            # short_id is already handled via filterset_fields; hide it from the schema
            OpenApiParameter("short_id", exclude=True),
            OpenApiParameter(
                "created_by",
                # NOTE(review): declared as INT but _filter_request filters on
                # created_by__uuid and the description says UUID - confirm type
                OpenApiTypes.INT,
                description="The UUID of the Notebook's creator",
                required=False,
            ),
            OpenApiParameter(
                "user",
                description="If any value is provided for this parameter, return notebooks created by the logged in user.",
                required=False,
            ),
            OpenApiParameter(
                "date_from",
                OpenApiTypes.DATETIME,
                description="Filter for notebooks created after this date & time",
                required=False,
            ),
            OpenApiParameter(
                "date_to",
                OpenApiTypes.DATETIME,
                description="Filter for notebooks created before this date & time",
                required=False,
            ),
            OpenApiParameter(
                "contains",
                description="""Filter for notebooks that match a provided filter.
                Each match pair is separated by a colon,
                multiple match pairs can be sent separated by a space or a comma""",
                examples=[
                    OpenApiExample(
                        "Filter for notebooks that have any recording",
                        value="recording:true",
                    ),
                    OpenApiExample(
                        "Filter for notebooks that do not have any recording",
                        value="recording:false",
                    ),
                    OpenApiExample(
                        "Filter for notebooks that have a specific recording",
                        value="recording:the-session-recording-id",
                    ),
                ],
                required=False,
            ),
        ],
    )
)
class NotebookViewSet(
    StructuredViewSetMixin, ForbidDestroyModel, viewsets.ModelViewSet
):
    """CRUD endpoints for Notebooks, scoped to a team.

    Destroy is forbidden (ForbidDestroyModel); deletion is a soft delete via a
    PATCH setting `deleted`, and soft-deleted notebooks can be restored the
    same way (see get_queryset).
    """

    queryset = Notebook.objects.all()
    serializer_class = NotebookSerializer
    permission_classes = [
        IsAuthenticated,
        ProjectMembershipNecessaryPermissions,
        TeamMemberAccessPermission,
    ]
    filter_backends = [DjangoFilterBackend]
    filterset_fields = ["short_id"]
    # TODO: Remove this once we have released notebooks
    include_in_docs = DEBUG
    # Notebooks are addressed by their short_id rather than the numeric pk
    lookup_field = "short_id"
    def get_queryset(self) -> QuerySet:
        """Return team-scoped notebooks, hiding soft-deleted ones except for updates.

        List requests additionally apply the query-string filters from
        _filter_request and an `order` parameter (default: newest modification
        first).
        """
        queryset = super().get_queryset()
        if not self.action.endswith("update"):
            # Soft-deleted notebooks can be brought back with a PATCH request
            queryset = queryset.filter(deleted=False)
        queryset = queryset.select_related("created_by", "last_modified_by", "team")
        if self.action == "list":
            queryset = queryset.filter(deleted=False)
            queryset = self._filter_request(self.request, queryset)
        order = self.request.GET.get("order", None)
        if order:
            queryset = queryset.order_by(order)
        else:
            queryset = queryset.order_by("-last_modified_at")
        return queryset
    def _filter_request(self, request: request.Request, queryset: QuerySet) -> QuerySet:
        """Apply the list-endpoint query-string filters to `queryset`.

        Recognised keys: user, created_by, date_from, date_to, search,
        contains.  Unknown keys are silently ignored.
        """
        filters = request.GET.dict()
        for key in filters:
            if key == "user":
                # Any value at all means "only my notebooks"
                queryset = queryset.filter(created_by=request.user)
            elif key == "created_by":
                queryset = queryset.filter(created_by__uuid=request.GET["created_by"])
            elif key == "date_from":
                # Dates filter on last_modified_at, and accept relative forms
                # via relative_date_parse (e.g. "-7d")
                queryset = queryset.filter(
                    last_modified_at__gt=relative_date_parse(
                        request.GET["date_from"], self.team.timezone_info
                    )
                )
            elif key == "date_to":
                queryset = queryset.filter(
                    last_modified_at__lt=relative_date_parse(
                        request.GET["date_to"], self.team.timezone_info
                    )
                )
            elif key == "search":
                queryset = queryset.filter(
                    # some notebooks have no text_content until next saved, so we need to check the title too
                    # TODO this can be removed once all/most notebooks have text_content
                    Q(title__search=request.GET["search"])
                    | Q(text_content__search=request.GET["search"])
                )
            elif key == "contains":
                # "contains" is a mini-language: "target:match" pairs separated
                # by spaces or commas, e.g. "recording:true recording:some-id"
                contains = request.GET["contains"]
                match_pairs = contains.replace(",", " ").split(" ")
                # content is a JSONB field that has an array of objects under the key "content"
                # each of those (should) have a "type" field
                # and for recordings that type is "ph-recording"
                # each of those objects can have attrs which is a dict with id for the recording
                for match_pair in match_pairs:
                    splat = match_pair.split(":")
                    target = depluralize(splat[0])
                    match = splat[1] if len(splat) > 1 else None
                    if target:
                        # the JSONB query requires a specific structure
                        basic_structure = List[Dict[str, Any]]
                        nested_structure = (
                            basic_structure | List[Dict[str, basic_structure]]
                        )
                        presence_match_structure: basic_structure | nested_structure = [
                            {"type": f"ph-{target}"}
                        ]
                        id_match_structure: basic_structure | nested_structure = [
                            {"attrs": {"id": match}}
                        ]
                        if target == "replay-timestamp":
                            # replay timestamps are not at the top level, they're one-level down in a content array
                            presence_match_structure = [
                                {"content": [{"type": f"ph-{target}"}]}
                            ]
                            id_match_structure = [
                                {"content": [{"attrs": {"sessionRecordingId": match}}]}
                            ]
                        if match == "true" or match is None:
                            queryset = queryset.filter(
                                content__content__contains=presence_match_structure
                            )
                        elif match == "false":
                            queryset = queryset.exclude(
                                content__content__contains=presence_match_structure
                            )
                        else:
                            # A concrete id: require both the node type and the id
                            queryset = queryset.filter(
                                content__content__contains=presence_match_structure
                            )
                            queryset = queryset.filter(
                                content__content__contains=id_match_structure
                            )
        return queryset
    @action(methods=["GET"], url_path="activity", detail=False)
    def all_activity(self, request: request.Request, **kwargs):
        """Return a paginated activity log for all notebooks in the team."""
        limit = int(request.query_params.get("limit", "10"))
        page = int(request.query_params.get("page", "1"))
        activity_page = load_activity(
            scope="Notebook", team_id=self.team_id, limit=limit, page=page
        )
        return activity_page_response(activity_page, limit, page, request)
    @action(methods=["GET"], url_path="activity", detail=True)
    def activity(self, request: request.Request, **kwargs):
        """Return a paginated activity log for a single notebook."""
        notebook = self.get_object()
        limit = int(request.query_params.get("limit", "10"))
        page = int(request.query_params.get("page", "1"))
        activity_page = load_activity(
            scope="Notebook",
            team_id=self.team_id,
            item_id=notebook.id,
            limit=limit,
            page=page,
        )
        return activity_page_response(activity_page, limit, page, request)
|
Code | GestorOpeningLines | import random
import time
from Code import (
Books,
ControlPosicion,
Gestor,
Jugada,
OpeningLines,
Partida,
TrListas,
Util,
XMotorRespuesta,
)
from Code.Constantes import *
from Code.QT import Iconos, QTUtil2, QTVarios
class GestorOpeningEngines(Gestor.Gestor):
    """Controller for the "play opening lines against engines" training mode.

    The user plays their side of a stored opening repertoire against a ladder
    of engines (levels = thinking times, engines per level).  A judge engine
    periodically evaluates the position; losing more than a configured number
    of points fails the attempt.
    """
    def inicio(self, pathFichero):
        """Entry point: open the opening-lines database and start a session."""
        self.tablero.saveVisual()
        self.pathFichero = pathFichero
        dbop = OpeningLines.Opening(pathFichero)
        self.tablero.dbVisual_setFichero(dbop.nomFichero)
        self.reinicio(dbop)
    def reinicio(self, dbop):
        """(Re)initialise all session state from the opening database `dbop`.

        Reads the current level/engine progress, advances to the next level
        when all engines at the current one are done, creates the rival and
        judge engines, and resets the board/UI for a fresh game.
        """
        self.dbop = dbop
        self.dbop.open_cache_engines()
        self.tipoJuego = kJugOpeningLines
        # Persistent progress: current level (time index) and engine index
        self.level = self.dbop.getconfig("ENG_LEVEL", 0)
        self.numengine = self.dbop.getconfig("ENG_ENGINE", 0)
        self.trainingEngines = self.dbop.trainingEngines()
        self.auto_analysis = self.trainingEngines.get("AUTO_ANALYSIS", True)
        self.ask_movesdifferent = self.trainingEngines.get("ASK_MOVESDIFFERENT", False)
        liTimes = self.trainingEngines.get("TIMES")
        if not liTimes:
            liTimes = [500, 1000, 2000, 4000, 8000]
        liBooks = self.trainingEngines.get("BOOKS")
        if not liBooks:
            liBooks = ["", "", "", "", ""]
        liEngines = self.trainingEngines["ENGINES"]
        num_engines_base = len(liEngines)
        liEnginesExt = self.trainingEngines.get("EXT_ENGINES", [])
        num_engines = num_engines_base + len(liEnginesExt)
        # All engines of this level finished: move up one level
        if self.numengine >= num_engines:
            self.level += 1
            self.numengine = 0
            self.dbop.setconfig("ENG_LEVEL", self.level)
            self.dbop.setconfig("ENG_ENGINE", 0)
        num_levels = len(liTimes)
        # Whole ladder finished: offer to restart from scratch
        if self.level >= num_levels:
            if QTUtil2.pregunta(
                self.pantalla,
                "%s.\n%s" % (_("Training finished"), _("Do you want to reinit?")),
            ):
                self.dbop.setconfig("ENG_LEVEL", 0)
                self.dbop.setconfig("ENG_ENGINE", 0)
                self.reinicio(dbop)
            return
        self.time = liTimes[self.level]
        nombook = liBooks[self.level]
        if nombook:
            listaLibros = Books.ListaLibros()
            listaLibros.recuperaVar(self.configuracion.ficheroBooks)
            self.book = listaLibros.buscaLibro(nombook)
            if self.book:
                self.book.polyglot()
        else:
            self.book = None
        if self.numengine < num_engines_base:
            self.keyengine = liEngines[self.numengine]
        else:
            # External engines are flagged with a leading "*".
            # NOTE(review): the "- 1" looks like an off-by-one - the first
            # external engine (numengine == num_engines_base) indexes
            # liEnginesExt[-1], i.e. the LAST external engine; verify.
            self.keyengine = "*" + liEnginesExt[self.numengine - num_engines_base - 1]
        self.plies_mandatory = self.trainingEngines["MANDATORY"]
        self.plies_control = self.trainingEngines["CONTROL"]
        self.plies_pendientes = self.plies_control
        self.lost_points = self.trainingEngines["LOST_POINTS"]
        self.siJugamosConBlancas = self.trainingEngines["COLOR"] == "WHITE"
        self.siRivalConBlancas = not self.siJugamosConBlancas
        self.siAprobado = False
        # Rival engine (plays against the user) and judge engine (evaluates)
        rival = self.configuracion.buscaRivalExt(self.keyengine)
        self.xrival = self.procesador.creaGestorMotor(rival, self.time, None)
        self.xrival.siBlancas = self.siRivalConBlancas
        juez = self.configuracion.buscaRival(self.trainingEngines["ENGINE_CONTROL"])
        self.xjuez = self.procesador.creaGestorMotor(
            juez, int(self.trainingEngines["ENGINE_TIME"] * 1000), None
        )
        self.xjuez.anulaMultiPV()
        self.li_info = [
            "<b>%s</b>: %d/%d - %s"
            % (_("Engine"), self.numengine + 1, num_engines, self.xrival.nombre),
            '<b>%s</b>: %d/%d - %0.1f"'
            % (_("Level"), self.level + 1, num_levels, self.time / 1000.0),
        ]
        self.dicFENm2 = self.trainingEngines["DICFENM2"]
        self.siAyuda = False
        self.tablero.dbVisual_setShowAllways(False)
        self.ayudas = 9999  # so analysis runs without restrictions
        self.partida = Partida.Partida()
        self.pantalla.ponToolBar((k_mainmenu, k_abandonar, k_reiniciar))
        self.pantalla.activaJuego(True, False, siAyudas=False)
        self.ponMensajero(self.mueveHumano)
        self.ponPosicion(self.partida.ultPosicion)
        self.mostrarIndicador(True)
        self.quitaAyudas()
        self.ponPiezasAbajo(self.siJugamosConBlancas)
        self.pgnRefresh(True)
        self.ponCapInfoPorDefecto()
        self.estado = kJugando
        self.ponPosicionDGT()
        self.errores = 0
        self.ini_time = time.time()
        self.muestraInformacion()
        self.siguienteJugada()
    def siguienteJugada(self):
        """Advance the game loop: run the control check, then let either the
        rival engine move (recursing) or enable the board for the human."""
        self.muestraInformacion()
        if self.estado == kFinJuego:
            return
        self.estado = kJugando
        self.siJuegaHumano = False
        self.ponVista()
        siBlancas = self.partida.ultPosicion.siBlancas
        self.ponIndicador(siBlancas)
        self.refresh()
        siRival = siBlancas == self.siRivalConBlancas
        # runcontrol() returns True when it has ended the game (pass/fail)
        if not self.runcontrol():
            if siRival:
                self.desactivaTodas()
                if self.mueveRival():
                    self.siguienteJugada()
            else:
                self.activaColor(siBlancas)
                self.siJuegaHumano = True
    def mueveRival(self):
        """Choose and play the rival's move.

        Inside the mandatory-plies window a repertoire move is forced (book
        move if it matches, else random from the line, cached per engine/time).
        After that the move comes from book, cache, or the engine itself.
        Returns True if the move was legal and applied.
        """
        si_obligatorio = self.partida.numJugadas() <= self.plies_mandatory
        si_pensar = True
        fenM2 = self.partida.ultPosicion.fenM2()
        moves = self.dicFENm2.get(fenM2, set())
        if si_obligatorio:
            nmoves = len(moves)
            if nmoves == 0:
                # Position left the repertoire: fall through to free play
                si_obligatorio = False
            else:
                move = self.dbop.get_cache_engines(self.keyengine, self.time, fenM2)
                if move is None:
                    if self.book:
                        move_book = self.book.eligeJugadaTipo(
                            self.partida.ultPosicion.fen(), "au"
                        )
                        if move_book in list(moves):
                            move = move_book
                    if move is None:
                        move = random.choice(list(moves))
                    self.dbop.set_cache_engines(self.keyengine, self.time, fenM2, move)
                desde, hasta, coronacion = move[:2], move[2:4], move[4:]
                si_pensar = False
        if si_pensar:
            # Free play: book first, then cached engine move, then think
            move = None
            if self.book:
                move = self.book.eligeJugadaTipo(self.partida.ultPosicion.fen(), "mp")
            if move is None:
                move = self.dbop.get_cache_engines(self.keyengine, self.time, fenM2)
            if move is None:
                rmRival = self.xrival.juegaPartida(self.partida)
                move = rmRival.movimiento()
                self.dbop.set_cache_engines(self.keyengine, self.time, fenM2, move)
            desde, hasta, coronacion = move[:2], move[2:4], move[4:]
        if si_obligatorio:
            # Safety net: guarantee the played move belongs to the line
            if move not in moves:
                move = list(moves)[0]
                desde, hasta, coronacion = move[:2], move[2:4], move[4:]
        siBien, mens, jg = Jugada.dameJugada(
            self.partida.ultPosicion, desde, hasta, coronacion
        )
        if siBien:
            self.partida.ultPosicion = jg.posicion
            self.masJugada(jg, False)
            self.movimientosPiezas(jg.liMovs, True)
            self.error = ""
            return True
        else:
            self.error = mens
            return False
    def mueveHumano(self, desde, hasta, coronacion=""):
        """Handle a human move.

        If the move leaves the stored lines, show arrows for the expected
        moves and either ask to continue (ASK_MOVESDIFFERENT) or end the game.
        Returns True when the board interaction is finished.
        """
        jg = self.checkMueveHumano(desde, hasta, coronacion)
        if not jg:
            return False
        fenM2 = self.partida.ultPosicion.fenM2()
        moves = self.dicFENm2.get(fenM2, [])
        nmoves = len(moves)
        if nmoves > 0:
            if jg.movimiento() not in moves:
                # Show the repertoire moves (grey) and the played move (highlight)
                for move in moves:
                    self.tablero.creaFlechaMulti(move, False)
                self.tablero.creaFlechaMulti(jg.movimiento(), True)
                if self.ask_movesdifferent:
                    mensaje = "%s\n%s" % (
                        _("This is not the move in the opening lines"),
                        _("Do you want to go on with this move?"),
                    )
                    if not QTUtil2.pregunta(self.pantalla, mensaje):
                        self.ponFinJuego()
                        return True
                else:
                    self.mensajeEnPGN(
                        _(
                            "This is not the move in the opening lines, you must repeat the game"
                        )
                    )
                    self.ponFinJuego()
                    return True
        self.movimientosPiezas(jg.liMovs)
        self.masJugada(jg, True)
        self.siguienteJugada()
        return True
    def masJugada(self, jg, siNuestra):
        """Append move `jg` to the game, tagging it with es_linea/"!" when it
        follows the repertoire, and refresh all UI elements."""
        fenM2 = jg.posicionBase.fenM2()
        jg.es_linea = False
        if fenM2 in self.dicFENm2:
            if jg.movimiento() in self.dicFENm2[fenM2]:
                jg.criticaDirecta = "!"
                jg.es_linea = True
        self.partida.append_jg(jg)
        if self.partida.pendienteApertura:
            self.partida.asignaApertura()
        self.ponFlechaSC(jg.desde, jg.hasta)
        self.beepExtendido(siNuestra)
        self.pgnRefresh(self.partida.ultPosicion.siBlancas)
        self.refresh()
        self.ponPosicionDGT()
    def muestraInformacion(self):
        """Refresh the info label: engine/level header plus either the
        mandatory-move counter or the moves-until-control countdown."""
        li = []
        li.extend(self.li_info)
        si_obligatorio = self.partida.numJugadas() < self.plies_mandatory
        if si_obligatorio and self.estado != kFinJuego:
            fenM2 = self.partida.ultPosicion.fenM2()
            moves = self.dicFENm2.get(fenM2, [])
            if len(moves) > 0:
                li.append(
                    "<b>%s</b>: %d/%d"
                    % (
                        _("Mandatory move"),
                        self.partida.numJugadas() + 1,
                        self.plies_mandatory,
                    )
                )
            else:
                si_obligatorio = False
        if not si_obligatorio and self.estado != kFinJuego:
            tm = self.plies_pendientes
            if (
                tm > 1
                and self.partida.numJugadas()
                and not self.partida.jugada(-1).es_linea
            ):
                li.append("%s: %d" % (_("Moves until the control"), tm - 1))
        self.ponRotulo1("<br>".join(li))
    def run_auto_analysis(self):
        """Analyse (with the user's analyzer engine) every move of ours that
        left the repertoire, using the per-engine analysis cache."""
        lista = []
        for njg in range(self.partida.numJugadas()):
            jg = self.partida.jugada(njg)
            if jg.siBlancas() == self.siJugamosConBlancas:
                fenM2 = jg.posicionBase.fenM2()
                if fenM2 not in self.dicFENm2:
                    jg.njg = njg
                    lista.append(jg)
                    jg.fenM2 = fenM2
        total = len(lista)
        for pos, jg in enumerate(lista, 1):
            if self.siCancelado():
                break
            self.ponteEnJugada(jg.njg)
            self.mensEspera(siCancelar=True, masTitulo="%d/%d" % (pos, total))
            nombre = self.xanalyzer.nombre
            tiempo = self.xanalyzer.motorTiempoJugada
            depth = self.xanalyzer.motorProfundidad
            mrm = self.dbop.get_cache_engines(nombre, tiempo, jg.fenM2, depth)
            ok = False
            if mrm:
                # NOTE(review): `pos` here shadows the enumerate counter; it is
                # re-bound on the next iteration so only the jg.analisis tuple
                # below is affected (deliberately, it seems) - verify.
                rm, pos = mrm.buscaRM(jg.movimiento())
                if rm:
                    ok = True
            if not ok:
                mrm, pos = self.xanalyzer.analizaJugada(
                    jg,
                    self.xanalyzer.motorTiempoJugada,
                    self.xanalyzer.motorProfundidad,
                )
                self.dbop.set_cache_engines(nombre, tiempo, jg.fenM2, mrm, depth)
            jg.analisis = mrm, pos
            self.pantalla.base.pgnRefresh()
    def mensEspera(self, siFinal=False, siCancelar=False, masTitulo=None):
        """Manage the temporary "Analyzing" wait dialog (self.um).

        siFinal closes it; otherwise it is created on first use, optionally
        retitled with `masTitulo`, and its cancel button toggled.
        """
        if siFinal:
            if self.um:
                self.um.final()
        else:
            if self.um is None:
                self.um = QTUtil2.mensajeTemporal(
                    self.pantalla,
                    _("Analyzing"),
                    0,
                    posicion="ad",
                    siCancelar=True,
                    titCancelar=_("Cancel"),
                )
            if masTitulo:
                self.um.rotulo(_("Analyzing") + " " + masTitulo)
            self.um.me.activarCancelar(siCancelar)
    def siCancelado(self):
        """Return True (and close the wait dialog) if the user hit cancel."""
        si = self.um.cancelado()
        if si:
            self.um.final()
        return si
    def runcontrol(self):
        """Evaluate the game at control points; return True when the game ends.

        Compares the judge engine's score at the point where the game left
        the repertoire against the current score; losing >= lost_points (or
        a worse mate score) fails the attempt.
        """
        puntosInicio, mateInicio = 0, 0
        puntosFinal, mateFinal = 0, 0
        numJugadas = self.partida.numJugadas()
        if numJugadas == 0:
            return False
        self.um = None  # tracks the "please wait" dialog (see mensEspera)
        def aprobado():
            # Passed: record progress and move on to the next engine
            mens = '<b><span style="color:green">%s</span></b>' % _(
                "Congratulations, goal achieved"
            )
            self.li_info.append("")
            self.li_info.append(mens)
            self.muestraInformacion()
            self.dbop.setconfig("ENG_ENGINE", self.numengine + 1)
            self.mensajeEnPGN(mens)
            self.siAprobado = True
        def suspendido():
            # Failed: the game must be repeated with the same engine
            mens = '<b><span style="color:red">%s</span></b>' % _(
                "You must repeat the game"
            )
            self.li_info.append("")
            self.li_info.append(mens)
            self.muestraInformacion()
            self.mensajeEnPGN(mens)
        def calculaJG(jg, siinicio):
            # Judge-engine evaluation of a move's base (siinicio) or resulting
            # position, cached, and normalised to the player's point of view.
            fen = jg.posicionBase.fen() if siinicio else jg.posicion.fen()
            nombre = self.xjuez.nombre
            tiempo = self.xjuez.motorTiempoJugada
            mrm = self.dbop.get_cache_engines(nombre, tiempo, fen)
            if mrm is None:
                self.mensEspera()
                mrm = self.xjuez.analiza(fen)
                self.dbop.set_cache_engines(nombre, tiempo, fen, mrm)
            rm = mrm.mejorMov()
            if (" w " in fen) == self.siJugamosConBlancas:
                return rm.puntos, rm.mate
            else:
                return -rm.puntos, -rm.mate
        # NOTE(review): always True, so the "if siCalcularInicio" below is a
        # vestigial guard - the start evaluation always runs.
        siCalcularInicio = True
        if self.partida.siTerminada():
            self.ponFinJuego()
            jg = self.partida.jugada(-1)
            if jg.siJaqueMate:
                if jg.siBlancas() == self.siJugamosConBlancas:
                    aprobado()
                else:
                    suspendido()
                self.ponFinJuego()
                return True
            puntosFinal, mateFinal = 0, 0
        else:
            jg = self.partida.jugada(-1)
            # A repertoire move resets the countdown to the next control
            if jg.es_linea:
                self.plies_pendientes = self.plies_control
            else:
                self.plies_pendientes -= 1
            if self.plies_pendientes > 0:
                return False
            # If the last move follows the line, nothing needs evaluating
            self.mensEspera()
            puntosFinal, mateFinal = calculaJG(jg, False)
        # Mark every move that strays from the lines ("?!") and find the first
        # such move, to evaluate the position where the game left the lines
        if siCalcularInicio:
            jg_inicial = None
            for njg in range(numJugadas):
                jg = self.partida.jugada(njg)
                fenM2 = jg.posicionBase.fenM2()
                if fenM2 in self.dicFENm2:
                    moves = self.dicFENm2[fenM2]
                    if jg.movimiento() not in moves:
                        jg.criticaDirecta = "?!"
                        if jg_inicial is None:
                            jg_inicial = jg
                elif jg_inicial is None:
                    jg_inicial = jg
            if jg_inicial:
                puntosInicio, mateInicio = calculaJG(jg_inicial, True)
            else:
                puntosInicio, mateInicio = 0, 0
        self.li_info.append("<b>%s:</b>" % _("Score"))
        template = "    <b>%s</b>: %d"
        def appendInfo(label, puntos, mate):
            mens = template % (label, puntos)
            if mate:
                mens += " %s %d" % (_("Mate"), mate)
            self.li_info.append(mens)
        appendInfo(_("Start"), puntosInicio, mateInicio)
        appendInfo(_("End"), puntosFinal, mateFinal)
        perdidos = puntosInicio - puntosFinal
        ok = perdidos < self.lost_points
        if mateInicio or mateFinal:
            ok = mateFinal > mateInicio
        mens = template % ("(%d)-(%d)" % (puntosInicio, puntosFinal), perdidos)
        mens = "%s %s %d" % (mens, "<" if ok else ">", self.lost_points)
        self.li_info.append(mens)
        if not ok:
            if self.auto_analysis:
                self.run_auto_analysis()
            self.mensEspera(siFinal=True)
            suspendido()
        else:
            self.mensEspera(siFinal=True)
            aprobado()
        self.ponFinJuego()
        return True
    def procesarAccion(self, clave):
        """Dispatch a toolbar/menu action key to the matching handler."""
        if clave == k_mainmenu:
            self.finPartida()
        elif clave in (k_reiniciar, k_siguiente):
            self.reiniciar()
        elif clave == k_peliculaRepetir:
            # Replay: reset the engine index so the same engine is faced again
            self.dbop.setconfig("ENG_ENGINE", self.numengine)
            self.reiniciar()
        elif clave == k_abandonar:
            self.ponFinJuego()
        elif clave == k_configurar:
            self.configurar(siSonidos=True)
        elif clave == k_utilidades:
            # Build the extended utilities submenu
            liMasOpciones = []
            liMasOpciones.append(("libros", _("Consult a book"), Iconos.Libros()))
            liMasOpciones.append((None, None, None))
            liMasOpciones.append((None, _("Options"), Iconos.Opciones()))
            mens = _("cancel") if self.auto_analysis else _("activate")
            liMasOpciones.append(
                (
                    "auto_analysis",
                    "%s: %s" % (_("Automatic analysis"), mens),
                    Iconos.Analizar(),
                )
            )
            liMasOpciones.append((None, None, None))
            mens = _("cancel") if self.ask_movesdifferent else _("activate")
            liMasOpciones.append(
                (
                    "ask_movesdifferent",
                    "%s: %s"
                    % (_("Ask when the moves are different from the line"), mens),
                    Iconos.Pelicula_Seguir(),
                )
            )
            liMasOpciones.append((None, None, True))  # closes the submenu
            liMasOpciones.append((None, None, None))
            liMasOpciones.append(
                ("run_analysis", _("Specific analysis"), Iconos.Analizar())
            )
            liMasOpciones.append((None, None, None))
            liMasOpciones.append(
                ("add_line", _("Add this line"), Iconos.OpeningLines())
            )
            resp = self.utilidades(liMasOpciones)
            if resp == "libros":
                self.librosConsulta(False)
            elif resp == "add_line":
                numJugadas, nj, fila, siBlancas = self.jugadaActual()
                partida = self.partida
                if numJugadas != nj + 1:
                    # Not at the last move: ask whether to add all or a prefix
                    menu = QTVarios.LCMenu(self.pantalla)
                    menu.opcion("all", _("Add all moves"), Iconos.PuntoAzul())
                    menu.separador()
                    menu.opcion(
                        "parcial", _("Add until current move"), Iconos.PuntoVerde()
                    )
                    resp = menu.lanza()
                    if resp is None:
                        return
                    if resp == "parcial":
                        partida = self.partida.copia(nj)
                self.dbop.append(partida)
                self.dbop.updateTrainingEngines()
                QTUtil2.mensaje(self.pantalla, _("Done"))
            elif resp == "auto_analysis":
                self.auto_analysis = not self.auto_analysis
                self.trainingEngines["AUTO_ANALYSIS"] = self.auto_analysis
                self.dbop.setTrainingEngines(self.trainingEngines)
            elif resp == "ask_movesdifferent":
                self.ask_movesdifferent = not self.ask_movesdifferent
                self.trainingEngines["ASK_MOVESDIFFERENT"] = self.ask_movesdifferent
                self.dbop.setTrainingEngines(self.trainingEngines)
            elif resp == "run_analysis":
                self.um = None
                self.mensEspera()
                self.run_auto_analysis()
                self.mensEspera(siFinal=True)
        else:
            Gestor.Gestor.rutinaAccionDef(self, clave)
    def finalX(self):
        """Window-close hook: delegate to finPartida."""
        return self.finPartida()
    def finPartida(self):
        """Close the database, restore the board and return to the openings menu."""
        self.dbop.close()
        self.tablero.restoreVisual()
        self.procesador.inicio()
        self.procesador.openings()
        return False
    def reiniciar(self):
        """Restart the session with the same opening database."""
        self.reinicio(self.dbop)
    def ponFinJuego(self):
        """Switch to the game-over state and set the matching toolbar."""
        self.estado = kFinJuego
        self.desactivaTodas()
        liOpciones = [k_mainmenu]
        if self.siAprobado:
            liOpciones.append(k_siguiente)
            liOpciones.append(k_peliculaRepetir)
        else:
            liOpciones.append(k_reiniciar)
        liOpciones.append(k_configurar)
        liOpciones.append(k_utilidades)
        self.pantalla.ponToolBar(liOpciones)
class GestorOpeningLines(Gestor.Gestor):
    """Controller for memorising opening lines by repetition.

    The rival side is replayed mechanically from the stored line (liPV); the
    user must reproduce their own side.  Errors and help usage are recorded
    per line, and in "sequential" mode error-free lines are pushed further
    back in the queue (spaced repetition).
    """
    def inicio(self, pathFichero, modo, num_linea):
        """Entry point: open the opening database and start line `num_linea`."""
        self.tablero.saveVisual()
        self.pathFichero = pathFichero
        dbop = OpeningLines.Opening(pathFichero)
        self.tablero.dbVisual_setFichero(dbop.nomFichero)
        self.reinicio(dbop, modo, num_linea)
    def reinicio(self, dbop, modo, num_linea):
        """(Re)initialise state for training line `num_linea` in mode `modo`
        ("sequential" or "static") and reset the board/UI."""
        self.dbop = dbop
        self.tipoJuego = kJugOpeningLines
        self.modo = modo
        self.num_linea = num_linea
        self.training = self.dbop.training()
        self.liGames = self.training["LIGAMES_%s" % modo.upper()]
        self.game = self.liGames[num_linea]
        self.liPV = self.game["LIPV"]
        self.numPV = len(self.liPV)
        self.calc_totalTiempo()
        self.dicFENm2 = self.training["DICFENM2"]
        # Human-readable list of line numbers this game belongs to (capped at 10)
        li = self.dbop.getNumLinesPV(self.liPV)
        if len(li) > 10:
            mensLines = ",".join(["%d" % line for line in li[:10]]) + ", ..."
        else:
            mensLines = ",".join(["%d" % line for line in li])
        self.liMensBasic = [
            "%d/%d" % (self.num_linea + 1, len(self.liGames)),
            "%s: %s" % (_("Lines"), mensLines),
        ]
        self.siAyuda = False
        self.tablero.dbVisual_setShowAllways(False)
        self.partida = Partida.Partida()
        self.ayudas = 9999  # so analysis runs without restrictions
        self.siJugamosConBlancas = self.training["COLOR"] == "WHITE"
        self.siRivalConBlancas = not self.siJugamosConBlancas
        self.pantalla.ponToolBar((k_mainmenu, k_ayuda, k_reiniciar))
        self.pantalla.activaJuego(True, False, siAyudas=False)
        self.ponMensajero(self.mueveHumano)
        self.ponPosicion(self.partida.ultPosicion)
        self.mostrarIndicador(True)
        self.quitaAyudas()
        self.ponPiezasAbajo(self.siJugamosConBlancas)
        self.pgnRefresh(True)
        self.ponCapInfoPorDefecto()
        self.estado = kJugando
        self.ponPosicionDGT()
        self.errores = 0
        self.ini_time = time.time()
        self.muestraInformacion()
        self.siguienteJugada()
    def calc_totalTiempo(self):
        """Sum the time of every recorded attempt across all games into self.tm."""
        self.tm = 0
        for game in self.liGames:
            for tr in game["TRIES"]:
                self.tm += tr["TIME"]
    def ayuda(self):
        """Turn on help mode (shows expected-move arrows and stored comments)."""
        self.siAyuda = True
        self.pantalla.ponToolBar((k_mainmenu, k_reiniciar, k_configurar, k_utilidades))
        self.tablero.dbVisual_setShowAllways(True)
        self.muestraAyuda()
        self.muestraInformacion()
    def muestraInformacion(self):
        """Refresh the three info labels: errors/help, working times, and (in
        help mode) the stored comment/NAG annotations for the position."""
        li = []
        li.append("%s: %d" % (_("Errors"), self.errores))
        if self.siAyuda:
            li.append(_("Help activated"))
        self.ponRotulo1("\n".join(li))
        tgm = 0
        for tr in self.game["TRIES"]:
            tgm += tr["TIME"]
        mens = "\n" + "\n".join(self.liMensBasic)
        mens += "\n%s:\n %s %s\n %s %s" % (
            _("Working time"),
            time.strftime("%H:%M:%S", time.gmtime(tgm)),
            _("Current"),
            time.strftime("%H:%M:%S", time.gmtime(self.tm)),
            _("Total"),
        )
        self.ponRotulo2(mens)
        if self.siAyuda:
            dicNAGs = TrListas.dicNAGs()
            mens3 = ""
            fenM2 = self.partida.ultPosicion.fenM2()
            reg = self.dbop.getfenvalue(fenM2)
            if reg:
                mens3 = reg.get("COMENTARIO", "")
                ventaja = reg.get("VENTAJA", 0)
                valoracion = reg.get("VALORACION", 0)
                if ventaja:
                    mens3 += "\n %s" % dicNAGs[ventaja]
                if valoracion:
                    mens3 += "\n %s" % dicNAGs[valoracion]
            self.ponRotulo3(mens3 if mens3 else None)
    def partidaTerminada(self, siCompleta):
        """Record the attempt and update the NOERROR streak.

        siCompleta=True means the whole line was played; an error-free
        complete attempt bumps the streak and, in sequential mode, reinserts
        the game 2**(streak+1) positions back in the queue.
        """
        self.estado = kFinJuego
        tm = time.time() - self.ini_time
        li = [_("Line finished.")]
        if self.siAyuda:
            li.append(_("Help activated"))
        if self.errores > 0:
            li.append("%s: %d" % (_("Errors"), self.errores))
        if siCompleta:
            mensaje = "\n".join(li)
            self.mensajeEnPGN(mensaje)
        dictry = {
            "DATE": Util.hoy(),
            "TIME": tm,
            "AYUDA": self.siAyuda,
            "ERRORS": self.errores,
        }
        self.game["TRIES"].append(dictry)
        sinError = self.errores == 0 and not self.siAyuda
        if siCompleta:
            if sinError:
                self.game["NOERROR"] += 1
                noError = self.game["NOERROR"]
                if self.modo == "sequential":
                    # Spaced repetition: push the game back, but not past the
                    # first game with a different streak value
                    salto = 2 ** (noError + 1)
                    numGames = len(self.liGames)
                    for x in range(salto, numGames):
                        game = self.liGames[x]
                        if game["NOERROR"] != noError:
                            salto = x
                            break
                    liNuevo = self.liGames[1:salto]
                    liNuevo.append(self.game)
                    if numGames > salto:
                        liNuevo.extend(self.liGames[salto:])
                    self.training["LIGAMES_SEQUENTIAL"] = liNuevo
                    self.pantalla.ponToolBar((k_mainmenu, k_siguiente))
                else:
                    self.pantalla.ponToolBar(
                        (k_mainmenu, k_reiniciar, k_configurar, k_utilidades)
                    )
            else:
                self.game["NOERROR"] -= 1
                self.pantalla.ponToolBar(
                    (k_mainmenu, k_reiniciar, k_configurar, k_utilidades)
                )
        else:
            if not sinError:
                self.game["NOERROR"] -= 1
        self.game["NOERROR"] = max(0, self.game["NOERROR"])
        self.dbop.setTraining(self.training)
        self.estado = kFinJuego
        self.calc_totalTiempo()
        self.muestraInformacion()
    def muestraAyuda(self):
        """Draw arrows: strong for this line's move, faint for alternatives."""
        pv = self.liPV[len(self.partida)]
        self.tablero.creaFlechaMov(pv[:2], pv[2:4], "mt80")
        fenM2 = self.partida.ultPosicion.fenM2()
        for pv1 in self.dicFENm2[fenM2]:
            if pv1 != pv:
                self.tablero.creaFlechaMov(pv1[:2], pv1[2:4], "ms40")
    def procesarAccion(self, clave):
        """Dispatch a toolbar/menu action key to the matching handler."""
        if clave == k_mainmenu:
            self.finPartida()
        elif clave == k_reiniciar:
            self.reiniciar()
        elif clave == k_configurar:
            self.configurar(siSonidos=True)
        elif clave == k_utilidades:
            self.utilidades()
        elif clave == k_siguiente:
            self.reinicio(self.dbop, self.modo, self.num_linea)
        elif clave == k_ayuda:
            self.ayuda()
        else:
            Gestor.Gestor.rutinaAccionDef(self, clave)
    def finalX(self):
        """Window-close hook: delegate to finPartida."""
        return self.finPartida()
    def finPartida(self):
        """Close the database, restore the board and go back to the proper menu."""
        self.dbop.close()
        self.tablero.restoreVisual()
        self.procesador.inicio()
        if self.modo == "static":
            self.procesador.openingsTrainingStatic(self.pathFichero)
        else:
            self.procesador.openings()
        return False
    def reiniciar(self):
        """Restart the current line, recording an aborted attempt if one was live."""
        if len(self.partida) > 0 and self.estado != kFinJuego:
            self.partidaTerminada(False)
        self.reinicio(self.dbop, self.modo, self.num_linea)
    def siguienteJugada(self):
        """Advance the loop: finish when the line is exhausted, replay the
        rival's stored move, or hand control to the human."""
        self.muestraInformacion()
        if self.estado == kFinJuego:
            return
        self.estado = kJugando
        self.siJuegaHumano = False
        self.ponVista()
        siBlancas = self.partida.ultPosicion.siBlancas
        self.ponIndicador(siBlancas)
        self.refresh()
        siRival = siBlancas == self.siRivalConBlancas
        numJugadas = len(self.partida)
        if numJugadas >= self.numPV:
            self.partidaTerminada(True)
            return
        pv = self.liPV[numJugadas]
        if siRival:
            self.desactivaTodas()
            # The rival's move is scripted from the line, not engine-generated
            self.rmRival = XMotorRespuesta.RespuestaMotor(
                "Apertura", self.siRivalConBlancas
            )
            self.rmRival.desde = pv[:2]
            self.rmRival.hasta = pv[2:4]
            self.rmRival.coronacion = pv[4:]
            self.mueveRival(self.rmRival)
            self.siguienteJugada()
        else:
            self.activaColor(siBlancas)
            self.siJuegaHumano = True
            if self.siAyuda:
                self.muestraAyuda()
    def mueveHumano(self, desde, hasta, coronacion=""):
        """Check the human move against the line.

        A correct-but-different repertoire move is rejected with a hint (no
        error counted); a wrong move increments the error count.  Only the
        exact line move is accepted and played.
        """
        jg = self.checkMueveHumano(desde, hasta, coronacion)
        if not jg:
            return False
        pvSel = desde + hasta + coronacion
        pvObj = self.liPV[len(self.partida)]
        if pvSel != pvObj:
            fenM2 = jg.posicionBase.fenM2()
            li = self.dicFENm2.get(fenM2, [])
            if pvSel in li:
                mens = _(
                    "You have selected a correct move, but this line uses another one."
                )
                QTUtil2.mensajeTemporal(
                    self.pantalla, mens, 2, posicion="tb", background="#C3D6E8"
                )
                self.sigueHumano()
                return False
            self.errores += 1
            mens = "%s: %d" % (_("Error"), self.errores)
            QTUtil2.mensajeTemporal(
                self.pantalla,
                mens,
                1.2,
                posicion="ad",
                background="#FF9B00",
                pmImagen=Iconos.pmError(),
            )
            self.muestraInformacion()
            self.sigueHumano()
            return False
        self.movimientosPiezas(jg.liMovs)
        self.masJugada(jg, True)
        self.siguienteJugada()
        return True
    def masJugada(self, jg, siNuestra):
        """Append move `jg` to the game and refresh all UI elements."""
        self.partida.append_jg(jg)
        if self.partida.pendienteApertura:
            self.partida.asignaApertura()
        self.ponFlechaSC(jg.desde, jg.hasta)
        self.beepExtendido(siNuestra)
        self.pgnRefresh(self.partida.ultPosicion.siBlancas)
        self.refresh()
        self.ponPosicionDGT()
    def mueveRival(self, respMotor):
        """Apply the scripted rival move; return True if it was legal."""
        desde = respMotor.desde
        hasta = respMotor.hasta
        coronacion = respMotor.coronacion
        siBien, mens, jg = Jugada.dameJugada(
            self.partida.ultPosicion, desde, hasta, coronacion
        )
        if siBien:
            self.partida.ultPosicion = jg.posicion
            self.masJugada(jg, False)
            self.movimientosPiezas(jg.liMovs, True)
            self.error = ""
            return True
        else:
            self.error = mens
            return False
class GestorOpeningLinesPositions(Gestor.Gestor):
def inicio(self, pathFichero):
self.pathFichero = pathFichero
dbop = OpeningLines.Opening(pathFichero)
self.reinicio(dbop)
def reinicio(self, dbop):
self.dbop = dbop
self.tipoJuego = kJugOpeningLines
self.training = self.dbop.training()
self.liTrainPositions = self.training["LITRAINPOSITIONS"]
self.trposition = self.liTrainPositions[0]
self.tm = 0
for game in self.liTrainPositions:
for tr in game["TRIES"]:
self.tm += tr["TIME"]
self.liMensBasic = [
"%s: %d" % (_("Moves"), len(self.liTrainPositions)),
]
self.siAyuda = False
self.siSaltoAutomatico = True
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self.trposition["FENM2"] + " 0 1")
self.partida = Partida.Partida(iniPosicion=cp)
self.ayudas = 9999 # Para que analice sin problemas
self.siJugamosConBlancas = self.training["COLOR"] == "WHITE"
self.siRivalConBlancas = not self.siJugamosConBlancas
self.pantalla.ponToolBar((k_mainmenu, k_ayuda, k_configurar))
self.pantalla.activaJuego(True, False, siAyudas=False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(cp)
self.mostrarIndicador(True)
self.quitaAyudas()
self.ponPiezasAbajo(self.siJugamosConBlancas)
self.pgnRefresh(True)
self.ponCapInfoPorDefecto()
self.estado = kJugando
self.ponPosicionDGT()
self.quitaInformacion()
self.errores = 0
self.ini_time = time.time()
self.muestraInformacion()
self.siguienteJugada()
def ayuda(self):
self.siAyuda = True
self.pantalla.ponToolBar((k_mainmenu, k_configurar))
self.muestraAyuda()
self.muestraInformacion()
def muestraInformacion(self):
li = []
li.append("%s: %d" % (_("Errors"), self.errores))
if self.siAyuda:
li.append(_("Help activated"))
self.ponRotulo1("\n".join(li))
tgm = 0
for tr in self.trposition["TRIES"]:
tgm += tr["TIME"]
mas = time.time() - self.ini_time
mens = "\n" + "\n".join(self.liMensBasic)
mens += "\n%s:\n %s %s\n %s %s" % (
_("Working time"),
time.strftime("%H:%M:%S", time.gmtime(tgm + mas)),
_("Current"),
time.strftime("%H:%M:%S", time.gmtime(self.tm + mas)),
_("Total"),
)
self.ponRotulo2(mens)
def posicionTerminada(self):
tm = time.time() - self.ini_time
siSalta = self.siSaltoAutomatico and self.errores == 0 and self.siAyuda == False
if not siSalta:
li = [_("Finished.")]
if self.siAyuda:
li.append(_("Help activated"))
if self.errores > 0:
li.append("%s: %d" % (_("Errors"), self.errores))
QTUtil2.mensajeTemporal(self.pantalla, "\n".join(li), 1.2)
dictry = {
"DATE": Util.hoy(),
"TIME": tm,
"AYUDA": self.siAyuda,
"ERRORS": self.errores,
}
self.trposition["TRIES"].append(dictry)
sinError = self.errores == 0 and not self.siAyuda
if sinError:
self.trposition["NOERROR"] += 1
else:
self.trposition["NOERROR"] = max(0, self.trposition["NOERROR"] - 1)
noError = self.trposition["NOERROR"]
salto = 2 ** (noError + 1) + 1
numPosics = len(self.liTrainPositions)
for x in range(salto, numPosics):
posic = self.liTrainPositions[x]
if posic["NOERROR"] != noError:
salto = x
break
liNuevo = self.liTrainPositions[1:salto]
liNuevo.append(self.trposition)
if numPosics > salto:
liNuevo.extend(self.liTrainPositions[salto:])
self.training["LITRAINPOSITIONS"] = liNuevo
self.pantalla.ponToolBar((k_mainmenu, k_siguiente, k_configurar))
self.dbop.setTraining(self.training)
self.estado = kFinJuego
self.muestraInformacion()
if siSalta:
self.reinicio(self.dbop)
def muestraAyuda(self):
liMoves = self.trposition["MOVES"]
for pv in liMoves:
self.tablero.creaFlechaMov(pv[:2], pv[2:4], "mt80")
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_configurar:
base = _("What to do after solving")
if self.siSaltoAutomatico:
liMasOpciones = [
("lmo_stop", "%s: %s" % (base, _("Stop")), Iconos.PuntoRojo())
]
else:
liMasOpciones = [
(
"lmo_jump",
"%s: %s" % (base, _("Jump to the next")),
Iconos.PuntoVerde(),
)
]
resp = self.configurar(
siSonidos=True, siCambioTutor=False, liMasOpciones=liMasOpciones
)
if resp in ("lmo_stop", "lmo_jump"):
self.siSaltoAutomatico = resp == "lmo_jump"
elif clave == k_utilidades:
self.utilidades()
elif clave == k_siguiente:
self.reinicio(self.dbop)
elif clave == k_ayuda:
self.ayuda()
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def finalX(self):
return self.finPartida()
def finPartida(self):
self.dbop.close()
self.procesador.inicio()
self.procesador.openings()
return False
def siguienteJugada(self):
self.muestraInformacion()
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
self.activaColor(siBlancas)
self.siJuegaHumano = True
if self.siAyuda:
self.muestraAyuda()
def mueveHumano(self, desde, hasta, coronacion=""):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
pvSel = desde + hasta + coronacion
lipvObj = self.trposition["MOVES"]
if pvSel not in lipvObj:
self.errores += 1
mens = "%s: %d" % (_("Error"), self.errores)
QTUtil2.mensajeTemporal(
self.pantalla, mens, 2, posicion="ad", background="#FF9B00"
)
self.muestraInformacion()
self.sigueHumano()
return False
self.movimientosPiezas(jg.liMovs)
self.masJugada(jg, True)
self.posicionTerminada()
return True
def masJugada(self, jg, siNuestra):
    """Append move *jg* to the game and refresh board, PGN and indicators."""
    self.partida.append_jg(jg)
    if self.partida.pendienteApertura:
        self.partida.asignaApertura()  # classify the opening once enough moves exist
    self.ponFlechaSC(jg.desde, jg.hasta)  # arrow marking the last move
    self.beepExtendido(siNuestra)
    self.pgnRefresh(self.partida.ultPosicion.siBlancas)
    self.refresh()
    self.ponPosicionDGT()  # presumably mirrors the position to a DGT e-board
|
midifile | parser | # Python midifile package -- parse, load and play MIDI files.
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
midifile.parser -- parses MIDI file data.
This is a simple module that can parse data from a MIDI file and
its tracks.
A basic event factory returns the MIDI events as simple named tuples,
but you can subclass the event factory for more sophisticated behaviour.
Runs with Python 2.6, 2.7 and 3.
"""
import struct
import sys
from . import event
# Pre-compiled struct readers: the MThd payload (format, ntracks, division)
# and a single big-endian 32-bit chunk size.
unpack_midi_header = struct.Struct(b">hhh").unpack
unpack_int = struct.Struct(b">i").unpack


def get_chunks(s):
    """Splits a MIDI file bytes string into chunks.

    Yields (b'Name', b'data') tuples.
    """
    pos = 0
    while pos < len(s):
        name = s[pos : pos + 4]
        (size,) = unpack_int(s[pos + 4 : pos + 8])
        yield name, s[pos + 8 : pos + 8 + size]
        pos += size + 8


def parse_midi_data(s):
    """Parses MIDI file data from the bytes string s.

    Returns a three tuple (format_type, time_division, tracks).
    Every track is an unparsed bytes string.
    May raise ValueError or IndexError in case of invalid MIDI data.
    """
    chunks = get_chunks(s)
    for name, data in chunks:
        if name == b"MThd":
            fmt, ntracks, division = unpack_midi_header(data[:6])
            # Consume the remaining chunks, keeping only the track data.
            tracks = [data for name, data in chunks if name == b"MTrk"]
            return fmt, division, tracks
            # (an unreachable `break` that followed the return was removed)
    raise ValueError("invalid midi data")
def read_var_len(s, pos):
    """Reads variable-length integer from byte string s starting on pos.

    Returns the value and the new position.
    """
    value = 0
    more = True
    while more:
        byte = s[pos]
        pos += 1
        value = (value << 7) | (byte & 0x7F)
        more = bool(byte & 0x80)  # bit 7 set means another byte follows
    return value, pos
def parse_midi_events(s, factory=None):
    """Parses the bytes string s (typically a track) for MIDI events.

    If factory is given, it should be an EventFactory instance that
    returns objects describing the event.

    Yields two-tuples (delta, event).
    Raises ValueError or IndexError on invalid MIDI data.
    """
    if factory is None:
        factory = event.EventFactory()
    running_status = None  # last explicit status byte (MIDI "running status")
    pos = 0
    while pos < len(s):
        # Every event is preceded by a variable-length delta time.
        delta, pos = read_var_len(s, pos)
        status = s[pos]
        if status & 0x80:
            # A real status byte: remember it and consume it.
            running_status = status
            pos += 1
        elif not running_status:
            raise ValueError("invalid running status")
        else:
            # Data byte without a status: reuse the previous status byte.
            status = running_status
        # status always has bit 7 set here, so ev_type is in 0x8..0xF.
        ev_type = status >> 4
        channel = status & 0x0F
        if ev_type <= 0x0A:
            # note on, off or aftertouch
            note = s[pos]
            value = s[pos + 1]
            pos += 2
            ev = factory.note_event(ev_type, channel, note, value)
        elif ev_type >= 0x0F:
            # System messages (0xF0-0xFF) cancel running status.
            running_status = None
            if status == 0xFF:
                # meta event
                meta_type = s[pos]
                meta_size, pos = read_var_len(s, pos + 1)
                meta_data = s[pos : pos + meta_size]
                pos += meta_size
                ev = factory.meta_event(meta_type, meta_data)
            else:
                # some sort of sysex
                sysex_size, pos = read_var_len(s, pos)
                sysex_data = s[pos : pos + sysex_size]
                pos += sysex_size
                ev = factory.sysex_event(status, sysex_data)
        elif ev_type == 0x0E:
            # Pitch Bend: 14-bit value, LSB first
            value = s[pos] + s[pos + 1] * 128
            pos += 2
            ev = factory.pitchbend_event(channel, value)
        elif ev_type == 0xD:
            # Channel AfterTouch
            value = s[pos]
            pos += 1
            ev = factory.channelaftertouch_event(channel, value)
        elif ev_type == 0xB:
            # Controller
            number = s[pos]
            value = s[pos + 1]
            pos += 2
            ev = factory.controller_event(channel, number, value)
        else:  # ev_type == 0xC
            # Program Change
            number = s[pos]
            pos += 1
            ev = factory.programchange_event(channel, number)
        yield delta, ev
def time_events(track, time=0):
    """Yields two-tuples (time, event).

    The track is the generator returned by parse_midi_events,
    the time is accumulated from the given starting time (defaulting to 0).
    """
    clock = time
    for delta, ev in track:
        clock += delta
        yield clock, ev
def time_events_grouped(track, time=0):
    """Yields two-tuples (time, event_list).

    Every event_list is a Python list of all events happening on that time.
    The track is the generator returned by parse_midi_events,
    the time is accumulated from the given starting time (defaulting to 0).
    """
    group = []
    group_time = time
    for delta, ev in track:
        time += delta
        if delta and group:
            # A non-zero delta closes the group collected so far.
            yield group_time, group
            group = []
        if not group:
            group_time = time  # first event of a new group fixes its timestamp
        group.append(ev)
    if group:
        yield group_time, group
if __name__ == "__main__":
    # Ad-hoc test driver: parse every MIDI file named on the command line and
    # dump its events. Parsing errors are reported per file instead of
    # aborting the whole run (the original called parse_midi_data outside the
    # try block and bound its result to a typo'd name `ftm`).
    for f in sys.argv[1:]:
        with open(f, "rb") as midifile:
            s = midifile.read()
        try:
            fmt, div, tracks = parse_midi_data(s)
            for t in tracks:
                print(list(parse_midi_events(t)))
        except Exception as e:
            print("error in:", f)
            print(e)
|
Drawing | InitGui | # Drawing gui init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
# ***************************************************************************
# * (c) Juergen Riegel (juergen.riegel@web.de) 2002
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# * Juergen Riegel 2002 *
# ***************************************************************************/
class DrawingWorkbench(Workbench):
    "Drawing workbench object"

    # NOTE(review): Workbench, FreeCAD and Gui are injected into the global
    # namespace by FreeCAD before this init script runs — confirm.
    def __init__(self):
        # Icon, menu label and tooltip shown in FreeCAD's workbench selector.
        self.__class__.Icon = (
            FreeCAD.getResourceDir()
            + "Mod/Drawing/Resources/icons/DrawingWorkbench.svg"
        )
        self.__class__.MenuText = "Drawing"
        self.__class__.ToolTip = "Drawing workbench"

    def Initialize(self):
        # load the module
        import DrawingGui

    def Activated(self):
        # Warn every time the workbench is activated: it is deprecated.
        FreeCAD.Console.PrintWarning(
            "Drawing became obsolete in 0.17; " "consider using TechDraw instead.\n"
        )

    def GetClassName(self):
        # Name of the C++ class implementing this workbench.
        return "DrawingGui::Workbench"
# Make the workbench available in FreeCAD's workbench selector.
Gui.addWorkbench(DrawingWorkbench())

# Append the open handler
# Register SVG import and SVG/DXF export with the DrawingGui module.
FreeCAD.addImportType("Drawing (*.svg *.svgz)", "DrawingGui")
FreeCAD.addExportType("Drawing (*.svg *.svgz *.dxf)", "DrawingGui")
|
widgets | inputpane | #
# Copyright (C) 2011 Nick Lanham <nick@afternight.org>
# Copyright (C) 2008-2009 Ido Abramovich <ido.deluge@gmail.com>
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
from deluge.decorators import overrides
from deluge.ui.console.modes.basemode import InputKeyHandler, move_cursor
from deluge.ui.console.utils import curses_util as util
from deluge.ui.console.widgets.fields import (
CheckedInput,
CheckedPlusInput,
ComboInput,
DividerField,
FloatSpinInput,
Header,
InfoField,
IntSpinInput,
NoInputField,
SelectInput,
TextArea,
TextField,
TextInput,
)
try:
import curses
except ImportError:
pass
log = logging.getLogger(__name__)
class BaseInputPane(InputKeyHandler):
    """Base class for a pane holding a vertical list of input fields.

    Keeps the ordered field list in ``self.inputs``, tracks which field is
    active, translates key presses into field input / navigation /
    rearranging, and renders all fields onto the owning mode's curses screen.

    NOTE(review): a sibling base class or subclass must provide
    ``visible_content_pane_height`` (checked in __init__) and, for
    rendering, ``self.width`` and ``self.screen`` — none of these are
    defined here; confirm against the concrete modes.
    """

    def __init__(
        self,
        mode,
        allow_rearrange=False,
        immediate_action=False,
        set_first_input_active=True,
        border_off_west=0,
        border_off_north=0,
        border_off_east=0,
        border_off_south=0,
        active_wrap=False,
        **kwargs,
    ):
        InputKeyHandler.__init__(self)
        self.inputs = []  # ordered list of field objects
        self.mode = mode  # owning console mode (supplies width)
        self.active_input = 0  # index of the currently selected field
        self.set_first_input_active = set_first_input_active
        self.allow_rearrange = allow_rearrange
        self.immediate_action = immediate_action
        self.move_active_many = 4  # step size for PageUp/PageDown
        self.active_wrap = active_wrap  # wrap around when navigating past the ends
        self.lineoff = 0  # vertical scroll offset, in lines
        self.border_off_west = border_off_west
        self.border_off_north = border_off_north
        self.border_off_east = border_off_east
        self.border_off_south = border_off_south
        self.last_lineoff_move = 0  # signed size of the last navigation step
        # Fail fast if the concrete class does not provide the pane height.
        if not hasattr(self, "visible_content_pane_height"):
            log.error(
                'The class "%s" does not have the attribute "%s" required by super class "%s"',
                self.__class__.__name__,
                "visible_content_pane_height",
                BaseInputPane.__name__,
            )
            raise AttributeError("visible_content_pane_height")

    @property
    def visible_content_pane_width(self):
        # Full mode width; subclasses may override to narrow the pane.
        return self.mode.width

    def add_spaces(self, num):
        """Add *num* blank lines as a non-interactive text area."""
        string = ""
        for i in range(num):
            string += "\n"
        self.add_text_area("space %d" % len(self.inputs), string)

    def add_text(self, string):
        """Add a plain, unnamed text area."""
        self.add_text_area("", string)

    def move(self, r, c):
        # Remember where the cursor should be placed after rendering
        # (fields call this back through the ``move`` argument they get).
        self._cursor_row = r
        self._cursor_col = c

    def get_input(self, name):
        """Return the first input with the given name, or None."""
        for e in self.inputs:
            if e.name == name:
                return e

    def _add_input(self, input_element):
        """Append *input_element*, rejecting duplicate names.

        Returns the element, or None when a duplicate name was found.
        """
        for e in self.inputs:
            if isinstance(e, NoInputField):
                continue  # NoInputField entries are exempt from the name check
            if e.name == input_element.name:
                import traceback

                log.warning(
                    'Input element with name "%s" already exists in input pane (%s):\n%s',
                    input_element.name,
                    e,
                    "".join(traceback.format_stack(limit=5)),
                )
                return
        self.inputs.append(input_element)
        # Make the first selectable field the active one, once.
        if self.set_first_input_active and input_element.selectable():
            self.active_input = len(self.inputs) - 1
            self.set_first_input_active = False
        return input_element

    def add_header(self, header, space_above=False, space_below=False, **kwargs):
        return self._add_input(Header(self, header, space_above, space_below, **kwargs))

    def add_info_field(self, name, label, value):
        return self._add_input(InfoField(self, name, label, value))

    def add_text_field(self, name, message, selectable=True, col="+1", **kwargs):
        return self._add_input(
            TextField(self, name, message, selectable=selectable, col=col, **kwargs)
        )

    def add_text_area(self, name, message, **kwargs):
        return self._add_input(TextArea(self, name, message, **kwargs))

    def add_divider_field(self, name, message, **kwargs):
        return self._add_input(DividerField(self, name, message, **kwargs))

    def add_text_input(self, name, message, value="", col="+1", **kwargs):
        """
        Add a text input field

        :param message: string to display above the input field
        :param name: name of the field, for the return callback
        :param value: initial value of the field
        :param complete: should completion be run when tab is hit and this field is active
        """
        return self._add_input(
            TextInput(
                self,
                name,
                message,
                self.move,
                self.visible_content_pane_width,
                value,
                col=col,
                **kwargs,
            )
        )

    def add_select_input(self, name, message, opts, vals, default_index=0, **kwargs):
        return self._add_input(
            SelectInput(self, name, message, opts, vals, default_index, **kwargs)
        )

    def add_checked_input(self, name, message, checked=False, col="+1", **kwargs):
        return self._add_input(
            CheckedInput(self, name, message, checked=checked, col=col, **kwargs)
        )

    def add_checkedplus_input(
        self, name, message, child, checked=False, col="+1", **kwargs
    ):
        return self._add_input(
            CheckedPlusInput(
                self, name, message, child, checked=checked, col=col, **kwargs
            )
        )

    def add_float_spin_input(self, name, message, value=0.0, col="+1", **kwargs):
        return self._add_input(
            FloatSpinInput(self, name, message, self.move, value, col=col, **kwargs)
        )

    def add_int_spin_input(self, name, message, value=0, col="+1", **kwargs):
        return self._add_input(
            IntSpinInput(self, name, message, self.move, value, col=col, **kwargs)
        )

    def add_combo_input(self, name, message, choices, col="+1", **kwargs):
        return self._add_input(
            ComboInput(self, name, message, choices, col=col, **kwargs)
        )

    @overrides(InputKeyHandler)
    def handle_read(self, c):
        """Process key *c*: the active field gets first refusal, then the pane."""
        if not self.inputs:  # no inputs added yet
            return util.ReadState.IGNORED
        ret = self.inputs[self.active_input].handle_read(c)
        if ret != util.ReadState.IGNORED:
            # The field consumed (or changed on) the key.
            if self.immediate_action:
                self.immediate_action_cb(
                    state_changed=False if ret == util.ReadState.READ else True
                )
            return ret
        ret = util.ReadState.READ
        if c == curses.KEY_UP:
            self.move_active_up(1)
        elif c == curses.KEY_DOWN:
            self.move_active_down(1)
        elif c == curses.KEY_HOME:
            self.move_active_up(len(self.inputs))
        elif c == curses.KEY_END:
            self.move_active_down(len(self.inputs))
        elif c == curses.KEY_PPAGE:
            self.move_active_up(self.move_active_many)
        elif c == curses.KEY_NPAGE:
            self.move_active_down(self.move_active_many)
        elif c == util.KEY_ALT_AND_ARROW_UP:
            # Alt+arrows scroll the pane without changing the active field.
            self.lineoff = max(self.lineoff - 1, 0)
        elif c == util.KEY_ALT_AND_ARROW_DOWN:
            tot_height = self.get_content_height()
            self.lineoff = min(
                self.lineoff + 1, tot_height - self.visible_content_pane_height
            )
        elif c == util.KEY_CTRL_AND_ARROW_UP:
            # Ctrl+arrows reorder the active field itself.
            if not self.allow_rearrange:
                return ret
            val = self.inputs.pop(self.active_input)
            self.active_input -= 1
            self.inputs.insert(self.active_input, val)
            if self.immediate_action:
                self.immediate_action_cb(state_changed=True)
        elif c == util.KEY_CTRL_AND_ARROW_DOWN:
            if not self.allow_rearrange:
                return ret
            val = self.inputs.pop(self.active_input)
            self.active_input += 1
            self.inputs.insert(self.active_input, val)
            if self.immediate_action:
                self.immediate_action_cb(state_changed=True)
        else:
            ret = util.ReadState.IGNORED
        return ret

    def get_values(self):
        """Return {name: {'value', 'order', 'active'}} for all value-bearing inputs."""
        vals = {}
        for i, ipt in enumerate(self.inputs):
            if not ipt.has_input():
                continue
            vals[ipt.name] = {
                "value": ipt.get_value(),
                "order": i,
                "active": self.active_input == i,
            }
        return vals

    def immediate_action_cb(self, state_changed=True):
        """Hook invoked after changes when immediate_action is set; default no-op."""
        pass

    def move_active(self, direction, amount):
        """
        direction == -1: Up
        direction == 1: Down
        """
        self.last_lineoff_move = direction * amount
        # Compute the index at which the search for a selectable field stops.
        if direction > 0:
            if self.active_wrap:
                # Wrapping: stop just before coming full circle.
                limit = self.active_input - 1
                if limit < 0:
                    limit = len(self.inputs) + limit
            else:
                limit = len(self.inputs) - 1
        else:
            limit = 0
            if self.active_wrap:
                limit = self.active_input + 1

        def next_move(nc, direction, limit):
            # Step from index nc towards limit, returning the first
            # selectable input; falls back to nc when none is found.
            next_index = nc
            while next_index != limit:
                next_index += direction
                if direction > 0:
                    next_index %= len(self.inputs)
                elif next_index < 0:
                    next_index = len(self.inputs) + next_index
                if self.inputs[next_index].selectable():
                    return next_index
                if next_index == limit:
                    return nc
            return nc

        next_sel = self.active_input
        for a in range(amount):
            cur_sel = next_sel
            next_sel = next_move(next_sel, direction, limit)
            if cur_sel == next_sel:
                # No further selectable input: scroll the view instead.
                tot_height = (
                    self.get_content_height()
                    + self.border_off_north
                    + self.border_off_south
                )
                if direction > 0:
                    self.lineoff = min(
                        self.lineoff + 1, tot_height - self.visible_content_pane_height
                    )
                else:
                    self.lineoff = max(self.lineoff - 1, 0)
        if next_sel is not None:
            self.active_input = next_sel

    def move_active_up(self, amount):
        self.move_active(-1, amount)
        if self.immediate_action:
            self.immediate_action_cb(state_changed=False)

    def move_active_down(self, amount):
        self.move_active(1, amount)
        if self.immediate_action:
            self.immediate_action_cb(state_changed=False)

    def get_content_height(self):
        """Total height (in lines) of all non-skipped inputs."""
        height = 0
        for i, ipt in enumerate(self.inputs):
            if ipt.depend_skip():
                continue
            height += ipt.height
        return height

    def ensure_active_visible(self):
        """Adjust lineoff so the active input lies within the visible pane."""
        start_row = 0
        end_row = self.border_off_north
        for i, ipt in enumerate(self.inputs):
            if ipt.depend_skip():
                continue
            start_row = end_row
            end_row += ipt.height
            if i != self.active_input or not ipt.has_input():
                continue
            height = self.visible_content_pane_height
            if end_row > height + self.lineoff:
                # Active field is below the viewport: scroll down just enough.
                self.lineoff += end_row - (
                    height + self.lineoff
                )  # Correct result depends on parenthesis
            elif start_row < self.lineoff:
                # Active field is above the viewport: scroll up to its top.
                self.lineoff -= self.lineoff - start_row
            break

    def render_inputs(self, focused=False):
        """Render all fields and place the cursor if a field requested it.

        NOTE(review): relies on ``self.width`` and ``self.screen`` being
        supplied by a sibling class — confirm.
        """
        self._cursor_row = -1
        self._cursor_col = -1
        util.safe_curs_set(util.Curser.INVISIBLE)
        self.ensure_active_visible()
        crow = self.border_off_north
        for i, ipt in enumerate(self.inputs):
            if ipt.depend_skip():
                continue
            col = self.border_off_west
            field_width = self.width - self.border_off_east - self.border_off_west
            cursor_offset = self.border_off_west
            if ipt.default_col != -1:
                default_col = int(ipt.default_col)
                # A "+N"/"-N" string is relative to the border column;
                # a bare number is an absolute column.
                if isinstance(ipt.default_col, "".__class__) and ipt.default_col[0] in [
                    "+",
                    "-",
                ]:
                    col += default_col
                    cursor_offset += default_col
                    field_width -= default_col  # Increase to col must be reflected here
                else:
                    col = default_col
            crow += ipt.render(
                self.screen,
                crow,
                width=field_width,
                active=i == self.active_input,
                focused=focused,
                col=col,
                cursor_offset=cursor_offset,
            )
        if self._cursor_row >= 0:
            util.safe_curs_set(util.Curser.VERY_VISIBLE)
            move_cursor(self.screen, self._cursor_row, self._cursor_col)
|
aliceVision | NormalIntegration | __version__ = "1.0"
from meshroom.core import desc
class NormalIntegration(desc.CommandLineNode):
    # Meshroom node description wrapping the aliceVision_normalIntegration CLI.
    commandLine = "aliceVision_normalIntegration {allParams}"
    category = "Photometry"
    documentation = """
TODO.
"""

    # NOTE(review): uid=[0] presumably ties a parameter into the node's
    # cache key (Meshroom convention) — confirm against meshroom.core.desc.
    inputs = [
        desc.File(
            name="inputPath",
            label="Normal Maps Folder",
            description="Path to the folder containing the normal maps and the masks.",
            value="",
            uid=[0],
        ),
        desc.File(
            name="sfmDataFile",
            label="SfMData",
            description="Input SfMData file.",
            value="",
            uid=[0],
        ),
        desc.IntParam(
            name="downscale",
            label="Downscale Factor",
            description="Downscale factor for faster results.",
            value=1,
            range=(1, 10, 1),
            advanced=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name="verboseLevel",
            label="Verbose Level",
            description="Verbosity level (fatal, error, warning, info, debug, trace).",
            value="info",
            values=["fatal", "error", "warning", "info", "debug", "trace"],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="outputPath",
            label="Output Path",
            description="Path to the output folder.",
            value=desc.Node.internalFolder,
            uid=[],
        )
    ]
|
gpodder | download | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# download.py -- Download queue management
# Thomas Perl <thp@perli.net> 2007-09-15
#
# Based on libwget.py (2005-10-29)
#
import glob
import logging
import mimetypes
import os
import os.path
import shutil
import threading
import time
import urllib.error
from abc import ABC, abstractmethod
import gpodder
import requests
from gpodder import registry, util
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, HTTPError, RequestException
from requests.packages.urllib3.exceptions import MaxRetryError
from requests.packages.urllib3.util.retry import Retry
# Module-level logger for this file.
logger = logging.getLogger(__name__)

_ = gpodder.gettext  # gettext shortcut for translatable strings

# Extra redirect retries granted on top of the configured retry budget
# (see DownloadURLOpener.init_session).
REDIRECT_RETRIES = 3
class CustomDownload(ABC):
    """Abstract class for custom downloads; DownloadTask calls retrieve_resume() on it."""

    @property
    @abstractmethod
    def partial_filename(self):
        """
        Full path to the temporary file actually being downloaded (downloaders
        may not support setting a tempname).
        """
        ...

    @partial_filename.setter
    @abstractmethod
    def partial_filename(self, val):
        # Setter counterpart; concrete downloaders store the temp path here.
        ...

    @abstractmethod
    def retrieve_resume(self, tempname, reporthook):
        """
        :param str tempname: temporary filename for the download
        :param func(number, number, number) reporthook: callback for download progress (count, blockSize, totalSize)
        :return dict(str, str), str: (headers, real_url)
        """
        return {}, None
class CustomDownloader(ABC):
    """
    Abstract class for custom downloaders.
    DownloadTask calls custom_downloader to get a CustomDownload.
    """

    @abstractmethod
    def custom_downloader(self, config, episode):
        """
        If this custom downloader has a custom download method (e.g. youtube-dl),
        return a CustomDownload. Else return None.

        :param config: gpodder config (e.g. to get preferred video format)
        :param model.PodcastEpisode episode: episode to download
        :return CustomDownload: object used to download the episode
        """
        return None
class ContentRange(object):
    # Based on:
    # http://svn.pythonpaste.org/Paste/WebOb/trunk/webob/byterange.py
    #
    # Copyright (c) 2007 Ian Bicking and Contributors
    #
    # Permission is hereby granted, free of charge, to any person obtaining
    # a copy of this software and associated documentation files (the
    # "Software"), to deal in the Software without restriction, including
    # without limitation the rights to use, copy, modify, merge, publish,
    # distribute, sublicense, and/or sell copies of the Software, and to
    # permit persons to whom the Software is furnished to do so, subject to
    # the following conditions:
    #
    # The above copyright notice and this permission notice shall be
    # included in all copies or substantial portions of the Software.
    #
    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    """
    Represents the Content-Range header

    This header is ``start-stop/length``, where stop and length can be
    ``*`` (represented as None in the attributes).

    NOTE(review): parse() stores ``end - 1`` as *stop* while __str__ prints
    ``stop + 1``; the two are mutually consistent, and only ``start`` is
    read elsewhere in this module — confirm before relying on ``stop``.
    """

    def __init__(self, start, stop, length):
        # start must be non-negative; stop (when given) must not precede start.
        assert start >= 0, "Bad start: %r" % start
        assert stop is None or (stop >= 0 and stop >= start), "Bad stop: %r" % stop
        self.start = start
        self.stop = stop
        self.length = length

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self)

    def __str__(self):
        # Render back to header form (see the NOTE in the class docstring).
        if self.stop is None:
            stop = "*"
        else:
            stop = self.stop + 1
        if self.length is None:
            length = "*"
        else:
            length = self.length
        return "bytes %s-%s/%s" % (self.start, stop, length)

    def __iter__(self):
        """
        Mostly so you can unpack this, like:

            start, stop, length = res.content_range
        """
        return iter([self.start, self.stop, self.length])

    @classmethod
    def parse(cls, value):
        """
        Parse the header. May return None if it cannot parse.
        """
        if value is None:
            return None
        value = value.strip()
        if not value.startswith("bytes "):
            # Unparsable
            return None
        value = value[len("bytes ") :].strip()
        if "/" not in value:
            # Invalid, no length given
            return None
        range, length = value.split("/", 1)
        if "-" not in range:
            # Invalid, no range
            return None
        start, end = range.split("-", 1)
        try:
            start = int(start)
            if end == "*":
                end = None
            else:
                end = int(end)
            if length == "*":
                length = None
            else:
                length = int(length)
        except ValueError:
            # Parse problem
            return None
        if end is None:
            return cls(start, None, length)
        else:
            return cls(start, end - 1, length)
class DownloadCancelledException(Exception):
    """Signals that a download was cancelled."""
    pass
class DownloadNoURLException(Exception):
    """Signals that there is no URL to download from."""
    pass
class gPodderDownloadHTTPError(Exception):
    """HTTP failure during a download; carries url, status code and message."""

    def __init__(self, url, error_code, error_message):
        self.url = url
        self.error_code = error_code
        self.error_message = error_message
class DownloadURLOpener:
    """HTTP(S) downloader for one podcast channel, with resume support.

    Builds a requests session with a channel-specific retry policy and
    downloads a URL to a local file, resuming when a partial file exists.
    """

    # Sometimes URLs are not escaped correctly - try to fix them
    # (see RFC2396; Section 2.4.3. Excluded US-ASCII Characters)
    # FYI: The omission of "%" in the list is to avoid double escaping!
    ESCAPE_CHARS = dict((ord(c), "%%%x" % ord(c)) for c in ' <>#"{}|\\^[]`')

    def __init__(self, channel, max_retries=3):
        """
        :param channel: podcast channel; provides auth_username/auth_password
        :param int max_retries: retry budget for connect/read/status errors
        """
        super().__init__()
        self.channel = channel
        self.max_retries = max_retries

    def init_session(self):
        """init a session with our own retry codes + retry count"""
        # I add a few retries for redirects but it means that I will allow max_retries + REDIRECT_RETRIES
        # if encountering max_retries connect and REDIRECT_RETRIES read for instance
        retry_strategy = Retry(
            total=self.max_retries + REDIRECT_RETRIES,
            connect=self.max_retries,
            read=self.max_retries,
            redirect=max(REDIRECT_RETRIES, self.max_retries),
            status=self.max_retries,
            status_forcelist=Retry.RETRY_AFTER_STATUS_CODES.union(
                (
                    408,
                    418,
                    504,
                    598,
                    599,
                )
            ),
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        http = requests.Session()
        http.mount("https://", adapter)
        http.mount("http://", adapter)
        return http

    # The following is based on Python's urllib.py "URLopener.retrieve"
    # Also based on http://mail.python.org/pipermail/python-list/2001-October/110069.html
    def retrieve_resume(
        self, url, filename, reporthook=None, data=None, disable_auth=False
    ):
        """Download files from an URL; return (headers, real_url)

        Resumes a download if the local filename exists and
        the server supports download resuming.

        :raises gPodderDownloadHTTPError: on an HTTP error status
        :raises urllib.error.ContentTooShortError: fewer bytes than announced
        """
        current_size = 0
        tfp = None
        headers = {"User-agent": gpodder.user_agent}

        if (
            self.channel.auth_username or self.channel.auth_password
        ) and not disable_auth:
            logger.debug('Authenticating as "%s"', self.channel.auth_username)
            auth = (self.channel.auth_username, self.channel.auth_password)
        else:
            auth = None

        if os.path.exists(filename):
            try:
                current_size = os.path.getsize(filename)
                tfp = open(filename, "ab")
                # If the file exists, then only download the remainder
                if current_size > 0:
                    headers["Range"] = "bytes=%s-" % (current_size)
            except OSError:
                # Narrowed from a bare `except:` — getsize()/open() raise
                # OSError; fall back to a full (non-resumed) download.
                logger.warning("Cannot resume download: %s", filename, exc_info=True)
                tfp = None
                current_size = 0

        if tfp is None:
            tfp = open(filename, "wb")

        # Fix a problem with bad URLs that are not encoded correctly (bug 549)
        url = url.translate(self.ESCAPE_CHARS)

        session = self.init_session()
        with session.get(
            url, headers=headers, stream=True, auth=auth, timeout=gpodder.SOCKET_TIMEOUT
        ) as resp:
            try:
                resp.raise_for_status()
            except HTTPError as e:
                if auth is not None:
                    # Try again without authentication (bug 1296)
                    return self.retrieve_resume(url, filename, reporthook, data, True)
                else:
                    raise gPodderDownloadHTTPError(url, resp.status_code, str(e))

            headers = resp.headers

            if current_size > 0:
                # We told the server to resume - see if she agrees
                # See RFC2616 (206 Partial Content + Section 14.16)
                # XXX check status code here, too...
                range = ContentRange.parse(headers.get("content-range", ""))
                if range is None or range.start != current_size:
                    # Ok, that did not work. Reset the download
                    # TODO: seek and truncate if content-range differs from request
                    tfp.close()
                    tfp = open(filename, "wb")
                    current_size = 0
                    logger.warning("Cannot resume: Invalid Content-Range (RFC2616).")

            result = headers, resp.url
            bs = 1024 * 8
            size = -1
            read = current_size
            blocknum = current_size // bs
            if reporthook:
                if "content-length" in headers:
                    size = int(headers["content-length"]) + current_size
                reporthook(blocknum, bs, size)
            for block in resp.iter_content(bs):
                read += len(block)
                tfp.write(block)
                blocknum += 1
                if reporthook:
                    reporthook(blocknum, bs, size)
        tfp.close()
        del tfp

        # raise exception if actual size does not match content-length header
        if size >= 0 and read < size:
            raise urllib.error.ContentTooShortError(
                "retrieval incomplete: got only %i out " "of %i bytes" % (read, size),
                result,
            )

        return result

    # end code based on urllib.py
class DefaultDownload(CustomDownload):
    """Plain HTTP(S) episode download performed via DownloadURLOpener."""

    def __init__(self, config, episode, url):
        self._config = config
        self.__episode = episode
        self._url = url
        self.__partial_filename = None

    @property
    def partial_filename(self):
        """Temporary file the download is currently written to (or None)."""
        return self.__partial_filename

    @partial_filename.setter
    def partial_filename(self, val):
        self.__partial_filename = val

    def retrieve_resume(self, tempname, reporthook):
        """Download self._url into *tempname*, retrying incomplete downloads.

        :param str tempname: temporary filename for the download
        :param reporthook: progress callback (count, blockSize, totalSize)
        :return: (headers, real_url)
        :raises urllib.error.ContentTooShortError: after the retry budget is spent
        """
        url = self._url
        logger.info("Downloading %s", url)
        max_retries = max(0, self._config.auto.retries)
        downloader = DownloadURLOpener(self.__episode.channel, max_retries=max_retries)
        self.partial_filename = tempname

        # Retry the download on incomplete download (other retries are done by the Retry strategy)
        for retry in range(max_retries + 1):
            if retry > 0:
                logger.info("Retrying download of %s (%d)", url, retry)
                time.sleep(1)
            try:
                headers, real_url = downloader.retrieve_resume(
                    url, tempname, reporthook=reporthook
                )
                # If we arrive here, the download was successful
                break
            except urllib.error.ContentTooShortError:
                # (exception object was bound but unused — removed)
                if retry < max_retries:
                    logger.info("Content too short: %s - will retry.", url)
                    continue
                raise

        return (headers, real_url)
class DefaultDownloader(CustomDownloader):
    """Fallback downloader used when no extension provides a custom one."""

    @staticmethod
    def custom_downloader(config, episode):
        """Return a DefaultDownload for *episode*, resolving its URL first."""
        url = episode.url
        # Resolve URL and start downloading the episode
        resolved = registry.download_url.resolve(config, None, episode, False)
        if resolved:
            url = resolved
        if url == episode.url:
            # don't modify custom urls (#635 - vimeo breaks if * is unescaped)
            url = util.iri_to_url(url.strip())
        return DefaultDownload(config, episode, url)
class DownloadQueueWorker(object):
    """Worker that drains tasks from the shared queue until it is empty.

    *exit_callback* is invoked when the worker retires on an empty queue;
    *continue_check_callback* is consulted before each task and may tell
    the worker to stop early (e.g. when the concurrency limit shrank).
    """

    def __init__(self, queue, exit_callback, continue_check_callback):
        self.queue = queue
        self.exit_callback = exit_callback
        self.continue_check_callback = continue_check_callback

    def __repr__(self):
        # Thread.getName() is deprecated since Python 3.10; use .name instead.
        return threading.current_thread().name

    def run(self):
        logger.info("Starting new thread: %s", self)
        while True:
            # The manager may decide this worker is no longer needed.
            if not self.continue_check_callback(self):
                return
            task = self.queue.get_next() if self.queue.enabled else None
            if not task:
                logger.info("No more tasks for %s to carry out.", self)
                break
            logger.info("%s is processing: %s", self, task)
            task.run()
            task.recycle()
        self.exit_callback(self)
class ForceDownloadWorker(object):
    """One-shot worker that runs a single task, ignoring queue limits."""

    def __init__(self, task):
        self.task = task

    def __repr__(self):
        # Thread.getName() is deprecated since Python 3.10; use .name instead.
        return threading.current_thread().name

    def run(self):
        logger.info("Starting new thread: %s", self)
        logger.info("%s is processing: %s", self, self.task)
        self.task.run()
        self.task.recycle()
class DownloadQueueManager(object):
    """Manages the pool of worker threads that drain the download task queue."""

    def __init__(self, config, queue):
        self._config = config
        self.tasks = queue  # task queue; provides enabled/get_next()/queue_task()
        self.worker_threads_access = threading.RLock()  # guards worker_threads
        self.worker_threads = []

    def disable(self):
        """Stop handing out new tasks (already-running downloads continue)."""
        self.tasks.enabled = False

    def enable(self):
        """Re-enable the queue and spawn workers for any pending tasks."""
        self.tasks.enabled = True
        self.__spawn_threads()

    def __exit_callback(self, worker_thread):
        # Called by a worker that found the queue empty and retires.
        with self.worker_threads_access:
            self.worker_threads.remove(worker_thread)

    def __continue_check_callback(self, worker_thread):
        # Called by a worker before taking the next task; returns False (and
        # retires the worker) when there are more workers than the limit allows.
        with self.worker_threads_access:
            if (
                len(self.worker_threads) > self._config.limit.downloads.concurrent
                and self._config.limit.downloads.enabled
            ):
                self.worker_threads.remove(worker_thread)
                return False
            else:
                return True

    def __spawn_threads(self):
        """Spawn new worker threads if necessary"""
        if not self.tasks.enabled:
            return

        with self.worker_threads_access:
            work_count = self.tasks.available_work_count()
            if self._config.limit.downloads.enabled:
                # always allow at least 1 download
                spawn_limit = max(int(self._config.limit.downloads.concurrent), 1)
            else:
                spawn_limit = self._config.limit.downloads.concurrent_max
            running = len(self.worker_threads)
            logger.info(
                "%r tasks to do, can start at most %r threads, %r threads currently running",
                work_count,
                spawn_limit,
                running,
            )
            for i in range(0, min(work_count, spawn_limit - running)):
                # We have to create a new thread here, there's work to do
                logger.info("Starting new worker thread.")
                worker = DownloadQueueWorker(
                    self.tasks, self.__exit_callback, self.__continue_check_callback
                )
                self.worker_threads.append(worker)
                util.run_in_background(worker.run)

    def update_max_downloads(self):
        """Re-evaluate the worker pool after a configuration change."""
        self.__spawn_threads()

    def force_start_task(self, task):
        """Run *task* immediately on a dedicated worker, bypassing the queue."""
        with task:
            if task.status in (task.QUEUED, task.PAUSED, task.CANCELLED, task.FAILED):
                task.status = task.DOWNLOADING
                worker = ForceDownloadWorker(task)
                util.run_in_background(worker.run)

    def queue_task(self, task):
        """Marks a task as queued"""
        self.tasks.queue_task(task)
        self.__spawn_threads()

    def has_workers(self):
        return len(self.worker_threads) > 0
class DownloadTask(object):
    """An object representing the download task of an episode

    You can create a new download task like this:

        task = DownloadTask(episode, gpodder.config.Config(CONFIGFILE))
        task.status = DownloadTask.QUEUED
        task.run()

    While the download is in progress, you can access its properties:

        task.total_size     # in bytes
        task.progress       # from 0.0 to 1.0
        task.speed          # in bytes per second
        str(task)           # name of the episode
        task.status         # current status
        task.status_changed # True if the status has been changed (see below)
        task.url            # URL of the episode being downloaded
        task.podcast_url    # URL of the podcast this download belongs to
        task.episode        # Episode object of this task

    You can cancel a running download task by setting its status:

        with task:
            task.status = DownloadTask.CANCELLING

    The task will then abort as soon as possible (due to the nature
    of downloading data, this can take a while when the Internet is
    busy).

    The "status_changed" attribute gets set to True every time the
    "status" attribute changes its value. After you get the value of
    the "status_changed" attribute, it is always reset to False:

        if task.status_changed:
            new_status = task.status
            # .. update the UI accordingly ..

    Obviously, this also means that you must have at most *one*
    place in your UI code where you check for status changes and
    broadcast the status updates from there.

    While the download is taking place and after the .run() method
    has finished, you can get the final status to check if the download
    was successful:

        if task.status == DownloadTask.DONE:
            # .. everything ok ..
        elif task.status == DownloadTask.FAILED:
            # .. an error happened, and the
            # error_message attribute is set ..
            print(task.error_message)
        elif task.status == DownloadTask.PAUSED:
            # .. user paused the download ..
        elif task.status == DownloadTask.CANCELLED:
            # .. user cancelled the download ..

    The difference between cancelling and pausing a DownloadTask is
    that the temporary file gets deleted when cancelling, but does
    not get deleted when pausing.

    Be sure to call .removed_from_list() on this task when removing
    it from the UI, so that it can carry out any pending clean-up
    actions (e.g. removing the temporary file when the task has not
    finished successfully; i.e. task.status != DownloadTask.DONE).

    The UI can call the method "notify_as_finished()" to determine if
    this episode still has still to be shown as "finished" download
    in a notification window. This will return True only the first time
    it is called when the status is DONE. After returning True once,
    it will always return False afterwards.

    The same thing works for failed downloads ("notify_as_failed()").
    """
    # Human-readable labels for the states below, indexed by the numeric
    # status constants (NEW and QUEUED deliberately share the "Queued" label)
    STATUS_MESSAGE = (
        _("Queued"),
        _("Queued"),
        _("Downloading"),
        _("Finished"),
        _("Failed"),
        _("Cancelling"),
        _("Cancelled"),
        _("Pausing"),
        _("Paused"),
    )
    # Possible states this download task can be in
    (
        NEW,
        QUEUED,
        DOWNLOADING,
        DONE,
        FAILED,
        CANCELLING,
        CANCELLED,
        PAUSING,
        PAUSED,
    ) = list(range(9))
    # Whether this task represents a file download or a device sync operation
    ACTIVITY_DOWNLOAD, ACTIVITY_SYNCHRONIZE = list(range(2))
    # Minimum time between progress updates (in seconds)
    MIN_TIME_BETWEEN_UPDATES = 1.0
    def __str__(self):
        return self.__episode.title
    def __enter__(self):
        # Tasks are context managers that serialize status manipulation
        return self.__lock.acquire()
    def __exit__(self, type, value, traceback):
        self.__lock.release()
    def __get_status(self):
        return self.__status
    def __set_status(self, status):
        # Setting a new status also raises the status_changed flag
        if status != self.__status:
            self.__status_changed = True
            self.__status = status
    status = property(fget=__get_status, fset=__set_status)
    def __get_status_changed(self):
        # Reading this flag resets it; see class docstring (single consumer)
        if self.__status_changed:
            self.__status_changed = False
            return True
        else:
            return False
    status_changed = property(fget=__get_status_changed)
    def __get_activity(self):
        return self.__activity
    def __set_activity(self, activity):
        self.__activity = activity
    activity = property(fget=__get_activity, fset=__set_activity)
    def __get_url(self):
        return self.__episode.url
    url = property(fget=__get_url)
    def __get_podcast_url(self):
        return self.__episode.channel.url
    podcast_url = property(fget=__get_podcast_url)
    def __get_episode(self):
        return self.__episode
    episode = property(fget=__get_episode)
    def __get_downloader(self):
        return self.__downloader
    def __set_downloader(self, downloader):
        # modifying the downloader will only have effect before the download is started
        self.__downloader = downloader
    downloader = property(fget=__get_downloader, fset=__set_downloader)
    def can_queue(self):
        """True if the task may be (re-)queued from its current state."""
        return self.status in (self.CANCELLED, self.PAUSED, self.FAILED)
    def unpause(self):
        with self:
            # Resume a downloading task that was transitioning to paused
            if self.status == self.PAUSING:
                self.status = self.DOWNLOADING
    def can_pause(self):
        return self.status in (self.DOWNLOADING, self.QUEUED)
    def pause(self):
        with self:
            # Pause a queued download
            if self.status == self.QUEUED:
                self.status = self.PAUSED
            # Request pause of a running download
            elif self.status == self.DOWNLOADING:
                self.status = self.PAUSING
            # download rate limited tasks sleep and take longer to transition from the PAUSING state to the PAUSED state
    def can_cancel(self):
        return self.status in (self.DOWNLOADING, self.QUEUED, self.PAUSED, self.FAILED)
    def cancel(self):
        with self:
            # Cancelling directly is allowed if the task isn't currently downloading
            if self.status in (self.QUEUED, self.PAUSED, self.FAILED):
                self.status = self.CANCELLING
                # Call run, so the partial file gets deleted, and task recycled
                self.run()
            # Otherwise request cancellation
            elif self.status == self.DOWNLOADING:
                self.status = self.CANCELLING
    def can_remove(self):
        return self.status in (self.CANCELLED, self.FAILED, self.DONE)
    def delete_partial_files(self):
        """Remove the .partial file plus any youtube-dl side files."""
        temporary_files = [self.tempname]
        # youtube-dl creates .partial.* files for adaptive formats
        temporary_files += glob.glob("%s.*" % self.tempname)
        for tempfile in temporary_files:
            util.delete_file(tempfile)
    def removed_from_list(self):
        """Clean up the partial file when an unfinished task leaves the UI."""
        if self.status != self.DONE:
            self.delete_partial_files()
    def __init__(self, episode, config, downloader=None):
        assert episode.download_task is None
        self.__lock = threading.RLock()
        self.__status = DownloadTask.NEW
        self.__activity = DownloadTask.ACTIVITY_DOWNLOAD
        self.__status_changed = True
        self.__episode = episode
        self._config = config
        # specify a custom downloader to be used for this download
        self.__downloader = downloader
        # Create the target filename and save it in the database
        self.filename = self.__episode.local_filename(create=True)
        self.tempname = self.filename + ".partial"
        self.total_size = self.__episode.file_size
        self.speed = 0.0
        self.progress = 0.0
        self.error_message = None
        self.custom_downloader = None
        # Have we already shown this task in a notification?
        self._notification_shown = False
        # Variables for speed limit and speed calculation
        self.__start_time = 0
        self.__start_blocks = 0
        self.__limit_rate_value = self._config.limit.bandwidth.kbps
        self.__limit_rate = self._config.limit.bandwidth.enabled
        # Progress update functions
        self._progress_updated = None
        self._last_progress_updated = 0.0
        # If the tempname already exists, set progress accordingly
        if os.path.exists(self.tempname):
            try:
                already_downloaded = os.path.getsize(self.tempname)
                if self.total_size > 0:
                    self.progress = max(
                        0.0, min(1.0, already_downloaded / self.total_size)
                    )
            except OSError as os_error:
                logger.error("Cannot get size for %s", os_error)
        else:
            # "touch self.tempname", so we also get partial
            # files for resuming when the file is queued
            open(self.tempname, "w").close()
        # Store a reference to this task in the episode
        episode.download_task = self
    def reuse(self):
        """Prepare a recycled task for another run."""
        if not os.path.exists(self.tempname):
            # partial file was deleted when cancelled, recreate it
            open(self.tempname, "w").close()
    def notify_as_finished(self):
        """True exactly once after the task reached DONE (for notifications)."""
        if self.status == DownloadTask.DONE:
            if self._notification_shown:
                return False
            else:
                self._notification_shown = True
                return True
        return False
    def notify_as_failed(self):
        """True exactly once after the task reached FAILED (for notifications)."""
        if self.status == DownloadTask.FAILED:
            if self._notification_shown:
                return False
            else:
                self._notification_shown = True
                return True
        return False
    def add_progress_callback(self, callback):
        # callback receives the current progress (0.0..1.0); rate-limited by
        # MIN_TIME_BETWEEN_UPDATES in status_updated()
        self._progress_updated = callback
    def status_updated(self, count, blockSize, totalSize):
        """Downloader progress hook; raises to abort on cancel/pause requests."""
        # We see a different "total size" while downloading,
        # so correct the total size variable in the thread
        if totalSize != self.total_size and totalSize > 0:
            self.total_size = float(totalSize)
            if self.__episode.file_size != self.total_size:
                logger.debug(
                    "Updating file size of %s to %s", self.filename, self.total_size
                )
                self.__episode.file_size = self.total_size
                self.__episode.save()
        if self.total_size > 0:
            self.progress = max(0.0, min(1.0, count * blockSize / self.total_size))
            if self._progress_updated is not None:
                diff = time.time() - self._last_progress_updated
                if diff > self.MIN_TIME_BETWEEN_UPDATES or self.progress == 1.0:
                    self._progress_updated(self.progress)
                    self._last_progress_updated = time.time()
        self.calculate_speed(count, blockSize)
        # Aborting is signalled to the downloader by raising out of this hook
        if self.status == DownloadTask.CANCELLING:
            raise DownloadCancelledException()
        if self.status == DownloadTask.PAUSING:
            raise DownloadCancelledException()
    def calculate_speed(self, count, blockSize):
        """Update self.speed and sleep as needed to enforce the rate limit."""
        # Only sample every fifth block to keep the overhead low
        if count % 5 == 0:
            now = time.time()
            if self.__start_time > 0:
                # Has rate limiting been enabled or disabled?
                if self.__limit_rate != self._config.limit.bandwidth.enabled:
                    # If it has been enabled then reset base time and block count
                    if self._config.limit.bandwidth.enabled:
                        self.__start_time = now
                        self.__start_blocks = count
                    self.__limit_rate = self._config.limit.bandwidth.enabled
                # Has the rate been changed and are we currently limiting?
                if (
                    self.__limit_rate_value != self._config.limit.bandwidth.kbps
                    and self.__limit_rate
                ):
                    self.__start_time = now
                    self.__start_blocks = count
                    self.__limit_rate_value = self._config.limit.bandwidth.kbps
                passed = now - self.__start_time
                if passed > 0:
                    speed = ((count - self.__start_blocks) * blockSize) / passed
                else:
                    speed = 0
            else:
                # First sample: establish the measurement baseline
                self.__start_time = now
                self.__start_blocks = count
                passed = now - self.__start_time
                speed = count * blockSize
            self.speed = float(speed)
            # NOTE(review): speed is in bytes/s but is compared against kbps
            # directly here — confirm whether the unit mismatch is intended
            if (
                self._config.limit.bandwidth.enabled
                and speed > self._config.limit.bandwidth.kbps
            ):
                # calculate the time that should have passed to reach
                # the desired download rate and wait if necessary
                should_have_passed = (
                    (count - self.__start_blocks)
                    * blockSize
                    / (self._config.limit.bandwidth.kbps * 1024.0)
                )
                if should_have_passed > passed:
                    # sleep a maximum of 10 seconds to not cause time-outs
                    delay = min(10.0, float(should_have_passed - passed))
                    time.sleep(delay)
    def recycle(self):
        # Drop the episode's back-reference, except when the task can still
        # be resumed/retried (FAILED or PAUSED)
        if self.status not in (self.FAILED, self.PAUSED):
            self.episode.download_task = None
    def set_episode_download_task(self):
        # Restore the episode's back-reference if it was recycled
        if not self.episode.download_task:
            self.episode.download_task = self
    def run(self):
        """Perform the download; returns True on success, False otherwise."""
        # Speed calculation (re-)starts here
        self.__start_time = 0
        self.__start_blocks = 0
        # If the download has already been cancelled/paused, skip it
        with self:
            if self.status == DownloadTask.CANCELLING:
                self.status = DownloadTask.CANCELLED
                self.__episode._download_error = None
                self.delete_partial_files()
                self.progress = 0.0
                self.speed = 0.0
                self.recycle()
                return False
            if self.status == DownloadTask.PAUSING:
                self.status = DownloadTask.PAUSED
                return False
            # We only start this download if its status is downloading
            if self.status != DownloadTask.DOWNLOADING:
                return False
            # We are downloading this file right now
            self._notification_shown = False
        # Restore a reference to this task in the episode
        # when running a recycled task following a pause or failed
        # see #649
        self.set_episode_download_task()
        url = self.__episode.url
        result = DownloadTask.DOWNLOADING
        try:
            if url == "":
                raise DownloadNoURLException()
            # Resolve the downloader: explicit override, registry match,
            # or the default HTTP downloader
            if self.downloader:
                downloader = self.downloader.custom_downloader(
                    self._config, self.episode
                )
            else:
                downloader = registry.custom_downloader.resolve(
                    self._config, None, self.episode
                )
            if downloader:
                logger.info("Downloading %s with %s", url, downloader)
            else:
                downloader = DefaultDownloader.custom_downloader(
                    self._config, self.episode
                )
            self.custom_downloader = downloader
            headers, real_url = downloader.retrieve_resume(
                self.tempname, self.status_updated
            )
            new_mimetype = headers.get("content-type", self.__episode.mime_type)
            old_mimetype = self.__episode.mime_type
            _basename, ext = os.path.splitext(self.filename)
            if new_mimetype != old_mimetype or util.wrong_extension(ext):
                logger.info("Updating mime type: %s => %s", old_mimetype, new_mimetype)
                old_extension = self.__episode.extension()
                self.__episode.mime_type = new_mimetype
                # don't call local_filename because we'll get the old download name
                new_extension = self.__episode.extension(may_call_local_filename=False)
                # If the desired filename extension changed due to the new
                # mimetype, we force an update of the local filename to fix the
                # extension.
                if old_extension != new_extension or util.wrong_extension(ext):
                    self.filename = self.__episode.local_filename(
                        create=True, force_update=True
                    )
            # In some cases, the redirect of a URL causes the real filename to
            # be revealed in the final URL (e.g. http://gpodder.org/bug/1423)
            if real_url != url and not util.is_known_redirecter(real_url):
                realname, realext = util.filename_from_url(real_url)
                # Only update from redirect if the redirected-to filename has
                # a proper extension (this is needed for e.g. YouTube)
                if not util.wrong_extension(realext):
                    real_filename = "".join((realname, realext))
                    self.filename = self.__episode.local_filename(
                        create=True, force_update=True, template=real_filename
                    )
                    logger.info(
                        "Download was redirected (%s). New filename: %s",
                        real_url,
                        os.path.basename(self.filename),
                    )
            # Look at the Content-disposition header; use if if available
            disposition_filename = util.get_header_param(
                headers, "filename", "content-disposition"
            )
            # Some servers do send the content-disposition header, but provide
            # an empty filename, resulting in an empty string here (bug 1440)
            if disposition_filename is not None and disposition_filename != "":
                # The server specifies a download filename - try to use it
                # filename_from_url to remove query string; see #591
                fn, ext = util.filename_from_url(disposition_filename)
                logger.debug(
                    "converting disposition filename '%s' to local filename '%s%s'",
                    disposition_filename,
                    fn,
                    ext,
                )
                disposition_filename = fn + ext
                self.filename = self.__episode.local_filename(
                    create=True, force_update=True, template=disposition_filename
                )
                new_mimetype, encoding = mimetypes.guess_type(self.filename)
                if new_mimetype is not None:
                    logger.info("Using content-disposition mimetype: %s", new_mimetype)
                    self.__episode.mime_type = new_mimetype
            # Re-evaluate filename and tempname to take care of podcast renames
            # while downloads are running (which will change both file names)
            self.filename = self.__episode.local_filename(create=False)
            self.tempname = os.path.join(
                os.path.dirname(self.filename), os.path.basename(self.tempname)
            )
            shutil.move(self.tempname, self.filename)
            # Model- and database-related updates after a download has finished
            self.__episode.on_downloaded(self.filename)
        except DownloadCancelledException:
            # Raised out of status_updated() for both cancel and pause requests
            logger.info("Download has been cancelled/paused: %s", self)
            if self.status == DownloadTask.CANCELLING:
                self.__episode._download_error = None
                self.delete_partial_files()
                self.progress = 0.0
                self.speed = 0.0
                result = DownloadTask.CANCELLED
        except DownloadNoURLException:
            result = DownloadTask.FAILED
            self.error_message = _("Episode has no URL to download")
        except urllib.error.ContentTooShortError as ctse:
            result = DownloadTask.FAILED
            self.error_message = _("Missing content from server")
        except ConnectionError as ce:
            # special case request exception
            result = DownloadTask.FAILED
            logger.error("Download failed: %s", str(ce))
            d = {"host": ce.args[0].pool.host, "port": ce.args[0].pool.port}
            self.error_message = _("Couldn't connect to server %(host)s:%(port)s" % d)
        except RequestException as re:
            # extract MaxRetryError to shorten the exception message
            if isinstance(re.args[0], MaxRetryError):
                re = re.args[0]
            logger.error('%s while downloading "%s"', str(re), self.__episode.title)
            result = DownloadTask.FAILED
            d = {"error": str(re)}
            self.error_message = _("Request Error: %(error)s") % d
        except IOError as ioe:
            logger.error(
                '%s while downloading "%s": %s',
                ioe.strerror,
                self.__episode.title,
                ioe.filename,
            )
            result = DownloadTask.FAILED
            d = {"error": ioe.strerror, "filename": ioe.filename}
            self.error_message = _("I/O Error: %(error)s: %(filename)s") % d
        except gPodderDownloadHTTPError as gdhe:
            logger.error(
                'HTTP %s while downloading "%s": %s',
                gdhe.error_code,
                self.__episode.title,
                gdhe.error_message,
            )
            result = DownloadTask.FAILED
            d = {"code": gdhe.error_code, "message": gdhe.error_message}
            self.error_message = _("HTTP Error %(code)s: %(message)s") % d
        except Exception as e:
            # Catch-all so a downloader bug marks the task FAILED instead of
            # killing the worker thread
            result = DownloadTask.FAILED
            logger.error("Download failed: %s", str(e), exc_info=True)
            self.error_message = _("Error: %s") % (str(e),)
        with self:
            if result == DownloadTask.DOWNLOADING:
                # Everything went well - we're done (even if the task was cancelled/paused,
                # since it's finished we might as well mark it done)
                self.status = DownloadTask.DONE
                if self.total_size <= 0:
                    self.total_size = util.calculate_size(self.filename)
                    logger.info("Total size updated to %d", self.total_size)
                self.progress = 1.0
                gpodder.user_extensions.on_episode_downloaded(self.__episode)
                return True
            self.speed = 0.0
            if result == DownloadTask.FAILED:
                self.status = DownloadTask.FAILED
                self.__episode._download_error = self.error_message
            # cancelled/paused -- update state to mark it as safe to manipulate this task again
            elif self.status == DownloadTask.PAUSING:
                self.status = DownloadTask.PAUSED
            elif self.status == DownloadTask.CANCELLING:
                self.status = DownloadTask.CANCELLED
            # We finished, but not successfully (at least not really)
            return False
|
Models | MaterialBrandsModel | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from cura.Machines.Models.BaseMaterialsModel import BaseMaterialsModel
from PyQt6.QtCore import Qt, pyqtSignal
from PyQt6.QtQml import QQmlEngine
from UM.Qt.ListModel import ListModel
class MaterialTypesModel(ListModel):
    """List model with one entry per material type within a brand.

    Roles: "name" (the material type), "brand" (the owning brand) and
    "colors" (a nested model holding the specific materials of that type).
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        # Keep ownership on the C++ side so QML does not garbage-collect us.
        QQmlEngine.setObjectOwnership(self, QQmlEngine.ObjectOwnership.CppOwnership)
        for offset, role in enumerate(("name", "brand", "colors"), start=1):
            self.addRoleName(Qt.ItemDataRole.UserRole + offset, role)
class MaterialBrandsModel(BaseMaterialsModel):
    """Model listing material brands, each carrying a nested MaterialTypesModel.

    Menu structure produced:
        Brand -> Material Type -> Specific Material
    e.g.  Ultimaker -> PLA -> Yellow PLA / Black PLA / ...
    """

    extruderPositionChanged = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        # Keep ownership on the C++ side so QML does not garbage-collect us.
        QQmlEngine.setObjectOwnership(self, QQmlEngine.ObjectOwnership.CppOwnership)
        self.addRoleName(Qt.ItemDataRole.UserRole + 1, "name")
        self.addRoleName(Qt.ItemDataRole.UserRole + 2, "material_types")
        self._update()

    def _collectBrandTree(self):
        """Group the available materials as {brand: {material_type: [item, ...]}}.

        Removed, hidden and generic materials are skipped entirely.
        """
        tree = {}
        for root_material_id, container_node in self._available_materials.items():
            # Do not include the materials from a to-be-removed package
            if bool(container_node.getMetaDataEntry("removed", False)):
                continue
            # Ignore materials that are marked as not visible for whatever reason
            if not bool(container_node.getMetaDataEntry("visible", True)):
                continue
            brand = container_node.getMetaDataEntry("brand", "")
            # Generics get their own menu elsewhere; skip them here
            if brand.lower() == "generic":
                continue
            material_type = container_node.getMetaDataEntry("material", "")
            # Register the brand/type bucket even if the item below is empty
            bucket = tree.setdefault(brand, {}).setdefault(material_type, [])
            item = self._createMaterialItem(root_material_id, container_node)
            if item:
                bucket.append(item)
        return tree

    def _update(self):
        if not self._canUpdate():
            return
        super()._update()
        # Turn the nested dict into nested list models, sorting every level
        # case-insensitively by name.
        by_name = lambda entry: entry["name"].upper()
        brand_items = []
        for brand, types_dict in self._collectBrandTree().items():
            type_items = []
            for material_type, materials in types_dict.items():
                colors_model = BaseMaterialsModel()
                colors_model.setItems(sorted(materials, key=by_name))
                type_items.append(
                    {"name": material_type, "brand": brand, "colors": colors_model}
                )
            types_model = MaterialTypesModel()
            types_model.setItems(sorted(type_items, key=by_name))
            brand_items.append({"name": brand, "material_types": types_model})
        brand_items.sort(key=by_name)
        self.setItems(brand_items)
|
chardet | utf8prober | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
    """Charset prober that recognizes UTF-8 via a coding state machine."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(UTF8SMModel)
        self.reset()

    def reset(self):
        """Reset the prober and the state machine for a fresh buffer."""
        CharSetProber.reset(self)
        self._mCodingSM.reset()
        # Number of complete multi-byte characters seen so far
        self._mNumOfMBChar = 0

    def get_charset_name(self):
        return "utf-8"

    def feed(self, aBuf):
        """Feed bytes through the state machine; return the detection state."""
        for octet in aBuf:
            state = self._mCodingSM.next_state(octet)
            if state == constants.eError:
                # Invalid UTF-8 sequence: rule this charset out
                self._mState = constants.eNotMe
                break
            if state == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            if state == constants.eStart and self._mCodingSM.get_current_charlen() >= 2:
                # A multi-byte character just completed
                self._mNumOfMBChar += 1
        if self.get_state() == constants.eDetecting:
            # Enough evidence? Short-circuit to a positive result.
            if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        unlike = 0.99
        if self._mNumOfMBChar < 6:
            # Each multi-byte char halves the probability of a false positive
            # (0.5 is a power of two, so the power form is bit-exact with
            # repeated multiplication)
            unlike *= ONE_CHAR_PROB ** self._mNumOfMBChar
            return 1.0 - unlike
        return unlike
|
httpie | encoding | from typing import Tuple, Union
from charset_normalizer import from_bytes
from charset_normalizer.constant import TOO_SMALL_SEQUENCE
UTF8 = "utf-8"
ContentBytes = Union[bytearray, bytes]
def detect_encoding(content: ContentBytes) -> str:
    """Guess the character encoding of `content`.

    We default to UTF-8 if text too short, because the detection
    can return a random encoding leading to confusing results
    given the `charset_normalizer` version (< 2.0.5).

    >>> too_short = ']"foo"'
    >>> detected = from_bytes(too_short.encode()).best().encoding
    >>> detected
    'ascii'
    >>> too_short.encode().decode(detected)
    ']"foo"'
    """
    if len(content) <= TOO_SMALL_SEQUENCE:
        return UTF8
    best_match = from_bytes(bytes(content)).best()
    return best_match.encoding if best_match else UTF8
def smart_decode(content: ContentBytes, encoding: str) -> Tuple[str, str]:
    """Decode `content` using the given `encoding`.

    If no `encoding` is provided, the best effort is to guess it from `content`.
    Undecodable bytes are replaced with U+FFFD.

    Returns a (text, encoding_used) pair.
    """
    chosen = encoding or detect_encoding(content)
    return content.decode(chosen, "replace"), chosen
def smart_encode(content: str, encoding: str) -> bytes:
    """Encode `content` using the given `encoding`.

    Unencodable characters are replaced (with '?' for most codecs).
    """
    return content.encode(encoding, errors="replace")
|
service | esiAccess | # noinspection PyPackageRequirements
import base64
import datetime
import hashlib
import json
import os
import secrets
import time
import uuid
from collections import namedtuple
from datetime import timedelta
from urllib.parse import urlencode
import config
from jose import jwt
from jose.exceptions import ExpiredSignatureError, JWTClaimsError, JWTError
from logbook import Logger
from requests import Session
from requests_cache import CachedSession
from service.const import EsiEndpoints, EsiSsoMode
from service.settings import EsiSettings, NetworkSettings
pyfalog = Logger(__name__)
# OAuth scopes requested from the EVE SSO: read skills, read/write fittings
scopes = [
    "esi-skills.read_skills.v1",
    "esi-fittings.read_fittings.v1",
    "esi-fittings.write_fittings.v1",
]
# (sso, esi) hostname pair for one game server
ApiBase = namedtuple("ApiBase", ["sso", "esi"])
# Known EVE servers mapped to their SSO and ESI hostnames
supported_servers = {
    "Tranquility": ApiBase("login.eveonline.com", "esi.evetech.net"),
    "Singularity": ApiBase("sisilogin.testeveonline.com", "esi.evetech.net"),
    "Serenity": ApiBase("login.evepc.163.com", "esi.evepc.163.com"),
}
class GenericSsoError(Exception):
    """Raised for generic SSO errors that are not tied to a specific API call."""
class APIException(Exception):
    """Raised when an SSO/ESI HTTP call returns an error response.

    Keeps the request URL, HTTP status code and decoded JSON payload so
    callers can inspect the failure.
    """

    def __init__(self, url, code, json_response):
        self.url = url
        self.status_code = code
        self.response = json_response
        super().__init__(str(self))

    def __str__(self):
        # Prefer the OAuth-style error description, then a generic message
        for key in ("error_description", "message"):
            if key in self.response:
                return "HTTP Error %s: %s" % (self.status_code, self.response[key])
        return "HTTP Error %s" % self.status_code
class EsiAccess:
    """Low-level EVE SSO (OAuth2 with PKCE) and ESI HTTP access layer."""
    def __init__(self):
        self.settings = EsiSettings.getInstance()
        # Hostname pair (sso, esi) for the configured game server
        self.server_base: ApiBase = supported_servers[self.settings.get("server")]
        # session request stuff
        self._session = Session()
        self._basicHeaders = {
            "Accept": "application/json",
            "User-Agent": ("pyfa v{}".format(config.version)),
        }
        self._session.headers.update(self._basicHeaders)
        self._session.proxies = (
            NetworkSettings.getInstance().getProxySettingsInRequestsFormat()
        )
        # Set up cached session. This is only used for SSO meta data for now, but can be expanded to actually handle
        # various ESI caching (using ETag, for example) in the future
        cached_session = CachedSession(
            os.path.join(config.savePath, config.ESI_CACHE),
            backend="sqlite",
            cache_control=True,  # Use Cache-Control headers for expiration, if available
            expire_after=timedelta(days=1),  # Otherwise expire responses after one day
            stale_if_error=True,  # In case of request errors, use stale cache data if possible
        )
        cached_session.headers.update(self._basicHeaders)
        cached_session.proxies = (
            NetworkSettings.getInstance().getProxySettingsInRequestsFormat()
        )
        # NOTE: constructing this object performs network I/O — it fetches
        # the SSO OAuth metadata document and the JWK set used to validate
        # access tokens (both served from the cached session above)
        meta_call = cached_session.get(
            "https://%s/.well-known/oauth-authorization-server" % self.server_base.sso
        )
        meta_call.raise_for_status()
        self.server_meta = meta_call.json()
        jwks_call = cached_session.get(self.server_meta["jwks_uri"])
        jwks_call.raise_for_status()
        self.jwks = jwks_call.json()
    @property
    def sso_url(self):
        # Base URL of the SSO service (v2 endpoints)
        return "https://%s/v2" % self.server_base.sso
    @property
    def esi_url(self):
        # Base URL of the ESI API
        return "https://%s" % self.server_base.esi
    @property
    def oauth_authorize(self):
        # Authorization endpoint advertised by the SSO metadata document
        return self.server_meta["authorization_endpoint"]
    @property
    def oauth_token(self):
        # Token endpoint advertised by the SSO metadata document
        return self.server_meta["token_endpoint"]
    @property
    def client_id(self):
        # User-configured client ID takes precedence over the built-in one
        return self.settings.get("clientID") or config.API_CLIENT_ID
    @staticmethod
    def update_token(char, tokenResponse):
        """helper function to update token data from SSO response"""
        char.accessToken = tokenResponse["access_token"]
        char.accessTokenExpires = datetime.datetime.fromtimestamp(
            time.time() + tokenResponse["expires_in"]
        )
        if "refresh_token" in tokenResponse:
            # Refresh tokens are stored encrypted at rest
            char.refreshToken = config.cipher.encrypt(
                tokenResponse["refresh_token"].encode()
            )
    def get_login_uri(self, redirect=None):
        """Build the SSO authorization URL for the PKCE login flow.

        Side effects: stores `self.state` (CSRF check) and
        `self.code_verifier` (PKCE secret) for the matching auth() call.
        """
        self.state = str(uuid.uuid4())
        # Generate the PKCE code challenge
        self.code_verifier = base64.urlsafe_b64encode(secrets.token_bytes(32))
        m = hashlib.sha256()
        m.update(self.code_verifier)
        d = m.digest()
        # Base64url without padding, per RFC 7636
        code_challenge = base64.urlsafe_b64encode(d).decode().replace("=", "")
        state_arg = {
            "mode": self.settings.get("loginMode"),
            "redirect": redirect,
            "state": self.state,
        }
        args = {
            "response_type": "code",
            "redirect_uri": config.SSO_CALLBACK,
            "client_id": self.client_id,
            "scope": " ".join(scopes),
            "code_challenge": code_challenge,
            "code_challenge_method": "S256",
            "state": base64.b64encode(bytes(json.dumps(state_arg), "utf-8")),
        }
        return "%s?%s" % (self.oauth_authorize, urlencode(args))
    def get_oauth_header(self, token):
        """Return the Bearer Authorization header required in oauth calls
        :return: a dict with the authorization header
        """
        return {"Authorization": "Bearer %s" % token}
    def auth(self, code):
        """Exchange an authorization code for tokens (PKCE flow).

        Returns (token_response_json, decoded_jwt).
        """
        values = {
            "grant_type": "authorization_code",
            "code": code,
            "client_id": self.client_id,
            "code_verifier": self.code_verifier,
        }
        res = self.token_call(values)
        json_res = res.json()
        decoded_jwt = self.validate_eve_jwt(json_res["access_token"])
        return json_res, decoded_jwt
    def refresh(self, ssoChar):
        """Refresh the character's access token and store the new token data."""
        # todo: properly handle invalid refresh token
        values = {
            "grant_type": "refresh_token",
            "refresh_token": config.cipher.decrypt(ssoChar.refreshToken).decode(),
            "client_id": self.client_id,
        }
        res = self.token_call(values)
        json_res = res.json()
        self.update_token(ssoChar, json_res)
        return json_res
    def token_call(self, values):
        """POST to the SSO token endpoint; raises APIException on non-200."""
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Host": self.server_base.sso,
        }
        res = self._session.post(
            self.server_meta["token_endpoint"],
            data=values,
            headers=headers,
        )
        if res.status_code != 200:
            raise APIException(
                self.server_meta["token_endpoint"], res.status_code, res.json()
            )
        return res
    def validate_eve_jwt(self, jwt_token):
        """Validate a JWT token retrieved from the EVE SSO.
        Ignores the `aud` claim in token due to avoid unexpected breaking
        changes to ESI.
        Args:
            jwt_token: A JWT token originating from the EVE SSO
        Returns
            dict: The contents of the validated JWT token if there are no
            validation errors
        Raises:
            GenericSsoError: if the JWK set is malformed, the token is
            expired, its signature is invalid, or the issuer claim is wrong
        """
        try:
            jwk_sets = self.jwks["keys"]
        except KeyError as e:
            raise GenericSsoError(
                "Something went wrong when retrieving the JWK set. The returned "
                "payload did not have the expected key {}. \nPayload returned "
                "from the SSO looks like: {}".format(e, self.jwks)
            )
        # Pick the RS256 key from the set for signature verification
        jwk_set = next((item for item in jwk_sets if item["alg"] == "RS256"))
        try:
            return jwt.decode(
                jwt_token,
                jwk_set,
                algorithms=jwk_set["alg"],
                issuer=[self.server_base.sso, "https://%s" % self.server_base.sso],
                # ignore "aud" claim: https://tweetfleet.slack.com/archives/C30KX8UUX/p1648495011905969
                options={
                    "verify_aud": False,
                    "verify_exp": self.settings.get("enforceJwtExpiration"),
                },
            )
        except ExpiredSignatureError as e:
            raise GenericSsoError("The JWT token has expired: {}".format(str(e)))
        except JWTError as e:
            raise GenericSsoError("The JWT signature was invalid: {}".format(str(e)))
        except JWTClaimsError as e:
            raise GenericSsoError(
                "The issuer claim was not from login.eveonline.com or "
                "https://login.eveonline.com: {}".format(str(e))
            )
    def _before_request(self, ssoChar):
        # Reset headers, refresh an expired token if needed, and attach the
        # Bearer header for authenticated calls (ssoChar may be None)
        self._session.headers.clear()
        self._session.headers.update(self._basicHeaders)
        if ssoChar is None:
            return
        if ssoChar.is_token_expired():
            pyfalog.info("Refreshing token for {}".format(ssoChar.characterName))
            self.refresh(ssoChar)
        if ssoChar.accessToken is not None:
            self._session.headers.update(self.get_oauth_header(ssoChar.accessToken))
    def _after_request(self, resp):
        # Surface server-side deprecation warnings and raise on HTTP errors
        if "warning" in resp.headers:
            pyfalog.warn("{} - {}".format(resp.headers["warning"], resp.url))
        if resp.status_code >= 400:
            raise APIException(resp.url, resp.status_code, resp.json())
        return resp
    def get(self, ssoChar, endpoint, **kwargs):
        """GET an ESI endpoint template, formatting it with **kwargs."""
        self._before_request(ssoChar)
        endpoint = endpoint.format(**kwargs)
        return self._after_request(
            self._session.get("{}{}".format(self.esi_url, endpoint))
        )
    def post(self, ssoChar, endpoint, json, **kwargs):
        """POST `json` (pre-serialized body) to an ESI endpoint template."""
        self._before_request(ssoChar)
        endpoint = endpoint.format(**kwargs)
        return self._after_request(
            self._session.post("{}{}".format(self.esi_url, endpoint), data=json)
        )
    def delete(self, ssoChar, endpoint, **kwargs):
        """DELETE an ESI endpoint template, formatting it with **kwargs."""
        self._before_request(ssoChar)
        endpoint = endpoint.format(**kwargs)
        return self._after_request(
            self._session.delete("{}{}".format(self.esi_url, endpoint))
        )
    # todo: move these off to another class which extends this one. This class should only handle the low level
    # authentication and
    def getDynamicItem(self, typeID, itemID):
        """Fetch a mutated (dynamic) item; no authentication required."""
        return self.get(
            None, EsiEndpoints.DYNAMIC_ITEM.value, type_id=typeID, item_id=itemID
        )
    def getSkills(self, char):
        """Fetch the character's trained skills."""
        return self.get(
            char, EsiEndpoints.CHAR_SKILLS.value, character_id=char.characterID
        )
    def getSecStatus(self, char):
        """Fetch the character's public info (includes security status)."""
        return self.get(char, EsiEndpoints.CHAR.value, character_id=char.characterID)
    def getFittings(self, char):
        """Fetch the character's saved fittings."""
        return self.get(
            char, EsiEndpoints.CHAR_FITTINGS.value, character_id=char.characterID
        )
    def postFitting(self, char, json_str):
        """Upload a fitting for the character."""
        # @todo: new fitting ID can be recovered from resp.data,
        return self.post(
            char,
            EsiEndpoints.CHAR_FITTINGS.value,
            json_str,
            character_id=char.characterID,
        )
    def delFitting(self, char, fittingID):
        """Delete one of the character's saved fittings."""
        return self.delete(
            char,
            EsiEndpoints.CHAR_DEL_FIT.value,
            character_id=char.characterID,
            fitting_id=fittingID,
        )
|
tz | win | # This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six import text_type
from six.moves import winreg
try:
import ctypes
from ctypes import wintypes
except ValueError:
# ValueError is raised on non-Windows systems for some horrible reason.
raise ImportError("Running tzwin on non-Windows system")
from ._common import _tzinfo, tzname_in_python2, tzrangebase
__all__ = ["tzwin", "tzwinlocal", "tzres"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    """Probe the registry once to decide whether this machine uses the
    NT-style or 9x-style time zone key, and return that key path."""
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
    try:
        # If the NT key opens, this is an NT-family Windows.
        winreg.OpenKey(handle, TZKEYNAMENT).Close()
        keyname = TZKEYNAMENT
    except WindowsError:
        keyname = TZKEYNAME9X
    handle.Close()
    return keyname
TZKEYNAME = _settzkeyname()
class tzres(object):
    """
    Class for accessing `tzres.dll`, which contains timezone name related
    resources.

    .. versionadded:: 2.5.0
    """

    p_wchar = ctypes.POINTER(wintypes.WCHAR)  # Pointer to a wide char

    def __init__(self, tzres_loc="tzres.dll"):
        # Load the user32 DLL so we can load strings from tzres
        user32 = ctypes.WinDLL("user32")

        # Specify the LoadStringW function
        user32.LoadStringW.argtypes = (
            wintypes.HINSTANCE,
            wintypes.UINT,
            wintypes.LPWSTR,
            ctypes.c_int,
        )
        self.LoadStringW = user32.LoadStringW
        self._tzres = ctypes.WinDLL(tzres_loc)
        self.tzres_loc = tzres_loc

    def load_name(self, offset):
        """
        Load a timezone name from a DLL offset (integer).

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.load_name(112))
        'Eastern Standard Time'

        :param offset:
            A positive integer value referring to a string from the tzres dll.

        ..note:
            Offsets found in the registry are generally of the form
            `@tzres.dll,-114`. The offset in this case is 114, not -114.
        """
        resource = self.p_wchar()
        lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
        nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
        return resource[:nchar]

    def name_from_string(self, tzname_str):
        """
        Parse strings as returned from the Windows registry into the time zone
        name as defined in the registry.

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.name_from_string('@tzres.dll,-251'))
        'Dateline Daylight Time'
        >>> print(tzr.name_from_string('Eastern Standard Time'))
        'Eastern Standard Time'

        :param tzname_str:
            A timezone name string as returned from a Windows registry key.

        :return:
            Returns the localized timezone string from tzres.dll if the string
            is of the form `@tzres.dll,-offset`, else returns the input string.
        """
        if not tzname_str.startswith("@"):
            return tzname_str

        name_splt = tzname_str.split(",-")
        try:
            offset = int(name_splt[1])
        # Catch only the parsing failures: a missing ",-" part (IndexError)
        # or a non-numeric offset (ValueError). The previous bare `except:`
        # also swallowed SystemExit and KeyboardInterrupt.
        except (IndexError, ValueError):
            raise ValueError("Malformed timezone string.")
        return self.load_name(offset)
class tzwinbase(tzrangebase):
    """tzinfo class based on win32's timezones available in the registry."""

    # Every attribute that participates in equality: both offsets, the full
    # std/dst transition rules, and the abbreviations.
    _comparable_fields = (
        "_std_offset",
        "_dst_offset",
        "_stddayofweek",
        "_dstdayofweek",
        "_stdweeknumber",
        "_dstweeknumber",
        "_stdhour",
        "_dsthour",
        "_stdminute",
        "_dstminute",
        "_std_abbr",
        "_dst_abbr",
    )

    def __init__(self):
        raise NotImplementedError("tzwinbase is an abstract base class")

    def __eq__(self, other):
        # Compare on all relevant dimensions, including name.
        if not isinstance(other, tzwinbase):
            return NotImplemented
        return all(
            getattr(self, field) == getattr(other, field)
            for field in self._comparable_fields
        )

    @staticmethod
    def list():
        """Return a list of all time zones known to the system."""
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
                count = winreg.QueryInfoKey(tzkey)[0]
                return [winreg.EnumKey(tzkey, i) for i in range(count)]

    def display(self):
        """Return the registry's human-readable display string for the zone."""
        return self._display

    def transitions(self, year):
        """
        For a given year, get the DST on and off transition times, expressed
        always on the standard time side. For zones with no transitions, this
        function returns ``None``.

        :param year:
            The year whose transitions you would like to query.

        :return:
            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
            ``(dston, dstoff)`` for zones with an annual DST transition, or
            ``None`` for fixed offset zones.
        """
        if not self.hasdst:
            return None

        dston = picknthweekday(
            year,
            self._dstmonth,
            self._dstdayofweek,
            self._dsthour,
            self._dstminute,
            self._dstweeknumber,
        )
        dstoff = picknthweekday(
            year,
            self._stdmonth,
            self._stddayofweek,
            self._stdhour,
            self._stdminute,
            self._stdweeknumber,
        )

        # Ambiguous dates default to the STD side
        dstoff -= self._dst_base_offset
        return dston, dstoff

    def _get_hasdst(self):
        # A zero DST start month means the zone never switches.
        return self._dstmonth != 0

    @property
    def _dst_base_offset(self):
        return self._dst_base_offset_
class tzwin(tzwinbase):
    """Time zone object built from a named registry time zone, e.g.
    ``tzwin('Eastern Standard Time')``."""

    def __init__(self, name):
        self._name = name

        # multiple contexts only possible in 2.7 and 3.1, we still support 2.6
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            # Escaped backslash: "\{" in a plain literal is an invalid escape
            # sequence (SyntaxWarning on modern Python); runtime value is
            # identical to the previous "{kn}\{name}".
            tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
            with winreg.OpenKey(handle, tzkeyname) as tzkey:
                keydict = valuestodict(tzkey)

        self._std_abbr = keydict["Std"]
        self._dst_abbr = keydict["Dlt"]
        self._display = keydict["Display"]

        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        tup = struct.unpack("=3l16h", keydict["TZI"])
        stdoffset = -tup[0] - tup[1]  # Bias + StandardBias * -1
        dstoffset = stdoffset - tup[2]  # + DaylightBias * -1
        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)

        # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
        (
            self._stdmonth,
            self._stddayofweek,  # Sunday = 0
            self._stdweeknumber,  # Last = 5
            self._stdhour,
            self._stdminute,
        ) = tup[4:9]

        (
            self._dstmonth,
            self._dstdayofweek,  # Sunday = 0
            self._dstweeknumber,  # Last = 5
            self._dsthour,
            self._dstminute,
        ) = tup[12:17]

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()

    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)

    def __reduce__(self):
        # Pickle by zone name; reconstruction re-reads the registry.
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """Time zone object for the machine's currently configured local zone."""

    def __init__(self):
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
                keydict = valuestodict(tzlocalkey)

            self._std_abbr = keydict["StandardName"]
            self._dst_abbr = keydict["DaylightName"]

            try:
                # Escaped backslash: "\{" in a plain literal is an invalid
                # escape sequence (SyntaxWarning on modern Python); runtime
                # value is identical to the previous "{kn}\{sn}".
                tzkeyname = text_type("{kn}\\{sn}").format(
                    kn=TZKEYNAME, sn=self._std_abbr
                )
                with winreg.OpenKey(handle, tzkeyname) as tzkey:
                    _keydict = valuestodict(tzkey)
                    self._display = _keydict["Display"]
            except OSError:
                # The localized name may not resolve to a registry entry.
                self._display = None

        stdoffset = -keydict["Bias"] - keydict["StandardBias"]
        dstoffset = stdoffset - keydict["DaylightBias"]

        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)

        # For reasons unclear, in this particular key, the day of week has been
        # moved to the END of the SYSTEMTIME structure.
        tup = struct.unpack("=8h", keydict["StandardStart"])
        (
            self._stdmonth,
            self._stdweeknumber,  # Last = 5
            self._stdhour,
            self._stdminute,
        ) = tup[1:5]

        self._stddayofweek = tup[7]

        tup = struct.unpack("=8h", keydict["DaylightStart"])
        (
            self._dstmonth,
            self._dstweeknumber,  # Last = 5
            self._dsthour,
            self._dstminute,
        ) = tup[1:5]

        self._dstdayofweek = tup[7]

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()

    def __repr__(self):
        return "tzwinlocal()"

    def __str__(self):
        # str will return the standard name, not the daylight name.
        return "tzwinlocal(%s)" % repr(self._std_abbr)

    def __reduce__(self):
        # Always reconstructed from the current machine configuration.
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """dayofweek == 0 means Sunday, whichweek 5 means last instance"""
    first = datetime.datetime(year, month, 1, hour, minute)

    # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style
    # (0-6), because 7 % 7 = 0
    first_match = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)

    candidate = first_match + (whichweek - 1) * ONEWEEK
    # "Week 5" (last instance) can overshoot into the next month; step back
    # one week in that case.
    if candidate.month != month:
        candidate -= ONEWEEK
    return candidate
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    dout = {}
    size = winreg.QueryInfoKey(key)[1]
    tz_res = None

    for i in range(size):
        key_name, value, dtype = winreg.EnumValue(key, i)
        if dtype in (winreg.REG_DWORD, winreg.REG_DWORD_LITTLE_ENDIAN):
            # DWORDs come back unsigned - reinterpret the top bit as sign.
            if value & (1 << 31):
                value -= 1 << 32
        elif dtype == winreg.REG_SZ:
            # If it's a reference to the tzres DLL, load the actual string,
            # instantiating the (expensive) tzres helper at most once.
            if value.startswith("@tzres"):
                tz_res = tz_res or tzres()
                value = tz_res.name_from_string(value)

            value = value.rstrip("\x00")  # Remove trailing nulls
        dout[key_name] = value

    return dout
|
context | resize | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2021 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from sk1 import _, events
from sk1.pwidgets import RatioToggle, UnitSpin
from sk1.resources import get_bmp, icons
from .base import CtxPlugin
class ResizePlugin(CtxPlugin):
    """Context-panel plugin showing the selection's width/height spinners
    and a keep-ratio toggle; edits are applied as an affine transform that
    scales the selection about its bbox center."""

    name = "ResizePlugin"
    update_flag = False
    width_spin = None
    height_spin = None
    keep_ratio = None

    def __init__(self, app, parent):
        CtxPlugin.__init__(self, app, parent)
        for event in (events.DOC_CHANGED, events.SELECTION_CHANGED):
            events.connect(event, self.update)

    def build(self):
        """Lay out icon, spinners, W:H glyph and the ratio toggle."""
        self.pack(get_bmp(self, icons.CTX_OBJECT_RESIZE, _("Selection size")), padding=2)
        self.pack((2, 2))

        self.width_spin = UnitSpin(self.app, self, onchange=self.w_changes)
        self.pack(self.width_spin, padding=2)
        self.pack(get_bmp(self, icons.CTX_W_ON_H), padding=1)
        self.height_spin = UnitSpin(self.app, self, onchange=self.h_changes)
        self.pack(self.height_spin, padding=2)

        self.pack((2, 2))
        self.keep_ratio = RatioToggle(self)
        self.pack(self.keep_ratio, padding=2)

    def update(self, *args):
        """Mirror the current selection's size into the spinners; the
        update_flag suppresses the resulting onchange callbacks."""
        if not self.insp.is_selection():
            return
        bbox = self.app.current_doc.selection.bbox
        self.update_flag = True
        self.width_spin.set_point_value(bbox[2] - bbox[0])
        self.height_spin.set_point_value(bbox[3] - bbox[1])
        self.update_flag = False

    def w_changes(self, *_args):
        self.user_changes(True)

    def h_changes(self, *_args):
        self.user_changes(False)

    def user_changes(self, hr=True):
        """Apply a user-entered size; `hr` is True when width changed last,
        which decides the driving dimension in keep-ratio mode."""
        if self.update_flag or not self.insp.is_selection():
            return
        doc = self.app.current_doc
        bbox = doc.selection.bbox
        w = bbox[2] - bbox[0]
        h = bbox[3] - bbox[1]
        center_x = bbox[0] + w / 2.0
        center_y = bbox[1] + h / 2.0
        new_w = self.width_spin.get_point_value()
        new_h = self.height_spin.get_point_value()
        if w == new_w and h == new_h:
            return
        # A zero value would degenerate the transform - restore and bail.
        if not new_w:
            self.width_spin.set_point_value(w)
            return
        if not new_h:
            self.height_spin.set_point_value(h)
            return
        m11 = new_w / w
        m22 = new_h / h
        if m11 == m22 == 1.0:
            return
        if self.keep_ratio.get_active():
            # Uniform scale driven by whichever spinner the user touched.
            m11 = m22 = m11 if hr else m22
        # Translation keeps the bbox center fixed under the scale.
        trafo = [m11, 0.0, 0.0, m22, center_x * (1 - m11), center_y * (1 - m22)]
        doc.api.transform_selected(trafo)
|
PartDesignTests | TestBoolean | # ***************************************************************************
# * Copyright (c) 2011 Juergen Riegel <FreeCAD@juergen-riegel.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import unittest
import FreeCAD
App = FreeCAD
class TestBoolean(unittest.TestCase):
    """Tests for the PartDesign::Boolean feature (fuse, cut, common).

    Each case builds two overlapping 10x10x10 boxes - the second shifted by
    -5 on X so the overlap volume is 500 - and checks the resulting volume
    for the corresponding boolean type (0=fuse, 1=cut, 2=common).
    """

    def setUp(self):
        self.Doc = FreeCAD.newDocument("PartDesignTestBoolean")

    def _makeBoxBody(self, body_name, box_name, base=None):
        """Create a PartDesign Body containing a 10x10x10 AdditiveBox.

        `base` optionally offsets the box placement. Returns (body, box).
        """
        body = self.Doc.addObject("PartDesign::Body", body_name)
        box = self.Doc.addObject("PartDesign::AdditiveBox", box_name)
        box.Length = 10
        box.Width = 10
        box.Height = 10
        if base is not None:
            box.Placement.Base = base
        body.addObject(box)
        self.Doc.recompute()
        return body, box

    def _makeBoolean(self, name, boolean_type):
        """Build the two standard bodies, apply a boolean of the given
        numeric type on the second one, and return the boolean feature."""
        self.Body, self.Box = self._makeBoxBody("Body", "Box")
        self.Body001, self.Box001 = self._makeBoxBody(
            "Body001", "Box001", App.Vector(-5, 0, 0)
        )
        boolean = self.Doc.addObject("PartDesign::Boolean", name)
        self.Body001.addObject(boolean)
        self.Doc.recompute()
        boolean.setObjects(
            [
                self.Body,
            ]
        )
        boolean.Type = boolean_type
        self.Doc.recompute()
        return boolean

    def testBooleanFuseCase(self):
        self.BooleanFuse = self._makeBoolean("BooleanFuse", 0)
        # 1000 + 1000 minus the 500 overlap
        self.assertAlmostEqual(self.BooleanFuse.Shape.Volume, 1500)

    def testBooleanCutCase(self):
        self.BooleanCut = self._makeBoolean("BooleanCut", 1)
        self.assertAlmostEqual(self.BooleanCut.Shape.Volume, 500)

    def testBooleanCommonCase(self):
        self.BooleanCommon = self._makeBoolean("BooleanCommon", 2)
        self.assertAlmostEqual(self.BooleanCommon.Shape.Volume, 500)

    def tearDown(self):
        # closing doc
        FreeCAD.closeDocument("PartDesignTestBoolean")
        # print ("omit closing document for debugging")
|
CDDA-Game-Launcher | setup | #!/usr/bin/env python
import os
import os.path
import pathlib
import shutil
import subprocess
import sys
import winreg
from distutils.cmd import Command
from distutils.core import setup
from os import scandir
from pathlib import Path
from subprocess import DEVNULL, CalledProcessError, call, check_output
from zipfile import ZipFile
import httpx
try:
import txclib.commands
import txclib.utils
except ImportError:
print(
"Transifex client not found. Consider installing it (`pip install transifex-client`) "
"if you need to interact with Transifex.",
file=sys.stderr,
)
from babel.messages import frontend as babel
def get_setup_dir():
    """Return an absolute path to setup.py directory

    Useful to find project files no matter where setup.py is invoked.
    The result is memoized on the function object itself.
    """
    cached = getattr(get_setup_dir, "setup_base_dir", None)
    if cached is None:
        cached = pathlib.Path(__file__).absolute().parent
        get_setup_dir.setup_base_dir = cached
    return cached
def get_version():
    """Read the launcher version string from the cddagl/VERSION file."""
    version_path = get_setup_dir() / "cddagl" / "VERSION"
    with open(version_path) as version_file:
        return version_file.read().strip()
def log(msg):
    """Write a progress message to stdout (kept as a wrapper so call sites
    stay uniform and the sink can be redirected later)."""
    sys.stdout.write("{}\n".format(msg))
class ExtendedCommand(Command):
    """distutils Command with a convenience helper to chain other commands."""

    def run_other_command(self, command_name, **kwargs):
        """Runs another command with specified parameters."""
        command = self.reinitialize_command(command_name)
        for attr, value in kwargs.items():
            setattr(command, attr, value)
        command.ensure_finalized()
        self.announce(f"running {command_name}", 2)
        command.run()
        return command
def include_requirements(target_path):
    """Install requirements.txt into `target_path` and strip metadata.

    Installs the project's dependencies into the bundle directory, then
    removes `__pycache__` and `*.dist-info` directories, which are useless
    in a frozen distribution.
    """
    # Install packages from requirements.txt file into build dir.
    # sys.executable guarantees pip runs under the same interpreter that is
    # executing setup.py, not whatever "python" resolves to on PATH.
    requirements_path = Path("requirements.txt")
    subprocess.run(
        [
            sys.executable,
            "-m",
            "pip",
            "install",
            "-r",
            requirements_path,
            "--target",
            target_path,
        ]
    )
    _remove_pycache_dirs(target_path)
    _remove_dist_info_dirs(target_path)


def _remove_pycache_dirs(root):
    """Recursively delete every __pycache__ directory under `root`."""
    dir_list = [root]
    while dir_list:
        next_dir = dir_list.pop()
        with os.scandir(next_dir) as it:
            for entry in it:
                if entry.name.startswith("."):
                    continue
                if entry.is_dir():
                    if entry.name == "__pycache__":
                        shutil.rmtree(entry.path)
                    else:
                        dir_list.append(entry.path)


def _remove_dist_info_dirs(root):
    """Delete top-level *.dist-info metadata directories under `root`."""
    with os.scandir(root) as dir_it:
        for entry in dir_it:
            if entry.name.startswith(".") or not entry.is_dir():
                continue
            if entry.name.endswith(".dist-info"):
                shutil.rmtree(entry.path)
class Bundle(ExtendedCommand):
    """Build a portable distribution: the official embeddable Python plus
    the cddagl package, its data and its pip dependencies, in build/archive.
    """

    description = "Bundle CDDAGL with Python"
    user_options = []

    def initialize_options(self):
        # Where compiled .mo translation catalogs live.
        self.locale_dir = os.path.join("cddagl", "locale")

    def finalize_options(self):
        pass

    def run(self):
        # NOTE(review): this method returns False on failure, but distutils
        # ignores Command.run()'s return value - a failed download leaves a
        # partial build without failing the command. Consider raising.
        print("Bundling with Python for Windows")
        src_package_path = Path("cddagl")
        build_path = Path("build")
        # Start from a clean build directory every time.
        if build_path.is_dir():
            shutil.rmtree(build_path)
        build_path.mkdir(parents=True, exist_ok=True)
        download_path = build_path.joinpath("downloads")
        download_path.mkdir(parents=True, exist_ok=True)
        # Download Python embeddable package
        python_embed_url = (
            "https://www.python.org/ftp/python/3.8.10/python-3.8.10-embed-amd64.zip"
        )
        python_embed_name = "python-3.8.10-embed-amd64.zip"
        python_embed_archive = download_path.joinpath(python_embed_name)
        try:
            with open(python_embed_archive, "wb") as binary_file:
                print(f"Downloading python archive {python_embed_name}...")
                # Stream the download to disk chunk by chunk.
                with httpx.stream("GET", python_embed_url) as http_stream:
                    if http_stream.status_code != 200:
                        print(
                            f"Cannot download python archive {python_embed_name}.\n"
                            f"Unexpected status code {http_stream.status_code}"
                        )
                        return False
                    for data in http_stream.iter_bytes():
                        binary_file.write(data)
        except httpx.RequestError as exception:
            print(f"Exception while downloading python archive. Exception: {exception}")
            return False
        archive_dir_path = build_path.joinpath("archive")
        archive_dir_path.mkdir(parents=True, exist_ok=True)
        # Extracting Python embeddable package
        print(f"Extracting python archive {python_embed_name}...")
        with ZipFile(python_embed_archive, "r") as zip_file:
            zip_file.extractall(archive_dir_path)
        # Add mo files for localization
        self.run_other_command("compile_catalog")
        # Copy package into archive dir
        archive_package_path = archive_dir_path.joinpath("cddagl")
        shutil.copytree(src_package_path, archive_package_path)
        # Additional directories
        src_data_path = Path("data")
        target_data_path = archive_dir_path.joinpath("data")
        shutil.copytree(src_data_path, target_data_path)
        src_alembicrepo_path = Path("alembicrepo")
        target_alembicrepo_path = archive_dir_path.joinpath("alembicrepo")
        shutil.copytree(src_alembicrepo_path, target_alembicrepo_path)
        # Install pip dependencies directly into the archive.
        include_requirements(archive_dir_path)
        # Move pywin32_system32 dlls into archive root
        pywin32_system32_path = archive_dir_path.joinpath("pywin32_system32")
        with os.scandir(pywin32_system32_path) as it:
            for entry in it:
                if entry.is_file():
                    shutil.move(entry.path, archive_dir_path)
        shutil.rmtree(pywin32_system32_path)
        # Let's find and add unrar if available
        try:
            unrar_path = check_output(["where", "unrar.exe"], stderr=DEVNULL)
            unrar_path = unrar_path.strip().decode("cp437")
            shutil.copy(unrar_path, archive_dir_path)
        except CalledProcessError:
            log("'unrar.exe' couldn't be found.")
        # Remove unneeded files in archive
        paths_to_remove = [
            ["bin"],
            ["adodbapi"],
            ["pythonwin"],
            ["PyQt5", "Qt5", "qml"],
        ]
        for path in paths_to_remove:
            target_path = archive_dir_path.joinpath(*path)
            if target_path.is_dir():
                shutil.rmtree(target_path)
        # Create batch file for starting the launcher easily
        batch_file_path = archive_dir_path.joinpath("Launcher.bat")
        with open(batch_file_path, "w", encoding="utf8") as batch_file:
            batch_file.write(
                """
@echo off
start pythonw.exe -m cddagl
"""
            )
class FreezeWithPyInstaller(ExtendedCommand):
    """Build a frozen one-dir distribution of the launcher with PyInstaller."""

    description = "Build CDDAGL with PyInstaller"
    user_options = [
        ("debug=", None, "Specify if we are using a debug build with PyInstaller.")
    ]

    def initialize_options(self):
        self.debug = None
        # Where compiled .mo translation catalogs live.
        self.locale_dir = os.path.join("cddagl", "locale")

    def finalize_options(self):
        pass

    def run(self):
        """Generate a .spec with pyi-makespec, then run pyinstaller on it."""
        # -w for no console and -c for console
        window_mode = "-c" if bool(self.debug) else "-w"
        makespec_call = [
            "pyi-makespec",
            "-D",
            window_mode,
            "--noupx",
            "--hidden-import=lxml.cssselect",
            "--hidden-import=babel.numbers",
            "--hidden-import=pkg_resources.py2_warn",
            # Raw string: in the previous plain literal, "\l" was an invalid
            # escape sequence (SyntaxWarning on Python 3.12+). The runtime
            # value is unchanged.
            r"cddagl\launcher.py",
            "-i",
            r"cddagl\resources\launcher.ico",
        ]

        # Check if we have Windows Kits 10 path and add ucrt path
        windowskit_path = None
        try:
            key = winreg.OpenKey(
                winreg.HKEY_LOCAL_MACHINE,
                r"SOFTWARE\Microsoft\Windows Kits\Installed Roots",
                access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY,
            )
            value = winreg.QueryValueEx(key, "KitsRoot10")
            windowskit_path = value[0]
            winreg.CloseKey(key)
        except OSError:
            pass

        if windowskit_path is not None:
            ucrt_path = windowskit_path + "Redist\\ucrt\\DLLs\\x86\\"
            makespec_call.extend(("-p", ucrt_path))

        # Additional files
        added_files = [
            ("alembic", "alembic"),
            ("alembicrepo", "alembicrepo"),
            ("data", "data"),
            ("cddagl/resources", "cddagl/resources"),
            ("cddagl/VERSION", "cddagl"),
        ]
        added_binaries = []

        # Let's find and add unrar if available
        try:
            unrar_path = check_output(["where", "unrar.exe"], stderr=DEVNULL)
            unrar_path = unrar_path.strip().decode("cp437")
            added_files.append((unrar_path, "."))
        except CalledProcessError:
            log("'unrar.exe' couldn't be found.")

        # Add mo files for localization
        self.run_other_command("compile_catalog")
        if os.path.isdir(self.locale_dir):
            for entry in scandir(self.locale_dir):
                if entry.is_dir():
                    mo_path = os.path.join(entry.path, "LC_MESSAGES", "cddagl.mo")
                    if os.path.isfile(mo_path):
                        # PyInstaller wants forward slashes in --add-data.
                        mo_dir = os.path.dirname(mo_path).replace("\\", "/")
                        mo_path = mo_path.replace("\\", "/")
                        added_files.append((mo_path, mo_dir))

        # Include additional files
        for src, dest in added_files:
            src_dest = src + ";" + dest
            makespec_call.extend(("--add-data", src_dest))
        for src, dest in added_binaries:
            src_dest = src + ";" + dest
            makespec_call.extend(("--add-binary", src_dest))

        # Add debug build
        if bool(self.debug):
            makespec_call.extend(("-d", "all"))

        # Call the makespec util
        log(f"executing {makespec_call}")
        call(makespec_call)

        # Call pyinstaller
        pyinstaller_call = ["pyinstaller"]

        # Add debug info for PyInstaller
        if bool(self.debug):
            pyinstaller_call.append("--clean")
            pyinstaller_call.extend(("--log-level", "DEBUG"))

        pyinstaller_call.append("--noconfirm")
        pyinstaller_call.append("launcher.spec")

        log(f"executing {pyinstaller_call}")
        call(pyinstaller_call)
class CreateInnoSetupInstaller(ExtendedCommand):
    """Bundle the launcher, then compile a Windows installer via Inno Setup."""

    description = "Creates a Windows Installer for the project"
    user_options = [
        ("compiler=", None, "Specify the path to Inno Setup Compiler (Compil32.exe)."),
    ]

    def initialize_options(self):
        # Default install location of Inno Setup 6.
        self.compiler = r"C:\Program Files (x86)\Inno Setup 6\Compil32.exe"

    def finalize_options(self):
        if not pathlib.Path(self.compiler).exists():
            raise Exception("Inno Setup Compiler (Compil32.exe) not found.")

    def run(self):
        # Make sure we are running Inno Setup from the project directory.
        os.chdir(get_setup_dir())
        self.run_other_command("bundle")
        inno_call = [self.compiler, "/cc", "launcher.iss"]
        log(f"executing {inno_call}")
        call(inno_call)
class TransifexPull(ExtendedCommand):
    """Pull translation catalogs from Transifex and compile them."""

    description = "Download translated strings from Transifex service."
    user_options = [
        ("reviewed-only", None, "Download only reviewed translations."),
    ]

    def initialize_options(self):
        self.reviewed_only = False

    def finalize_options(self):
        pass

    def run(self):
        # Make sure we are running the commands from project directory.
        os.chdir(get_setup_dir())
        mode = "onlyreviewed" if self.reviewed_only else "onlytranslated"
        args = ["--no-interactive", "--all", "--force", "--mode", mode]
        txclib.utils.DISABLE_COLORS = True
        txclib.commands.cmd_pull(args, get_setup_dir())
        self.run_other_command("compile_catalog")
class TransifexPush(Command):
    """Push source strings (and optionally translations) to Transifex."""

    description = "Push untranslated project strings to Transifex service."
    user_options = [
        (
            "push-translations",
            None,
            "Push translations too, this will try to merge translations.",
        ),
    ]

    def initialize_options(self):
        self.push_translations = False

    def finalize_options(self):
        pass

    def run(self):
        # Make sure we are running the commands from project directory.
        os.chdir(get_setup_dir())
        extra = ["--translations"] if self.push_translations else []
        args = ["--no-interactive", "--source", "--force"] + extra
        txclib.utils.DISABLE_COLORS = True
        txclib.commands.cmd_push(args, get_setup_dir())
class TransifexExtractPush(ExtendedCommand):
    """Extract translatable strings, then push them to Transifex."""

    description = "Extract all translatable strings and push them to Transifex service."
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Make sure we are running the commands from project directory.
        os.chdir(get_setup_dir())
        for command_name in ("extract_messages", "translation_push"):
            self.run_other_command(command_name)
class ExtractMessagesWithDefaults(babel.extract_messages):
    """babel `extract_messages` preconfigured with this project's paths."""

    def initialize_options(self):
        super().initialize_options()
        # Template catalog and extraction mapping live under cddagl/locale.
        self.output_file = r"cddagl\locale\cddagl.pot"
        self.mapping_file = r"cddagl\locale\mapping.cfg"
class UpdateCatalogWithDefaults(babel.update_catalog):
    """babel `update_catalog` preconfigured with this project's paths."""

    def initialize_options(self):
        super().initialize_options()
        # Merge the shared .pot template into each per-language catalog.
        self.input_file = r"cddagl\locale\cddagl.pot"
        self.output_dir = r"cddagl\locale"
        self.domain = "cddagl"
# NOTE(review): class name has a typo ("Defauls"); kept because it is
# referenced by name in the cmdclass mapping passed to setup().
class CompileCatalogWithDefauls(babel.compile_catalog):
    """babel `compile_catalog` preconfigured with this project's paths."""

    def initialize_options(self):
        super().initialize_options()
        self.directory = r"cddagl\locale"
        self.domain = "cddagl"
        # Compile fuzzy (unreviewed) translations as well.
        self.use_fuzzy = True
class ExtractUpdateMessages(ExtendedCommand):
    """Extract translatable strings and refresh the language catalogs."""

    description = (
        "Extract all project strings that require translation and update catalogs."
    )
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        for command_name in ("extract_messages", "update_catalog"):
            self.run_other_command(command_name)
setup(
name="cddagl",
version=get_version(),
description=("A Cataclysm: Dark Days Ahead launcher with additional features"),
author="Rémy Roy",
author_email="remyroy@remyroy.com",
url="https://github.com/remyroy/CDDA-Game-Launcher",
packages=["cddagl"],
package_data={"cddagl": ["VERSION"]},
cmdclass={
### freeze & installer commands
"bundle": Bundle,
"freeze": FreezeWithPyInstaller,
"create_installer": CreateInnoSetupInstaller,
### babel commands
"extract_messages": ExtractMessagesWithDefaults,
"compile_catalog": CompileCatalogWithDefauls,
"init_catalog": babel.init_catalog,
"update_catalog": UpdateCatalogWithDefaults,
"exup_messages": ExtractUpdateMessages,
### transifex related commands
"translation_push": TransifexPush,
"translation_expush": TransifexExtractPush,
"translation_pull": TransifexPull,
},
)
|
service | precalcImplantSet | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import eos.db
from eos.db.saveddata.implant import Implant
from service.market import Market
class PrecalcedImplantSets:
    """Service exposing the implant sets precalculated in the eos database."""

    instance = None

    @classmethod
    def getInstance(cls):
        """Lazily create and return the shared singleton instance."""
        if cls.instance is None:
            cls.instance = PrecalcedImplantSets()
        return cls.instance

    @staticmethod
    def getImplantSets():
        """Return every precalculated implant set from the eos database."""
        return eos.db.getAllImplantSets()

    @staticmethod
    def getStructuredSets():
        """Group implant sets as {set name: {grade name: implants}}."""
        structured = {}
        for implantSet in PrecalcedImplantSets.getImplantSets():
            grades = structured.setdefault(implantSet.setName, {})
            grades[implantSet.gradeName] = implantSet.implants
        return structured

    @staticmethod
    def stringToImplants(string):
        """Turn a comma-separated list of type IDs into Implant objects,
        silently skipping unknown items and items that are not implants."""
        sMkt = Market.getInstance()
        implants = []
        for token in string.split(","):
            item = sMkt.getItem(int(token))
            if item is None:
                continue
            try:
                implants.append(Implant(item))
            except ValueError:
                # Item exists but cannot be wrapped as an implant; skip it.
                continue
        return implants
|
CloudSync | SyncOrchestrator | # Copyright (c) 2022 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os
from typing import Any, Dict, List, cast
from cura.CuraApplication import CuraApplication
from UM import i18n_catalog
from UM.Extension import Extension
from UM.Logger import Logger
from UM.Message import Message
from UM.PluginRegistry import PluginRegistry
from .CloudApiClient import CloudApiClient
from .CloudPackageChecker import CloudPackageChecker
from .DiscrepanciesPresenter import DiscrepanciesPresenter
from .DownloadPresenter import DownloadPresenter
from .LicensePresenter import LicensePresenter
from .RestartApplicationPresenter import RestartApplicationPresenter
from .SubscribedPackagesModel import SubscribedPackagesModel
class SyncOrchestrator(Extension):
    """Orchestrates the synchronizing of packages from the user account to the installed packages
    Example flow:
    - CloudPackageChecker compares a list of packages the user `subscribed` to in their account
    If there are `discrepancies` between the account and locally installed packages, they are emitted
    - DiscrepanciesPresenter shows a list of packages to be added or removed to the user. It emits the `packageMutations`
    the user selected to be performed
    - The SyncOrchestrator uses PackageManager to remove local packages the users wants to see removed
    - The DownloadPresenter shows a download progress dialog. It emits A tuple of succeeded and failed downloads
    - The LicensePresenter extracts licenses from the downloaded packages and presents a license for each package to
    be installed. It emits the `licenseAnswers` signal for accept or declines
    - The CloudApiClient removes the declined packages from the account
    - The SyncOrchestrator uses PackageManager to install the downloaded packages and delete temp files.
    - The RestartApplicationPresenter notifies the user that a restart is required for changes to take effect
    """
    def __init__(self, app: CuraApplication) -> None:
        """Wire up the checker and presenters that drive the sync flow."""
        super().__init__()
        # Differentiate This PluginObject from the Marketplace. self.getId() includes _name.
        # getPluginId() will return the same value for The Marketplace extension and this one
        self._name = "SyncOrchestrator"
        self._package_manager = app.getPackageManager()
        # Keep a reference to the CloudApiClient. it watches for installed packages and subscribes to them
        self._cloud_api: CloudApiClient = CloudApiClient.getInstance(app)
        self._checker: CloudPackageChecker = CloudPackageChecker(app)
        self._checker.discrepancies.connect(self._onDiscrepancies)
        self._discrepancies_presenter: DiscrepanciesPresenter = DiscrepanciesPresenter(
            app
        )
        self._discrepancies_presenter.packageMutations.connect(self._onPackageMutations)
        self._download_presenter: DownloadPresenter = DownloadPresenter(app)
        self._license_presenter: LicensePresenter = LicensePresenter(app)
        self._license_presenter.licenseAnswers.connect(self._onLicenseAnswers)
        self._restart_presenter = RestartApplicationPresenter(app)
    def _onDiscrepancies(self, model: SubscribedPackagesModel) -> None:
        """Show the discrepancies dialog for the packages found by the checker."""
        plugin_path = cast(
            str, PluginRegistry.getInstance().getPluginPath(self.getPluginId())
        )
        self._discrepancies_presenter.present(plugin_path, model)
    def _onPackageMutations(self, mutations: SubscribedPackagesModel) -> None:
        """Start downloading the packages the user chose to install."""
        # Presenters are single-shot: get a fresh copy and re-connect to it.
        self._download_presenter = self._download_presenter.resetCopy()
        self._download_presenter.done.connect(self._onDownloadFinished)
        self._download_presenter.download(mutations)
    def _onDownloadFinished(
        self, success_items: Dict[str, Dict[str, str]], error_items: List[str]
    ) -> None:
        """Called when a set of packages have finished downloading
        :param success_items:: Dict[package_id, Dict[str, str]]
        :param error_items:: List[package_id]
        """
        if error_items:
            message = i18n_catalog.i18nc(
                "@info:generic",
                "{} plugins failed to download".format(len(error_items)),
            )
            self._showErrorMessage(message)
        plugin_path = cast(
            str, PluginRegistry.getInstance().getPluginPath(self.getPluginId())
        )
        # Fresh single-shot presenter again; re-connect our answer handler.
        self._license_presenter = self._license_presenter.resetCopy()
        self._license_presenter.licenseAnswers.connect(self._onLicenseAnswers)
        self._license_presenter.present(plugin_path, success_items)
    # Called when user has accepted / declined all licenses for the downloaded packages
    def _onLicenseAnswers(self, answers: List[Dict[str, Any]]) -> None:
        """Install accepted packages, unsubscribe declined ones, clean up temp files."""
        has_changes = False  # True when at least one package is installed
        for item in answers:
            if item["accepted"]:
                # install and subscribe packages
                if not self._package_manager.installPackage(item["package_path"]):
                    message = "Could not install {}".format(item["package_id"])
                    self._showErrorMessage(message)
                    continue
                has_changes = True
            else:
                self._cloud_api.unsubscribe(item["package_id"])
            # delete temp file
            try:
                os.remove(item["package_path"])
            except (
                EnvironmentError
            ) as e:  # File was already removed, no access rights, etc.
                Logger.error(
                    "Can't delete temporary package file: {err}".format(err=str(e))
                )
        if has_changes:
            self._restart_presenter.present()
    def _showErrorMessage(self, text: str):
        """Logs an error and shows it to the user"""
        Logger.error(text)
        Message(text, lifetime=0, message_type=Message.MessageType.ERROR).show()
|
ui | drawView | from AppKit import NSBezelBorder, NSDragOperationNone
from Foundation import NSURL
from Quartz import PDFDocument, PDFThumbnailView, PDFView
from vanilla import Group
# Legacy Carbon pasteboard flavor for EPS data; 0x41494342 spells "AICB"
# (presumably Adobe Illustrator Clipboard — verify against usage).
epsPasteBoardType = "CorePasteboardFlavorType 0x41494342"
class DrawBotPDFThumbnailView(PDFThumbnailView):
    # Reject all drag operations: the thumbnail sidebar is display-only.
    def draggingUpdated_(self, draggingInfo):
        return NSDragOperationNone
class ThumbnailView(Group):
    """Vanilla wrapper showing page thumbnails for an attached PDF view."""
    nsViewClass = DrawBotPDFThumbnailView
    def setDrawView(self, view):
        """Attach the DrawView whose pages should be thumbnailed."""
        self.getNSView().setPDFView_(view.getNSView())
    def getSelection(self):
        """Return the index of the first selected page, or -1 when nothing
        is selected or the selection cannot be determined."""
        try:
            # sometimes this goes weirdly wrong...
            selection = self.getNSView().selectedPages()
        except Exception:
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
            # are no longer swallowed here.
            return -1
        if selection:
            for page in selection:
                document = page.document()
                index = document.indexForPage_(page)
                return index
        return -1
class DrawBotPDFView(PDFView):
    """PDFView subclass that guards against a crash in PDFKit key handling."""
    def performKeyEquivalent_(self, event):
        # catch a bug in PDFView
        # cmd + ` causes a traceback
        # DrawBot[15705]: -[__NSCFConstantString characterAtIndex:]: Range or index out of bounds
        try:
            return super(DrawBotPDFView, self).performKeyEquivalent_(event)
        except Exception:
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
            # still propagate.
            return False
class DrawView(Group):
    """Vanilla wrapper around a PDFView for displaying PDF documents."""
    nsViewClass = DrawBotPDFView
    def __init__(self, posSize):
        super(DrawView, self).__init__(posSize)
        pdfView = self.getNSView()
        pdfView.setAutoScales_(True)
        view = pdfView.documentView()
        if view is not None:
            scrollview = view.enclosingScrollView()
            scrollview.setBorderType_(NSBezelBorder)
    def get(self):
        """Return the current document as PDF data, or None if empty."""
        pdf = self.getNSView().document()
        if pdf is None:
            return None
        return pdf.dataRepresentation()
    def set(self, pdfData):
        """Display the given raw PDF data."""
        pdf = PDFDocument.alloc().initWithData_(pdfData)
        self.setPDFDocument(pdf)
    def setPath(self, path):
        """Display the PDF file at *path*."""
        url = NSURL.fileURLWithPath_(path)
        document = PDFDocument.alloc().initWithURL_(url)
        self.setPDFDocument(document)
    def setPDFDocument(self, document):
        """Set *document* as the displayed PDF; None shows an empty document."""
        if document is None:
            document = PDFDocument.alloc().init()
        self.getNSView().setDocument_(document)
    def getPDFDocument(self):
        return self.getNSView().document()
    def setScale(self, scale):
        self.getNSView().setScaleFactor_(scale)
    def scale(self):
        return self.getNSView().scaleFactor()
    def scrollDown(self):
        """Scroll the document view to its (0, 0) origin."""
        document = self.getNSView().documentView()
        document.scrollPoint_((0, 0))
    def scrollToPageIndex(self, index):
        """Jump to page *index*; fall back to scrollDown() when there is no
        document, the index is out of range, or the jump fails."""
        pdf = self.getPDFDocument()
        if pdf is None:
            self.scrollDown()
        elif 0 <= index < pdf.pageCount():
            try:
                # sometimes this goes weirdly wrong...
                page = pdf.pageAtIndex_(index)
                self.getNSView().goToPage_(page)
            except Exception:
                # Narrowed from a bare except: — don't swallow SystemExit etc.
                self.scrollDown()
        else:
            self.scrollDown()
|
albums | models | # Copyright 2013 Christoph Reiter
# 2020 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from quodlibet import app, config
from quodlibet.qltk.models import ObjectModelFilter, ObjectModelSort, ObjectStore
class AlbumItem:
    """Wraps an album for display in the album list and caches its cover."""
    # Cached cover pixbuf, filled in asynchronously by scan_cover().
    cover = None
    # True once a cover scan has been started for this item.
    scanned = False
    def __init__(self, album):
        self.album = album
    @property
    def COVER_SIZE(self):
        """Configured cover edge length in pixels (48 if unset/invalid)."""
        configured = config.getint("browsers", "cover_size")
        return configured if configured > 0 else 48
    def scan_cover(self, force=False, scale_factor=1, callback=None, cancel=None):
        """Asynchronously fetch the album cover; *callback* fires once the
        pixbuf has been stored on self.cover."""
        if self.scanned and not force:
            return
        if not self.album or not self.album.songs:
            return
        self.scanned = True
        def on_cover(pixbuf):
            self.cover = pixbuf
            callback()
        edge = self.COVER_SIZE * scale_factor
        app.cover_manager.get_pixbuf_many_async(
            self.album.songs, edge, edge, cancel, on_cover
        )
    def __repr__(self):
        return repr(self.album)
class AlbumModelMixin:
    """Selection helpers shared by the album tree models."""
    def get_items(self, paths):
        """Return the items at *paths*.
        If any selected row is the dummy row (album is None), return every
        real item in the model instead.
        """
        selected = []
        for path in paths:
            entry = self.get_value(self.get_iter(path))
            if entry.album is None:
                return [e for e in self.itervalues() if e.album is not None]
            selected.append(entry)
        return selected
    def get_albums(self, paths):
        """Return the albums behind *paths* (see get_items)."""
        return [entry.album for entry in self.get_items(paths)]
    def get_album(self, iter_):
        """Return the album stored at *iter_*."""
        return self.get_value(iter_, 0).album
class AlbumModel(ObjectStore, AlbumModelMixin):
    """Store of AlbumItems kept in sync with a library's album collection.
    The first row is always a dummy item (album is None).
    """
    def __init__(self, library):
        super().__init__()
        self.__library = library
        albums = library.albums
        # Keep the handler ids so destroy() can disconnect them again.
        self.__sigs = [
            albums.connect("added", self._add_albums),
            albums.connect("removed", self._remove_albums),
            albums.connect("changed", self._change_albums),
        ]
        # Dummy row first, then one row per album.
        self.append(row=[AlbumItem(None)])
        self.append_many((AlbumItem(a) for a in albums.values()))
    def refresh_all(self):
        """Trigger redraws for all rows"""
        for iter_, value in self.iterrows():
            self.row_changed(self.get_path(iter_), iter_)
    def destroy(self):
        """Disconnect from the library and drop all rows."""
        library = self.__library
        for sig in self.__sigs:
            library.albums.disconnect(sig)
        self.__library = None
        self.clear()
    def _update_all(self):
        # Redraw only the first (dummy) row.
        if not self.is_empty():
            row = self[0]
            self.row_changed(row.path, row.iter)
    def _add_albums(self, library, added):
        self.append_many((AlbumItem(a) for a in added))
        self._update_all()
    def _remove_albums(self, library, removed):
        removed_albums = removed.copy()
        iters_remove = []
        for iter_, item in self.iterrows():
            album = item.album
            if album is not None and album in removed_albums:
                removed_albums.remove(album)
                iters_remove.append(iter_)
                # Stop scanning once every removed album has been matched.
                if not removed_albums:
                    break
        for iter_ in iters_remove:
            self.remove(iter_)
        self._update_all()
    def _change_albums(self, library, changed):
        """Trigger a row redraw for each album that changed"""
        changed_albums = changed.copy()
        for iter_, item in self.iterrows():
            album = item.album
            if album is not None and album in changed_albums:
                changed_albums.remove(album)
                self.row_changed(self.get_path(iter_), iter_)
                # Early exit once every changed album has been handled.
                if not changed_albums:
                    break
class AlbumFilterModel(ObjectModelFilter, AlbumModelMixin):
    """Filter model for the album list."""
    def contains_all(self, paths):
        """Whether *paths* includes the dummy row (album is None)."""
        for path in paths:
            if self.get_value(self.get_iter(path), 0).album is None:
                return True
        return False
class AlbumSortModel(ObjectModelSort, AlbumModelMixin):
    # Sortable wrapper for the album model; all behavior comes from the bases.
    pass
|
customize | conversion | # -*- coding: utf-8 -*-
"""
Defines the plugin system for conversions.
"""
import os
import re
import shutil
from calibre import CurrentDir
from calibre.customize import Plugin
class ConversionOption(object):
    """
    Class representing conversion options
    """
    def __init__(
        self, name=None, help=None, long_switch=None, short_switch=None, choices=None
    ):
        """
        :param name: Option name; must be a valid Python identifier.
        :param help: Help text shown to the user (required).
        :param long_switch: Command-line long switch; defaults to *name*
                            with underscores replaced by hyphens.
        :param short_switch: Optional short command-line switch.
        :param choices: Optional collection of permitted values.
        """
        self.name = name
        self.help = help
        self.long_switch = long_switch
        self.short_switch = short_switch
        self.choices = choices
        if self.long_switch is None:
            self.long_switch = self.name.replace("_", "-")
        self.validate_parameters()
    def validate_parameters(self):
        """
        Validate the parameters passed to :meth:`__init__`.
        """
        # Anchor the pattern with \Z: a bare re.match() accepts any string
        # with a valid identifier *prefix* (e.g. "abc-def" matched "abc"
        # and slipped through validation).
        if re.match(r"[a-zA-Z_]([a-zA-Z0-9_])*\Z", self.name) is None:
            raise ValueError(self.name + " is not a valid Python identifier")
        if not self.help:
            raise ValueError("You must set the help text")
    def __hash__(self):
        # Options are identified by name only.
        return hash(self.name)
    def __eq__(self, other):
        return hash(self) == hash(other)
    def clone(self):
        """Return an independent copy of this option."""
        return ConversionOption(
            name=self.name,
            help=self.help,
            long_switch=self.long_switch,
            short_switch=self.short_switch,
            choices=self.choices,
        )
class OptionRecommendation(object):
    # Recommendation strength levels.
    LOW = 1
    MED = 2
    HIGH = 3
    def __init__(self, recommended_value=None, level=LOW, **kwargs):
        """
        An option recommendation. That is, an option as well as its recommended
        value and the level of the recommendation.
        :param recommended_value: The recommended value (a string, a number
                                  or None).
        :param level: One of :attr:`LOW`, :attr:`MED` or :attr:`HIGH`.
        :param kwargs: Either ``option`` (a ready-made
                       :class:`ConversionOption`) or the keyword arguments
                       needed to construct one.
        """
        self.level = level
        self.recommended_value = recommended_value
        self.option = kwargs.pop("option", None)
        if self.option is None:
            self.option = ConversionOption(**kwargs)
        self.validate_parameters()
    @property
    def help(self):
        """Help text of the underlying option."""
        return self.option.help
    def clone(self):
        """Return an independent copy of this recommendation."""
        return OptionRecommendation(
            recommended_value=self.recommended_value,
            level=self.level,
            option=self.option.clone(),
        )
    def validate_parameters(self):
        if self.option.choices and self.recommended_value not in self.option.choices:
            raise ValueError(
                "OpRec: %s: Recommended value not in choices" % self.option.name
            )
        # `unicode` only exists on Python 2; referencing it unconditionally
        # made this raise NameError on Python 3 for every non-None value.
        try:
            string_types = (str, unicode)  # noqa: F821 (Python 2 only)
        except NameError:
            string_types = (str,)
        if not (
            isinstance(self.recommended_value, (int, float) + string_types)
            or self.recommended_value is None
        ):
            raise ValueError(
                "OpRec: %s:" % self.option.name
                + repr(self.recommended_value)
                + " is not a string or a number"
            )
class DummyReporter(object):
    """No-op progress reporter used until a real one is attached."""
    def __init__(self):
        # Conversion code checks this flag to see whether the user aborted.
        self.cancel_requested = False
    def __call__(self, percent, msg=""):
        """Ignore progress updates."""
def gui_configuration_widget(
    name, parent, get_option_by_name, get_option_help, db, book_id, for_output=True
):
    """
    Build the GUI configuration widget for the conversion plugin *name*.
    Returns None when the plugin has no widget module. The widget module is
    always looked up under ``calibre.gui2.convert``; *for_output* only
    selects the icon and help text it is decorated with.
    """
    import importlib
    try:
        module = importlib.import_module("calibre.gui2.convert." + name)
        pw = module.PluginWidget
        if for_output:
            pw.ICON = I("back.png")
            pw.HELP = _("Options specific to the output format.")
        else:
            pw.ICON = I("forward.png")
            pw.HELP = _("Options specific to the input format.")
        return pw(parent, get_option_by_name, get_option_help, db, book_id)
    except ImportError:
        # Plugin ships no GUI widget.
        return None
class InputFormatPlugin(Plugin):
    """
    InputFormatPlugins are responsible for converting a document into
    HTML+OPF+CSS+etc.
    The results of the conversion *must* be encoded in UTF-8.
    The main action happens in :meth:`convert`.
    """
    type = _("Conversion Input")
    can_be_disabled = False
    supported_platforms = ["windows", "osx", "linux"]
    #: Set of file types for which this plugin should be run
    #: For example: ``set(['azw', 'mobi', 'prc'])``
    file_types = set()
    #: If True, this input plugin generates a collection of images,
    #: one per HTML file. You can obtain access to the images via
    #: convenience method, :meth:`get_image_collection`.
    is_image_collection = False
    #: Number of CPU cores used by this plugin
    #: A value of -1 means that it uses all available cores
    core_usage = 1
    #: If set to True, the input plugin will perform special processing
    #: to make its output suitable for viewing
    for_viewer = False
    #: The encoding that this input plugin creates files in. A value of
    #: None means that the encoding is undefined and must be
    #: detected individually
    output_encoding = "utf-8"
    #: Options shared by all Input format plugins. Do not override
    #: in sub-classes. Use :attr:`options` instead. Every option must be an
    #: instance of :class:`OptionRecommendation`.
    common_options = {
        OptionRecommendation(
            name="input_encoding",
            recommended_value=None,
            level=OptionRecommendation.LOW,
            help=_(
                "Specify the character encoding of the input document. If "
                "set this option will override any encoding declared by the "
                "document itself. Particularly useful for documents that "
                "do not declare an encoding or that have erroneous "
                "encoding declarations."
            ),
        ),
    }
    #: Options to customize the behavior of this plugin. Every option must be an
    #: instance of :class:`OptionRecommendation`.
    options = set()
    #: A set of 3-tuples of the form
    #: (option_name, recommended_value, recommendation_level)
    recommendations = set()
    def __init__(self, *args):
        Plugin.__init__(self, *args)
        # Progress reporting is a no-op until the pipeline installs a reporter.
        self.report_progress = DummyReporter()
    def get_images(self):
        """
        Return a list of absolute paths to the images, if this input plugin
        represents an image collection. The list of images is in the same order
        as the spine and the TOC.
        """
        raise NotImplementedError()
    def convert(self, stream, options, file_ext, log, accelerators):
        """
        This method must be implemented in sub-classes. It must return
        the path to the created OPF file or an :class:`OEBBook` instance.
        All output should be contained in the current directory.
        If this plugin creates files outside the current
        directory they must be deleted/marked for deletion before this method
        returns.
        :param stream: A file like object that contains the input file.
        :param options: Options to customize the conversion process.
                        Guaranteed to have attributes corresponding
                        to all the options declared by this plugin. In
                        addition, it will have a verbose attribute that
                        takes integral values from zero upwards. Higher numbers
                        mean be more verbose. Another useful attribute is
                        ``input_profile`` that is an instance of
                        :class:`calibre.customize.profiles.InputProfile`.
        :param file_ext: The extension (without the .) of the input file. It
                         is guaranteed to be one of the `file_types` supported
                         by this plugin.
        :param log: A :class:`calibre.utils.logging.Log` object. All output
                    should use this object.
        :param accelerators: A dictionary of various information that the input
                             plugin can get easily that would speed up the
                             subsequent stages of the conversion.
        """
        raise NotImplementedError
    def __call__(self, stream, options, file_ext, log, accelerators, output_dir):
        try:
            log.info("InputFormatPlugin: %s running" % self.name)
            if hasattr(stream, "name"):
                log.info("on %s" % stream.name)
        except Exception:
            # In case stdout is broken. Narrowed from a bare except: so
            # KeyboardInterrupt/SystemExit still propagate.
            pass
        with CurrentDir(output_dir):
            # Clear leftovers from any previous conversion run.
            for x in os.listdir("."):
                shutil.rmtree(x) if os.path.isdir(x) else os.remove(x)
            ret = self.convert(stream, options, file_ext, log, accelerators)
        return ret
    def postprocess_book(self, oeb, opts, log):
        """
        Called to allow the input plugin to perform postprocessing after
        the book has been parsed.
        """
        pass
    def specialize(self, oeb, opts, log, output_fmt):
        """
        Called to allow the input plugin to specialize the parsed book
        for a particular output format. Called after postprocess_book
        and before any transforms are performed on the parsed book.
        """
        pass
    def gui_configuration_widget(
        self, parent, get_option_by_name, get_option_help, db, book_id=None
    ):
        """
        Called to create the widget used for configuring this plugin in the
        calibre GUI. The widget must be an instance of the PluginWidget class.
        See the builtin input plugins for examples.
        """
        name = self.name.lower().replace(" ", "_")
        return gui_configuration_widget(
            name,
            parent,
            get_option_by_name,
            get_option_help,
            db,
            book_id,
            for_output=False,
        )
class OutputFormatPlugin(Plugin):
    """
    OutputFormatPlugins are responsible for converting an OEB document
    (OPF+HTML) into an output ebook.
    The OEB document can be assumed to be encoded in UTF-8.
    The main action happens in :meth:`convert`.
    """
    type = _("Conversion Output")
    can_be_disabled = False
    supported_platforms = ["windows", "osx", "linux"]
    #: The file type (extension without leading period) that this
    #: plugin outputs
    file_type = None
    #: Options shared by all Input format plugins. Do not override
    #: in sub-classes. Use :attr:`options` instead. Every option must be an
    #: instance of :class:`OptionRecommendation`.
    common_options = {
        OptionRecommendation(
            name="pretty_print",
            recommended_value=False,
            level=OptionRecommendation.LOW,
            help=_(
                "If specified, the output plugin will try to create output "
                "that is as human readable as possible. May not have any effect "
                "for some output plugins."
            ),
        ),
    }
    #: Options to customize the behavior of this plugin. Every option must be an
    #: instance of :class:`OptionRecommendation`.
    options = set()
    #: A set of 3-tuples of the form
    #: (option_name, recommended_value, recommendation_level)
    recommendations = set()
    @property
    def description(self):
        return _("Convert ebooks to the %s format") % self.file_type
    def __init__(self, *args):
        Plugin.__init__(self, *args)
        # Progress reporting is a no-op until the pipeline installs a reporter.
        self.report_progress = DummyReporter()
    def convert(self, oeb_book, output, input_plugin, opts, log):
        """
        Render the contents of `oeb_book` (which is an instance of
        :class:`calibre.ebooks.oeb.OEBBook` to the file specified by output.
        :param output: Either a file like object or a string. If it is a string
            it is the path to a directory that may or may not exist. The output
            plugin should write its output into that directory. If it is a file like
            object, the output plugin should write its output into the file.
        :param input_plugin: The input plugin that was used at the beginning of
            the conversion pipeline.
        :param opts: Conversion options. Guaranteed to have attributes
            corresponding to the OptionRecommendations of this plugin.
        :param log: The logger. Print debug/info messages etc. using this.
        """
        raise NotImplementedError
    @property
    def is_periodical(self):
        # NOTE: relies on self.oeb being set externally by the conversion
        # pipeline; it is not assigned anywhere in this class.
        # `unicode` only exists on Python 2; referencing it unconditionally
        # made this property raise NameError on Python 3.
        try:
            text_type = unicode  # noqa: F821 (Python 2 only)
        except NameError:
            text_type = str
        return self.oeb.metadata.publication_type and text_type(
            self.oeb.metadata.publication_type[0]
        ).startswith("periodical:")
    def specialize_css_for_output(self, log, opts, item, stylizer):
        """
        Can be used to make changes to the css during the CSS flattening
        process.
        :param item: The item (HTML file) being processed
        :param stylizer: A Stylizer object containing the flattened styles for
                         item. You can get the style for any element by
                         stylizer.style(element).
        """
        pass
    def gui_configuration_widget(
        self, parent, get_option_by_name, get_option_help, db, book_id=None
    ):
        """
        Called to create the widget used for configuring this plugin in the
        calibre GUI. The widget must be an instance of the PluginWidget class.
        See the builtin output plugins for examples.
        """
        name = self.name.lower().replace(" ", "_")
        return gui_configuration_widget(
            name,
            parent,
            get_option_by_name,
            get_option_help,
            db,
            book_id,
            for_output=True,
        )
|
backend | oaireader | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from oaipmh.metadata import MetadataReader
# Reader for BASE's base_dc metadata format: plain Dublin Core elements plus
# BASE-specific enrichment fields, all nested under a base_dc:dc root.
base_dc_reader = MetadataReader(
    fields={
        "title": ("textList", "base_dc:dc/dc:title/text()"),
        "creator": ("textList", "base_dc:dc/dc:creator/text()"),
        "subject": ("textList", "base_dc:dc/dc:subject/text()"),
        "description": ("textList", "base_dc:dc/dc:description/text()"),
        "publisher": ("textList", "base_dc:dc/dc:publisher/text()"),
        "contributor": ("textList", "base_dc:dc/dc:contributor/text()"),
        "date": ("textList", "base_dc:dc/dc:date/text()"),
        "type": ("textList", "base_dc:dc/dc:type/text()"),
        "format": ("textList", "base_dc:dc/dc:format/text()"),
        "identifier": ("textList", "base_dc:dc/dc:identifier/text()"),
        "source": ("textList", "base_dc:dc/dc:source/text()"),
        "language": ("textList", "base_dc:dc/dc:language/text()"),
        "relation": ("textList", "base_dc:dc/dc:relation/text()"),
        "rights": ("textList", "base_dc:dc/dc:rights/text()"),
        "autoclasscode": ("textList", "base_dc:dc/base_dc:autoclasscode/text()"),
        "classcode": ("textList", "base_dc:dc/base_dc:classcode/text()"),
        "collection": ("textList", "base_dc:dc/base_dc:collection/text()"),
        "collname": ("textList", "base_dc:dc/base_dc:collname/text()"),
        "continent": ("textList", "base_dc:dc/base_dc:continent/text()"),
        "country": ("textList", "base_dc:dc/base_dc:country/text()"),
        # Was "oai_dc:dc/dc:coverage/text()" — every other field is looked up
        # under the base_dc:dc root, so the oai_dc:dc path could never match
        # in a base_dc record. Use the base_dc:dc root like its siblings.
        "coverage": ("textList", "base_dc:dc/dc:coverage/text()"),
        "lang": ("textList", "base_dc:dc/base_dc:lang/text()"),
        "link": ("textList", "base_dc:dc/base_dc:link/text()"),
        "oa": ("textList", "base_dc:dc/base_dc:oa/text()"),
        "rightsnorm": ("textList", "base_dc:dc/base_dc:rightsnorm/text()"),
        "typenorm": ("textList", "base_dc:dc/base_dc:typenorm/text()"),
        "year": ("textList", "base_dc:dc/base_dc:year/text()"),
    },
    namespaces={
        "base_dc": "http://oai.base-search.net/base_dc/",
        "oai_dc": "http://www.openarchives.org/OAI/2.0/oai_dc/",
        "dc": "http://purl.org/dc/elements/1.1/",
    },
)
|
browser | eventfilter | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Event handling for a browser tab."""
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.qt import machinery
from qutebrowser.qt.core import QEvent, QObject, Qt, QTimer
from qutebrowser.qt.widgets import QWidget
from qutebrowser.utils import log, message, qtutils, usertypes
class ChildEventFilter(QObject):
    """An event filter re-adding TabEventFilter on ChildEvent.
    This is needed because QtWebEngine likes to randomly change its
    focusProxy...
    FIXME:qtwebengine Add a test for this happening
    Attributes:
        _filter: The event filter to install.
        _widget: The widget expected to send out childEvents.
    """
    def __init__(self, *, eventfilter, widget=None, parent=None):
        super().__init__(parent)
        self._filter = eventfilter
        self._widget = widget
    def eventFilter(self, obj, event):
        """Act on ChildAdded events.
        Always returns False: events are observed here, never consumed.
        """
        if event.type() == QEvent.Type.ChildAdded:
            child = event.child()
            if not isinstance(child, QWidget):
                # Can e.g. happen when dragging text
                log.misc.debug(f"Ignoring new child {qtutils.qobj_repr(child)}")
                return False
            log.misc.debug(
                f"{qtutils.qobj_repr(obj)} got new child {qtutils.qobj_repr(child)}, "
                "installing filter"
            )
            # Additional sanity check, but optional
            if self._widget is not None:
                assert obj is self._widget
                # WORKAROUND for unknown Qt bug losing focus on child change
                # Carry on keyboard focus to the new child if:
                # - This is a child event filter on a tab (self._widget is not None)
                # - We find an old existing child which is a QQuickWidget and is
                #   currently focused.
                # - We're using QtWebEngine >= 6.4 (older versions are not affected)
                children = [
                    c
                    for c in self._widget.findChildren(
                        QWidget, "", Qt.FindChildOption.FindDirectChildrenOnly
                    )
                    if c is not child
                    and c.hasFocus()
                    and c.metaObject() is not None
                    and c.metaObject().className() == "QQuickWidget"
                ]
                if children:
                    log.misc.debug("Focusing new child")
                    child.setFocus()
            child.installEventFilter(self._filter)
        elif event.type() == QEvent.Type.ChildRemoved:
            child = event.child()
            log.misc.debug(
                f"{qtutils.qobj_repr(obj)}: removed child {qtutils.qobj_repr(child)}"
            )
        return False
class TabEventFilter(QObject):
    """Handle mouse/keyboard events on a tab.
    Attributes:
        _tab: The browsertab object this filter is installed on.
        _handlers: A dict of handler functions for the handled events.
        _ignore_wheel_event: Whether to ignore the next wheelEvent.
        _check_insertmode_on_release: Whether an insertmode check should be
                                      done when the mouse is released.
    """
    def __init__(self, tab, *, parent=None):
        super().__init__(parent)
        self._tab = tab
        # Dispatch table: event type -> handler returning True to filter.
        self._handlers = {
            QEvent.Type.MouseButtonPress: self._handle_mouse_press,
            QEvent.Type.MouseButtonRelease: self._handle_mouse_release,
            QEvent.Type.Wheel: self._handle_wheel,
        }
        self._ignore_wheel_event = False
        self._check_insertmode_on_release = False
    def _handle_mouse_press(self, e):
        """Handle pressing of a mouse button.
        Args:
            e: The QMouseEvent.
        Return:
            True if the event should be filtered, False otherwise.
        """
        is_rocker_gesture = (
            config.val.input.mouse.rocker_gestures
            and e.buttons() == Qt.MouseButton.LeftButton | Qt.MouseButton.RightButton
        )
        if (
            e.button() in [Qt.MouseButton.XButton1, Qt.MouseButton.XButton2]
            or is_rocker_gesture
        ):
            if not machinery.IS_QT6:
                self._mousepress_backforward(e)
            # FIXME:qt6 For some reason, this doesn't filter the action on
            # Qt 6...
            return True
        # Skip the wheel event that follows this press (see _handle_wheel).
        self._ignore_wheel_event = True
        pos = e.pos()
        if pos.x() < 0 or pos.y() < 0:
            log.mouse.warning("Ignoring invalid click at {}".format(pos))
            return False
        if e.button() != Qt.MouseButton.NoButton:
            self._tab.elements.find_at_pos(pos, self._mousepress_insertmode_cb)
        return False
    def _handle_mouse_release(self, _e):
        """Handle releasing of a mouse button.
        Args:
            _e: The QMouseEvent (unused).
        Return:
            True if the event should be filtered, False otherwise.
        """
        # We want to make sure we check the focus element after the WebView is
        # updated completely.
        QTimer.singleShot(0, self._mouserelease_insertmode)
        return False
    def _handle_wheel(self, e):
        """Zoom on Ctrl-Mousewheel.
        Args:
            e: The QWheelEvent.
        Return:
            True if the event should be filtered, False otherwise.
        """
        if self._ignore_wheel_event:
            # See https://github.com/qutebrowser/qutebrowser/issues/395
            self._ignore_wheel_event = False
            return True
        # Don't allow scrolling while hinting
        mode = modeman.instance(self._tab.win_id).mode
        if mode == usertypes.KeyMode.hint:
            return True
        elif e.modifiers() & Qt.KeyboardModifier.ControlModifier:
            if mode == usertypes.KeyMode.passthrough:
                return False
            divider = config.val.zoom.mouse_divider
            if divider == 0:
                # Disable mouse zooming
                return True
            factor = self._tab.zoom.factor() + (e.angleDelta().y() / divider)
            if factor < 0:
                return True
            perc = int(100 * factor)
            message.info(f"Zoom level: {perc}%", replace="zoom-level")
            self._tab.zoom.set_factor(factor)
            return True
        return False
    def _mousepress_insertmode_cb(self, elem):
        """Check if the clicked element is editable."""
        if elem is None:
            # Something didn't work out, let's find the focus element after
            # a mouse release.
            log.mouse.debug("Got None element, scheduling check on " "mouse release")
            self._check_insertmode_on_release = True
            return
        if elem.is_editable():
            log.mouse.debug("Clicked editable element!")
            if config.val.input.insert_mode.auto_enter:
                modeman.enter(
                    self._tab.win_id,
                    usertypes.KeyMode.insert,
                    "click",
                    only_if_normal=True,
                )
        else:
            log.mouse.debug("Clicked non-editable element!")
            if config.val.input.insert_mode.auto_leave:
                modeman.leave(
                    self._tab.win_id, usertypes.KeyMode.insert, "click", maybe=True
                )
    def _mouserelease_insertmode(self):
        """If we have an insertmode check scheduled, handle it."""
        if not self._check_insertmode_on_release:
            return
        self._check_insertmode_on_release = False
        def mouserelease_insertmode_cb(elem):
            """Callback which gets called from JS."""
            if elem is None:
                log.mouse.debug("Element vanished!")
                return
            if elem.is_editable():
                log.mouse.debug("Clicked editable element (delayed)!")
                modeman.enter(
                    self._tab.win_id,
                    usertypes.KeyMode.insert,
                    "click-delayed",
                    only_if_normal=True,
                )
            else:
                log.mouse.debug("Clicked non-editable element (delayed)!")
                if config.val.input.insert_mode.auto_leave:
                    modeman.leave(
                        self._tab.win_id,
                        usertypes.KeyMode.insert,
                        "click-delayed",
                        maybe=True,
                    )
        self._tab.elements.find_focused(mouserelease_insertmode_cb)
    def _mousepress_backforward(self, e):
        """Handle back/forward mouse button presses.
        Args:
            e: The QMouseEvent.
        Return:
            None. The caller ignores the return value; the early return only
            skips navigation when back/forward buttons are disabled.
        """
        if not config.val.input.mouse.back_forward_buttons and e.button() in [
            Qt.MouseButton.XButton1,
            Qt.MouseButton.XButton2,
        ]:
            # Back and forward on mice are disabled
            return
        if e.button() in [Qt.MouseButton.XButton1, Qt.MouseButton.LeftButton]:
            # Back button on mice which have it, or rocker gesture
            if self._tab.history.can_go_back():
                self._tab.history.back()
            else:
                message.error("At beginning of history.")
        elif e.button() in [Qt.MouseButton.XButton2, Qt.MouseButton.RightButton]:
            # Forward button on mice which have it, or rocker gesture
            if self._tab.history.can_go_forward():
                self._tab.history.forward()
            else:
                message.error("At end of history.")
    def eventFilter(self, obj, event):
        """Filter events going to a QWeb(Engine)View.
        Return:
            True if the event should be filtered, False otherwise.
        """
        evtype = event.type()
        if evtype not in self._handlers:
            return False
        if obj is not self._tab.private_api.event_target():
            log.mouse.debug("Ignoring {} to {}".format(event.__class__.__name__, obj))
            return False
        return self._handlers[evtype](event)
|
doc | conf | # -*- coding: utf-8 -*-
#
# Tribler documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 15 19:59:31 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import asyncio
import logging
import os
import sys

logging.basicConfig(level=logging.INFO)
logging.info("Start to execute conf.py")

# Resolve the repository root relative to this file (doc/conf.py -> repo root).
# BUGFIX: this previously used os.path.dirname(__name__); __name__ is the
# module name string, so the resulting path depended on the current working
# directory instead of the location of conf.py (the commented-out line below
# shows the original intent).
# root_dir = Path(__file__).parent.parent
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

# Make the Tribler sources importable for autodoc.
tribler_source_dirs = [
    os.path.join(root_dir, "src"),
    os.path.join(root_dir, "doc"),
]
for source_dir in tribler_source_dirs:
    logging.info(f"Add source dir: {source_dir}")
    sys.path.append(str(source_dir))

from tribler.core.utilities.dependencies import Scope, get_dependencies

# pylint: disable=wrong-import-position
from tribler.core.utilities.patch_import import patch_import

# extra modules that can't be extracted from the `requirements-core.txt` file
modules = [
    "validate",  # automatically installed alongside with configobj 5.x, should be fixed in configobj 6.0
]
modules.extend(set(get_dependencies(scope=Scope.core)))

with patch_import(modules):
    # Import the REST root endpoint with heavy dependencies mocked out, then
    # wrap add_endpoint so that endpoints that should not appear in the
    # generated API docs are silently dropped.
    from tribler.core.components.restapi.rest.root_endpoint import RootEndpoint

    add_endpoint = RootEndpoint.add_endpoint
    RootEndpoint.add_endpoint = (
        lambda self, path, ep: add_endpoint(self, path, ep)
        if path not in ["/ipv8", "/market", "/wallets"]
        else None
    )

    # Extract Swagger docs
    from extract_swagger import extract_swagger

    asyncio.run(extract_swagger("restapi/swagger.yaml"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.viewcode",
    "sphinxcontrib.httpdomain",
    "sphinx.ext.autosummary",
    "sphinxcontrib.openapi",
]
# Generate autosummary stub pages automatically at build time.
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Tribler"
copyright = "2020, Tribler devs"
author = "Tribler devs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "7.5"
# The full version, including alpha/beta/rc tags.
release = "7.5.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns about language=None and falls back to "en";
# setting it to "en" explicitly may be intended -- confirm before changing.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "_templates", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Tribler v6.6'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "Triblerdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "Tribler.tex", "Tribler Documentation", "Tribler devs", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "tribler", "Tribler Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "Tribler",
        "Tribler Documentation",
        author,
        "Tribler",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
config | scripts | __license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2022 The OctoPrint Project - Released under terms of the AGPLv3 License"
from typing import Dict, Optional
from octoprint.schema import BaseModel
from octoprint.vendor.with_attrs_docs import with_attrs_docs
@with_attrs_docs
class GcodeScriptsConfig(BaseModel):
    # GCODE scripts sent to the printer around connection and print
    # life-cycle events.  A value of ``None`` means "no script configured".
    # NOTE(review): the defaults below use {% snippet %}/{% if %} markup, so
    # the values are presumably rendered as templates before being sent --
    # confirm against the consumer of this config.
    afterPrinterConnected: Optional[str] = None
    beforePrinterDisconnected: Optional[str] = None
    beforePrintStarted: Optional[str] = None
    # Default script: disable motors, all heaters and the fan on cancel.
    afterPrintCancelled: Optional[
        str
    ] = "; disable motors\nM84\n\n;disable all heaters\n{% snippet 'disable_hotends' %}\n{% snippet 'disable_bed' %}\n;disable fan\nM106 S0"
    afterPrintDone: Optional[str] = None
    beforePrintPaused: Optional[str] = None
    afterPrintResumed: Optional[str] = None
    beforeToolChange: Optional[str] = None
    afterToolChange: Optional[str] = None
    # Reusable fragments referenced from the scripts above via
    # {% snippet '<name>' %}.
    snippets: Dict[str, str] = {
        "disable_hotends": "{% if printer_profile.extruder.sharedNozzle %}M104 T0 S0\n{% else %}{% for tool in range(printer_profile.extruder.count) %}M104 T{{ tool }} S0\n{% endfor %}{% endif %}",
        "disable_bed": "{% if printer_profile.heatedBed %}M140 S0\n{% endif %}",
    }
@with_attrs_docs
class ScriptsConfig(BaseModel):
    # Container for all script settings; currently only GCODE scripts exist.
    gcode: GcodeScriptsConfig = GcodeScriptsConfig()
|
accounts | MegasharesCom | # -*- coding: utf-8 -*-
import re
import time
from ..base.account import BaseAccount
class MegasharesCom(BaseAccount):
    """Megashares.com account plugin for pyLoad."""

    __name__ = "MegasharesCom"
    __type__ = "account"
    __version__ = "0.11"
    __status__ = "testing"

    __description__ = """Megashares.com account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]

    #: Captures the premium expiry date from the account page, e.g. "Jan 1, 2020".
    VALID_UNTIL_PATTERN = (
        r'<p class="premium_info_box">Period Ends: (\w{3} \d{1,2}, \d{4})</p>'
    )

    def grab_info(self, user, password, data):
        """Fetch the account status page and parse premium/expiry info.

        Returns:
            dict with ``validuntil`` (epoch seconds, or -1 if it could not
            be parsed), ``trafficleft`` (always -1, i.e. unknown/unlimited)
            and the ``premium`` flag.
        """
        html = self.load("http://d01.megashares.com/myms.php")

        # The upgrade link is only shown to non-premium accounts.
        premium = ">Premium Upgrade<" not in html

        validuntil = -1  # unknown until successfully parsed below
        try:
            timestr = re.search(self.VALID_UNTIL_PATTERN, html).group(1)
            self.log_debug(timestr)
            validuntil = time.mktime(time.strptime(timestr, "%b %d, %Y"))
        except Exception as exc:
            # Parsing is best-effort: a missing/changed pattern leaves
            # validuntil at -1 instead of failing the whole account check.
            self.log_error(
                exc, exc_info=self.pyload.debug > 1, stack_info=self.pyload.debug > 2
            )

        return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}

    def signin(self, user, password, data):
        """Log in and verify the session by finding the username in the response."""
        html = self.load(
            "http://d01.megashares.com/myms_login.php",
            post={
                "httpref": "",
                "myms_login": "Login",
                "mymslogin_name": user,
                "mymspassword": password,
            },
        )

        if '<span class="b ml">{}</span>'.format(user) not in html:
            self.fail_login()
|
PyObjCTest | test_cfxmlnode | from CoreFoundation import *
from PyObjCTools.TestSupport import *
# Python 2/3 compatibility: ``long`` was removed in Python 3, so fall back
# to ``int`` when the name does not exist.
try:
    long
except NameError:
    long = int
class TestXMLNode(TestCase):
    # NOTE: This doesn't actually test the API
    def testTypes(self):
        # CFXMLNodeRef must be exposed as a CoreFoundation type.
        self.assertIsCFType(CFXMLNodeRef)

    def testConstants(self):
        # Pin the numeric values of the CFXMLNode/CFXMLEntity constants.
        self.assertEqual(kCFXMLNodeCurrentVersion, 1)
        self.assertEqual(kCFXMLNodeTypeDocument, 1)
        self.assertEqual(kCFXMLNodeTypeElement, 2)
        self.assertEqual(kCFXMLNodeTypeAttribute, 3)
        self.assertEqual(kCFXMLNodeTypeProcessingInstruction, 4)
        self.assertEqual(kCFXMLNodeTypeComment, 5)
        self.assertEqual(kCFXMLNodeTypeText, 6)
        self.assertEqual(kCFXMLNodeTypeCDATASection, 7)
        self.assertEqual(kCFXMLNodeTypeDocumentFragment, 8)
        self.assertEqual(kCFXMLNodeTypeEntity, 9)
        self.assertEqual(kCFXMLNodeTypeEntityReference, 10)
        self.assertEqual(kCFXMLNodeTypeDocumentType, 11)
        self.assertEqual(kCFXMLNodeTypeWhitespace, 12)
        self.assertEqual(kCFXMLNodeTypeNotation, 13)
        self.assertEqual(kCFXMLNodeTypeElementTypeDeclaration, 14)
        self.assertEqual(kCFXMLNodeTypeAttributeListDeclaration, 15)
        self.assertEqual(kCFXMLEntityTypeParameter, 0)
        self.assertEqual(kCFXMLEntityTypeParsedInternal, 1)
        self.assertEqual(kCFXMLEntityTypeParsedExternal, 2)
        self.assertEqual(kCFXMLEntityTypeUnparsed, 3)
        self.assertEqual(kCFXMLEntityTypeCharacter, 4)

    def testStructs(self):
        # NOTE: the early return below deliberately disables this test --
        # the struct wrappers it exercises are not available; the remaining
        # body is kept as documentation of the expected attributes.
        return
        o = CFXMLElementInfo()
        self.assertHasAttr(o, "attributes")
        self.assertHasAttr(o, "attributeOrder")
        self.assertHasAttr(o, "isEmpty")
        self.assertHasAttr(o, "_reserved")
        o = CFXMLProcessingInstructionInfo()
        self.assertHasAttr(o, "dataString")
        o = CFXMLDocumentInfo()
        self.assertHasAttr(o, "sourceURL")
        self.assertHasAttr(o, "encoding")
        o = CFXMLExternalID()
        self.assertHasAttr(o, "systemID")
        self.assertHasAttr(o, "publicID")
        o = CFXMLDocumentTypeInfo()
        self.assertHasAttr(o, "externalID")
        o = CFXMLNotationInfo()
        self.assertHasAttr(o, "externalID")
        o = CFXMLElementTypeDeclarationInfo()
        self.assertHasAttr(o, "contentDescription")
        o = CFXMLAttributeDeclarationInfo()
        self.assertHasAttr(o, "attributeName")
        self.assertHasAttr(o, "typeString")
        self.assertHasAttr(o, "defaultString")
        o = CFXMLAttributeListDeclarationInfo()
        self.assertHasAttr(o, "numberOfAttributes")
        self.assertHasAttr(o, "attributes")
        o = CFXMLEntityInfo()
        self.assertHasAttr(o, "entityType")
        self.assertHasAttr(o, "replacementText")
        self.assertHasAttr(o, "entityID")
        self.assertHasAttr(o, "notationName")
        o = CFXMLEntityReferenceInfo()
        self.assertHasAttr(o, "entityType")

    def testFunctions(self):
        self.assertIsInstance(CFXMLNodeGetTypeID(), (int, long))
        # CFXMLNodeCreate: requires manual binding
        # CFXMLNodeGetInfoPtr: likewise
        # Add tests that create all valid types of nodes with their additional info and
        # try to extract the information.
        self.assertResultIsCFRetained(CFXMLNodeCreateCopy)
        self.assertResultIsCFRetained(CFXMLTreeCreateWithNode)

    @expectedFailure
    def testMissingWrappers(self):
        self.fail("CFXML requires manual wrappers (low prio)")
# Allow running this test module directly via the PyObjC test runner.
if __name__ == "__main__":
    main()
|
basemodels | Facenet | import os
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
Activation,
BatchNormalization,
Concatenate,
Conv2D,
Dense,
Dropout,
GlobalAveragePooling2D,
Input,
Lambda,
MaxPooling2D,
add,
)
from tensorflow.keras.models import Model
def scaling(x, scale):
    """Multiply *x* elementwise by the constant *scale*.

    Used inside ``Lambda`` layers below to apply the residual scaling
    factor of the Inception-ResNet blocks.
    """
    scaled = x * scale
    return scaled
def InceptionResNetV2():
inputs = Input(shape=(160, 160, 3))
x = Conv2D(32, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_1a_3x3")(
inputs
)
x = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Conv2d_1a_3x3_BatchNorm",
)(x)
x = Activation("relu", name="Conv2d_1a_3x3_Activation")(x)
x = Conv2D(32, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_2a_3x3")(
x
)
x = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Conv2d_2a_3x3_BatchNorm",
)(x)
x = Activation("relu", name="Conv2d_2a_3x3_Activation")(x)
x = Conv2D(64, 3, strides=1, padding="same", use_bias=False, name="Conv2d_2b_3x3")(
x
)
x = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Conv2d_2b_3x3_BatchNorm",
)(x)
x = Activation("relu", name="Conv2d_2b_3x3_Activation")(x)
x = MaxPooling2D(3, strides=2, name="MaxPool_3a_3x3")(x)
x = Conv2D(80, 1, strides=1, padding="valid", use_bias=False, name="Conv2d_3b_1x1")(
x
)
x = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Conv2d_3b_1x1_BatchNorm",
)(x)
x = Activation("relu", name="Conv2d_3b_1x1_Activation")(x)
x = Conv2D(
192, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_4a_3x3"
)(x)
x = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Conv2d_4a_3x3_BatchNorm",
)(x)
x = Activation("relu", name="Conv2d_4a_3x3_Activation")(x)
x = Conv2D(
256, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_4b_3x3"
)(x)
x = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Conv2d_4b_3x3_BatchNorm",
)(x)
x = Activation("relu", name="Conv2d_4b_3x3_Activation")(x)
# 5x Block35 (Inception-ResNet-A block):
branch_0 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_1_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_1_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block35_1_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_1_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_1_Branch_1_Conv2d_0b_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0b_3x3_Activation")(
branch_1
)
branch_2 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_1_Branch_2_Conv2d_0a_1x1",
)(x)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0a_1x1_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_1_Branch_2_Conv2d_0b_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0b_3x3_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_1_Branch_2_Conv2d_0c_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0c_3x3_Activation")(
branch_2
)
branches = [branch_0, branch_1, branch_2]
mixed = Concatenate(axis=3, name="Block35_1_Concatenate")(branches)
up = Conv2D(
256, 1, strides=1, padding="same", use_bias=True, name="Block35_1_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(
up
)
x = add([x, up])
x = Activation("relu", name="Block35_1_Activation")(x)
branch_0 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_2_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_2_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block35_2_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_2_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_2_Branch_1_Conv2d_0b_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0b_3x3_Activation")(
branch_1
)
branch_2 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_2_Branch_2_Conv2d_0a_1x1",
)(x)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0a_1x1_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_2_Branch_2_Conv2d_0b_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0b_3x3_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_2_Branch_2_Conv2d_0c_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0c_3x3_Activation")(
branch_2
)
branches = [branch_0, branch_1, branch_2]
mixed = Concatenate(axis=3, name="Block35_2_Concatenate")(branches)
up = Conv2D(
256, 1, strides=1, padding="same", use_bias=True, name="Block35_2_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(
up
)
x = add([x, up])
x = Activation("relu", name="Block35_2_Activation")(x)
branch_0 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_3_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_3_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block35_3_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_3_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_3_Branch_1_Conv2d_0b_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0b_3x3_Activation")(
branch_1
)
branch_2 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_3_Branch_2_Conv2d_0a_1x1",
)(x)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0a_1x1_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_3_Branch_2_Conv2d_0b_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0b_3x3_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_3_Branch_2_Conv2d_0c_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0c_3x3_Activation")(
branch_2
)
branches = [branch_0, branch_1, branch_2]
mixed = Concatenate(axis=3, name="Block35_3_Concatenate")(branches)
up = Conv2D(
256, 1, strides=1, padding="same", use_bias=True, name="Block35_3_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(
up
)
x = add([x, up])
x = Activation("relu", name="Block35_3_Activation")(x)
branch_0 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_4_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_4_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block35_4_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_4_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_4_Branch_1_Conv2d_0b_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0b_3x3_Activation")(
branch_1
)
branch_2 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_4_Branch_2_Conv2d_0a_1x1",
)(x)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0a_1x1_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_4_Branch_2_Conv2d_0b_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0b_3x3_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_4_Branch_2_Conv2d_0c_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0c_3x3_Activation")(
branch_2
)
branches = [branch_0, branch_1, branch_2]
mixed = Concatenate(axis=3, name="Block35_4_Concatenate")(branches)
up = Conv2D(
256, 1, strides=1, padding="same", use_bias=True, name="Block35_4_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(
up
)
x = add([x, up])
x = Activation("relu", name="Block35_4_Activation")(x)
branch_0 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_5_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_5_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block35_5_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_5_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_5_Branch_1_Conv2d_0b_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0b_3x3_Activation")(
branch_1
)
branch_2 = Conv2D(
32,
1,
strides=1,
padding="same",
use_bias=False,
name="Block35_5_Branch_2_Conv2d_0a_1x1",
)(x)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0a_1x1_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_5_Branch_2_Conv2d_0b_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0b_3x3_Activation")(
branch_2
)
branch_2 = Conv2D(
32,
3,
strides=1,
padding="same",
use_bias=False,
name="Block35_5_Branch_2_Conv2d_0c_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0c_3x3_Activation")(
branch_2
)
branches = [branch_0, branch_1, branch_2]
mixed = Concatenate(axis=3, name="Block35_5_Concatenate")(branches)
up = Conv2D(
256, 1, strides=1, padding="same", use_bias=True, name="Block35_5_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(
up
)
x = add([x, up])
x = Activation("relu", name="Block35_5_Activation")(x)
# Mixed 6a (Reduction-A block):
branch_0 = Conv2D(
384,
3,
strides=2,
padding="valid",
use_bias=False,
name="Mixed_6a_Branch_0_Conv2d_1a_3x3",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation")(
branch_0
)
branch_1 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Mixed_6a_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
3,
strides=1,
padding="same",
use_bias=False,
name="Mixed_6a_Branch_1_Conv2d_0b_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation")(
branch_1
)
branch_1 = Conv2D(
256,
3,
strides=2,
padding="valid",
use_bias=False,
name="Mixed_6a_Branch_1_Conv2d_1a_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation")(
branch_1
)
branch_pool = MaxPooling2D(
3, strides=2, padding="valid", name="Mixed_6a_Branch_2_MaxPool_1a_3x3"
)(x)
branches = [branch_0, branch_1, branch_pool]
x = Concatenate(axis=3, name="Mixed_6a")(branches)
# 10x Block17 (Inception-ResNet-B block):
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_1_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_1_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_1_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_1_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_1_Branch_1_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_1_Branch_1_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_1_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_1_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_1_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_2_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_2_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_2_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_2_Branch_2_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_2_Branch_2_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_2_Branch_2_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_2_Branch_2_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_2_Branch_2_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_2_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_2_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_2_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_3_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_3_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_3_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_3_Branch_3_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_3_Branch_3_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_3_Branch_3_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_3_Branch_3_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_3_Branch_3_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_3_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_3_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_3_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_4_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_4_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_4_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_4_Branch_4_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_4_Branch_4_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_4_Branch_4_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_4_Branch_4_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_4_Branch_4_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_4_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_4_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_4_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_5_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_5_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_5_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_5_Branch_5_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_5_Branch_5_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_5_Branch_5_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_5_Branch_5_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_5_Branch_5_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_5_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_5_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_5_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_6_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_6_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_6_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_6_Branch_6_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_6_Branch_6_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_6_Branch_6_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_6_Branch_6_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_6_Branch_6_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_6_Branch_6_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_6_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_6_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_6_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_7_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_7_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_7_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_7_Branch_7_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_7_Branch_7_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_7_Branch_7_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_7_Branch_7_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_7_Branch_7_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_7_Branch_7_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_7_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_7_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_7_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_8_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_8_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_8_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_8_Branch_8_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_8_Branch_8_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_8_Branch_8_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_8_Branch_8_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_8_Branch_8_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_8_Branch_8_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_8_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_8_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_8_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_9_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_9_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_9_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_9_Branch_9_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_9_Branch_9_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_9_Branch_9_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_9_Branch_9_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_9_Branch_9_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_9_Branch_9_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_9_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_9_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_9_Activation")(x)
branch_0 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_10_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_10_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block17_10_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
128,
1,
strides=1,
padding="same",
use_bias=False,
name="Block17_10_Branch_10_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_10_Branch_10_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[1, 7],
strides=1,
padding="same",
use_bias=False,
name="Block17_10_Branch_10_Conv2d_0b_1x7",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_10_Branch_10_Conv2d_0b_1x7_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0b_1x7_Activation")(
branch_1
)
branch_1 = Conv2D(
128,
[7, 1],
strides=1,
padding="same",
use_bias=False,
name="Block17_10_Branch_10_Conv2d_0c_7x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block17_10_Branch_10_Conv2d_0c_7x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0c_7x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block17_10_Concatenate")(branches)
up = Conv2D(
896, 1, strides=1, padding="same", use_bias=True, name="Block17_10_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
x = add([x, up])
x = Activation("relu", name="Block17_10_Activation")(x)
# Mixed 7a (Reduction-B block): 8 x 8 x 2080
branch_0 = Conv2D(
256,
1,
strides=1,
padding="same",
use_bias=False,
name="Mixed_7a_Branch_0_Conv2d_0a_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation")(
branch_0
)
branch_0 = Conv2D(
384,
3,
strides=2,
padding="valid",
use_bias=False,
name="Mixed_7a_Branch_0_Conv2d_1a_3x3",
)(branch_0)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation")(
branch_0
)
branch_1 = Conv2D(
256,
1,
strides=1,
padding="same",
use_bias=False,
name="Mixed_7a_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
256,
3,
strides=2,
padding="valid",
use_bias=False,
name="Mixed_7a_Branch_1_Conv2d_1a_3x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation")(
branch_1
)
branch_2 = Conv2D(
256,
1,
strides=1,
padding="same",
use_bias=False,
name="Mixed_7a_Branch_2_Conv2d_0a_1x1",
)(x)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation")(
branch_2
)
branch_2 = Conv2D(
256,
3,
strides=1,
padding="same",
use_bias=False,
name="Mixed_7a_Branch_2_Conv2d_0b_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation")(
branch_2
)
branch_2 = Conv2D(
256,
3,
strides=2,
padding="valid",
use_bias=False,
name="Mixed_7a_Branch_2_Conv2d_1a_3x3",
)(branch_2)
branch_2 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm",
)(branch_2)
branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation")(
branch_2
)
branch_pool = MaxPooling2D(
3, strides=2, padding="valid", name="Mixed_7a_Branch_3_MaxPool_1a_3x3"
)(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = Concatenate(axis=3, name="Mixed_7a")(branches)
# 5x Block8 (Inception-ResNet-C block):
branch_0 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_1_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_1_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block8_1_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_1_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[1, 3],
strides=1,
padding="same",
use_bias=False,
name="Block8_1_Branch_1_Conv2d_0b_1x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0b_1x3_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[3, 1],
strides=1,
padding="same",
use_bias=False,
name="Block8_1_Branch_1_Conv2d_0c_3x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0c_3x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block8_1_Concatenate")(branches)
up = Conv2D(
1792, 1, strides=1, padding="same", use_bias=True, name="Block8_1_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
x = add([x, up])
x = Activation("relu", name="Block8_1_Activation")(x)
branch_0 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_2_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_2_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block8_2_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_2_Branch_2_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[1, 3],
strides=1,
padding="same",
use_bias=False,
name="Block8_2_Branch_2_Conv2d_0b_1x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_2_Branch_2_Conv2d_0b_1x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0b_1x3_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[3, 1],
strides=1,
padding="same",
use_bias=False,
name="Block8_2_Branch_2_Conv2d_0c_3x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_2_Branch_2_Conv2d_0c_3x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0c_3x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block8_2_Concatenate")(branches)
up = Conv2D(
1792, 1, strides=1, padding="same", use_bias=True, name="Block8_2_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
x = add([x, up])
x = Activation("relu", name="Block8_2_Activation")(x)
branch_0 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_3_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_3_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block8_3_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_3_Branch_3_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[1, 3],
strides=1,
padding="same",
use_bias=False,
name="Block8_3_Branch_3_Conv2d_0b_1x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_3_Branch_3_Conv2d_0b_1x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0b_1x3_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[3, 1],
strides=1,
padding="same",
use_bias=False,
name="Block8_3_Branch_3_Conv2d_0c_3x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_3_Branch_3_Conv2d_0c_3x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0c_3x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block8_3_Concatenate")(branches)
up = Conv2D(
1792, 1, strides=1, padding="same", use_bias=True, name="Block8_3_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
x = add([x, up])
x = Activation("relu", name="Block8_3_Activation")(x)
branch_0 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_4_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_4_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block8_4_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_4_Branch_4_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[1, 3],
strides=1,
padding="same",
use_bias=False,
name="Block8_4_Branch_4_Conv2d_0b_1x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_4_Branch_4_Conv2d_0b_1x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0b_1x3_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[3, 1],
strides=1,
padding="same",
use_bias=False,
name="Block8_4_Branch_4_Conv2d_0c_3x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_4_Branch_4_Conv2d_0c_3x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0c_3x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block8_4_Concatenate")(branches)
up = Conv2D(
1792, 1, strides=1, padding="same", use_bias=True, name="Block8_4_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
x = add([x, up])
x = Activation("relu", name="Block8_4_Activation")(x)
branch_0 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_5_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_5_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block8_5_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_5_Branch_5_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[1, 3],
strides=1,
padding="same",
use_bias=False,
name="Block8_5_Branch_5_Conv2d_0b_1x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_5_Branch_5_Conv2d_0b_1x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0b_1x3_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[3, 1],
strides=1,
padding="same",
use_bias=False,
name="Block8_5_Branch_5_Conv2d_0c_3x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_5_Branch_5_Conv2d_0c_3x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0c_3x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block8_5_Concatenate")(branches)
up = Conv2D(
1792, 1, strides=1, padding="same", use_bias=True, name="Block8_5_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
x = add([x, up])
x = Activation("relu", name="Block8_5_Activation")(x)
branch_0 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_6_Branch_0_Conv2d_1x1",
)(x)
branch_0 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_6_Branch_0_Conv2d_1x1_BatchNorm",
)(branch_0)
branch_0 = Activation("relu", name="Block8_6_Branch_0_Conv2d_1x1_Activation")(
branch_0
)
branch_1 = Conv2D(
192,
1,
strides=1,
padding="same",
use_bias=False,
name="Block8_6_Branch_1_Conv2d_0a_1x1",
)(x)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0a_1x1_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[1, 3],
strides=1,
padding="same",
use_bias=False,
name="Block8_6_Branch_1_Conv2d_0b_1x3",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0b_1x3_Activation")(
branch_1
)
branch_1 = Conv2D(
192,
[3, 1],
strides=1,
padding="same",
use_bias=False,
name="Block8_6_Branch_1_Conv2d_0c_3x1",
)(branch_1)
branch_1 = BatchNormalization(
axis=3,
momentum=0.995,
epsilon=0.001,
scale=False,
name="Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm",
)(branch_1)
branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0c_3x1_Activation")(
branch_1
)
branches = [branch_0, branch_1]
mixed = Concatenate(axis=3, name="Block8_6_Concatenate")(branches)
up = Conv2D(
1792, 1, strides=1, padding="same", use_bias=True, name="Block8_6_Conv2d_1x1"
)(mixed)
up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 1})(up)
x = add([x, up])
# Classification block
x = GlobalAveragePooling2D(name="AvgPool")(x)
x = Dropout(1.0 - 0.8, name="Dropout")(x)
# Bottleneck
x = Dense(128, use_bias=False, name="Bottleneck")(x)
x = BatchNormalization(
momentum=0.995, epsilon=0.001, scale=False, name="Bottleneck_BatchNorm"
)(x)
# Create model
model = Model(inputs, x, name="inception_resnet_v1")
return model
def loadModel(weights_path="/data/models/face/facenet_weights.h5"):
    """Build the FaceNet embedding model and load its pretrained weights.

    Parameters
    ----------
    weights_path : str, optional
        Filesystem path of the pretrained Facenet weights (HDF5).  Defaults
        to the original hard-coded location, so existing callers that pass
        no argument are unaffected.

    Returns
    -------
    keras.Model
        The model returned by ``InceptionResNetV2()`` with weights loaded.

    Raises
    ------
    FileNotFoundError
        If ``weights_path`` does not point at an existing file.
    """
    model = InceptionResNetV2()
    # Fail fast with a clear error instead of letting load_weights raise a
    # more opaque I/O error on a missing file.  `not os.path.isfile(...)`
    # replaces the non-idiomatic `os.path.isfile(...) != True`.
    if not os.path.isfile(weights_path):
        raise FileNotFoundError("Facenet weights does not exist")
    model.load_weights(weights_path)
    return model
|
writer8 | toc | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = "GPL v3"
__copyright__ = "2012, Kovid Goyal <kovid@kovidgoyal.net>"
__docformat__ = "restructuredtext en"
from calibre.ebooks.oeb.base import XHTML, XHTML_MIME, XHTML_NS, XPath, urlnormalize
from lxml import etree
# Fallback title for the generated inline ToC page when the conversion
# options do not provide one (``__`` is calibre's translation builtin).
DEFAULT_TITLE = __("Table of Contents")
# Skeleton XHTML document for the inline ToC.  It is filled in with
# str.format() — hence the doubled braces in the CSS rules — and the empty
# <ul> is populated by TOCAdder.process_toc_node.  The body id marks the
# page so find_previous_calibre_inline_toc can recognize it later.
TEMPLATE = """
<html xmlns="{xhtmlns}">
<head>
<title>{title}</title>
<style type="text/css">
li {{ list-style-type: none }}
a {{ text-decoration: none }}
a:hover {{ color: red }}
{extra_css}
{embed_css}
</style>
</head>
<body id="calibre_generated_inline_toc">
<h2>{title}</h2>
<ul>
</ul>
</body>
</html>
"""
def find_previous_calibre_inline_toc(oeb):
    """Return the manifest item of a calibre-generated inline ToC page in
    *oeb*, or None when the book does not contain one.

    The page is recognized by the ``calibre_generated_inline_toc`` body id
    that the generator stamps on it.
    """
    if "toc" not in oeb.guide:
        return None
    href = urlnormalize(oeb.guide["toc"].href.partition("#")[0])
    if href not in oeb.manifest.hrefs:
        return None
    item = oeb.manifest.hrefs[href]
    data = item.data
    is_inline_toc = hasattr(data, "xpath") and XPath(
        '//h:body[@id="calibre_generated_inline_toc"]'
    )(data)
    return item if is_inline_toc else None
class TOCAdder(object):
    """Generate an in-line XHTML Table of Contents page and splice it into
    the book's spine, manifest and guide.

    The generated page carries the body id ``calibre_generated_inline_toc``
    (via TEMPLATE) so a later conversion can locate and replace it with
    ``find_previous_calibre_inline_toc``.
    """

    def __init__(
        self, oeb, opts, replace_previous_inline_toc=False, ignore_existing_toc=False
    ):
        """Create the inline ToC for *oeb*, if one is needed.

        :param oeb: the OEB book object being converted
        :param opts: conversion options (``toc_title``, ``mobi_toc_at_start``,
            ``no_inline_toc``, ``extra_css``, ...)
        :param replace_previous_inline_toc: reuse a previously
            calibre-generated inline ToC page instead of adding a new item
        :param ignore_existing_toc: discard any pre-existing 'toc' guide entry
        """
        self.oeb, self.opts, self.log = oeb, opts, oeb.log
        self.title = opts.toc_title or DEFAULT_TITLE
        self.at_start = opts.mobi_toc_at_start
        self.generated_item = None
        self.added_toc_guide_entry = False
        self.has_toc = oeb.toc and oeb.toc.count() > 1

        self.tocitem = tocitem = None
        # BUG FIX: the original tested the truthiness of the *function object*
        # find_previous_calibre_inline_toc (always True), which made the
        # replace_previous_inline_toc parameter dead.  Test the parameter.
        if replace_previous_inline_toc:
            tocitem = self.tocitem = find_previous_calibre_inline_toc(oeb)
        if ignore_existing_toc and "toc" in oeb.guide:
            oeb.guide.remove("toc")

        if "toc" in oeb.guide:
            # Remove spurious toc entry from guide if it is not in spine or it
            # does not have any hyperlinks
            href = urlnormalize(oeb.guide["toc"].href.partition("#")[0])
            if href in oeb.manifest.hrefs:
                item = oeb.manifest.hrefs[href]
                if hasattr(item.data, "xpath") and XPath("//h:a[@href]")(item.data):
                    # Usable existing ToC: make sure it is in the spine and
                    # keep it; nothing further to generate.
                    if oeb.spine.index(item) < 0:
                        oeb.spine.add(item, linear=False)
                    return
                elif self.has_toc:
                    oeb.guide.remove("toc")
            else:
                oeb.guide.remove("toc")

        if (
            not self.has_toc
            or "toc" in oeb.guide
            or opts.no_inline_toc
            or getattr(opts, "mobi_passthrough", False)
        ):
            return

        self.log.info("\tGenerating in-line ToC")
        # Carry over embedded-font CSS rules so the ToC page matches the
        # typography of the rest of the book.
        embed_css = ""
        s = getattr(oeb, "store_embed_font_rules", None)
        if getattr(s, "body_font_family", None):
            css = [x.cssText for x in s.rules] + [
                "body { font-family: %s }" % s.body_font_family
            ]
            embed_css = "\n\n".join(css)

        root = etree.fromstring(
            TEMPLATE.format(
                xhtmlns=XHTML_NS,
                title=self.title,
                embed_css=embed_css,
                extra_css=(opts.extra_css or ""),
            )
        )
        parent = XPath("//h:ul")(root)[0]
        parent.text = "\n\t"
        for child in self.oeb.toc:
            self.process_toc_node(child, parent)

        if tocitem is not None:
            # Reuse the previously generated ToC item, replacing its content
            # and its (possibly stale) spine position.
            href = tocitem.href
            if oeb.spine.index(tocitem) > -1:
                oeb.spine.remove(tocitem)
            tocitem.data = root
        else:
            # Renamed from `id` to avoid shadowing the builtin.
            item_id, href = oeb.manifest.generate("contents", "contents.xhtml")
            tocitem = self.generated_item = oeb.manifest.add(
                item_id, href, XHTML_MIME, data=root
            )
        if self.at_start:
            oeb.spine.insert(0, tocitem, linear=True)
        else:
            oeb.spine.add(tocitem, linear=False)

        oeb.guide.add("toc", "Table of Contents", href)
        # BUG FIX: record that this adder created the guide entry so that
        # remove_generated_toc() can undo it.  The flag was never set before,
        # leaving the cleanup branch unreachable and a dangling guide entry
        # pointing at a removed manifest item.
        self.added_toc_guide_entry = True

    def process_toc_node(self, toc, parent, level=0):
        """Append *toc* as an ``<li><a>`` under *parent*, recursing into its
        children with a nested ``<ul>``.

        The ``\\n`` + tab text/tail nodes keep the generated markup readable.
        """
        li = parent.makeelement(XHTML("li"))
        li.tail = "\n" + ("\t" * level)
        parent.append(li)

        href = toc.href
        if self.tocitem is not None and href:
            # Re-anchor the link relative to the reused ToC item's location.
            href = self.tocitem.relhref(toc.href)
        a = parent.makeelement(XHTML("a"), href=href or "#")
        a.text = toc.title
        li.append(a)

        if toc.count() > 0:
            parent = li.makeelement(XHTML("ul"))
            li.append(parent)
            a.tail = "\n" + ("\t" * level)
            parent.text = "\n" + ("\t" * (level + 1))
            parent.tail = "\n" + ("\t" * level)
            for child in toc:
                self.process_toc_node(child, parent, level + 1)

    def remove_generated_toc(self):
        """Undo the work of ``__init__``: drop the generated manifest item
        and the guide entry this adder created (if any)."""
        if self.generated_item is not None:
            self.oeb.manifest.remove(self.generated_item)
            self.generated_item = None
        if self.added_toc_guide_entry:
            self.oeb.guide.remove("toc")
            self.added_toc_guide_entry = False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.